Dataset columns:
- before: string, lengths 0 to 955k
- after: string, lengths 0 to 877k
- repo: string, lengths 1 to 74
- type: string, 1 class value
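Each row below pairs a "before" snippet, in which helper calls are wrapped in <DeepExtract> markers showing their inlined bodies, with the corresponding "after" snippet where the markers are removed, plus the source "repo" name and a "type" label (every row shown is positive). As a minimal sketch of how rows with this column layout could be consumed, assuming they are exported as JSON Lines with exactly these four fields (the file name below is hypothetical, not part of the original data):

```python
# Minimal sketch (assumption): the table exported as JSON Lines, one object per
# row with the four fields from the column summary above. The file name
# "deepextract_pairs.jsonl" is a placeholder.
import json

with open("deepextract_pairs.jsonl", encoding="utf-8") as fh:
    rows = [json.loads(line) for line in fh if line.strip()]

for row in rows[:3]:
    # Each row: code before inlining, the inlined result, source repo, and label.
    print(row["repo"], row["type"], len(row["before"]), len(row["after"]))
```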
@torch.no_grad() def test_model_on_batch(self, batch_x, batch_y, batch_tt): self.backend.eval() <DeepExtract> batch_x = self.convert_batch_x_on_default_device(batch_x) batch_y = self.convert_batch_y_on_default_device(batch_y) batch_tt = self.convert_batch_tt_on_default_device(batch_tt) (outputs_classification, outputs_translation) = self.backend(batch_x, batch_tt) losses = [] total_loss = None for i in range(len(batch_y)): feature_outputs = outputs_classification[i].view(-1, outputs_classification[i].shape[2]) feature_batch_y = batch_y[i].view(-1) loss = self.classification_criterion(feature_outputs, feature_batch_y) losses.append(loss.item()) if total_loss is None: total_loss = loss else: total_loss = total_loss + loss if len(batch_tt) > 0: outputs_translation[0][0] = outputs_translation[0][0].contiguous() translation_output = outputs_translation[0][0].view(-1, outputs_translation[0][0].shape[2]) translation_batch_tt = batch_tt[0][0].view(-1) loss = self.translation_criterion(translation_output, translation_batch_tt) losses.append(loss.item()) if total_loss is None: total_loss = loss else: total_loss = total_loss + loss (losses, total_loss) = (losses, total_loss) </DeepExtract> return losses
@torch.no_grad() def test_model_on_batch(self, batch_x, batch_y, batch_tt): self.backend.eval() batch_x = self.convert_batch_x_on_default_device(batch_x) batch_y = self.convert_batch_y_on_default_device(batch_y) batch_tt = self.convert_batch_tt_on_default_device(batch_tt) (outputs_classification, outputs_translation) = self.backend(batch_x, batch_tt) losses = [] total_loss = None for i in range(len(batch_y)): feature_outputs = outputs_classification[i].view(-1, outputs_classification[i].shape[2]) feature_batch_y = batch_y[i].view(-1) loss = self.classification_criterion(feature_outputs, feature_batch_y) losses.append(loss.item()) if total_loss is None: total_loss = loss else: total_loss = total_loss + loss if len(batch_tt) > 0: outputs_translation[0][0] = outputs_translation[0][0].contiguous() translation_output = outputs_translation[0][0].view(-1, outputs_translation[0][0].shape[2]) translation_batch_tt = batch_tt[0][0].view(-1) loss = self.translation_criterion(translation_output, translation_batch_tt) losses.append(loss.item()) if total_loss is None: total_loss = loss else: total_loss = total_loss + loss (losses, total_loss) = (losses, total_loss) return losses
disambiguate
positive
def radial_forward(self, ell, rank, data_in, data_out): if timing: start_time = time.time() if rank == 0: np.copyto(data_out, self.forward_component(ell, 0, data_in[0])) return <DeepExtract> degs = ball.spins(rank) </DeepExtract> N = self.N_max - self.N_min(ell - self.R_max) + 1 data_in = np.einsum('ij,j...->i...', self.Q[ell, rank].T, data_in) for i in range(3 ** rank): <DeepExtract> N = self.N_max - self.N_min(ell - self.R_max) + 1 if ell + degs[i] >= 0: data_out[i * N:(i + 1) * N] = self.pushW[ell + degs[i]][:N, :].dot(data_in[i]) else: shape = np.array(data_in[i].shape) shape[0] = N data_out[i * N:(i + 1) * N] = np.zeros(shape) </DeepExtract> if timing: end_time = time.time() self.radial_transform_time += end_time - start_time
def radial_forward(self, ell, rank, data_in, data_out): if timing: start_time = time.time() if rank == 0: np.copyto(data_out, self.forward_component(ell, 0, data_in[0])) return degs = ball.spins(rank) N = self.N_max - self.N_min(ell - self.R_max) + 1 data_in = np.einsum('ij,j...->i...', self.Q[ell, rank].T, data_in) for i in range(3 ** rank): N = self.N_max - self.N_min(ell - self.R_max) + 1 if ell + degs[i] >= 0: data_out[i * N:(i + 1) * N] = self.pushW[ell + degs[i]][:N, :].dot(data_in[i]) else: shape = np.array(data_in[i].shape) shape[0] = N data_out[i * N:(i + 1) * N] = np.zeros(shape) if timing: end_time = time.time() self.radial_transform_time += end_time - start_time
dedalus
positive
def __init__(self): <DeepExtract> data = list(sys.stdin.read().strip().split()) adj = self.DeBrujin(int(data[0]), data[1:]) self.adj = adj </DeepExtract> self.path = EulerianPath(self.adj).calculateEulerianPath() print(self.reconstructFromPath(self.path))
def __init__(self): data = list(sys.stdin.read().strip().split()) adj = self.DeBrujin(int(data[0]), data[1:]) self.adj = adj self.path = EulerianPath(self.adj).calculateEulerianPath() print(self.reconstructFromPath(self.path))
Coursera-Bioinformatics
positive
def metric(self, metric_name): """ Reads a metric config from the database. Parameters ---------- metric_name : :obj:`str` name of the metric to read Returns ------- :obj:`GraspQualityConfig` configuration of grasp metric, None if metric does not exist """ if self.metrics is None: logging.warning('No metrics available') return None if metric_name not in self.metrics.keys(): logging.warning('Metric %s does not exist. Aborting...' % metric_name) return None <DeepExtract> if metric_name in self.metrics.keys(): metric_group = self.metrics[metric_name] metric_group = None </DeepExtract> metric_config = {} for key in metric_group.keys(): metric_config[key] = {} for (k, v) in metric_group[key].attrs.iteritems(): metric_config[key][k] = v for (key, value) in metric_group.attrs.iteritems(): metric_config[key] = value return metric_config
def metric(self, metric_name): """ Reads a metric config from the database. Parameters ---------- metric_name : :obj:`str` name of the metric to read Returns ------- :obj:`GraspQualityConfig` configuration of grasp metric, None if metric does not exist """ if self.metrics is None: logging.warning('No metrics available') return None if metric_name not in self.metrics.keys(): logging.warning('Metric %s does not exist. Aborting...' % metric_name) return None if metric_name in self.metrics.keys(): metric_group = self.metrics[metric_name] metric_group = None metric_config = {} for key in metric_group.keys(): metric_config[key] = {} for (k, v) in metric_group[key].attrs.iteritems(): metric_config[key][k] = v for (key, value) in metric_group.attrs.iteritems(): metric_config[key] = value return metric_config
dex-net
positive
@patch('publicweb.views.Organization.objects', new=MagicMock(spec=Organization.objects, get=create_fake_organization, filter=create_fake_organization)) def test_notification_settings_view_uses_a_form(self): user = UserFactory.build(id=1) <DeepExtract> organization = OrganizationFactory.build(**kwargs) </DeepExtract> request = RequestFactory().get('/') request.user = user context = UserNotificationSettings.as_view()(request, org_slug=organization.slug).context_data self.assertIn('form', context)
@patch('publicweb.views.Organization.objects', new=MagicMock(spec=Organization.objects, get=create_fake_organization, filter=create_fake_organization)) def test_notification_settings_view_uses_a_form(self): user = UserFactory.build(id=1) organization = OrganizationFactory.build(**kwargs) request = RequestFactory().get('/') request.user = user context = UserNotificationSettings.as_view()(request, org_slug=organization.slug).context_data self.assertIn('form', context)
econsensus
positive
def __sort(lst, low, high): if high <= low: return (lt, i, gt, val) = (low, low + 1, high, lst[low]) while i <= gt: if lst[i] < val: (lst[lt], lst[i]) = (lst[i], lst[lt]) lt += 1 i += 1 elif lst[i] > val: (lst[gt], lst[i]) = (lst[i], lst[gt]) gt -= 1 else: i += 1 <DeepExtract> if lt - 1 <= low: return (lt, i, gt, val) = (low, low + 1, lt - 1, lst[low]) while i <= gt: if lst[i] < val: (lst[lt], lst[i]) = (lst[i], lst[lt]) lt += 1 i += 1 elif lst[i] > val: (lst[gt], lst[i]) = (lst[i], lst[gt]) gt -= 1 else: i += 1 self.__sort(lst, low, lt - 1) self.__sort(lst, gt + 1, lt - 1) </DeepExtract> <DeepExtract> if high <= gt + 1: return (lt, i, gt, val) = (gt + 1, gt + 1 + 1, high, lst[gt + 1]) while i <= gt: if lst[i] < val: (lst[lt], lst[i]) = (lst[i], lst[lt]) lt += 1 i += 1 elif lst[i] > val: (lst[gt], lst[i]) = (lst[i], lst[gt]) gt -= 1 else: i += 1 self.__sort(lst, gt + 1, lt - 1) self.__sort(lst, gt + 1, high) </DeepExtract>
def __sort(lst, low, high): if high <= low: return (lt, i, gt, val) = (low, low + 1, high, lst[low]) while i <= gt: if lst[i] < val: (lst[lt], lst[i]) = (lst[i], lst[lt]) lt += 1 i += 1 elif lst[i] > val: (lst[gt], lst[i]) = (lst[i], lst[gt]) gt -= 1 else: i += 1 if lt - 1 <= low: return (lt, i, gt, val) = (low, low + 1, lt - 1, lst[low]) while i <= gt: if lst[i] < val: (lst[lt], lst[i]) = (lst[i], lst[lt]) lt += 1 i += 1 elif lst[i] > val: (lst[gt], lst[i]) = (lst[i], lst[gt]) gt -= 1 else: i += 1 self.__sort(lst, low, lt - 1) self.__sort(lst, gt + 1, lt - 1) if high <= gt + 1: return (lt, i, gt, val) = (gt + 1, gt + 1 + 1, high, lst[gt + 1]) while i <= gt: if lst[i] < val: (lst[lt], lst[i]) = (lst[i], lst[lt]) lt += 1 i += 1 elif lst[i] > val: (lst[gt], lst[i]) = (lst[i], lst[gt]) gt -= 1 else: i += 1 self.__sort(lst, gt + 1, lt - 1) self.__sort(lst, gt + 1, high)
algorithms-sedgewick-python
positive
def call(self, v, k, q, mask): """call function""" batch_size = tf.shape(q)[0] q = self.wq(q) k = self.wk(k) v = self.wv(v) <DeepExtract> q = tf.reshape(q, (batch_size, -1, self.num_heads, self.depth)) q = tf.transpose(q, perm=[0, 2, 1, 3]) </DeepExtract> <DeepExtract> k = tf.reshape(k, (batch_size, -1, self.num_heads, self.depth)) k = tf.transpose(k, perm=[0, 2, 1, 3]) </DeepExtract> <DeepExtract> v = tf.reshape(v, (batch_size, -1, self.num_heads, self.depth)) v = tf.transpose(v, perm=[0, 2, 1, 3]) </DeepExtract> (scaled_attention, attention_weights) = self.attention(q, k, v, mask) scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model)) output = self.dense(concat_attention) return (output, attention_weights)
def call(self, v, k, q, mask): """call function""" batch_size = tf.shape(q)[0] q = self.wq(q) k = self.wk(k) v = self.wv(v) q = tf.reshape(q, (batch_size, -1, self.num_heads, self.depth)) q = tf.transpose(q, perm=[0, 2, 1, 3]) k = tf.reshape(k, (batch_size, -1, self.num_heads, self.depth)) k = tf.transpose(k, perm=[0, 2, 1, 3]) v = tf.reshape(v, (batch_size, -1, self.num_heads, self.depth)) v = tf.transpose(v, perm=[0, 2, 1, 3]) (scaled_attention, attention_weights) = self.attention(q, k, v, mask) scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model)) output = self.dense(concat_attention) return (output, attention_weights)
athena
positive
def _insert(r, key, *values): <DeepExtract> redis_key = self._name + key </DeepExtract> r.hset(self._name, key, redis_key) r.rpush(redis_key, *values)
def _insert(r, key, *values): redis_key = self._name + key r.hset(self._name, key, redis_key) r.rpush(redis_key, *values)
datasketch
positive
def testA1_SubscriptionsWithNoRouteTosynthesize(self): <DeepExtract> worker = mock.Mock(spec=Worker) worker.name = 'Worker-1' worker.enqueue = mock.Mock() worker1 = worker </DeepExtract> <DeepExtract> for rt in [RT1, RT2]: subscribe = Subscription(afi, safi, rt, worker1) self.routeTableManager.enqueue(subscribe) </DeepExtract> self._wait() <DeepExtract> for match in [MATCH1, MATCH2]: subs = self.routeTableManager.getWorkerSubscriptions(worker1) self.assertIn(match, subs, 'Subscription not found') </DeepExtract>
def testA1_SubscriptionsWithNoRouteTosynthesize(self): worker = mock.Mock(spec=Worker) worker.name = 'Worker-1' worker.enqueue = mock.Mock() worker1 = worker for rt in [RT1, RT2]: subscribe = Subscription(afi, safi, rt, worker1) self.routeTableManager.enqueue(subscribe) self._wait() for match in [MATCH1, MATCH2]: subs = self.routeTableManager.getWorkerSubscriptions(worker1) self.assertIn(match, subs, 'Subscription not found')
bagpipe-bgp
positive
def _get_lr_scheduler(args, kv): if 'lr_factor' not in args or args.lr_factor >= 1: return (args.lr, None) <DeepExtract> epoch_size = math.ceil(int(args.num_examples / kv.num_workers) / args.batch_size) </DeepExtract> begin_epoch = args.load_epoch if args.load_epoch else 0 if 'pow' in args.lr_step_epochs: lr = args.lr max_up = args.num_epochs * epoch_size pwr = float(re.sub('pow[- ]*', '', args.lr_step_epochs)) poly_sched = mx.lr_scheduler.PolyScheduler(max_up, lr, pwr) return (lr, poly_sched) step_epochs = [int(l) for l in args.lr_step_epochs.split(',')] lr = args.lr for s in step_epochs: if begin_epoch >= s: lr *= args.lr_factor if lr != args.lr: logging.info('Adjust learning rate to %e for epoch %d', lr, begin_epoch) steps = [epoch_size * (x - begin_epoch) for x in step_epochs if x - begin_epoch > 0] if steps: return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor, base_lr=args.lr)) else: return (lr, None)
def _get_lr_scheduler(args, kv): if 'lr_factor' not in args or args.lr_factor >= 1: return (args.lr, None) epoch_size = math.ceil(int(args.num_examples / kv.num_workers) / args.batch_size) begin_epoch = args.load_epoch if args.load_epoch else 0 if 'pow' in args.lr_step_epochs: lr = args.lr max_up = args.num_epochs * epoch_size pwr = float(re.sub('pow[- ]*', '', args.lr_step_epochs)) poly_sched = mx.lr_scheduler.PolyScheduler(max_up, lr, pwr) return (lr, poly_sched) step_epochs = [int(l) for l in args.lr_step_epochs.split(',')] lr = args.lr for s in step_epochs: if begin_epoch >= s: lr *= args.lr_factor if lr != args.lr: logging.info('Adjust learning rate to %e for epoch %d', lr, begin_epoch) steps = [epoch_size * (x - begin_epoch) for x in step_epochs if x - begin_epoch > 0] if steps: return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor, base_lr=args.lr)) else: return (lr, None)
byteps
positive
def __init__(self, stmt_data, auth_info, payload_sha2s): if self.__class__.__name__ == 'StatementManager': <DeepExtract> if auth_info['agent']: stmt_data['authority'] = auth_info['agent'] stmt_data['full_statement']['authority'] = auth_info['agent'].to_dict() elif 'authority' in stmt_data: auth_info['agent'] = stmt_data['authority'] = Agent.objects.retrieve_or_create(**stmt_data['full_statement']['authority'])[0] else: auth_info['agent'] = None </DeepExtract> <DeepExtract> if self.__class__.__name__ == 'StatementManager': stmt_data['voided'] = False self.build_verb(stmt_data) self.build_statement_object(auth_info, stmt_data) stmt_data['actor'] = Agent.objects.retrieve_or_create(**stmt_data['actor'])[0] self.build_context(stmt_data) self.build_result(stmt_data) if 'timestamp' in stmt_data: stmt_data['timestamp'] = convert_to_datetime_object(stmt_data['timestamp']) attachment_data = stmt_data.pop('attachments', None) if self.__class__.__name__ == 'StatementManager': self.model_object = self.build_statement(auth_info, stmt_data) else: self.model_object = self.build_substatement(auth_info, stmt_data) if attachment_data: self.build_attachments(auth_info, attachment_data, payload_sha2s) </DeepExtract>
def __init__(self, stmt_data, auth_info, payload_sha2s): if self.__class__.__name__ == 'StatementManager': if auth_info['agent']: stmt_data['authority'] = auth_info['agent'] stmt_data['full_statement']['authority'] = auth_info['agent'].to_dict() elif 'authority' in stmt_data: auth_info['agent'] = stmt_data['authority'] = Agent.objects.retrieve_or_create(**stmt_data['full_statement']['authority'])[0] else: auth_info['agent'] = None if self.__class__.__name__ == 'StatementManager': stmt_data['voided'] = False self.build_verb(stmt_data) self.build_statement_object(auth_info, stmt_data) stmt_data['actor'] = Agent.objects.retrieve_or_create(**stmt_data['actor'])[0] self.build_context(stmt_data) self.build_result(stmt_data) if 'timestamp' in stmt_data: stmt_data['timestamp'] = convert_to_datetime_object(stmt_data['timestamp']) attachment_data = stmt_data.pop('attachments', None) if self.__class__.__name__ == 'StatementManager': self.model_object = self.build_statement(auth_info, stmt_data) else: self.model_object = self.build_substatement(auth_info, stmt_data) if attachment_data: self.build_attachments(auth_info, attachment_data, payload_sha2s)
ADL_LRS
positive
def _get_viewer(self, mode): self.viewer = self._viewers.get(mode) if self.viewer is None: if mode == 'human': self.viewer = mujoco_py.MjViewer(self.sim) elif mode == 'rgb_array': self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, device_id=-1) <DeepExtract> pass </DeepExtract> self._viewers[mode] = self.viewer return self.viewer
def _get_viewer(self, mode): self.viewer = self._viewers.get(mode) if self.viewer is None: if mode == 'human': self.viewer = mujoco_py.MjViewer(self.sim) elif mode == 'rgb_array': self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, device_id=-1) pass self._viewers[mode] = self.viewer return self.viewer
DQN-DDPG_Stock_Trading
positive
def _check_valid(sym, s): if sym.orig_type not in (INT, HEX): return True base = 10 if sym.orig_type == INT else 16 try: int(s, base) except ValueError: <DeepExtract> _msg('Error', "'{}' is a malformed {} value".format(s, TYPE_TO_STR[sym.orig_type])) </DeepExtract> return False for (low_sym, high_sym, cond) in sym.ranges: if expr_value(cond): low_s = low_sym.str_value high_s = high_sym.str_value if not int(low_s, base) <= int(s, base) <= int(high_s, base): <DeepExtract> _msg('Error', '{} is outside the range {}-{}'.format(s, low_s, high_s)) </DeepExtract> return False break return True
def _check_valid(sym, s): if sym.orig_type not in (INT, HEX): return True base = 10 if sym.orig_type == INT else 16 try: int(s, base) except ValueError: _msg('Error', "'{}' is a malformed {} value".format(s, TYPE_TO_STR[sym.orig_type])) return False for (low_sym, high_sym, cond) in sym.ranges: if expr_value(cond): low_s = low_sym.str_value high_s = high_sym.str_value if not int(low_s, base) <= int(s, base) <= int(high_s, base): _msg('Error', '{} is outside the range {}-{}'.format(s, low_s, high_s)) return False break return True
cello
positive
@micropython.viper def runtest_idle(count: int, duration_ms: int): leave = int(ticks_ms()) + duration_ms for n in range(count): <DeepExtract> if 0: tms.on() else: tms.off() tck.off() tck.on() </DeepExtract> while int(ticks_ms()) - leave < 0: <DeepExtract> if 0: tms.on() else: tms.off() tck.off() tck.on() </DeepExtract> <DeepExtract> if 1: tms.on() else: tms.off() tck.off() tck.on() </DeepExtract>
@micropython.viper def runtest_idle(count: int, duration_ms: int): leave = int(ticks_ms()) + duration_ms for n in range(count): if 0: tms.on() else: tms.off() tck.off() tck.on() while int(ticks_ms()) - leave < 0: if 0: tms.on() else: tms.off() tck.off() tck.on() if 1: tms.on() else: tms.off() tck.off() tck.on()
esp32ecp5
positive
@property def kernel_object(self) -> 'KernelObject': """ Return the object used to provide addresses to syscalls. Accessing this property will load this object into memory if it was not previously present. """ if self._kernel_object is None: self._kernel_object = KernelObject(self) <DeepExtract> obj_size = self._kernel_object.max_addr - self._kernel_object.min_addr + 1 if self._kernel_object.pic: if self._kernel_object._custom_base_addr is not None and self._is_range_free(self._kernel_object._custom_base_addr, obj_size): base_addr = self._kernel_object._custom_base_addr elif self._kernel_object.linked_base and self._is_range_free(self._kernel_object.linked_base, obj_size): base_addr = self._kernel_object.linked_base elif not self._kernel_object.is_main_bin: base_addr = self._find_safe_rebase_addr(obj_size) else: log.debug('The main binary is a position-independent executable. It is being loaded with a base address of 0x400000.') base_addr = 4194304 self._kernel_object.rebase(base_addr) else: if self._kernel_object._custom_base_addr is not None and self._kernel_object.linked_base != self._kernel_object._custom_base_addr and (not isinstance(self._kernel_object, Blob)): log.warning('%s: base_addr was specified but the object is not PIC. specify force_rebase=True to override', self._kernel_object.binary_basename) base_addr = self._kernel_object.linked_base if not self._is_range_free(self._kernel_object.linked_base, obj_size): raise CLEError(f'Position-DEPENDENT object {self._kernel_object.binary} cannot be loaded at {base_addr:#x}') assert self._kernel_object.mapped_base >= 0 if self._kernel_object.has_memory: assert self._kernel_object.min_addr <= self._kernel_object.max_addr log.info('Mapping %s at %#x', self._kernel_object.binary, base_addr) self.memory.add_backer(base_addr, self._kernel_object.memory) self._kernel_object._is_mapped = True key_bisect_insort_right(self.all_objects, self._kernel_object, keyfunc=lambda o: o.min_addr) </DeepExtract> return self._kernel_object
@property def kernel_object(self) -> 'KernelObject': """ Return the object used to provide addresses to syscalls. Accessing this property will load this object into memory if it was not previously present. """ if self._kernel_object is None: self._kernel_object = KernelObject(self) obj_size = self._kernel_object.max_addr - self._kernel_object.min_addr + 1 if self._kernel_object.pic: if self._kernel_object._custom_base_addr is not None and self._is_range_free(self._kernel_object._custom_base_addr, obj_size): base_addr = self._kernel_object._custom_base_addr elif self._kernel_object.linked_base and self._is_range_free(self._kernel_object.linked_base, obj_size): base_addr = self._kernel_object.linked_base elif not self._kernel_object.is_main_bin: base_addr = self._find_safe_rebase_addr(obj_size) else: log.debug('The main binary is a position-independent executable. It is being loaded with a base address of 0x400000.') base_addr = 4194304 self._kernel_object.rebase(base_addr) else: if self._kernel_object._custom_base_addr is not None and self._kernel_object.linked_base != self._kernel_object._custom_base_addr and (not isinstance(self._kernel_object, Blob)): log.warning('%s: base_addr was specified but the object is not PIC. specify force_rebase=True to override', self._kernel_object.binary_basename) base_addr = self._kernel_object.linked_base if not self._is_range_free(self._kernel_object.linked_base, obj_size): raise CLEError(f'Position-DEPENDENT object {self._kernel_object.binary} cannot be loaded at {base_addr:#x}') assert self._kernel_object.mapped_base >= 0 if self._kernel_object.has_memory: assert self._kernel_object.min_addr <= self._kernel_object.max_addr log.info('Mapping %s at %#x', self._kernel_object.binary, base_addr) self.memory.add_backer(base_addr, self._kernel_object.memory) self._kernel_object._is_mapped = True key_bisect_insort_right(self.all_objects, self._kernel_object, keyfunc=lambda o: o.min_addr) return self._kernel_object
cle
positive
def fgcnn_dnn_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense, config, model_desc): """ FGCNN with DNN as deep classifier """ <DeepExtract> fgcnn_emb_concat_index = counter.next_num('concat_fgcnn_embedding') fgcnn_emb_concat = _concat_embeddings(embeddings, f'concat_fgcnn_embedding_{fgcnn_emb_concat_index}') if fgcnn_emb_concat is None: model_desc.add_net('fgcnn', None, None) fg_output = None fg_inputs = tf.expand_dims(fgcnn_emb_concat, axis=-1) fg_filters = config.fgcnn_params.get('fg_filters', (14, 16)) fg_heights = config.fgcnn_params.get('fg_heights', (7, 7)) fg_pool_heights = config.fgcnn_params.get('fg_pool_heights', (2, 2)) fg_new_feat_filters = config.fgcnn_params.get('fg_new_feat_filters', (2, 2)) new_features = list() for (filters, width, pool, new_filters) in zip(fg_filters, fg_heights, fg_pool_heights, fg_new_feat_filters): (fg_inputs, new_feats) = layers.FGCNN(filters=filters, kernel_height=width, pool_height=pool, new_filters=new_filters)(fg_inputs) new_features.append(new_feats) concat_all_features = Concatenate(axis=1)(new_features + [fgcnn_emb_concat]) model_desc.add_net('fg', fgcnn_emb_concat.shape, concat_all_features.shape) fg_output = concat_all_features </DeepExtract> if fg_output is None: return None if dense_layer is not None: dnn_input = Concatenate()([Flatten()(fg_output), dense_layer]) else: dnn_input = Flatten()(fg_output) <DeepExtract> custom_dnn_fn = config.dnn_params.get('custom_dnn_fn') if custom_dnn_fn is not None: dnn_out = custom_dnn_fn(dnn_input, config.dnn_params, 'fgcnn_dnn' + '_custom') hidden_units = config.dnn_params.get('hidden_units', ((128, 0, True), (64, 0, False))) activation = config.dnn_params.get('activation', 'relu') kernel_initializer = config.dnn_params.get('kernel_initializer', 'he_uniform') kernel_regularizer = config.dnn_params.get('kernel_regularizer') activity_regularizer = config.dnn_params.get('activity_regularizer') if len(hidden_units) <= 0: raise ValueError('[hidden_units] must be a list of tuple([units],[dropout_rate],[use_bn]) and at least one tuple.') index = 1 for (units, dropout, batch_norm) in hidden_units: dnn_input = Dense(units, use_bias=not batch_norm, name=f"{'fgcnn_dnn'}_dense_{index}", kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, activity_regularizer=activity_regularizer)(dnn_input) if batch_norm: dnn_input = BatchNormalization(name=f"{'fgcnn_dnn'}_bn_{index}")(dnn_input) dnn_input = Activation(activation=activation, name=f"{'fgcnn_dnn'}_activation_{index}")(dnn_input) if dropout > 0: dnn_input = Dropout(dropout, name=f"{'fgcnn_dnn'}_dropout_{index}")(dnn_input) index += 1 dnn_out = dnn_input </DeepExtract> model_desc.add_net('fgcnn-ipnn', fg_output.shape, dnn_out.shape) return dnn_out
def fgcnn_dnn_nets(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense, config, model_desc): """ FGCNN with DNN as deep classifier """ fgcnn_emb_concat_index = counter.next_num('concat_fgcnn_embedding') fgcnn_emb_concat = _concat_embeddings(embeddings, f'concat_fgcnn_embedding_{fgcnn_emb_concat_index}') if fgcnn_emb_concat is None: model_desc.add_net('fgcnn', None, None) fg_output = None fg_inputs = tf.expand_dims(fgcnn_emb_concat, axis=-1) fg_filters = config.fgcnn_params.get('fg_filters', (14, 16)) fg_heights = config.fgcnn_params.get('fg_heights', (7, 7)) fg_pool_heights = config.fgcnn_params.get('fg_pool_heights', (2, 2)) fg_new_feat_filters = config.fgcnn_params.get('fg_new_feat_filters', (2, 2)) new_features = list() for (filters, width, pool, new_filters) in zip(fg_filters, fg_heights, fg_pool_heights, fg_new_feat_filters): (fg_inputs, new_feats) = layers.FGCNN(filters=filters, kernel_height=width, pool_height=pool, new_filters=new_filters)(fg_inputs) new_features.append(new_feats) concat_all_features = Concatenate(axis=1)(new_features + [fgcnn_emb_concat]) model_desc.add_net('fg', fgcnn_emb_concat.shape, concat_all_features.shape) fg_output = concat_all_features if fg_output is None: return None if dense_layer is not None: dnn_input = Concatenate()([Flatten()(fg_output), dense_layer]) else: dnn_input = Flatten()(fg_output) custom_dnn_fn = config.dnn_params.get('custom_dnn_fn') if custom_dnn_fn is not None: dnn_out = custom_dnn_fn(dnn_input, config.dnn_params, 'fgcnn_dnn' + '_custom') hidden_units = config.dnn_params.get('hidden_units', ((128, 0, True), (64, 0, False))) activation = config.dnn_params.get('activation', 'relu') kernel_initializer = config.dnn_params.get('kernel_initializer', 'he_uniform') kernel_regularizer = config.dnn_params.get('kernel_regularizer') activity_regularizer = config.dnn_params.get('activity_regularizer') if len(hidden_units) <= 0: raise ValueError('[hidden_units] must be a list of tuple([units],[dropout_rate],[use_bn]) and at least one tuple.') index = 1 for (units, dropout, batch_norm) in hidden_units: dnn_input = Dense(units, use_bias=not batch_norm, name=f"{'fgcnn_dnn'}_dense_{index}", kernel_initializer=kernel_initializer, kernel_regularizer=kernel_regularizer, activity_regularizer=activity_regularizer)(dnn_input) if batch_norm: dnn_input = BatchNormalization(name=f"{'fgcnn_dnn'}_bn_{index}")(dnn_input) dnn_input = Activation(activation=activation, name=f"{'fgcnn_dnn'}_activation_{index}")(dnn_input) if dropout > 0: dnn_input = Dropout(dropout, name=f"{'fgcnn_dnn'}_dropout_{index}")(dnn_input) index += 1 dnn_out = dnn_input model_desc.add_net('fgcnn-ipnn', fg_output.shape, dnn_out.shape) return dnn_out
DeepTables
positive
@contextmanager def prepare_and_wait_for_event(self, *event_types, cond=lambda evt: True): """Context manager that waits for the indicated event(s) like wait_for_event after running. If cond is given, waits until cond(event) is true. Raises a ShutdownError if the core is shutdown while waiting. This also happens when 'shutdown' is in event_types. Compared to wait_for_event this handles the case where a thread waits for an event it itself causes in a thread-safe way. An example from the testsuite is: with self.m.prepare_and_wait_for_event('client_message'): self.m.keypress(key) Using just wait_for_event it would be impossible to ensure the event is caught since it may already have been handled in the interval between keypress(...) running and a subsequent wait_for_event(...) call. """ sema = threading.Semaphore(value=0) @self.event_callback('shutdown') def shutdown_handler(event): sema.release() @self.event_callback(*event_types) def target_handler(evt): if cond(evt): sema.release() yield sema.acquire() <DeepExtract> if self._core_shutdown: raise ShutdownError('libmpv core has been shutdown') </DeepExtract> shutdown_handler.unregister_mpv_events() target_handler.unregister_mpv_events()
@contextmanager def prepare_and_wait_for_event(self, *event_types, cond=lambda evt: True): """Context manager that waits for the indicated event(s) like wait_for_event after running. If cond is given, waits until cond(event) is true. Raises a ShutdownError if the core is shutdown while waiting. This also happens when 'shutdown' is in event_types. Compared to wait_for_event this handles the case where a thread waits for an event it itself causes in a thread-safe way. An example from the testsuite is: with self.m.prepare_and_wait_for_event('client_message'): self.m.keypress(key) Using just wait_for_event it would be impossible to ensure the event is caught since it may already have been handled in the interval between keypress(...) running and a subsequent wait_for_event(...) call. """ sema = threading.Semaphore(value=0) @self.event_callback('shutdown') def shutdown_handler(event): sema.release() @self.event_callback(*event_types) def target_handler(evt): if cond(evt): sema.release() yield sema.acquire() if self._core_shutdown: raise ShutdownError('libmpv core has been shutdown') shutdown_handler.unregister_mpv_events() target_handler.unregister_mpv_events()
BilibiliTools
positive
@pytest.mark.parametrize('space', spaces, ids=[space.__class__.__name__ for space in spaces]) def test_read_from_shared_memory(space): def assert_nested_equal(lhs, rhs, space, n): assert isinstance(rhs, list) if isinstance(space, Tuple): assert isinstance(lhs, tuple) for i in range(len(lhs)): <DeepExtract> assert isinstance([rhs_[i] for rhs_ in rhs], list) if isinstance(lhs[i], (list, tuple)): for i in range(len(lhs[i])): assert_nested_equal(lhs[i][i], [rhs_[i] for rhs_ in [rhs_[i] for rhs_ in rhs]]) elif isinstance(lhs[i], (dict, OrderedDict)): for key in lhs[i].keys(): assert_nested_equal(lhs[i][key], [rhs_[key] for rhs_ in [rhs_[i] for rhs_ in rhs]]) elif isinstance(lhs[i], SynchronizedArray): assert np.all(np.array(lhs[i][:]) == np.stack([rhs_[i] for rhs_ in rhs], axis=0).flatten()) else: raise TypeError('Got unknown type `{0}`.'.format(type(lhs[i]))) </DeepExtract> elif isinstance(space, Dict): assert isinstance(lhs, OrderedDict) for key in lhs.keys(): <DeepExtract> assert isinstance([rhs_[key] for rhs_ in rhs], list) if isinstance(lhs[key], (list, tuple)): for i in range(len(lhs[key])): assert_nested_equal(lhs[key][i], [rhs_[i] for rhs_ in [rhs_[key] for rhs_ in rhs]]) elif isinstance(lhs[key], (dict, OrderedDict)): for key in lhs[key].keys(): assert_nested_equal(lhs[key][key], [rhs_[key] for rhs_ in [rhs_[key] for rhs_ in rhs]]) elif isinstance(lhs[key], SynchronizedArray): assert np.all(np.array(lhs[key][:]) == np.stack([rhs_[key] for rhs_ in rhs], axis=0).flatten()) else: raise TypeError('Got unknown type `{0}`.'.format(type(lhs[key]))) </DeepExtract> elif isinstance(space, _BaseGymSpaces): assert isinstance(lhs, np.ndarray) assert lhs.shape == (n,) + space.shape assert lhs.dtype == space.dtype assert np.all(lhs == np.stack(rhs, axis=0)) else: raise TypeError('Got unknown type `{0}`'.format(type(space))) def write(i, shared_memory, sample): write_to_shared_memory(i, sample, shared_memory, space) shared_memory_n8 = create_shared_memory(space, n=8) memory_view_n8 = read_from_shared_memory(shared_memory_n8, space, n=8) samples = [space.sample() for _ in range(8)] processes = [Process(target=write, args=(i, shared_memory_n8, samples[i])) for i in range(8)] for process in processes: process.start() for process in processes: process.join() <DeepExtract> assert isinstance(samples, list) if isinstance(memory_view_n8, (list, tuple)): for i in range(len(memory_view_n8)): assert_nested_equal(memory_view_n8[i], [rhs_[i] for rhs_ in samples]) elif isinstance(memory_view_n8, (dict, OrderedDict)): for key in memory_view_n8.keys(): assert_nested_equal(memory_view_n8[key], [rhs_[key] for rhs_ in samples]) elif isinstance(memory_view_n8, SynchronizedArray): assert np.all(np.array(memory_view_n8[:]) == np.stack(samples, axis=0).flatten()) else: raise TypeError('Got unknown type `{0}`.'.format(type(memory_view_n8))) </DeepExtract>
@pytest.mark.parametrize('space', spaces, ids=[space.__class__.__name__ for space in spaces]) def test_read_from_shared_memory(space): def assert_nested_equal(lhs, rhs, space, n): assert isinstance(rhs, list) if isinstance(space, Tuple): assert isinstance(lhs, tuple) for i in range(len(lhs)): assert isinstance([rhs_[i] for rhs_ in rhs], list) if isinstance(lhs[i], (list, tuple)): for i in range(len(lhs[i])): assert_nested_equal(lhs[i][i], [rhs_[i] for rhs_ in [rhs_[i] for rhs_ in rhs]]) elif isinstance(lhs[i], (dict, OrderedDict)): for key in lhs[i].keys(): assert_nested_equal(lhs[i][key], [rhs_[key] for rhs_ in [rhs_[i] for rhs_ in rhs]]) elif isinstance(lhs[i], SynchronizedArray): assert np.all(np.array(lhs[i][:]) == np.stack([rhs_[i] for rhs_ in rhs], axis=0).flatten()) else: raise TypeError('Got unknown type `{0}`.'.format(type(lhs[i]))) elif isinstance(space, Dict): assert isinstance(lhs, OrderedDict) for key in lhs.keys(): assert isinstance([rhs_[key] for rhs_ in rhs], list) if isinstance(lhs[key], (list, tuple)): for i in range(len(lhs[key])): assert_nested_equal(lhs[key][i], [rhs_[i] for rhs_ in [rhs_[key] for rhs_ in rhs]]) elif isinstance(lhs[key], (dict, OrderedDict)): for key in lhs[key].keys(): assert_nested_equal(lhs[key][key], [rhs_[key] for rhs_ in [rhs_[key] for rhs_ in rhs]]) elif isinstance(lhs[key], SynchronizedArray): assert np.all(np.array(lhs[key][:]) == np.stack([rhs_[key] for rhs_ in rhs], axis=0).flatten()) else: raise TypeError('Got unknown type `{0}`.'.format(type(lhs[key]))) elif isinstance(space, _BaseGymSpaces): assert isinstance(lhs, np.ndarray) assert lhs.shape == (n,) + space.shape assert lhs.dtype == space.dtype assert np.all(lhs == np.stack(rhs, axis=0)) else: raise TypeError('Got unknown type `{0}`'.format(type(space))) def write(i, shared_memory, sample): write_to_shared_memory(i, sample, shared_memory, space) shared_memory_n8 = create_shared_memory(space, n=8) memory_view_n8 = read_from_shared_memory(shared_memory_n8, space, n=8) samples = [space.sample() for _ in range(8)] processes = [Process(target=write, args=(i, shared_memory_n8, samples[i])) for i in range(8)] for process in processes: process.start() for process in processes: process.join() assert isinstance(samples, list) if isinstance(memory_view_n8, (list, tuple)): for i in range(len(memory_view_n8)): assert_nested_equal(memory_view_n8[i], [rhs_[i] for rhs_ in samples]) elif isinstance(memory_view_n8, (dict, OrderedDict)): for key in memory_view_n8.keys(): assert_nested_equal(memory_view_n8[key], [rhs_[key] for rhs_ in samples]) elif isinstance(memory_view_n8, SynchronizedArray): assert np.all(np.array(memory_view_n8[:]) == np.stack(samples, axis=0).flatten()) else: raise TypeError('Got unknown type `{0}`.'.format(type(memory_view_n8)))
DQN-DDPG_Stock_Trading
positive
def removeQuads(self, quads, ntriples=False): """ Remove enumerated quads from this repository. Each quad can be a list or a tuple of :class:`Value` objects. :param quads: List of quads. Each element can be either a statement or a list or tuple of :class:`Value` objects or strings. :type quads: Iterable[list[string|Value]|tuple[string|Value]] :param ntriples: If ``True``, parts of the quads are assumed to be strings in N-Triples format and are sent to the server without any conversion. :type ntriples: bool """ removeQuads = [] for q in quads: quad = [None] * 4 if ntriples: quad[0] = q[0] quad[1] = q[1] quad[2] = q[2] quad[3] = q[3] elif isinstance(quad, (list, tuple)): predicate = q[1] obj = self.getValueFactory().object_position_term_to_openrdf_term(q[2], predicate=predicate) <DeepExtract> if q[0] is None or isinstance(q[0], basestring): quad[0] = q[0] elif hasattr(q[0], 'toNTriples'): quad[0] = q[0].toNTriples() else: quad[0] = Literal(q[0]).toNTriples() </DeepExtract> <DeepExtract> if predicate is None or isinstance(predicate, basestring): quad[1] = predicate elif hasattr(predicate, 'toNTriples'): quad[1] = predicate.toNTriples() else: quad[1] = Literal(predicate).toNTriples() </DeepExtract> <DeepExtract> if obj is None or isinstance(obj, basestring): quad[2] = obj elif hasattr(obj, 'toNTriples'): quad[2] = obj.toNTriples() else: quad[2] = Literal(obj).toNTriples() </DeepExtract> <DeepExtract> if q[3] is None or isinstance(q[3], basestring): quad[3] = q[3] elif hasattr(q[3], 'toNTriples'): quad[3] = q[3].toNTriples() else: quad[3] = Literal(q[3]).toNTriples() </DeepExtract> else: predicate = q.getPredicate() obj = self.getValueFactory().object_position_term_to_openrdf_term(q.getObject(), predicate=predicate) <DeepExtract> if q.getSubject() is None or isinstance(q.getSubject(), basestring): quad[0] = q.getSubject() elif hasattr(q.getSubject(), 'toNTriples'): quad[0] = q.getSubject().toNTriples() else: quad[0] = Literal(q.getSubject()).toNTriples() </DeepExtract> <DeepExtract> if predicate is None or isinstance(predicate, basestring): quad[1] = predicate elif hasattr(predicate, 'toNTriples'): quad[1] = predicate.toNTriples() else: quad[1] = Literal(predicate).toNTriples() </DeepExtract> <DeepExtract> if obj is None or isinstance(obj, basestring): quad[2] = obj elif hasattr(obj, 'toNTriples'): quad[2] = obj.toNTriples() else: quad[2] = Literal(obj).toNTriples() </DeepExtract> <DeepExtract> if q.getContext() is None or isinstance(q.getContext(), basestring): quad[3] = q.getContext() elif hasattr(q.getContext(), 'toNTriples'): quad[3] = q.getContext().toNTriples() else: quad[3] = Literal(q.getContext()).toNTriples() </DeepExtract> removeQuads.append(quad) self._get_mini_repository().deleteStatements(removeQuads)
def removeQuads(self, quads, ntriples=False): """ Remove enumerated quads from this repository. Each quad can be a list or a tuple of :class:`Value` objects. :param quads: List of quads. Each element can be either a statement or a list or tuple of :class:`Value` objects or strings. :type quads: Iterable[list[string|Value]|tuple[string|Value]] :param ntriples: If ``True``, parts of the quads are assumed to be strings in N-Triples format and are sent to the server without any conversion. :type ntriples: bool """ removeQuads = [] for q in quads: quad = [None] * 4 if ntriples: quad[0] = q[0] quad[1] = q[1] quad[2] = q[2] quad[3] = q[3] elif isinstance(quad, (list, tuple)): predicate = q[1] obj = self.getValueFactory().object_position_term_to_openrdf_term(q[2], predicate=predicate) if q[0] is None or isinstance(q[0], basestring): quad[0] = q[0] elif hasattr(q[0], 'toNTriples'): quad[0] = q[0].toNTriples() else: quad[0] = Literal(q[0]).toNTriples() if predicate is None or isinstance(predicate, basestring): quad[1] = predicate elif hasattr(predicate, 'toNTriples'): quad[1] = predicate.toNTriples() else: quad[1] = Literal(predicate).toNTriples() if obj is None or isinstance(obj, basestring): quad[2] = obj elif hasattr(obj, 'toNTriples'): quad[2] = obj.toNTriples() else: quad[2] = Literal(obj).toNTriples() if q[3] is None or isinstance(q[3], basestring): quad[3] = q[3] elif hasattr(q[3], 'toNTriples'): quad[3] = q[3].toNTriples() else: quad[3] = Literal(q[3]).toNTriples() else: predicate = q.getPredicate() obj = self.getValueFactory().object_position_term_to_openrdf_term(q.getObject(), predicate=predicate) if q.getSubject() is None or isinstance(q.getSubject(), basestring): quad[0] = q.getSubject() elif hasattr(q.getSubject(), 'toNTriples'): quad[0] = q.getSubject().toNTriples() else: quad[0] = Literal(q.getSubject()).toNTriples() if predicate is None or isinstance(predicate, basestring): quad[1] = predicate elif hasattr(predicate, 'toNTriples'): quad[1] = predicate.toNTriples() else: quad[1] = Literal(predicate).toNTriples() if obj is None or isinstance(obj, basestring): quad[2] = obj elif hasattr(obj, 'toNTriples'): quad[2] = obj.toNTriples() else: quad[2] = Literal(obj).toNTriples() if q.getContext() is None or isinstance(q.getContext(), basestring): quad[3] = q.getContext() elif hasattr(q.getContext(), 'toNTriples'): quad[3] = q.getContext().toNTriples() else: quad[3] = Literal(q.getContext()).toNTriples() removeQuads.append(quad) self._get_mini_repository().deleteStatements(removeQuads)
agraph-python
positive
def getOrganizations(apiKey): endpoint = '/organizations' <DeepExtract> if p_retry > API_MAX_RETRIES: if FLAG_REQUEST_VERBOSE: print('ERROR: Reached max retries') (success, errors, headers, response) = (False, None, None, None) bearerString = 'Bearer ' + str(apiKey) headers = {'Authorization': bearerString} if not p_additionalHeaders is None: headers.update(p_additionalHeaders) query = '' if not p_queryItems is None: query = '?' + urlencode(p_queryItems, True) url = API_BASE_URL + endpoint + query verb = 'GET'.upper() session = NoRebuildAuthSession() try: if FLAG_REQUEST_VERBOSE: print(verb, url) if verb == 'GET': r = session.get(url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) elif verb == 'PUT': if not p_requestBody is None: if FLAG_REQUEST_VERBOSE: print('body', p_requestBody) r = session.put(url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) elif verb == 'POST': if not p_requestBody is None: if FLAG_REQUEST_VERBOSE: print('body', p_requestBody) r = session.post(url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) elif verb == 'DELETE': r = session.delete(url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: (success, errors, headers, response) = (False, None, None, None) except: (success, errors, headers, response) = (False, None, None, None) if FLAG_REQUEST_VERBOSE: print(r.status_code) success = r.status_code in range(200, 299) errors = None responseHeaders = None responseBody = None if r.status_code == API_STATUS_RATE_LIMIT: retryInterval = 2 if 'Retry-After' in r.headers: retryInterval = r.headers['Retry-After'] if 'retry-after' in r.headers: retryInterval = r.headers['retry-after'] if FLAG_REQUEST_VERBOSE: print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval)) time.sleep(int(retryInterval)) (success, errors, responseHeaders, responseBody) = merakiRequest(apiKey, 'GET', endpoint, p_additionalHeaders, p_queryItems, p_requestBody, FLAG_REQUEST_VERBOSE, p_retry + 1) (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) try: rjson = r.json() except: rjson = None if not rjson is None: if 'errors' in rjson: errors = rjson['errors'] if FLAG_REQUEST_VERBOSE: print(errors) else: responseBody = rjson if 'Link' in r.headers: parsedLinks = utils.parse_header_links(r.headers['Link']) for link in parsedLinks: if link['rel'] == 'next': if FLAG_REQUEST_VERBOSE: print('Next page:', link['url']) splitLink = link['url'].split('/api/v1') (success, errors, responseHeaders, nextBody) = merakiRequest(apiKey, 'GET', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=FLAG_REQUEST_VERBOSE) if success: if not responseBody is None: responseBody = responseBody + nextBody else: responseBody = None (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) </DeepExtract> return (success, errors, response)
def getOrganizations(apiKey): endpoint = '/organizations' if p_retry > API_MAX_RETRIES: if FLAG_REQUEST_VERBOSE: print('ERROR: Reached max retries') (success, errors, headers, response) = (False, None, None, None) bearerString = 'Bearer ' + str(apiKey) headers = {'Authorization': bearerString} if not p_additionalHeaders is None: headers.update(p_additionalHeaders) query = '' if not p_queryItems is None: query = '?' + urlencode(p_queryItems, True) url = API_BASE_URL + endpoint + query verb = 'GET'.upper() session = NoRebuildAuthSession() try: if FLAG_REQUEST_VERBOSE: print(verb, url) if verb == 'GET': r = session.get(url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) elif verb == 'PUT': if not p_requestBody is None: if FLAG_REQUEST_VERBOSE: print('body', p_requestBody) r = session.put(url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) elif verb == 'POST': if not p_requestBody is None: if FLAG_REQUEST_VERBOSE: print('body', p_requestBody) r = session.post(url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) elif verb == 'DELETE': r = session.delete(url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: (success, errors, headers, response) = (False, None, None, None) except: (success, errors, headers, response) = (False, None, None, None) if FLAG_REQUEST_VERBOSE: print(r.status_code) success = r.status_code in range(200, 299) errors = None responseHeaders = None responseBody = None if r.status_code == API_STATUS_RATE_LIMIT: retryInterval = 2 if 'Retry-After' in r.headers: retryInterval = r.headers['Retry-After'] if 'retry-after' in r.headers: retryInterval = r.headers['retry-after'] if FLAG_REQUEST_VERBOSE: print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval)) time.sleep(int(retryInterval)) (success, errors, responseHeaders, responseBody) = merakiRequest(apiKey, 'GET', endpoint, p_additionalHeaders, p_queryItems, p_requestBody, FLAG_REQUEST_VERBOSE, p_retry + 1) (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) try: rjson = r.json() except: rjson = None if not rjson is None: if 'errors' in rjson: errors = rjson['errors'] if FLAG_REQUEST_VERBOSE: print(errors) else: responseBody = rjson if 'Link' in r.headers: parsedLinks = utils.parse_header_links(r.headers['Link']) for link in parsedLinks: if link['rel'] == 'next': if FLAG_REQUEST_VERBOSE: print('Next page:', link['url']) splitLink = link['url'].split('/api/v1') (success, errors, responseHeaders, nextBody) = merakiRequest(apiKey, 'GET', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=FLAG_REQUEST_VERBOSE) if success: if not responseBody is None: responseBody = responseBody + nextBody else: responseBody = None (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) return (success, errors, response)
automation-scripts
positive
def getNetworkApplianceTrafficShaping(apiKey, networkId): url = '/networks/' + str(networkId) + '/appliance/trafficShaping' <DeepExtract> if p_retry > API_MAX_RETRIES: if FLAG_REQUEST_VERBOSE: print('ERROR: Reached max retries') (success, errors, headers, response) = (False, None, None, None) bearerString = 'Bearer ' + str(apiKey) headers = {'Authorization': bearerString} if not p_additionalHeaders is None: headers.update(p_additionalHeaders) query = '' if not p_queryItems is None: qArrayFix = {} for item in p_queryItems: if isinstance(p_queryItems[item], list): qArrayFix['%s[]' % item] = p_queryItems[item] else: qArrayFix[item] = p_queryItems[item] query = '?' + urlencode(qArrayFix, True) url = API_BASE_URL + url + query verb = 'get'.upper() session = NoRebuildAuthSession() verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}} try: if FLAG_REQUEST_VERBOSE: print(verb, url) if verb in verbs: if verbs[verb]['hasBody'] and (not p_requestBody is None): r = verbs[verb]['function'](url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: (success, errors, headers, response) = (False, None, None, None) except: (success, errors, headers, response) = (False, None, None, None) if FLAG_REQUEST_VERBOSE: print(r.status_code) success = r.status_code in range(200, 299) errors = None responseHeaders = None responseBody = None if r.status_code == API_STATUS_RATE_LIMIT: retryInterval = API_RETRY_DEFAULT_WAIT if 'Retry-After' in r.headers: retryInterval = r.headers['Retry-After'] if 'retry-after' in r.headers: retryInterval = r.headers['retry-after'] if FLAG_REQUEST_VERBOSE: print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval)) time.sleep(int(retryInterval)) (success, errors, responseHeaders, responseBody) = merakiRequest(apiKey, 'get', url, p_additionalHeaders, p_queryItems, p_requestBody, FLAG_REQUEST_VERBOSE, p_retry + 1) (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) try: rjson = r.json() except: rjson = None if not rjson is None: if 'errors' in rjson: errors = rjson['errors'] if FLAG_REQUEST_VERBOSE: print(errors) else: responseBody = rjson if 'Link' in r.headers: parsedLinks = utils.parse_header_links(r.headers['Link']) for link in parsedLinks: if link['rel'] == 'next': if FLAG_REQUEST_VERBOSE: print('Next page:', link['url']) splitLink = link['url'].split('/api/v1') (success, errors, responseHeaders, nextBody) = merakiRequest(apiKey, 'get', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=FLAG_REQUEST_VERBOSE) if success: if not responseBody is None: responseBody = responseBody + nextBody else: responseBody = None (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) </DeepExtract> return (success, errors, response)
def getNetworkApplianceTrafficShaping(apiKey, networkId): url = '/networks/' + str(networkId) + '/appliance/trafficShaping' if p_retry > API_MAX_RETRIES: if FLAG_REQUEST_VERBOSE: print('ERROR: Reached max retries') (success, errors, headers, response) = (False, None, None, None) bearerString = 'Bearer ' + str(apiKey) headers = {'Authorization': bearerString} if not p_additionalHeaders is None: headers.update(p_additionalHeaders) query = '' if not p_queryItems is None: qArrayFix = {} for item in p_queryItems: if isinstance(p_queryItems[item], list): qArrayFix['%s[]' % item] = p_queryItems[item] else: qArrayFix[item] = p_queryItems[item] query = '?' + urlencode(qArrayFix, True) url = API_BASE_URL + url + query verb = 'get'.upper() session = NoRebuildAuthSession() verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}} try: if FLAG_REQUEST_VERBOSE: print(verb, url) if verb in verbs: if verbs[verb]['hasBody'] and (not p_requestBody is None): r = verbs[verb]['function'](url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: (success, errors, headers, response) = (False, None, None, None) except: (success, errors, headers, response) = (False, None, None, None) if FLAG_REQUEST_VERBOSE: print(r.status_code) success = r.status_code in range(200, 299) errors = None responseHeaders = None responseBody = None if r.status_code == API_STATUS_RATE_LIMIT: retryInterval = API_RETRY_DEFAULT_WAIT if 'Retry-After' in r.headers: retryInterval = r.headers['Retry-After'] if 'retry-after' in r.headers: retryInterval = r.headers['retry-after'] if FLAG_REQUEST_VERBOSE: print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval)) time.sleep(int(retryInterval)) (success, errors, responseHeaders, responseBody) = merakiRequest(apiKey, 'get', url, p_additionalHeaders, p_queryItems, p_requestBody, FLAG_REQUEST_VERBOSE, p_retry + 1) (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) try: rjson = r.json() except: rjson = None if not rjson is None: if 'errors' in rjson: errors = rjson['errors'] if FLAG_REQUEST_VERBOSE: print(errors) else: responseBody = rjson if 'Link' in r.headers: parsedLinks = utils.parse_header_links(r.headers['Link']) for link in parsedLinks: if link['rel'] == 'next': if FLAG_REQUEST_VERBOSE: print('Next page:', link['url']) splitLink = link['url'].split('/api/v1') (success, errors, responseHeaders, nextBody) = merakiRequest(apiKey, 'get', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=FLAG_REQUEST_VERBOSE) if success: if not responseBody is None: responseBody = responseBody + nextBody else: responseBody = None (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) return (success, errors, response)
automation-scripts
positive
def test_create_one_from_schema_and_join(self): <DeepExtract> result = self._upload_from_schema('good_eats.schema.json') self._test_summary_built(result) </DeepExtract> left_dataset_id = self.dataset_id right_dataset_id = self._post_file('good_eats_aux.csv') on = 'food_type' dataset_id_tuples = [(left_dataset_id, right_dataset_id), (right_dataset_id, left_dataset_id)] for dataset_ids in dataset_id_tuples: result = json.loads(self.controller.join(*dataset_ids, on=on)) expected_schema_keys = set(sum([Dataset.find_one(dataset_id).schema.keys() for dataset_id in dataset_ids], [])) self.assertTrue(isinstance(result, dict)) self.assertTrue(Dataset.ID in result) merge_dataset_id = result[Dataset.ID] dataset = Dataset.find_one(merge_dataset_id) self.assertEqual(dataset.num_rows, 0) self.assertEqual(dataset.num_columns, len(expected_schema_keys)) schema_keys = set(dataset.schema.keys()) self.assertEqual(schema_keys, expected_schema_keys)
def test_create_one_from_schema_and_join(self): result = self._upload_from_schema('good_eats.schema.json') self._test_summary_built(result) left_dataset_id = self.dataset_id right_dataset_id = self._post_file('good_eats_aux.csv') on = 'food_type' dataset_id_tuples = [(left_dataset_id, right_dataset_id), (right_dataset_id, left_dataset_id)] for dataset_ids in dataset_id_tuples: result = json.loads(self.controller.join(*dataset_ids, on=on)) expected_schema_keys = set(sum([Dataset.find_one(dataset_id).schema.keys() for dataset_id in dataset_ids], [])) self.assertTrue(isinstance(result, dict)) self.assertTrue(Dataset.ID in result) merge_dataset_id = result[Dataset.ID] dataset = Dataset.find_one(merge_dataset_id) self.assertEqual(dataset.num_rows, 0) self.assertEqual(dataset.num_columns, len(expected_schema_keys)) schema_keys = set(dataset.schema.keys()) self.assertEqual(schema_keys, expected_schema_keys)
bamboo
positive
def calculate_qparams(x, num_bits, flatten_dims=_DEFAULT_FLATTEN, reduce_dim=0, reduce_type='max', keepdim=False, true_zero=False, per_ch_input=False, quant_mode='maxmin'): alpha_gaus = {1: 1.24, 2: 1.71, 3: 2.215, 4: 2.55, 5: 2.93, 6: 3.28, 7: 3.61, 8: 3.92} alpha_gaus_positive = {1: 1.71, 2: 2.215, 3: 2.55, 4: 2.93, 5: 3.28, 6: 3.61, 7: 3.92, 8: 4.2} alpha_laplas = {1: 1.05, 2: 1.86, 3: 2.83, 4: 5.03, 5: 6.2, 6: 7.41, 7: 8.64, 8: 9.89} alpha_laplas_positive = {1: 1.86, 2: 2.83, 3: 5.03, 4: 6.2, 5: 7.41, 6: 8.64, 7: 9.89, 8: 11.16} if per_ch_input: x = x.transpose(0, 1) with torch.no_grad(): x_flat = x.flatten(*flatten_dims) if quant_mode == 'mean_std' and num_bits < 8: mu = x_flat.mean() if x_flat.dim() == 1 else x_flat.mean(-1) std = x_flat.std() if x_flat.dim() == 1 else x_flat.std(-1) b = torch.abs(x_flat - mu).mean() if x_flat.dim() == 1 else torch.mean(torch.abs(x_flat - mu.unsqueeze(1)), -1) minv = x_flat.min() if x_flat.dim() == 1 else x_flat.min(-1)[0] maxv = x_flat.max() if x_flat.dim() == 1 else x_flat.max(-1)[0] <DeepExtract> shape = list(torch.max(mu - 6 * std, minv).shape) + [1] * (x.dim() - torch.max(mu - 6 * std, minv).dim()) min_values = torch.max(mu - 6 * std, minv).view(*shape) </DeepExtract> <DeepExtract> shape = list(torch.min(mu + 6 * std, maxv).shape) + [1] * (x.dim() - torch.min(mu + 6 * std, maxv).dim()) max_values = torch.min(mu + 6 * std, maxv).view(*shape) </DeepExtract> elif x_flat.dim() == 1: <DeepExtract> shape = list(x_flat.min().shape) + [1] * (x.dim() - x_flat.min().dim()) min_values = x_flat.min().view(*shape) </DeepExtract> <DeepExtract> shape = list(x_flat.max().shape) + [1] * (x.dim() - x_flat.max().dim()) max_values = x_flat.max().view(*shape) </DeepExtract> else: <DeepExtract> shape = list(x_flat.min(-1)[0].shape) + [1] * (x.dim() - x_flat.min(-1)[0].dim()) min_values = x_flat.min(-1)[0].view(*shape) </DeepExtract> <DeepExtract> shape = list(x_flat.max(-1)[0].shape) + [1] * (x.dim() - x_flat.max(-1)[0].dim()) max_values = x_flat.max(-1)[0].view(*shape) </DeepExtract> if reduce_dim is not None: if reduce_type == 'mean': min_values = min_values.mean(reduce_dim, keepdim=keepdim) max_values = max_values.mean(reduce_dim, keepdim=keepdim) elif isinstance(reduce_dim, list) and len(reduce_dim) > 1: C = min_values.shape[-1] min_values = min_values.view(-1).min(reduce_dim[0], keepdim=keepdim)[0] max_values = max_values.view(-1).max(reduce_dim[0], keepdim=keepdim)[0] else: min_values = min_values.min(reduce_dim, keepdim=keepdim)[0] max_values = max_values.max(reduce_dim, keepdim=keepdim)[0] min_values[min_values > 0] = 0 max_values[max_values < 0] = 0 range_values = max_values - min_values range_values[range_values == 0] = 1 return QParams(range=range_values, zero_point=min_values, num_bits=num_bits)
def calculate_qparams(x, num_bits, flatten_dims=_DEFAULT_FLATTEN, reduce_dim=0, reduce_type='max', keepdim=False, true_zero=False, per_ch_input=False, quant_mode='maxmin'): alpha_gaus = {1: 1.24, 2: 1.71, 3: 2.215, 4: 2.55, 5: 2.93, 6: 3.28, 7: 3.61, 8: 3.92} alpha_gaus_positive = {1: 1.71, 2: 2.215, 3: 2.55, 4: 2.93, 5: 3.28, 6: 3.61, 7: 3.92, 8: 4.2} alpha_laplas = {1: 1.05, 2: 1.86, 3: 2.83, 4: 5.03, 5: 6.2, 6: 7.41, 7: 8.64, 8: 9.89} alpha_laplas_positive = {1: 1.86, 2: 2.83, 3: 5.03, 4: 6.2, 5: 7.41, 6: 8.64, 7: 9.89, 8: 11.16} if per_ch_input: x = x.transpose(0, 1) with torch.no_grad(): x_flat = x.flatten(*flatten_dims) if quant_mode == 'mean_std' and num_bits < 8: mu = x_flat.mean() if x_flat.dim() == 1 else x_flat.mean(-1) std = x_flat.std() if x_flat.dim() == 1 else x_flat.std(-1) b = torch.abs(x_flat - mu).mean() if x_flat.dim() == 1 else torch.mean(torch.abs(x_flat - mu.unsqueeze(1)), -1) minv = x_flat.min() if x_flat.dim() == 1 else x_flat.min(-1)[0] maxv = x_flat.max() if x_flat.dim() == 1 else x_flat.max(-1)[0] shape = list(torch.max(mu - 6 * std, minv).shape) + [1] * (x.dim() - torch.max(mu - 6 * std, minv).dim()) min_values = torch.max(mu - 6 * std, minv).view(*shape) shape = list(torch.min(mu + 6 * std, maxv).shape) + [1] * (x.dim() - torch.min(mu + 6 * std, maxv).dim()) max_values = torch.min(mu + 6 * std, maxv).view(*shape) elif x_flat.dim() == 1: shape = list(x_flat.min().shape) + [1] * (x.dim() - x_flat.min().dim()) min_values = x_flat.min().view(*shape) shape = list(x_flat.max().shape) + [1] * (x.dim() - x_flat.max().dim()) max_values = x_flat.max().view(*shape) else: shape = list(x_flat.min(-1)[0].shape) + [1] * (x.dim() - x_flat.min(-1)[0].dim()) min_values = x_flat.min(-1)[0].view(*shape) shape = list(x_flat.max(-1)[0].shape) + [1] * (x.dim() - x_flat.max(-1)[0].dim()) max_values = x_flat.max(-1)[0].view(*shape) if reduce_dim is not None: if reduce_type == 'mean': min_values = min_values.mean(reduce_dim, keepdim=keepdim) max_values = max_values.mean(reduce_dim, keepdim=keepdim) elif isinstance(reduce_dim, list) and len(reduce_dim) > 1: C = min_values.shape[-1] min_values = min_values.view(-1).min(reduce_dim[0], keepdim=keepdim)[0] max_values = max_values.view(-1).max(reduce_dim[0], keepdim=keepdim)[0] else: min_values = min_values.min(reduce_dim, keepdim=keepdim)[0] max_values = max_values.max(reduce_dim, keepdim=keepdim)[0] min_values[min_values > 0] = 0 max_values[max_values < 0] = 0 range_values = max_values - min_values range_values[range_values == 0] = 1 return QParams(range=range_values, zero_point=min_values, num_bits=num_bits)
CalibTIP
positive
def __callback(self, frame, event, arg): """trace event callback""" if frame.f_globals['__name__'] not in self.ignoreMods: if event == 'call': self.depth += 1 elif event == 'return': self.depth -= 1 if self.depth < 0: self.depth = 0 if event in Trace.__logEvents: <DeepExtract> lineno = frame.f_lineno fname = frame.f_globals['__file__'] if fname.endswith('.pyc') or fname.endswith('.pyo'): fname = fname[:-1] name = frame.f_globals['__name__'] line = linecache.getline(fname, lineno) self.log(name, ':', lineno, self.__getIndent(), line.rstrip()) </DeepExtract> return self.__callback
def __callback(self, frame, event, arg): """trace event callback""" if frame.f_globals['__name__'] not in self.ignoreMods: if event == 'call': self.depth += 1 elif event == 'return': self.depth -= 1 if self.depth < 0: self.depth = 0 if event in Trace.__logEvents: lineno = frame.f_lineno fname = frame.f_globals['__file__'] if fname.endswith('.pyc') or fname.endswith('.pyo'): fname = fname[:-1] name = frame.f_globals['__name__'] line = linecache.getline(fname, lineno) self.log(name, ':', lineno, self.__getIndent(), line.rstrip()) return self.__callback
Comparative-Annotation-Toolkit
positive
def run(self): <DeepExtract> if 'versioneer' in sys.modules: del sys.modules['versioneer'] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg' handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS True = True or cfg.verbose assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source' assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix' versionfile_abs = os.path.join(root, cfg.versionfile_source) get_keywords_f = handlers.get('get_keywords') from_keywords_f = handlers.get('keywords') if get_keywords_f and from_keywords_f: try: keywords = get_keywords_f(versionfile_abs) ver = from_keywords_f(keywords, cfg.tag_prefix, True) if True: print('got version from expanded keyword %s' % ver) vers = ver except NotThisMethod: pass try: ver = versions_from_file(versionfile_abs) if True: print('got version from file %s %s' % (versionfile_abs, ver)) vers = ver except NotThisMethod: pass from_vcs_f = handlers.get('pieces_from_vcs') if from_vcs_f: try: pieces = from_vcs_f(cfg.tag_prefix, root, True) ver = render(pieces, cfg.style) if True: print('got version from VCS %s' % ver) vers = ver except NotThisMethod: pass try: if cfg.parentdir_prefix: ver = versions_from_parentdir(cfg.parentdir_prefix, root, True) if True: print('got version from parentdir %s' % ver) vers = ver except NotThisMethod: pass if True: print('unable to compute version') vers = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version', 'date': None} </DeepExtract> print('Version: %s' % vers['version']) print(' full-revisionid: %s' % vers.get('full-revisionid')) print(' dirty: %s' % vers.get('dirty')) print(' date: %s' % vers.get('date')) if vers['error']: print(' error: %s' % vers['error'])
def run(self): if 'versioneer' in sys.modules: del sys.modules['versioneer'] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg' handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS True = True or cfg.verbose assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source' assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix' versionfile_abs = os.path.join(root, cfg.versionfile_source) get_keywords_f = handlers.get('get_keywords') from_keywords_f = handlers.get('keywords') if get_keywords_f and from_keywords_f: try: keywords = get_keywords_f(versionfile_abs) ver = from_keywords_f(keywords, cfg.tag_prefix, True) if True: print('got version from expanded keyword %s' % ver) vers = ver except NotThisMethod: pass try: ver = versions_from_file(versionfile_abs) if True: print('got version from file %s %s' % (versionfile_abs, ver)) vers = ver except NotThisMethod: pass from_vcs_f = handlers.get('pieces_from_vcs') if from_vcs_f: try: pieces = from_vcs_f(cfg.tag_prefix, root, True) ver = render(pieces, cfg.style) if True: print('got version from VCS %s' % ver) vers = ver except NotThisMethod: pass try: if cfg.parentdir_prefix: ver = versions_from_parentdir(cfg.parentdir_prefix, root, True) if True: print('got version from parentdir %s' % ver) vers = ver except NotThisMethod: pass if True: print('unable to compute version') vers = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version', 'date': None} print('Version: %s' % vers['version']) print(' full-revisionid: %s' % vers.get('full-revisionid')) print(' dirty: %s' % vers.get('dirty')) print(' date: %s' % vers.get('date')) if vers['error']: print(' error: %s' % vers['error'])
eliot
positive
def find_formatter_class(name): <DeepExtract> if _formatter_alias_cache: return for cls in get_all_formatters(): for alias in cls.aliases: _formatter_alias_cache[alias] = cls for fn in cls.filenames: _formatter_filename_cache.append((fn, cls)) </DeepExtract> cls = _formatter_alias_cache.get(name, None) return cls
def find_formatter_class(name): if _formatter_alias_cache: return for cls in get_all_formatters(): for alias in cls.aliases: _formatter_alias_cache[alias] = cls for fn in cls.filenames: _formatter_filename_cache.append((fn, cls)) cls = _formatter_alias_cache.get(name, None) return cls
AnkiCSS
positive
def submit(nworker, nserver, fun_submit, hostIP='auto', pscmd=None): if nserver == 0: pscmd = None envs = {'DMLC_NUM_WORKER': nworker, 'DMLC_NUM_SERVER': nserver} <DeepExtract> if hostIP is None or hostIP == 'auto': hostIP = 'ip' if hostIP == 'dns': hostIP = socket.getfqdn() elif hostIP == 'ip': from socket import gaierror try: hostIP = socket.gethostbyname(socket.getfqdn()) except gaierror: logging.warn('gethostbyname(socket.getfqdn()) failed... trying on hostname()') hostIP = socket.gethostbyname(socket.gethostname()) if hostIP.startswith('127.'): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(('10.255.255.255', 1)) hostIP = s.getsockname()[0] hostIP = hostIP </DeepExtract> if nserver == 0: rabit = RabitTracker(hostIP=hostIP, nslave=nworker) envs.update(rabit.slave_envs()) rabit.start(nworker) else: pserver = PSTracker(hostIP=hostIP, cmd=pscmd, envs=envs) envs.update(pserver.slave_envs()) fun_submit(nworker, nserver, envs) if nserver == 0: rabit.join() else: pserver.join()
def submit(nworker, nserver, fun_submit, hostIP='auto', pscmd=None): if nserver == 0: pscmd = None envs = {'DMLC_NUM_WORKER': nworker, 'DMLC_NUM_SERVER': nserver} if hostIP is None or hostIP == 'auto': hostIP = 'ip' if hostIP == 'dns': hostIP = socket.getfqdn() elif hostIP == 'ip': from socket import gaierror try: hostIP = socket.gethostbyname(socket.getfqdn()) except gaierror: logging.warn('gethostbyname(socket.getfqdn()) failed... trying on hostname()') hostIP = socket.gethostbyname(socket.gethostname()) if hostIP.startswith('127.'): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(('10.255.255.255', 1)) hostIP = s.getsockname()[0] hostIP = hostIP if nserver == 0: rabit = RabitTracker(hostIP=hostIP, nslave=nworker) envs.update(rabit.slave_envs()) rabit.start(nworker) else: pserver = PSTracker(hostIP=hostIP, cmd=pscmd, envs=envs) envs.update(pserver.slave_envs()) fun_submit(nworker, nserver, envs) if nserver == 0: rabit.join() else: pserver.join()
dlbench
positive
def arcball_constrain_to_axis(point, axis): """Return sphere point perpendicular to axis.""" v = numpy.array(point, dtype=numpy.float64, copy=True) a = numpy.array(axis, dtype=numpy.float64, copy=True) v -= a * numpy.dot(a, v) <DeepExtract> v = numpy.array(v, dtype=numpy.float64, copy=True) if out is None: if v.ndim == 1: n = math.sqrt(numpy.dot(v, v)) v *= v out = numpy.atleast_1d(numpy.sum(v, axis=axis)) numpy.sqrt(out, out) n = out else: v *= v numpy.sum(v, axis=axis, out=out) numpy.sqrt(out, out) </DeepExtract> if n > _EPS: if v[2] < 0.0: numpy.negative(v, v) v /= n return v if a[2] == 1.0: return numpy.array([1.0, 0.0, 0.0]) return unit_vector([-a[1], a[0], 0.0])
def arcball_constrain_to_axis(point, axis): """Return sphere point perpendicular to axis.""" v = numpy.array(point, dtype=numpy.float64, copy=True) a = numpy.array(axis, dtype=numpy.float64, copy=True) v -= a * numpy.dot(a, v) v = numpy.array(v, dtype=numpy.float64, copy=True) if out is None: if v.ndim == 1: n = math.sqrt(numpy.dot(v, v)) v *= v out = numpy.atleast_1d(numpy.sum(v, axis=axis)) numpy.sqrt(out, out) n = out else: v *= v numpy.sum(v, axis=axis, out=out) numpy.sqrt(out, out) if n > _EPS: if v[2] < 0.0: numpy.negative(v, v) v /= n return v if a[2] == 1.0: return numpy.array([1.0, 0.0, 0.0]) return unit_vector([-a[1], a[0], 0.0])
AugmentedAutoencoder
positive
def FunctionCall(pyname, wrapper, doc, catch, call, postcall_init, typepostconversion, func_ast, lineno, prepend_self=None): """Generate PyCFunction wrapper from AST.FuncDecl func_ast. Args: pyname: str - Python function name (may be special: ends with @) wrapper: str - generated function name doc: str - C++ signature catch: bool - catch C++ exceptions call: str | [str] - C++ command(s) to call the wrapped function (without "(params);" part). postcall_init: str - C++ command; to (re)set ret0. typepostconversion: dict(pytype, index) to convert to pytype func_ast: AST.FuncDecl protobuf lineno: int - .clif line number where func_ast defined prepend_self: AST.Param - Use self as 1st parameter. Yields: Source code for wrapped function. Raises: ValueError: for non-supported default arguments """ ctxmgr = pyname.endswith('@') if ctxmgr: ctxmgr = pyname assert ctxmgr in ('__enter__@', '__exit__@'), 'Invalid context manager name ' + pyname pyname = pyname.rstrip('@') nret = len(func_ast.returns) return_type = astutils.FuncReturnType(func_ast) void_return_type = 'void' == return_type xouts = nret > (0 if void_return_type else 1) params = [] nargs = len(func_ast.params) is_ternaryfunc_slot = pyname == '__call__' yield '' if func_ast.classmethod: yield ('// @classmethod ' + doc) arg0 = 'cls' else: yield ('// ' + doc) arg0 = 'self' needs_kw = nargs or is_ternaryfunc_slot yield ('static PyObject* %s(PyObject* %s%s) {' % (wrapper, arg0, ', PyObject* args, PyObject* kw' if needs_kw else '')) if is_ternaryfunc_slot and (not nargs): yield (I + 'if (!ensure_no_args_and_kw_args("%s", args, kw)) return nullptr;' % pyname) if prepend_self: <DeepExtract> ptype = prepend_self.type ctype = ptype.cpp_type smartptr = ctype.startswith('::std::unique_ptr') or ctype.startswith('::std::shared_ptr') if not ctype: assert ptype.callable, 'Non-callable param has empty cpp_type' if len(ptype.callable.returns) > 1: raise ValueError('Callbacks may not have any output parameters, %s param %s has %d' % (pyname + ' line %d' % lineno, prepend_self.name.native, len(ptype.callable.returns) - 1)) params.append('std::move(%s)' % 'arg0') (unused_check_nullptr, out) = (False, 'std::function<%s> %s;' % (astutils.StdFuncParamStr(ptype.callable), 'arg0')) if ptype.cpp_raw_pointer: if ptype.cpp_toptr_conversion: params.append('arg0') (unused_check_nullptr, out) = (False, '%s %s;' % (ctype, 'arg0')) t = ctype[:-1] if ctype.endswith('*'): if ptype.cpp_abstract: if ptype.cpp_touniqptr_conversion: params.append('arg0' + '.get()') (unused_check_nullptr, out) = (False, '::std::unique_ptr<%s> %s;' % (t, 'arg0')) elif ptype.cpp_has_public_dtor: if ptype.cpp_has_def_ctor: params.append('&' + 'arg0') (unused_check_nullptr, out) = (False, '%s %s;' % (t, 'arg0')) else: params.append('&%s.value()' % 'arg0') (unused_check_nullptr, out) = (False, '::std::optional<%s> %s;' % (t, 'arg0')) raise TypeError("Can't convert %s to %s" % (ptype.lang_type, ctype)) if (smartptr or ptype.cpp_abstract) and (not ptype.cpp_touniqptr_conversion) and (not (ctype.startswith('::std::unique_ptr') and prepend_self.default_value == 'default')) and (not (ctype.startswith('::std::shared_ptr') and ptype.lang_type in _BUILTIN_TYPES_WITH_SHARED_PTR_CONVERSION)): raise TypeError('Can\'t create "%s" variable (C++ type %s) in function %s, no valid conversion defined' % (prepend_self.name.native, ctype, pyname + ' line %d' % lineno)) if smartptr: params.append('std::move(%s)' % 'arg0') (unused_check_nullptr, out) = (False, '%s %s;' % (ctype, 'arg0')) if ptype.cpp_toptr_conversion: params.append('*' + 'arg0') (unused_check_nullptr, out) = (True, '%s* %s;' % (ctype, 'arg0')) if ptype.cpp_abstract: params.append('*' + 'arg0') (unused_check_nullptr, out) = (False, 'std::unique_ptr<%s> %s;' % (ctype, 'arg0')) if ptype.cpp_has_def_ctor: params.append('std::move(%s)' % 'arg0') (unused_check_nullptr, out) = (False, '%s %s;' % (ctype, 'arg0')) else: params.append('std::move(%s).value()' % 'arg0') (unused_check_nullptr, out) = (False, '::std::optional<%s> %s;' % (ctype, 'arg0')) </DeepExtract> yield (I + out) yield (I + 'if (!Clif_PyObjAs(self, &arg0)) return nullptr;') minargs = sum((1 for p in func_ast.params if not p.default_value)) if nargs: yield (I + 'PyObject* a[%d]%s;' % (nargs, '' if minargs == nargs else '{}')) yield (I + 'const char* names[] = {') for p in func_ast.params: yield (I + I + I + '"%s",' % p.name.native) yield (I + I + I + 'nullptr') yield (I + '};') yield (I + 'if (!PyArg_ParseTupleAndKeywords(args, kw, "%s:%s", const_cast<char**>(names), %s)) return nullptr;' % ('O' * nargs if minargs == nargs else 'O' * minargs + '|' + 'O' * (nargs - minargs), pyname, ', '.join(('&a[%d]' % i for i in range(nargs))))) if minargs < nargs and (not xouts): yield (I + 'int nargs; // Find how many args actually passed in.') yield (I + 'for (nargs = %d; nargs > %d; --nargs) {' % (nargs, minargs)) yield (I + I + 'if (a[nargs-1] != nullptr) break;') yield (I + '}') for (i, p) in enumerate(func_ast.params): n = i + 1 arg = 'arg%d' % n <DeepExtract> ptype = p.type ctype = ptype.cpp_type smartptr = ctype.startswith('::std::unique_ptr') or ctype.startswith('::std::shared_ptr') if not ctype: assert ptype.callable, 'Non-callable param has empty cpp_type' if len(ptype.callable.returns) > 1: raise ValueError('Callbacks may not have any output parameters, %s param %s has %d' % (pyname + ' line %d' % lineno, p.name.native, len(ptype.callable.returns) - 1)) params.append('std::move(%s)' % arg) (check_nullptr, out) = (False, 'std::function<%s> %s;' % (astutils.StdFuncParamStr(ptype.callable), arg)) if ptype.cpp_raw_pointer: if ptype.cpp_toptr_conversion: params.append(arg) (check_nullptr, out) = (False, '%s %s;' % (ctype, arg)) t = ctype[:-1] if ctype.endswith('*'): if ptype.cpp_abstract: if ptype.cpp_touniqptr_conversion: params.append(arg + '.get()') (check_nullptr, out) = (False, '::std::unique_ptr<%s> %s;' % (t, arg)) elif ptype.cpp_has_public_dtor: if ptype.cpp_has_def_ctor: params.append('&' + arg) (check_nullptr, out) = (False, '%s %s;' % (t, arg)) else: params.append('&%s.value()' % arg) (check_nullptr, out) = (False, '::std::optional<%s> %s;' % (t, arg)) raise TypeError("Can't convert %s to %s" % (ptype.lang_type, ctype)) if (smartptr or ptype.cpp_abstract) and (not ptype.cpp_touniqptr_conversion) and (not (ctype.startswith('::std::unique_ptr') and p.default_value == 'default')) and (not (ctype.startswith('::std::shared_ptr') and ptype.lang_type in _BUILTIN_TYPES_WITH_SHARED_PTR_CONVERSION)): raise TypeError('Can\'t create "%s" variable (C++ type %s) in function %s, no valid conversion defined' % (p.name.native, ctype, pyname + ' line %d' % lineno)) if smartptr: params.append('std::move(%s)' % arg) (check_nullptr, out) = (False, '%s %s;' % (ctype, arg)) if ptype.cpp_toptr_conversion: params.append('*' + arg) (check_nullptr, out) = (True, '%s* %s;' % (ctype, arg)) if ptype.cpp_abstract: params.append('*' + arg) (check_nullptr, out) = (False, 'std::unique_ptr<%s> %s;' % (ctype, arg)) if ptype.cpp_has_def_ctor: params.append('std::move(%s)' % arg) (check_nullptr, out) = (False, '%s %s;' % (ctype, arg)) else: params.append('std::move(%s).value()' % arg) (check_nullptr, out) = (False, '::std::optional<%s> %s;' % (ctype, arg)) </DeepExtract> yield (I + out) return_arg_err = 'return ArgError("{func_name}", names[{i}], "{ctype}", a[{i}]);'.format(i=i, func_name=pyname, ctype=astutils.Type(p)) cvt = 'if (!Clif_PyObjAs(a[{i}], &{cvar}{postconv})) {return_arg_err}'.format(i=i, cvar=arg, return_arg_err=return_arg_err, postconv='' if p.type.cpp_type else ', {%s}' % ', '.join((postconv.Initializer(t.type, typepostconversion) for t in p.type.callable.params))) def YieldCheckNullptr(ii): if check_nullptr: yield (ii + 'if (%s == nullptr) {' % arg) yield (ii + I + return_arg_err) yield (ii + '}') if i < minargs: yield (I + cvt) for s in YieldCheckNullptr(I): yield s else: if xouts: _I = '' else: _I = I yield (I + 'if (nargs > %d) {' % i) if p.default_value == 'default' or 'inf' in p.default_value: if xouts: raise ValueError("Can't supply the default for C++ function argument. Drop =default in def %s(%s)." % (pyname, p.name.native)) if n < nargs: if p.type.cpp_type.startswith('::std::unique_ptr'): yield (I + I + 'if (!a[%d]) { /* default-constructed smartptr */ }' % i) yield (I + I + 'else ' + cvt) else: yield (I + I + 'if (!a[{i}]) return DefaultArgMissedError("{}", names[{i}]);'.format(pyname, i=i)) yield (I + I + cvt) else: yield (I + I + cvt) for s in YieldCheckNullptr(I + I): yield s elif p.default_value and params[-1].startswith('&') and p.type.cpp_raw_pointer: raise ValueError('A default for integral type pointer argument is not supported. Drop =default in def %s(%s).' % (pyname, p.name.native)) else: yield (_I + I + 'if (!a[%d]) %s = (%s)%s;' % (i, arg, astutils.Type(p), p.default_value)) yield (_I + I + 'else ' + cvt) for s in YieldCheckNullptr(_I + I): yield s if not xouts: yield (I + '}') for (n, p) in enumerate(func_ast.returns): if n or void_return_type: yield (I + '%s ret%d{};' % (astutils.Type(p), n)) params.append('&ret%d' % n) yield (I + '// Call actual C++ method.') if isinstance(call, list): for s in call[:-1]: yield (I + s) call = call[-1] if not func_ast.py_keep_gil: if nargs: yield (I + 'Py_INCREF(args);') yield (I + 'Py_XINCREF(kw);') yield (I + 'PyThreadState* _save;') yield (I + 'Py_UNBLOCK_THREADS') optional_ret0 = False convert_ref_to_ptr = False if (minargs < nargs or catch) and (not void_return_type): if catch and return_type.rstrip().endswith('&'): convert_ref_to_ptr = True idx = return_type.rindex('&') return_type = return_type[:idx] + '*' if func_ast.returns[0].type.cpp_has_def_ctor: yield (I + return_type + ' ret0;') else: yield (I + '::std::optional<%s> ret0;' % return_type) optional_ret0 = True if catch: for s in _GenExceptionTry(): yield s if minargs < nargs and (not xouts): if not void_return_type: call = 'ret0 = ' + call yield (I + 'switch (nargs) {') for n in range(minargs, nargs + 1): yield (I + 'case %d:' % n) if func_ast.is_extend_method and func_ast.constructor: call_with_params = call % (func_ast.name.cpp_name, astutils.TupleStr(params[:n])) else: num_params = n if func_ast.is_extend_method: num_params += 1 call_with_params = call + astutils.TupleStr(params[:num_params]) yield (I + I + '%s; break;' % call_with_params) yield (I + '}') else: if func_ast.is_extend_method and func_ast.constructor: call = call % (func_ast.name.cpp_name, astutils.TupleStr(params)) else: call += astutils.TupleStr(params) _I = I if catch else '' if void_return_type: yield (_I + I + call + ';') elif catch: if convert_ref_to_ptr: yield (_I + I + 'ret0 = &' + call + ';') else: yield (_I + I + 'ret0 = ' + call + ';') else: yield (_I + I + return_type + ' ret0 = ' + call + ';') if catch: for s in _GenExceptionCatch(): yield s if postcall_init: if void_return_type: yield (I + postcall_init) else: yield (I + 'ret0' + postcall_init) if not func_ast.py_keep_gil: yield (I + 'Py_BLOCK_THREADS') if nargs: yield (I + 'Py_DECREF(args);') yield (I + 'Py_XDECREF(kw);') if catch: for s in _GenExceptionRaise(): yield s if func_ast.postproc == '->self': func_ast.postproc = '' return_self = True assert nret == 0, '-> self must have no other output parameters' else: return_self = False ret = '*ret' if convert_ref_to_ptr else 'ret' if nret > 1 or ((func_ast.postproc or ctxmgr) and nret): yield (I + '// Convert return values to Python.') yield (I + 'PyObject* p, * result_tuple = PyTuple_New(%d);' % nret) yield (I + 'if (result_tuple == nullptr) return nullptr;') for i in range(nret): yield (I + 'if ((p=Clif_PyObjFrom(std::move(%s%d), %s)) == nullptr) {' % (ret, i, postconv.Initializer(func_ast.returns[i].type, typepostconversion, marked_non_raising=func_ast.marked_non_raising))) yield (I + I + 'Py_DECREF(result_tuple);') yield (I + I + 'return nullptr;') yield (I + '}') yield (I + 'PyTuple_SET_ITEM(result_tuple, %d, p);' % i) if func_ast.postproc: yield (I + 'PyObject* pyproc = ImportFQName("%s");' % func_ast.postproc) yield (I + 'if (pyproc == nullptr) {') yield (I + I + 'Py_DECREF(result_tuple);') yield (I + I + 'return nullptr;') yield (I + '}') yield (I + 'p = PyObject_CallObject(pyproc, result_tuple);') yield (I + 'Py_DECREF(pyproc);') yield (I + 'Py_CLEAR(result_tuple);') if ctxmgr: yield (I + 'if (p == nullptr) return nullptr;') yield (I + 'Py_DECREF(p); // Not needed by the context manager.') else: yield (I + 'result_tuple = p;') if ctxmgr == '__enter__@': yield (I + 'Py_XDECREF(result_tuple);') yield (I + 'Py_INCREF(self);') yield (I + 'return self;') elif ctxmgr == '__exit__@': yield (I + 'Py_XDECREF(result_tuple);') yield (I + 'Py_RETURN_NONE;') else: yield (I + 'return result_tuple;') elif nret: yield (I + 'return Clif_PyObjFrom(std::move(%s0%s), %s);' % (ret, '.value()' if optional_ret0 else '', postconv.Initializer(func_ast.returns[0].type, typepostconversion, marked_non_raising=func_ast.marked_non_raising))) elif return_self or ctxmgr == '__enter__@': yield (I + 'Py_INCREF(self);') yield (I + 'return self;') else: yield (I + 'Py_RETURN_NONE;') yield '}'
def FunctionCall(pyname, wrapper, doc, catch, call, postcall_init, typepostconversion, func_ast, lineno, prepend_self=None): """Generate PyCFunction wrapper from AST.FuncDecl func_ast. Args: pyname: str - Python function name (may be special: ends with @) wrapper: str - generated function name doc: str - C++ signature catch: bool - catch C++ exceptions call: str | [str] - C++ command(s) to call the wrapped function (without "(params);" part). postcall_init: str - C++ command; to (re)set ret0. typepostconversion: dict(pytype, index) to convert to pytype func_ast: AST.FuncDecl protobuf lineno: int - .clif line number where func_ast defined prepend_self: AST.Param - Use self as 1st parameter. Yields: Source code for wrapped function. Raises: ValueError: for non-supported default arguments """ ctxmgr = pyname.endswith('@') if ctxmgr: ctxmgr = pyname assert ctxmgr in ('__enter__@', '__exit__@'), 'Invalid context manager name ' + pyname pyname = pyname.rstrip('@') nret = len(func_ast.returns) return_type = astutils.FuncReturnType(func_ast) void_return_type = 'void' == return_type xouts = nret > (0 if void_return_type else 1) params = [] nargs = len(func_ast.params) is_ternaryfunc_slot = pyname == '__call__' yield '' if func_ast.classmethod: yield ('// @classmethod ' + doc) arg0 = 'cls' else: yield ('// ' + doc) arg0 = 'self' needs_kw = nargs or is_ternaryfunc_slot yield ('static PyObject* %s(PyObject* %s%s) {' % (wrapper, arg0, ', PyObject* args, PyObject* kw' if needs_kw else '')) if is_ternaryfunc_slot and (not nargs): yield (I + 'if (!ensure_no_args_and_kw_args("%s", args, kw)) return nullptr;' % pyname) if prepend_self: ptype = prepend_self.type ctype = ptype.cpp_type smartptr = ctype.startswith('::std::unique_ptr') or ctype.startswith('::std::shared_ptr') if not ctype: assert ptype.callable, 'Non-callable param has empty cpp_type' if len(ptype.callable.returns) > 1: raise ValueError('Callbacks may not have any output parameters, %s param %s has %d' % (pyname + ' line %d' % lineno, prepend_self.name.native, len(ptype.callable.returns) - 1)) params.append('std::move(%s)' % 'arg0') (unused_check_nullptr, out) = (False, 'std::function<%s> %s;' % (astutils.StdFuncParamStr(ptype.callable), 'arg0')) if ptype.cpp_raw_pointer: if ptype.cpp_toptr_conversion: params.append('arg0') (unused_check_nullptr, out) = (False, '%s %s;' % (ctype, 'arg0')) t = ctype[:-1] if ctype.endswith('*'): if ptype.cpp_abstract: if ptype.cpp_touniqptr_conversion: params.append('arg0' + '.get()') (unused_check_nullptr, out) = (False, '::std::unique_ptr<%s> %s;' % (t, 'arg0')) elif ptype.cpp_has_public_dtor: if ptype.cpp_has_def_ctor: params.append('&' + 'arg0') (unused_check_nullptr, out) = (False, '%s %s;' % (t, 'arg0')) else: params.append('&%s.value()' % 'arg0') (unused_check_nullptr, out) = (False, '::std::optional<%s> %s;' % (t, 'arg0')) raise TypeError("Can't convert %s to %s" % (ptype.lang_type, ctype)) if (smartptr or ptype.cpp_abstract) and (not ptype.cpp_touniqptr_conversion) and (not (ctype.startswith('::std::unique_ptr') and prepend_self.default_value == 'default')) and (not (ctype.startswith('::std::shared_ptr') and ptype.lang_type in _BUILTIN_TYPES_WITH_SHARED_PTR_CONVERSION)): raise TypeError('Can\'t create "%s" variable (C++ type %s) in function %s, no valid conversion defined' % (prepend_self.name.native, ctype, pyname + ' line %d' % lineno)) if smartptr: params.append('std::move(%s)' % 'arg0') (unused_check_nullptr, out) = (False, '%s %s;' % (ctype, 'arg0')) if ptype.cpp_toptr_conversion: params.append('*' + 'arg0') (unused_check_nullptr, out) = (True, '%s* %s;' % (ctype, 'arg0')) if ptype.cpp_abstract: params.append('*' + 'arg0') (unused_check_nullptr, out) = (False, 'std::unique_ptr<%s> %s;' % (ctype, 'arg0')) if ptype.cpp_has_def_ctor: params.append('std::move(%s)' % 'arg0') (unused_check_nullptr, out) = (False, '%s %s;' % (ctype, 'arg0')) else: params.append('std::move(%s).value()' % 'arg0') (unused_check_nullptr, out) = (False, '::std::optional<%s> %s;' % (ctype, 'arg0')) yield (I + out) yield (I + 'if (!Clif_PyObjAs(self, &arg0)) return nullptr;') minargs = sum((1 for p in func_ast.params if not p.default_value)) if nargs: yield (I + 'PyObject* a[%d]%s;' % (nargs, '' if minargs == nargs else '{}')) yield (I + 'const char* names[] = {') for p in func_ast.params: yield (I + I + I + '"%s",' % p.name.native) yield (I + I + I + 'nullptr') yield (I + '};') yield (I + 'if (!PyArg_ParseTupleAndKeywords(args, kw, "%s:%s", const_cast<char**>(names), %s)) return nullptr;' % ('O' * nargs if minargs == nargs else 'O' * minargs + '|' + 'O' * (nargs - minargs), pyname, ', '.join(('&a[%d]' % i for i in range(nargs))))) if minargs < nargs and (not xouts): yield (I + 'int nargs; // Find how many args actually passed in.') yield (I + 'for (nargs = %d; nargs > %d; --nargs) {' % (nargs, minargs)) yield (I + I + 'if (a[nargs-1] != nullptr) break;') yield (I + '}') for (i, p) in enumerate(func_ast.params): n = i + 1 arg = 'arg%d' % n ptype = p.type ctype = ptype.cpp_type smartptr = ctype.startswith('::std::unique_ptr') or ctype.startswith('::std::shared_ptr') if not ctype: assert ptype.callable, 'Non-callable param has empty cpp_type' if len(ptype.callable.returns) > 1: raise ValueError('Callbacks may not have any output parameters, %s param %s has %d' % (pyname + ' line %d' % lineno, p.name.native, len(ptype.callable.returns) - 1)) params.append('std::move(%s)' % arg) (check_nullptr, out) = (False, 'std::function<%s> %s;' % (astutils.StdFuncParamStr(ptype.callable), arg)) if ptype.cpp_raw_pointer: if ptype.cpp_toptr_conversion: params.append(arg) (check_nullptr, out) = (False, '%s %s;' % (ctype, arg)) t = ctype[:-1] if ctype.endswith('*'): if ptype.cpp_abstract: if ptype.cpp_touniqptr_conversion: params.append(arg + '.get()') (check_nullptr, out) = (False, '::std::unique_ptr<%s> %s;' % (t, arg)) elif ptype.cpp_has_public_dtor: if ptype.cpp_has_def_ctor: params.append('&' + arg) (check_nullptr, out) = (False, '%s %s;' % (t, arg)) else: params.append('&%s.value()' % arg) (check_nullptr, out) = (False, '::std::optional<%s> %s;' % (t, arg)) raise TypeError("Can't convert %s to %s" % (ptype.lang_type, ctype)) if (smartptr or ptype.cpp_abstract) and (not ptype.cpp_touniqptr_conversion) and (not (ctype.startswith('::std::unique_ptr') and p.default_value == 'default')) and (not (ctype.startswith('::std::shared_ptr') and ptype.lang_type in _BUILTIN_TYPES_WITH_SHARED_PTR_CONVERSION)): raise TypeError('Can\'t create "%s" variable (C++ type %s) in function %s, no valid conversion defined' % (p.name.native, ctype, pyname + ' line %d' % lineno)) if smartptr: params.append('std::move(%s)' % arg) (check_nullptr, out) = (False, '%s %s;' % (ctype, arg)) if ptype.cpp_toptr_conversion: params.append('*' + arg) (check_nullptr, out) = (True, '%s* %s;' % (ctype, arg)) if ptype.cpp_abstract: params.append('*' + arg) (check_nullptr, out) = (False, 'std::unique_ptr<%s> %s;' % (ctype, arg)) if ptype.cpp_has_def_ctor: params.append('std::move(%s)' % arg) (check_nullptr, out) = (False, '%s %s;' % (ctype, arg)) else: params.append('std::move(%s).value()' % arg) (check_nullptr, out) = (False, '::std::optional<%s> %s;' % (ctype, arg)) yield (I + out) return_arg_err = 'return ArgError("{func_name}", names[{i}], "{ctype}", a[{i}]);'.format(i=i, func_name=pyname, ctype=astutils.Type(p)) cvt = 'if (!Clif_PyObjAs(a[{i}], &{cvar}{postconv})) {return_arg_err}'.format(i=i, cvar=arg, return_arg_err=return_arg_err, postconv='' if p.type.cpp_type else ', {%s}' % ', '.join((postconv.Initializer(t.type, typepostconversion) for t in p.type.callable.params))) def YieldCheckNullptr(ii): if check_nullptr: yield (ii + 'if (%s == nullptr) {' % arg) yield (ii + I + return_arg_err) yield (ii + '}') if i < minargs: yield (I + cvt) for s in YieldCheckNullptr(I): yield s else: if xouts: _I = '' else: _I = I yield (I + 'if (nargs > %d) {' % i) if p.default_value == 'default' or 'inf' in p.default_value: if xouts: raise ValueError("Can't supply the default for C++ function argument. Drop =default in def %s(%s)." % (pyname, p.name.native)) if n < nargs: if p.type.cpp_type.startswith('::std::unique_ptr'): yield (I + I + 'if (!a[%d]) { /* default-constructed smartptr */ }' % i) yield (I + I + 'else ' + cvt) else: yield (I + I + 'if (!a[{i}]) return DefaultArgMissedError("{}", names[{i}]);'.format(pyname, i=i)) yield (I + I + cvt) else: yield (I + I + cvt) for s in YieldCheckNullptr(I + I): yield s elif p.default_value and params[-1].startswith('&') and p.type.cpp_raw_pointer: raise ValueError('A default for integral type pointer argument is not supported. Drop =default in def %s(%s).' % (pyname, p.name.native)) else: yield (_I + I + 'if (!a[%d]) %s = (%s)%s;' % (i, arg, astutils.Type(p), p.default_value)) yield (_I + I + 'else ' + cvt) for s in YieldCheckNullptr(_I + I): yield s if not xouts: yield (I + '}') for (n, p) in enumerate(func_ast.returns): if n or void_return_type: yield (I + '%s ret%d{};' % (astutils.Type(p), n)) params.append('&ret%d' % n) yield (I + '// Call actual C++ method.') if isinstance(call, list): for s in call[:-1]: yield (I + s) call = call[-1] if not func_ast.py_keep_gil: if nargs: yield (I + 'Py_INCREF(args);') yield (I + 'Py_XINCREF(kw);') yield (I + 'PyThreadState* _save;') yield (I + 'Py_UNBLOCK_THREADS') optional_ret0 = False convert_ref_to_ptr = False if (minargs < nargs or catch) and (not void_return_type): if catch and return_type.rstrip().endswith('&'): convert_ref_to_ptr = True idx = return_type.rindex('&') return_type = return_type[:idx] + '*' if func_ast.returns[0].type.cpp_has_def_ctor: yield (I + return_type + ' ret0;') else: yield (I + '::std::optional<%s> ret0;' % return_type) optional_ret0 = True if catch: for s in _GenExceptionTry(): yield s if minargs < nargs and (not xouts): if not void_return_type: call = 'ret0 = ' + call yield (I + 'switch (nargs) {') for n in range(minargs, nargs + 1): yield (I + 'case %d:' % n) if func_ast.is_extend_method and func_ast.constructor: call_with_params = call % (func_ast.name.cpp_name, astutils.TupleStr(params[:n])) else: num_params = n if func_ast.is_extend_method: num_params += 1 call_with_params = call + astutils.TupleStr(params[:num_params]) yield (I + I + '%s; break;' % call_with_params) yield (I + '}') else: if func_ast.is_extend_method and func_ast.constructor: call = call % (func_ast.name.cpp_name, astutils.TupleStr(params)) else: call += astutils.TupleStr(params) _I = I if catch else '' if void_return_type: yield (_I + I + call + ';') elif catch: if convert_ref_to_ptr: yield (_I + I + 'ret0 = &' + call + ';') else: yield (_I + I + 'ret0 = ' + call + ';') else: yield (_I + I + return_type + ' ret0 = ' + call + ';') if catch: for s in _GenExceptionCatch(): yield s if postcall_init: if void_return_type: yield (I + postcall_init) else: yield (I + 'ret0' + postcall_init) if not func_ast.py_keep_gil: yield (I + 'Py_BLOCK_THREADS') if nargs: yield (I + 'Py_DECREF(args);') yield (I + 'Py_XDECREF(kw);') if catch: for s in _GenExceptionRaise(): yield s if func_ast.postproc == '->self': func_ast.postproc = '' return_self = True assert nret == 0, '-> self must have no other output parameters' else: return_self = False ret = '*ret' if convert_ref_to_ptr else 'ret' if nret > 1 or ((func_ast.postproc or ctxmgr) and nret): yield (I + '// Convert return values to Python.') yield (I + 'PyObject* p, * result_tuple = PyTuple_New(%d);' % nret) yield (I + 'if (result_tuple == nullptr) return nullptr;') for i in range(nret): yield (I + 'if ((p=Clif_PyObjFrom(std::move(%s%d), %s)) == nullptr) {' % (ret, i, postconv.Initializer(func_ast.returns[i].type, typepostconversion, marked_non_raising=func_ast.marked_non_raising))) yield (I + I + 'Py_DECREF(result_tuple);') yield (I + I + 'return nullptr;') yield (I + '}') yield (I + 'PyTuple_SET_ITEM(result_tuple, %d, p);' % i) if func_ast.postproc: yield (I + 'PyObject* pyproc = ImportFQName("%s");' % func_ast.postproc) yield (I + 'if (pyproc == nullptr) {') yield (I + I + 'Py_DECREF(result_tuple);') yield (I + I + 'return nullptr;') yield (I + '}') yield (I + 'p = PyObject_CallObject(pyproc, result_tuple);') yield (I + 'Py_DECREF(pyproc);') yield (I + 'Py_CLEAR(result_tuple);') if ctxmgr: yield (I + 'if (p == nullptr) return nullptr;') yield (I + 'Py_DECREF(p); // Not needed by the context manager.') else: yield (I + 'result_tuple = p;') if ctxmgr == '__enter__@': yield (I + 'Py_XDECREF(result_tuple);') yield (I + 'Py_INCREF(self);') yield (I + 'return self;') elif ctxmgr == '__exit__@': yield (I + 'Py_XDECREF(result_tuple);') yield (I + 'Py_RETURN_NONE;') else: yield (I + 'return result_tuple;') elif nret: yield (I + 'return Clif_PyObjFrom(std::move(%s0%s), %s);' % (ret, '.value()' if optional_ret0 else '', postconv.Initializer(func_ast.returns[0].type, typepostconversion, marked_non_raising=func_ast.marked_non_raising))) elif return_self or ctxmgr == '__enter__@': yield (I + 'Py_INCREF(self);') yield (I + 'return self;') else: yield (I + 'Py_RETURN_NONE;') yield '}'
clif
positive
def main(_): tf.logging.set_verbosity(tf.logging.INFO) tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) input_files = [] for input_pattern in FLAGS.input_file.split(','): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info('*** Reading from input files ***') for input_file in input_files: tf.logging.info(' %s', input_file) rng = random.Random(FLAGS.random_seed) <DeepExtract> all_documents = [[]] for input_file in input_files: with tf.gfile.GFile(input_file, 'r') as reader: while True: line = tokenization.convert_to_unicode(reader.readline()) if not line: break line = line.strip() if not line: all_documents.append([]) tokens = tokenizer.tokenize(line) if tokens: all_documents[-1].append(tokens) all_documents = [x for x in all_documents if x] rng.shuffle(all_documents) vocab_words = list(tokenizer.vocab.keys()) instances = [] for _ in range(FLAGS.dupe_factor): for document_index in range(len(all_documents)): instances.extend(create_instances_from_document(all_documents, document_index, FLAGS.max_seq_length, FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq, vocab_words, rng)) rng.shuffle(instances) instances = instances </DeepExtract> output_files = FLAGS.output_file.split(',') tf.logging.info('*** Writing to output files ***') for output_file in output_files: tf.logging.info(' %s', output_file) <DeepExtract> writers = [] for output_file in output_files: writers.append(tf.python_io.TFRecordWriter(output_file)) writer_index = 0 total_written = 0 for (inst_index, instance) in enumerate(instances): input_ids = tokenizer.convert_tokens_to_ids(instance.tokens) input_mask = [1] * len(input_ids) segment_ids = list(instance.segment_ids) assert len(input_ids) <= FLAGS.max_seq_length while len(input_ids) < FLAGS.max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == FLAGS.max_seq_length assert len(input_mask) == FLAGS.max_seq_length assert len(segment_ids) == FLAGS.max_seq_length masked_lm_positions = list(instance.masked_lm_positions) masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels) masked_lm_weights = [1.0] * len(masked_lm_ids) while len(masked_lm_positions) < FLAGS.max_predictions_per_seq: masked_lm_positions.append(0) masked_lm_ids.append(0) masked_lm_weights.append(0.0) next_sentence_label = 1 if instance.is_random_next else 0 features = collections.OrderedDict() features['input_ids'] = create_int_feature(input_ids) features['input_mask'] = create_int_feature(input_mask) features['segment_ids'] = create_int_feature(segment_ids) features['masked_lm_positions'] = create_int_feature(masked_lm_positions) features['masked_lm_ids'] = create_int_feature(masked_lm_ids) features['masked_lm_weights'] = create_float_feature(masked_lm_weights) features['next_sentence_labels'] = create_int_feature([next_sentence_label]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writers[writer_index].write(tf_example.SerializeToString()) writer_index = (writer_index + 1) % len(writers) total_written += 1 if inst_index < 20: tf.logging.info('*** Example ***') tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in instance.tokens])) for feature_name in features.keys(): feature = features[feature_name] values = [] if feature.int64_list.value: values = feature.int64_list.value elif feature.float_list.value: values = feature.float_list.value tf.logging.info('%s: %s' % (feature_name, ' '.join([str(x) for x in values]))) for writer in writers: writer.close() tf.logging.info('Wrote %d total instances', total_written) </DeepExtract>
def main(_): tf.logging.set_verbosity(tf.logging.INFO) tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) input_files = [] for input_pattern in FLAGS.input_file.split(','): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info('*** Reading from input files ***') for input_file in input_files: tf.logging.info(' %s', input_file) rng = random.Random(FLAGS.random_seed) all_documents = [[]] for input_file in input_files: with tf.gfile.GFile(input_file, 'r') as reader: while True: line = tokenization.convert_to_unicode(reader.readline()) if not line: break line = line.strip() if not line: all_documents.append([]) tokens = tokenizer.tokenize(line) if tokens: all_documents[-1].append(tokens) all_documents = [x for x in all_documents if x] rng.shuffle(all_documents) vocab_words = list(tokenizer.vocab.keys()) instances = [] for _ in range(FLAGS.dupe_factor): for document_index in range(len(all_documents)): instances.extend(create_instances_from_document(all_documents, document_index, FLAGS.max_seq_length, FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq, vocab_words, rng)) rng.shuffle(instances) instances = instances output_files = FLAGS.output_file.split(',') tf.logging.info('*** Writing to output files ***') for output_file in output_files: tf.logging.info(' %s', output_file) writers = [] for output_file in output_files: writers.append(tf.python_io.TFRecordWriter(output_file)) writer_index = 0 total_written = 0 for (inst_index, instance) in enumerate(instances): input_ids = tokenizer.convert_tokens_to_ids(instance.tokens) input_mask = [1] * len(input_ids) segment_ids = list(instance.segment_ids) assert len(input_ids) <= FLAGS.max_seq_length while len(input_ids) < FLAGS.max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == FLAGS.max_seq_length assert len(input_mask) == FLAGS.max_seq_length assert len(segment_ids) == FLAGS.max_seq_length masked_lm_positions = list(instance.masked_lm_positions) masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels) masked_lm_weights = [1.0] * len(masked_lm_ids) while len(masked_lm_positions) < FLAGS.max_predictions_per_seq: masked_lm_positions.append(0) masked_lm_ids.append(0) masked_lm_weights.append(0.0) next_sentence_label = 1 if instance.is_random_next else 0 features = collections.OrderedDict() features['input_ids'] = create_int_feature(input_ids) features['input_mask'] = create_int_feature(input_mask) features['segment_ids'] = create_int_feature(segment_ids) features['masked_lm_positions'] = create_int_feature(masked_lm_positions) features['masked_lm_ids'] = create_int_feature(masked_lm_ids) features['masked_lm_weights'] = create_float_feature(masked_lm_weights) features['next_sentence_labels'] = create_int_feature([next_sentence_label]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writers[writer_index].write(tf_example.SerializeToString()) writer_index = (writer_index + 1) % len(writers) total_written += 1 if inst_index < 20: tf.logging.info('*** Example ***') tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in instance.tokens])) for feature_name in features.keys(): feature = features[feature_name] values = [] if feature.int64_list.value: values = feature.int64_list.value elif feature.float_list.value: values = feature.float_list.value tf.logging.info('%s: %s' % (feature_name, ' '.join([str(x) for x in values]))) for writer in writers: writer.close() tf.logging.info('Wrote %d total instances', total_written) </DeepExtract>
bert-for-task
positive
def list_nodes(self): params = {'Action': 'DescribeInstances'} <DeepExtract> nodes = [self._to_node(el) for el in self.connection.request('/', params=params).object.findall(self._fixxpath('reservationSet/item/instancesSet/item'))] </DeepExtract> return nodes
def list_nodes(self): params = {'Action': 'DescribeInstances'} nodes = [self._to_node(el) for el in self.connection.request('/', params=params).object.findall(self._fixxpath('reservationSet/item/instancesSet/item'))] return nodes
AEServmon
positive
def test_to_json(self): <DeepExtract> tc = TemporalClustering() for t in range(10): g = nx.erdos_renyi_graph(100, 0.05) coms = algorithms.louvain(g) nc = NamedClustering({i: c for (i, c) in enumerate(coms.communities)}, g, coms.method_name) tc.add_clustering(nc, t) tc = tc </DeepExtract> js = tc.to_json() self.assertIsInstance(js, str) res = json.loads(js) self.assertIsNone(res['matchings']) <DeepExtract> tc = TemporalClustering() for t in range(10): g = nx.erdos_renyi_graph(100, 0.05) coms = algorithms.louvain(g) nc = NamedClustering({i: c for (i, c) in enumerate(coms.communities)}, g, coms.method_name) tc.add_clustering(nc, t) tc = tc </DeepExtract> tc.lifecycle_polytree(lambda x, y: len(set(x) & set(y)) / len(set(x) | set(y)), True) js = tc.to_json() self.assertIsInstance(js, str) res = json.loads(js) self.assertIsNotNone(res['matchings'])
def test_to_json(self): tc = TemporalClustering() for t in range(10): g = nx.erdos_renyi_graph(100, 0.05) coms = algorithms.louvain(g) nc = NamedClustering({i: c for (i, c) in enumerate(coms.communities)}, g, coms.method_name) tc.add_clustering(nc, t) tc = tc js = tc.to_json() self.assertIsInstance(js, str) res = json.loads(js) self.assertIsNone(res['matchings']) tc = TemporalClustering() for t in range(10): g = nx.erdos_renyi_graph(100, 0.05) coms = algorithms.louvain(g) nc = NamedClustering({i: c for (i, c) in enumerate(coms.communities)}, g, coms.method_name) tc.add_clustering(nc, t) tc = tc tc.lifecycle_polytree(lambda x, y: len(set(x) & set(y)) / len(set(x) | set(y)), True) js = tc.to_json() self.assertIsInstance(js, str) res = json.loads(js) self.assertIsNotNone(res['matchings'])
cdlib
positive
def test_deserialize_valid(self): <DeepExtract> from colander import Invalid exc = Invalid(node, msg, val) typ = exc </DeepExtract> node = DummySchemaNode(typ) result = typ.deserialize(node, ('a',)) self.assertEqual(result, {'a'})
def test_deserialize_valid(self): from colander import Invalid exc = Invalid(node, msg, val) typ = exc node = DummySchemaNode(typ) result = typ.deserialize(node, ('a',)) self.assertEqual(result, {'a'})
colander
positive
def _update_chunk_length(self): if self.chunk_left is not None: return line = self._fp.fp.readline() line = line.split(b';', 1)[0] try: self.chunk_left = int(line, 16) except ValueError: <DeepExtract> if not self.closed: self._fp.close() if self._connection: self._connection.close() if not self.auto_close: io.IOBase.close(self) </DeepExtract> raise httplib.IncompleteRead(line)
def _update_chunk_length(self): if self.chunk_left is not None: return line = self._fp.fp.readline() line = line.split(b';', 1)[0] try: self.chunk_left = int(line, 16) except ValueError: if not self.closed: self._fp.close() if self._connection: self._connection.close() if not self.auto_close: io.IOBase.close(self) raise httplib.IncompleteRead(line)
BaiduPanFilesTransfers
positive
def fetch_callback(self, cookie, pending_channels, **kwargs): status = kwargs.get('status') count = kwargs.get('count') <DeepExtract> elapsed = datetime.now() - (cookie.last_edit or cookie.start) edit_to = None base = '#%s: ' % channel.name if status is FetchStatus.STARTING: edit_to = base + 'initializing...' elif status is FetchStatus.EXCEPTION: edit_to = base + 'error after %i messages.' % count if isinstance(exception, Exception): ename = type(exception).__name__ estr = str(exception) edit_to += ': %s: %s' % (ename, estr) elif status is FetchStatus.CANCELLED: edit_to = base + 'cancelled after %i messages.' % count elif status is FetchStatus.COMPLETED: edit_to = base + 'fetched %i messages.' % count elif status is FetchStatus.FETCHING: if elapsed > EDIT_TIMEDELTA: edit_to = base + '%i messages retrieved so far...' % count format_line = edit_to </DeepExtract> if format_line: rows = cookie.completed_messages + [format_line] rows.extend(['#%s: pending' % c.name for c in pending_channels]) cookie.last_edit = datetime.now() task = self.cookie_edit_task(cookie, content='\n'.join(rows)) self.bot.loop.create_task(task) if status is FetchStatus.COMPLETED: cookie.total_messages += count cookie.completed_messages.append(format_line) if not pending_channels: dest = cookie.ctx.message.channel elapsed = datetime.now() - cookie.start msg = 'Fetched a total of %i messages in %s.' % (cookie.total_messages, elapsed) self.bot.loop.create_task(self.bot.send_message(dest, msg))
def fetch_callback(self, cookie, pending_channels, **kwargs): status = kwargs.get('status') count = kwargs.get('count') elapsed = datetime.now() - (cookie.last_edit or cookie.start) edit_to = None base = '#%s: ' % channel.name if status is FetchStatus.STARTING: edit_to = base + 'initializing...' elif status is FetchStatus.EXCEPTION: edit_to = base + 'error after %i messages.' % count if isinstance(exception, Exception): ename = type(exception).__name__ estr = str(exception) edit_to += ': %s: %s' % (ename, estr) elif status is FetchStatus.CANCELLED: edit_to = base + 'cancelled after %i messages.' % count elif status is FetchStatus.COMPLETED: edit_to = base + 'fetched %i messages.' % count elif status is FetchStatus.FETCHING: if elapsed > EDIT_TIMEDELTA: edit_to = base + '%i messages retrieved so far...' % count format_line = edit_to if format_line: rows = cookie.completed_messages + [format_line] rows.extend(['#%s: pending' % c.name for c in pending_channels]) cookie.last_edit = datetime.now() task = self.cookie_edit_task(cookie, content='\n'.join(rows)) self.bot.loop.create_task(task) if status is FetchStatus.COMPLETED: cookie.total_messages += count cookie.completed_messages.append(format_line) if not pending_channels: dest = cookie.ctx.message.channel elapsed = datetime.now() - cookie.start msg = 'Fetched a total of %i messages in %s.' % (cookie.total_messages, elapsed) self.bot.loop.create_task(self.bot.send_message(dest, msg))
calebj-cogs
positive
def annotate_phylotree_parents(tree): <DeepExtract> parents = {} for clade in tree.find_clades(order='level'): for child in clade: parents[child] = clade parents_by_node = parents </DeepExtract> for node in tree.find_clades(): if node == tree.root: node.parent = None else: node.parent = parents_by_node[node] return tree
def annotate_phylotree_parents(tree): parents = {} for clade in tree.find_clades(order='level'): for child in clade: parents[child] = clade parents_by_node = parents for node in tree.find_clades(): if node == tree.root: node.parent = None else: node.parent = parents_by_node[node] return tree
augur
positive
def test_scenario_outline2_fr_from_string(): """ Language: FR -> Scenario.from_string, with scenario outline, second case """ <DeepExtract> feature_str = u'\n Fonctionnalité: parse_scenario\n ' feature_str += OUTLINED_SCENARIO2 feature = Feature.from_string(feature_str, language='fr') scenario = feature.scenarios[0] </DeepExtract> assert_equal(scenario.name, 'Ajouter 2 nombres') assert_equal(scenario.outlines, ({u'input_1': u'20', u'input_2': u'30', u'bouton': u'add', u'output': u'50'}, {u'input_1': u'2', u'input_2': u'5', u'bouton': u'add', u'output': u'7'}, {u'input_1': u'0', u'input_2': u'40', u'bouton': u'add', u'output': u'40'}))
def test_scenario_outline2_fr_from_string(): """ Language: FR -> Scenario.from_string, with scenario outline, second case """ feature_str = u'\n Fonctionnalité: parse_scenario\n ' feature_str += OUTLINED_SCENARIO2 feature = Feature.from_string(feature_str, language='fr') scenario = feature.scenarios[0] assert_equal(scenario.name, 'Ajouter 2 nombres') assert_equal(scenario.outlines, ({u'input_1': u'20', u'input_2': u'30', u'bouton': u'add', u'output': u'50'}, {u'input_1': u'2', u'input_2': u'5', u'bouton': u'add', u'output': u'7'}, {u'input_1': u'0', u'input_2': u'40', u'bouton': u'add', u'output': u'40'}))
aloe
positive
def __consumers_get_any_guest(self): self.guest_lock.acquire() try: least_conn = None <DeepExtract> usable_guests = [g for g in self.guests if g['state'] in ['using', 'used']] </DeepExtract> for guest in usable_guests: if not least_conn or guest['connected'] < least_conn['connected']: least_conn = guest return least_conn finally: self.guest_lock.release()
def __consumers_get_any_guest(self): self.guest_lock.acquire() try: least_conn = None usable_guests = [g for g in self.guests if g['state'] in ['using', 'used']] for guest in usable_guests: if not least_conn or guest['connected'] < least_conn['connected']: least_conn = guest return least_conn finally: self.guest_lock.release()
cowrie
positive
def forward(self, base_target_emb, input_from_dec, encoder_out_top, encoder_out_combine): """ Args: base_target_emb: target emb tensor input_from_dec: output of decode conv encoder_out_top: the key matrix for calculation of attetion weight, which is the top output of encode conv encoder_out_combine: the value matrix for the attention-weighted sum, which is the combination of base emb and top output of encode """ (batch, _, height, _) = base_target_emb.size() (batch_, _, height_, _) = input_from_dec.size() aeq(batch, batch_) aeq(height, height_) (enc_batch, _, enc_height) = encoder_out_top.size() (enc_batch_, _, enc_height_) = encoder_out_combine.size() aeq(enc_batch, enc_batch_) aeq(enc_height, enc_height_) <DeepExtract> (batch, hidden_size, length, _) = input_from_dec.size() h = self.linear_in(torch.transpose(input_from_dec, 1, 2).contiguous().view(batch * length, hidden_size)) preatt = torch.transpose(h.view(batch, length, hidden_size, 1), 1, 2) </DeepExtract> target = (base_target_emb + preatt) * SCALE_WEIGHT target = torch.squeeze(target, 3) target = torch.transpose(target, 1, 2) pre_attn = torch.bmm(target, encoder_out_top) if self.mask is not None: pre_attn.data.masked_fill_(self.mask, -float('inf')) attn = F.softmax(pre_attn, dim=2) context_output = torch.bmm(attn, torch.transpose(encoder_out_combine, 1, 2)) context_output = torch.transpose(torch.unsqueeze(context_output, 3), 1, 2) return (context_output, attn)
def forward(self, base_target_emb, input_from_dec, encoder_out_top, encoder_out_combine): """ Args: base_target_emb: target emb tensor input_from_dec: output of decode conv encoder_out_top: the key matrix for calculation of attetion weight, which is the top output of encode conv encoder_out_combine: the value matrix for the attention-weighted sum, which is the combination of base emb and top output of encode """ (batch, _, height, _) = base_target_emb.size() (batch_, _, height_, _) = input_from_dec.size() aeq(batch, batch_) aeq(height, height_) (enc_batch, _, enc_height) = encoder_out_top.size() (enc_batch_, _, enc_height_) = encoder_out_combine.size() aeq(enc_batch, enc_batch_) aeq(enc_height, enc_height_) (batch, hidden_size, length, _) = input_from_dec.size() h = self.linear_in(torch.transpose(input_from_dec, 1, 2).contiguous().view(batch * length, hidden_size)) preatt = torch.transpose(h.view(batch, length, hidden_size, 1), 1, 2) target = (base_target_emb + preatt) * SCALE_WEIGHT target = torch.squeeze(target, 3) target = torch.transpose(target, 1, 2) pre_attn = torch.bmm(target, encoder_out_top) if self.mask is not None: pre_attn.data.masked_fill_(self.mask, -float('inf')) attn = F.softmax(pre_attn, dim=2) context_output = torch.bmm(attn, torch.transpose(encoder_out_combine, 1, 2)) context_output = torch.transpose(torch.unsqueeze(context_output, 3), 1, 2) return (context_output, attn)
disambiguate
positive
def test_game_state_serialized_era_is_from_worksheet(self): <DeepExtract> if world_map is None: world_map = EmptyMap() world_map.grid = generate_grid() if avatar_manager is None: avatar_manager = DummyAvatarManager() avatar = DummyAvatar(1) other_avatar = DummyAvatar(2) other_avatar.marked = True avatar_manager.avatars_by_id[1] = avatar avatar_manager.avatars_by_id[2] = other_avatar game_state = GameState(world_map, avatar_manager) (game_state, _, _, _) = (game_state, avatar, world_map, avatar_manager) </DeepExtract> game_state.worksheet = WorksheetData(worksheet_id=1, era='test era', number_of_obstacle_textures=1, map_updaters=[]) assert game_state.serialize()['era'] == 'test era'
def test_game_state_serialized_era_is_from_worksheet(self): if world_map is None: world_map = EmptyMap() world_map.grid = generate_grid() if avatar_manager is None: avatar_manager = DummyAvatarManager() avatar = DummyAvatar(1) other_avatar = DummyAvatar(2) other_avatar.marked = True avatar_manager.avatars_by_id[1] = avatar avatar_manager.avatars_by_id[2] = other_avatar game_state = GameState(world_map, avatar_manager) (game_state, _, _, _) = (game_state, avatar, world_map, avatar_manager) game_state.worksheet = WorksheetData(worksheet_id=1, era='test era', number_of_obstacle_textures=1, map_updaters=[]) assert game_state.serialize()['era'] == 'test era'
aimmo
positive
def __init__(self, block, layers, num_classes=1000): self.inplanes = 64 super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True) <DeepExtract> downsample = None if stride != 1 or self.inplanes != 64 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 64 * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(64 * block.expansion)) layers = [] layers.append(block(self.inplanes, 64, stride, downsample)) self.inplanes = 64 * block.expansion for i in range(1, layers[0]): layers.append(block(self.inplanes, 64)) self.layer1 = nn.Sequential(*layers) </DeepExtract> <DeepExtract> downsample = None if 2 != 1 or self.inplanes != 128 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 128 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(128 * block.expansion)) layers = [] layers.append(block(self.inplanes, 128, 2, downsample)) self.inplanes = 128 * block.expansion for i in range(1, layers[1]): layers.append(block(self.inplanes, 128)) self.layer2 = nn.Sequential(*layers) </DeepExtract> <DeepExtract> downsample = None if 2 != 1 or self.inplanes != 256 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(256 * block.expansion)) layers = [] layers.append(block(self.inplanes, 256, 2, downsample)) self.inplanes = 256 * block.expansion for i in range(1, layers[2]): layers.append(block(self.inplanes, 256)) self.layer3 = nn.Sequential(*layers) </DeepExtract> <DeepExtract> downsample = None if 2 != 1 or self.inplanes != 512 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(512 * block.expansion)) layers = [] layers.append(block(self.inplanes, 512, 2, downsample)) self.inplanes = 512 * block.expansion for i in range(1, layers[3]): layers.append(block(self.inplanes, 512)) self.layer4 = nn.Sequential(*layers) </DeepExtract> self.avgpool = nn.AvgPool2d(7) self.fc = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_()
def __init__(self, block, layers, num_classes=1000): self.inplanes = 64 super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True) downsample = None if stride != 1 or self.inplanes != 64 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 64 * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(64 * block.expansion)) layers = [] layers.append(block(self.inplanes, 64, stride, downsample)) self.inplanes = 64 * block.expansion for i in range(1, layers[0]): layers.append(block(self.inplanes, 64)) self.layer1 = nn.Sequential(*layers) downsample = None if 2 != 1 or self.inplanes != 128 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 128 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(128 * block.expansion)) layers = [] layers.append(block(self.inplanes, 128, 2, downsample)) self.inplanes = 128 * block.expansion for i in range(1, layers[1]): layers.append(block(self.inplanes, 128)) self.layer2 = nn.Sequential(*layers) downsample = None if 2 != 1 or self.inplanes != 256 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(256 * block.expansion)) layers = [] layers.append(block(self.inplanes, 256, 2, downsample)) self.inplanes = 256 * block.expansion for i in range(1, layers[2]): layers.append(block(self.inplanes, 256)) self.layer3 = nn.Sequential(*layers) downsample = None if 2 != 1 or self.inplanes != 512 * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=2, bias=False), nn.BatchNorm2d(512 * block.expansion)) layers = [] layers.append(block(self.inplanes, 512, 2, downsample)) self.inplanes = 512 * block.expansion for i in range(1, layers[3]): layers.append(block(self.inplanes, 512)) self.layer4 = nn.Sequential(*layers) self.avgpool = nn.AvgPool2d(7) self.fc = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2.0 / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_()
DA_Detection
positive
def make_ciphertext_ballot(object_id: str, style_id: str, manifest_hash: ElementModQ, code_seed: Optional[ElementModQ], contests: List[CiphertextBallotContest], nonce: Optional[ElementModQ]=None, timestamp: Optional[int]=None, ballot_code: Optional[ElementModQ]=None) -> CiphertextBallot: """ Makes a `CiphertextBallot`, initially in the state where it's neither been cast nor spoiled. :param object_id: the object_id of this specific ballot :param style_id: The `object_id` of the `BallotStyle` in the `Election` Manifest :param manifest_hash: Hash of the election manifest :param crypto_base_hash: Hash of the cryptographic election context :param contests: List of contests for this ballot :param timestamp: Timestamp at which the ballot encryption is generated in tick :param code_seed: Seed for ballot code :param nonce: optional nonce used as part of the encryption process """ if len(contests) == 0: log_warning('ciphertext ballot with no contests') <DeepExtract> contest_hashes = [contest.crypto_hash for contest in sequence_order_sort(contests)] contest_hash = hash_elems(object_id, manifest_hash, *contest_hashes) </DeepExtract> timestamp = to_ticks(datetime.now()) if timestamp is None else timestamp if code_seed is None: code_seed = manifest_hash if ballot_code is None: ballot_code = get_ballot_code(code_seed, timestamp, contest_hash) return CiphertextBallot(object_id, style_id, manifest_hash, code_seed, contests, ballot_code, timestamp, contest_hash, nonce)
def make_ciphertext_ballot(object_id: str, style_id: str, manifest_hash: ElementModQ, code_seed: Optional[ElementModQ], contests: List[CiphertextBallotContest], nonce: Optional[ElementModQ]=None, timestamp: Optional[int]=None, ballot_code: Optional[ElementModQ]=None) -> CiphertextBallot: """ Makes a `CiphertextBallot`, initially in the state where it's neither been cast nor spoiled. :param object_id: the object_id of this specific ballot :param style_id: The `object_id` of the `BallotStyle` in the `Election` Manifest :param manifest_hash: Hash of the election manifest :param crypto_base_hash: Hash of the cryptographic election context :param contests: List of contests for this ballot :param timestamp: Timestamp at which the ballot encryption is generated in tick :param code_seed: Seed for ballot code :param nonce: optional nonce used as part of the encryption process """ if len(contests) == 0: log_warning('ciphertext ballot with no contests') contest_hashes = [contest.crypto_hash for contest in sequence_order_sort(contests)] contest_hash = hash_elems(object_id, manifest_hash, *contest_hashes) timestamp = to_ticks(datetime.now()) if timestamp is None else timestamp if code_seed is None: code_seed = manifest_hash if ballot_code is None: ballot_code = get_ballot_code(code_seed, timestamp, contest_hash) return CiphertextBallot(object_id, style_id, manifest_hash, code_seed, contests, ballot_code, timestamp, contest_hash, nonce)
electionguard-python
positive
def _init_default_cluster(initdb_options=None): global _default_cluster if _default_cluster is None: pg_host = os.environ.get('PGHOST') if pg_host: _default_cluster = pg_cluster.RunningCluster() else: <DeepExtract> cluster = pg_cluster.TempCluster(**{}) cluster.init(**_get_initdb_options(initdb_options) or {}) cluster.trust_local_connections() atexit.register(_shutdown_cluster, cluster) _default_cluster = cluster </DeepExtract> return _default_cluster
def _init_default_cluster(initdb_options=None): global _default_cluster if _default_cluster is None: pg_host = os.environ.get('PGHOST') if pg_host: _default_cluster = pg_cluster.RunningCluster() else: cluster = pg_cluster.TempCluster(**{}) cluster.init(**_get_initdb_options(initdb_options) or {}) cluster.trust_local_connections() atexit.register(_shutdown_cluster, cluster) _default_cluster = cluster return _default_cluster
asyncpg
positive
def test_multi(self): """Translate images using StarGAN trained on multiple datasets.""" <DeepExtract> print('Loading the trained models from step {}...'.format(self.test_iters)) G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(self.test_iters)) D_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(self.test_iters)) self.load_model_weights(self.G, G_path) self.D.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage)) </DeepExtract> with torch.no_grad(): for (i, (x_real, c_org)) in enumerate(self.celeba_loader): x_real = x_real.to(self.device) <DeepExtract> if 'CelebA' == 'CelebA': hair_color_indices = [] for (i, attr_name) in enumerate(self.selected_attrs): if attr_name in ['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair']: hair_color_indices.append(i) c_trg_list = [] for i in range(self.c_dim): if 'CelebA' == 'CelebA': c_trg = c_org.clone() if i in hair_color_indices: c_trg[:, i] = 1 for j in hair_color_indices: if j != i: c_trg[:, j] = 0 else: c_trg[:, i] = c_trg[:, i] == 0 elif 'CelebA' == 'RaFD': c_trg = self.label2onehot(torch.ones(c_org.size(0)) * i, self.c_dim) c_trg_list.append(c_trg.to(self.device)) c_celeba_list = c_trg_list </DeepExtract> <DeepExtract> if 'RaFD' == 'CelebA': hair_color_indices = [] for (i, attr_name) in enumerate(selected_attrs): if attr_name in ['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair']: hair_color_indices.append(i) c_trg_list = [] for i in range(self.c2_dim): if 'RaFD' == 'CelebA': c_trg = c_org.clone() if i in hair_color_indices: c_trg[:, i] = 1 for j in hair_color_indices: if j != i: c_trg[:, j] = 0 else: c_trg[:, i] = c_trg[:, i] == 0 elif 'RaFD' == 'RaFD': c_trg = self.label2onehot(torch.ones(c_org.size(0)) * i, self.c2_dim) c_trg_list.append(c_trg.to(self.device)) c_rafd_list = c_trg_list </DeepExtract> zero_celeba = torch.zeros(x_real.size(0), self.c_dim).to(self.device) zero_rafd = torch.zeros(x_real.size(0), self.c2_dim).to(self.device) mask_celeba = self.label2onehot(torch.zeros(x_real.size(0)), 2).to(self.device) mask_rafd = self.label2onehot(torch.ones(x_real.size(0)), 2).to(self.device) x_fake_list = [x_real] for c_celeba in c_celeba_list: c_trg = torch.cat([c_celeba, zero_rafd, mask_celeba], dim=1) x_fake_list.append(self.G(x_real, c_trg)) for c_rafd in c_rafd_list: c_trg = torch.cat([zero_celeba, c_rafd, mask_rafd], dim=1) x_fake_list.append(self.G(x_real, c_trg)) x_concat = torch.cat(x_fake_list, dim=3) result_path = os.path.join(self.result_dir, '{}-images.jpg'.format(i + 1)) save_image(self.denorm(x_concat.data.cpu()), result_path, nrow=1, padding=0) print('Saved real and fake images into {}...'.format(result_path))
def test_multi(self): """Translate images using StarGAN trained on multiple datasets.""" print('Loading the trained models from step {}...'.format(self.test_iters)) G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(self.test_iters)) D_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(self.test_iters)) self.load_model_weights(self.G, G_path) self.D.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage)) with torch.no_grad(): for (i, (x_real, c_org)) in enumerate(self.celeba_loader): x_real = x_real.to(self.device) if 'CelebA' == 'CelebA': hair_color_indices = [] for (i, attr_name) in enumerate(self.selected_attrs): if attr_name in ['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair']: hair_color_indices.append(i) c_trg_list = [] for i in range(self.c_dim): if 'CelebA' == 'CelebA': c_trg = c_org.clone() if i in hair_color_indices: c_trg[:, i] = 1 for j in hair_color_indices: if j != i: c_trg[:, j] = 0 else: c_trg[:, i] = c_trg[:, i] == 0 elif 'CelebA' == 'RaFD': c_trg = self.label2onehot(torch.ones(c_org.size(0)) * i, self.c_dim) c_trg_list.append(c_trg.to(self.device)) c_celeba_list = c_trg_list if 'RaFD' == 'CelebA': hair_color_indices = [] for (i, attr_name) in enumerate(selected_attrs): if attr_name in ['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair']: hair_color_indices.append(i) c_trg_list = [] for i in range(self.c2_dim): if 'RaFD' == 'CelebA': c_trg = c_org.clone() if i in hair_color_indices: c_trg[:, i] = 1 for j in hair_color_indices: if j != i: c_trg[:, j] = 0 else: c_trg[:, i] = c_trg[:, i] == 0 elif 'RaFD' == 'RaFD': c_trg = self.label2onehot(torch.ones(c_org.size(0)) * i, self.c2_dim) c_trg_list.append(c_trg.to(self.device)) c_rafd_list = c_trg_list zero_celeba = torch.zeros(x_real.size(0), self.c_dim).to(self.device) zero_rafd = torch.zeros(x_real.size(0), self.c2_dim).to(self.device) mask_celeba = self.label2onehot(torch.zeros(x_real.size(0)), 2).to(self.device) mask_rafd = self.label2onehot(torch.ones(x_real.size(0)), 2).to(self.device) x_fake_list = [x_real] for c_celeba in c_celeba_list: c_trg = torch.cat([c_celeba, zero_rafd, mask_celeba], dim=1) x_fake_list.append(self.G(x_real, c_trg)) for c_rafd in c_rafd_list: c_trg = torch.cat([zero_celeba, c_rafd, mask_rafd], dim=1) x_fake_list.append(self.G(x_real, c_trg)) x_concat = torch.cat(x_fake_list, dim=3) result_path = os.path.join(self.result_dir, '{}-images.jpg'.format(i + 1)) save_image(self.denorm(x_concat.data.cpu()), result_path, nrow=1, padding=0) print('Saved real and fake images into {}...'.format(result_path))
disrupting-deepfakes
positive
def EDA_classify_features(train, target, idcols, nlp_char_limit=50): test_labeler = defaultdict(list) if isinstance(target, str): features = [x for x in list(train) if x not in [target] + idcols] else: features = [x for x in list(train) if x not in target + idcols] <DeepExtract> catcols = train[features].select_dtypes(include='object').columns.tolist() + train[features].select_dtypes(include='category').columns.tolist() cats = copy.deepcopy(catcols) nlpcols = [] for each_cat in cats: try: train[features][[each_cat]] = train[features][[each_cat]].fillna('missing') if train[features][each_cat].map(len).max() >= nlp_char_limit: nlpcols.append(each_cat) catcols.remove(each_cat) except: continue intcols = train[features].select_dtypes(include='integer').columns.tolist() int_cats = [x for x in intcols if train[features][x].nunique() <= 30 and x not in idcols] intcols = left_subtract(intcols, int_cats) floatcols = train[features].select_dtypes(include='float').columns.tolist() (cats, int_cats, ints, floats, nlps) = (catcols, int_cats, intcols, floatcols, nlpcols) </DeepExtract> numeric_features = ints + floats categoricals_features = copy.deepcopy(cats) nlp_features = copy.deepcopy(nlps) test_labeler['categoricals_features'] = categoricals_features test_labeler['numeric_features'] = numeric_features test_labeler['nlp_features'] = nlp_features return (cats, int_cats, ints, floats, nlps)
def EDA_classify_features(train, target, idcols, nlp_char_limit=50): test_labeler = defaultdict(list) if isinstance(target, str): features = [x for x in list(train) if x not in [target] + idcols] else: features = [x for x in list(train) if x not in target + idcols] catcols = train[features].select_dtypes(include='object').columns.tolist() + train[features].select_dtypes(include='category').columns.tolist() cats = copy.deepcopy(catcols) nlpcols = [] for each_cat in cats: try: train[features][[each_cat]] = train[features][[each_cat]].fillna('missing') if train[features][each_cat].map(len).max() >= nlp_char_limit: nlpcols.append(each_cat) catcols.remove(each_cat) except: continue intcols = train[features].select_dtypes(include='integer').columns.tolist() int_cats = [x for x in intcols if train[features][x].nunique() <= 30 and x not in idcols] intcols = left_subtract(intcols, int_cats) floatcols = train[features].select_dtypes(include='float').columns.tolist() (cats, int_cats, ints, floats, nlps) = (catcols, int_cats, intcols, floatcols, nlpcols) numeric_features = ints + floats categoricals_features = copy.deepcopy(cats) nlp_features = copy.deepcopy(nlps) test_labeler['categoricals_features'] = categoricals_features test_labeler['numeric_features'] = numeric_features test_labeler['nlp_features'] = nlp_features return (cats, int_cats, ints, floats, nlps)
deep_autoviml
positive
def train_model_part(conf, train_part='filterbank', pretrained_filterbank=None): <DeepExtract> train_set = WhamDataset(conf['data']['train_dir'], conf['data']['task'], sample_rate=conf['data']['sample_rate'], nondefault_nsrc=conf['data']['nondefault_nsrc'], normalize_audio=True) val_set = WhamDataset(conf['data']['valid_dir'], conf['data']['task'], sample_rate=conf['data']['sample_rate'], nondefault_nsrc=conf['data']['nondefault_nsrc'], normalize_audio=True) if train_part not in ['filterbank', 'separator']: raise ValueError('Part to train: {} is not available.'.format(train_part)) train_loader = DataLoader(train_set, shuffle=True, drop_last=True, batch_size=conf[train_part + '_training'][train_part[0] + '_batch_size'], num_workers=conf[train_part + '_training'][train_part[0] + '_num_workers']) val_loader = DataLoader(val_set, shuffle=False, drop_last=True, batch_size=conf[train_part + '_training'][train_part[0] + '_batch_size'], num_workers=conf[train_part + '_training'][train_part[0] + '_num_workers']) conf['masknet'].update({'n_src': train_set.n_src}) (train_loader, val_loader) = (train_loader, val_loader) </DeepExtract> (model, optimizer) = make_model_and_optimizer(conf, model_part=train_part, pretrained_filterbank=pretrained_filterbank) scheduler = None if conf[train_part + '_training'][train_part[0] + '_half_lr']: scheduler = ReduceLROnPlateau(optimizer=optimizer, factor=0.5, patience=5) (exp_dir, checkpoint_dir) = get_encoded_paths(conf, train_part) os.makedirs(exp_dir, exist_ok=True) conf_path = os.path.join(exp_dir, 'conf.yml') with open(conf_path, 'w') as outfile: yaml.safe_dump(conf, outfile) loss_func = PITLossWrapper(PairwiseNegSDR('sisdr', zero_mean=False), pit_from='pw_mtx') system = SystemTwoStep(model=model, loss_func=loss_func, optimizer=optimizer, train_loader=train_loader, val_loader=val_loader, scheduler=scheduler, config=conf, module=train_part) callbacks = [] checkpoint_dir = os.path.join(exp_dir, 'checkpoints/') checkpoint = ModelCheckpoint(checkpoint_dir, monitor='val_loss', mode='min', save_top_k=1, verbose=True) callbacks.append(checkpoint) if conf[train_part + '_training'][train_part[0] + '_early_stop']: callbacks.append(EarlyStopping(monitor='val_loss', patience=30, verbose=True)) trainer = pl.Trainer(max_epochs=conf[train_part + '_training'][train_part[0] + '_epochs'], callbacks=callbacks, default_root_dir=exp_dir, accelerator='gpu' if torch.cuda.is_available() else 'cpu', strategy='ddp', devices='auto', limit_train_batches=1.0, gradient_clip_val=5.0) trainer.fit(system) with open(os.path.join(checkpoint_dir, 'best_k_models.json'), 'w') as file: json.dump(checkpoint.best_k_models, file, indent=0)
def train_model_part(conf, train_part='filterbank', pretrained_filterbank=None): train_set = WhamDataset(conf['data']['train_dir'], conf['data']['task'], sample_rate=conf['data']['sample_rate'], nondefault_nsrc=conf['data']['nondefault_nsrc'], normalize_audio=True) val_set = WhamDataset(conf['data']['valid_dir'], conf['data']['task'], sample_rate=conf['data']['sample_rate'], nondefault_nsrc=conf['data']['nondefault_nsrc'], normalize_audio=True) if train_part not in ['filterbank', 'separator']: raise ValueError('Part to train: {} is not available.'.format(train_part)) train_loader = DataLoader(train_set, shuffle=True, drop_last=True, batch_size=conf[train_part + '_training'][train_part[0] + '_batch_size'], num_workers=conf[train_part + '_training'][train_part[0] + '_num_workers']) val_loader = DataLoader(val_set, shuffle=False, drop_last=True, batch_size=conf[train_part + '_training'][train_part[0] + '_batch_size'], num_workers=conf[train_part + '_training'][train_part[0] + '_num_workers']) conf['masknet'].update({'n_src': train_set.n_src}) (train_loader, val_loader) = (train_loader, val_loader) (model, optimizer) = make_model_and_optimizer(conf, model_part=train_part, pretrained_filterbank=pretrained_filterbank) scheduler = None if conf[train_part + '_training'][train_part[0] + '_half_lr']: scheduler = ReduceLROnPlateau(optimizer=optimizer, factor=0.5, patience=5) (exp_dir, checkpoint_dir) = get_encoded_paths(conf, train_part) os.makedirs(exp_dir, exist_ok=True) conf_path = os.path.join(exp_dir, 'conf.yml') with open(conf_path, 'w') as outfile: yaml.safe_dump(conf, outfile) loss_func = PITLossWrapper(PairwiseNegSDR('sisdr', zero_mean=False), pit_from='pw_mtx') system = SystemTwoStep(model=model, loss_func=loss_func, optimizer=optimizer, train_loader=train_loader, val_loader=val_loader, scheduler=scheduler, config=conf, module=train_part) callbacks = [] checkpoint_dir = os.path.join(exp_dir, 'checkpoints/') checkpoint = ModelCheckpoint(checkpoint_dir, monitor='val_loss', mode='min', save_top_k=1, verbose=True) callbacks.append(checkpoint) if conf[train_part + '_training'][train_part[0] + '_early_stop']: callbacks.append(EarlyStopping(monitor='val_loss', patience=30, verbose=True)) trainer = pl.Trainer(max_epochs=conf[train_part + '_training'][train_part[0] + '_epochs'], callbacks=callbacks, default_root_dir=exp_dir, accelerator='gpu' if torch.cuda.is_available() else 'cpu', strategy='ddp', devices='auto', limit_train_batches=1.0, gradient_clip_val=5.0) trainer.fit(system) with open(os.path.join(checkpoint_dir, 'best_k_models.json'), 'w') as file: json.dump(checkpoint.best_k_models, file, indent=0)
asteroid
positive
def encode_to_hidden(examples): if not isinstance(examples, list): examples = [examples] batch_size = len(examples) sorted_example_ids = sorted(range(batch_size), key=lambda x: -len(examples[x].tgt)) example_old_pos_map = [-1] * batch_size sorted_examples = [examples[i] for i in sorted_example_ids] syntax_word = [e.tgt for e in sorted_examples] syntax_var = to_input_variable(syntax_word, self.vocab.tgt, training=False, cuda=self.args.cuda, batch_first=True) length = [len(e.tgt) for e in sorted_examples] <DeepExtract> (syntax_outputs, syntax_hidden) = self.syntax_encoder.forward(syntax_var, length) (syntax_output, syntax_hidden) = (syntax_outputs, syntax_hidden) </DeepExtract> sent_words = [e.src for e in sorted_examples] <DeepExtract> batch_size = len(sent_words) sent_lengths = [len(sent_word) for sent_word in sent_words] sorted_example_ids = sorted(range(batch_size), key=lambda x: -sent_lengths[x]) example_old_pos_map = [-1] * batch_size for (new_pos, old_pos) in enumerate(sorted_example_ids): example_old_pos_map[old_pos] = new_pos sorted_sent_words = [sent_words[i] for i in sorted_example_ids] sorted_sent_var = to_input_variable(sorted_sent_words, self.vocab.src, cuda=self.args.cuda, batch_first=True) if self.training and self.args.src_wd: sorted_sent_var = unk_replace(sorted_sent_var, self.step_unk_rate, self.vocab.src) sorted_sent_lengths = [len(sent_word) for sent_word in sorted_sent_words] (_, sent_hidden) = self.encoder.forward(sorted_sent_var, sorted_sent_lengths) hidden = sent_hidden[:, example_old_pos_map, :] sentence_hidden = hidden </DeepExtract> tgt_var = to_input_variable(sent_words, self.vocab.src, training=False, cuda=self.args.cuda, append_boundary_sym=True, batch_first=True) for (new_pos, old_pos) in enumerate(sorted_example_ids): example_old_pos_map[old_pos] = new_pos return {'hidden': sentence_hidden, 'syn_output': syntax_output, 'syn_hidden': syntax_hidden, 'tgt_var': tgt_var, 'old_pos': example_old_pos_map}
def encode_to_hidden(examples): if not isinstance(examples, list): examples = [examples] batch_size = len(examples) sorted_example_ids = sorted(range(batch_size), key=lambda x: -len(examples[x].tgt)) example_old_pos_map = [-1] * batch_size sorted_examples = [examples[i] for i in sorted_example_ids] syntax_word = [e.tgt for e in sorted_examples] syntax_var = to_input_variable(syntax_word, self.vocab.tgt, training=False, cuda=self.args.cuda, batch_first=True) length = [len(e.tgt) for e in sorted_examples] (syntax_outputs, syntax_hidden) = self.syntax_encoder.forward(syntax_var, length) (syntax_output, syntax_hidden) = (syntax_outputs, syntax_hidden) sent_words = [e.src for e in sorted_examples] batch_size = len(sent_words) sent_lengths = [len(sent_word) for sent_word in sent_words] sorted_example_ids = sorted(range(batch_size), key=lambda x: -sent_lengths[x]) example_old_pos_map = [-1] * batch_size for (new_pos, old_pos) in enumerate(sorted_example_ids): example_old_pos_map[old_pos] = new_pos sorted_sent_words = [sent_words[i] for i in sorted_example_ids] sorted_sent_var = to_input_variable(sorted_sent_words, self.vocab.src, cuda=self.args.cuda, batch_first=True) if self.training and self.args.src_wd: sorted_sent_var = unk_replace(sorted_sent_var, self.step_unk_rate, self.vocab.src) sorted_sent_lengths = [len(sent_word) for sent_word in sorted_sent_words] (_, sent_hidden) = self.encoder.forward(sorted_sent_var, sorted_sent_lengths) hidden = sent_hidden[:, example_old_pos_map, :] sentence_hidden = hidden tgt_var = to_input_variable(sent_words, self.vocab.src, training=False, cuda=self.args.cuda, append_boundary_sym=True, batch_first=True) for (new_pos, old_pos) in enumerate(sorted_example_ids): example_old_pos_map[old_pos] = new_pos return {'hidden': sentence_hidden, 'syn_output': syntax_output, 'syn_hidden': syntax_hidden, 'tgt_var': tgt_var, 'old_pos': example_old_pos_map}
DSS-VAE
positive
def __iadd__(self, value): self._parts[-1] += value <DeepExtract> old = self new = None if not self._parts: new = Level(1) elif 0 in self._parts: new = Level((1 if not n else n for n in self._parts)) if new: msg = 'minimum level reached, reseting: {} -> {}'.format(old, new) log.warning(msg) self._parts = list(new.value) </DeepExtract> return self
def __iadd__(self, value): self._parts[-1] += value old = self new = None if not self._parts: new = Level(1) elif 0 in self._parts: new = Level((1 if not n else n for n in self._parts)) if new: msg = 'minimum level reached, reseting: {} -> {}'.format(old, new) log.warning(msg) self._parts = list(new.value) return self
doorstop
positive
def double_comprehension(varname): <DeepExtract> ls = find_lines(varname) line = ls[0] if len(ls) else None </DeepExtract> return ast.dump(ast.parse(line)).count('comprehension') == 2
def double_comprehension(varname): ls = find_lines(varname) line = ls[0] if len(ls) else None return ast.dump(ast.parse(line)).count('comprehension') == 2
coding-the-matrix
positive
def displayList(self): msgs = [] for file in glob.glob(self.maildir + '/*.*'): stats = os.stat(file) lastmod_date = time.localtime(stats[8]) date_file_tuple = (lastmod_date, file) msgs.append(date_file_tuple) if len(msgs) == 0: print('No messages.') else: msgs.sort() msgs.reverse() for x in range(len(msgs)): (date, fname) = msgs[x] frm = fname.split('/')[-1].split('-')[0] modtime = time.strftime('%m/%d/%Y %I:%M%p', date) <DeepExtract> for name in self.book.keys(): key = self.book[name]['pubkey'] if key == frm: frmName = name frmName = None </DeepExtract> if not frmName: frmName = frm print(str(x + 1) + ': ' + frmName + ' ' + modtime) return msgs
def displayList(self): msgs = [] for file in glob.glob(self.maildir + '/*.*'): stats = os.stat(file) lastmod_date = time.localtime(stats[8]) date_file_tuple = (lastmod_date, file) msgs.append(date_file_tuple) if len(msgs) == 0: print('No messages.') else: msgs.sort() msgs.reverse() for x in range(len(msgs)): (date, fname) = msgs[x] frm = fname.split('/')[-1].split('-')[0] modtime = time.strftime('%m/%d/%Y %I:%M%p', date) for name in self.book.keys(): key = self.book[name]['pubkey'] if key == frm: frmName = name frmName = None if not frmName: frmName = frm print(str(x + 1) + ': ' + frmName + ' ' + modtime) return msgs
Dust
positive
def __init__(self, t_prof, eval_env_bldr, chief_handle, evaluator_name, log_conf_interval=False): """ Args: t_prof (TrainingProfile) chief_handle (class instance or ray ActorHandle) evaluator_name (str): Name of the evaluator """ super().__init__(t_prof=t_prof) self._eval_env_bldr = eval_env_bldr self._chief_handle = chief_handle self._is_multi_stack = len(self._t_prof.eval_stack_sizes) > 1 self._log_conf_interval = log_conf_interval self._evaluator_name = evaluator_name <DeepExtract> if self._log_conf_interval: exp_names_conf = {eval_mode: [self._ray.get([self._ray.remote(self._chief_handle.create_experiment, self._t_prof.name + ' ' + eval_mode + '_stack_' + str(stack_size[0]) + ': ' + evaluator_name + ' Conf_' + bound_end) for bound_end in ['lower95', 'upper95']]) for stack_size in self._t_prof.eval_stack_sizes] for eval_mode in self._t_prof.eval_modes_of_algo} else: exp_names_conf = None exp_name_total = {eval_mode: [self._ray.get(self._ray.remote(self._chief_handle.create_experiment, self._t_prof.name + ' ' + eval_mode + '_stack_' + str(stack_size[0]) + ': ' + evaluator_name + ' Total')) for stack_size in self._t_prof.eval_stack_sizes] for eval_mode in self._t_prof.eval_modes_of_algo} (self._exp_name_total, self._exp_names_conf) = (exp_name_total, exp_names_conf) </DeepExtract> if self._is_multi_stack: self._exp_name_multi_stack = {eval_mode: self._ray.get(self._ray.remote(self._chief_handle.create_experiment, self._t_prof.name + ' ' + eval_mode + 'Multi_Stack' + ': ' + evaluator_name + ' Averaged Total')) for eval_mode in self._t_prof.eval_modes_of_algo} if self._log_conf_interval: self._exp_names_multi_stack_conf = {eval_mode: self._ray.get([self._ray.remote(self._chief_handle.create_experiment, self._t_prof.name + ' ' + eval_mode + ': ' + evaluator_name + ' Conf_' + bound_end) for bound_end in ['lower95', 'upper95']]) for eval_mode in self._t_prof.eval_modes_of_algo}
def __init__(self, t_prof, eval_env_bldr, chief_handle, evaluator_name, log_conf_interval=False): """ Args: t_prof (TrainingProfile) chief_handle (class instance or ray ActorHandle) evaluator_name (str): Name of the evaluator """ super().__init__(t_prof=t_prof) self._eval_env_bldr = eval_env_bldr self._chief_handle = chief_handle self._is_multi_stack = len(self._t_prof.eval_stack_sizes) > 1 self._log_conf_interval = log_conf_interval self._evaluator_name = evaluator_name if self._log_conf_interval: exp_names_conf = {eval_mode: [self._ray.get([self._ray.remote(self._chief_handle.create_experiment, self._t_prof.name + ' ' + eval_mode + '_stack_' + str(stack_size[0]) + ': ' + evaluator_name + ' Conf_' + bound_end) for bound_end in ['lower95', 'upper95']]) for stack_size in self._t_prof.eval_stack_sizes] for eval_mode in self._t_prof.eval_modes_of_algo} else: exp_names_conf = None exp_name_total = {eval_mode: [self._ray.get(self._ray.remote(self._chief_handle.create_experiment, self._t_prof.name + ' ' + eval_mode + '_stack_' + str(stack_size[0]) + ': ' + evaluator_name + ' Total')) for stack_size in self._t_prof.eval_stack_sizes] for eval_mode in self._t_prof.eval_modes_of_algo} (self._exp_name_total, self._exp_names_conf) = (exp_name_total, exp_names_conf) if self._is_multi_stack: self._exp_name_multi_stack = {eval_mode: self._ray.get(self._ray.remote(self._chief_handle.create_experiment, self._t_prof.name + ' ' + eval_mode + 'Multi_Stack' + ': ' + evaluator_name + ' Averaged Total')) for eval_mode in self._t_prof.eval_modes_of_algo} if self._log_conf_interval: self._exp_names_multi_stack_conf = {eval_mode: self._ray.get([self._ray.remote(self._chief_handle.create_experiment, self._t_prof.name + ' ' + eval_mode + ': ' + evaluator_name + ' Conf_' + bound_end) for bound_end in ['lower95', 'upper95']]) for eval_mode in self._t_prof.eval_modes_of_algo}
DREAM
positive
def fill_with_agent_policy(self, agent): if not self._chance_filled: <DeepExtract> assert self._tree.root.strategy is None if self._tree.root.is_terminal: return if self._tree.root.p_id_acting_next == self._tree.CHANCE_ID: game_round = self._tree.root.children[0].env_state[EnvDictIdxs.current_round] n_children = len(self._tree.root.children) assert n_children == self._env_bldr.lut_holder.DICT_LUT_N_BOARDS[game_round] self._tree.root.strategy = np.zeros(shape=(self._env_bldr.rules.RANGE_SIZE, n_children), dtype=np.float32) for c_id in range(n_children): mask = PokerRange.get_possible_range_idxs(rules=self._env_bldr.rules, lut_holder=self._env_bldr.lut_holder, board_2d=self._tree.root.children[c_id].env_state[EnvDictIdxs.board_2d]) self._tree.root.strategy[mask, c_id] = 1.0 / (self._env_bldr.rules.N_CARDS_IN_DECK - 2) for c in self._tree.root.children: self._fill_chance_node_strategy(node=c) </DeepExtract> self._chance_filled = True <DeepExtract> if self._tree.root is not self._tree.root and self._tree.root.p_id_acted_last is not self._tree.CHANCE_ID: assert self._tree.root.parent.strategy.shape == (self._env_bldr.rules.RANGE_SIZE, len(self._tree.root.parent.children)) assert np.all(np.abs(np.sum(self._tree.root.parent.strategy, axis=1) - 1) < 0.001) if self._tree.root.is_terminal: return if isinstance(self._tree.root, ChanceNode) or (isinstance(self._tree.root, PlayerActionNode) and (not self._tree.root.is_terminal) and (self._tree.root.p_id_acting_next != self._tree.CHANCE_ID)): agent.set_to_public_tree_node_state(node=self._tree.root) assert self._tree.root.p_id_acting_next == agent._internal_env_wrapper.env.current_player.seat_id, self._tree.root.p_id_acting_next agent_strat = agent.get_a_probs_for_each_hand() self._tree.root.strategy = agent_strat[:, self._tree.root.allowed_actions] for c in self._tree.root.children: self._fill_with_agent_policy(node=c, agent=agent) </DeepExtract> <DeepExtract> self._update_reach_probs(node=self._tree.root) </DeepExtract>
def fill_with_agent_policy(self, agent): if not self._chance_filled: assert self._tree.root.strategy is None if self._tree.root.is_terminal: return if self._tree.root.p_id_acting_next == self._tree.CHANCE_ID: game_round = self._tree.root.children[0].env_state[EnvDictIdxs.current_round] n_children = len(self._tree.root.children) assert n_children == self._env_bldr.lut_holder.DICT_LUT_N_BOARDS[game_round] self._tree.root.strategy = np.zeros(shape=(self._env_bldr.rules.RANGE_SIZE, n_children), dtype=np.float32) for c_id in range(n_children): mask = PokerRange.get_possible_range_idxs(rules=self._env_bldr.rules, lut_holder=self._env_bldr.lut_holder, board_2d=self._tree.root.children[c_id].env_state[EnvDictIdxs.board_2d]) self._tree.root.strategy[mask, c_id] = 1.0 / (self._env_bldr.rules.N_CARDS_IN_DECK - 2) for c in self._tree.root.children: self._fill_chance_node_strategy(node=c) self._chance_filled = True if self._tree.root is not self._tree.root and self._tree.root.p_id_acted_last is not self._tree.CHANCE_ID: assert self._tree.root.parent.strategy.shape == (self._env_bldr.rules.RANGE_SIZE, len(self._tree.root.parent.children)) assert np.all(np.abs(np.sum(self._tree.root.parent.strategy, axis=1) - 1) < 0.001) if self._tree.root.is_terminal: return if isinstance(self._tree.root, ChanceNode) or (isinstance(self._tree.root, PlayerActionNode) and (not self._tree.root.is_terminal) and (self._tree.root.p_id_acting_next != self._tree.CHANCE_ID)): agent.set_to_public_tree_node_state(node=self._tree.root) assert self._tree.root.p_id_acting_next == agent._internal_env_wrapper.env.current_player.seat_id, self._tree.root.p_id_acting_next agent_strat = agent.get_a_probs_for_each_hand() self._tree.root.strategy = agent_strat[:, self._tree.root.allowed_actions] for c in self._tree.root.children: self._fill_with_agent_policy(node=c, agent=agent) self._update_reach_probs(node=self._tree.root)
DREAM
positive
def cookie_is_encoded(data): """ Return True if the argument looks like a encoded cookie.""" <DeepExtract> text = 'Warning: Use of deprecated feature or API. (Deprecated in Bottle-%d.%d)\nCause: %s\nFix: %s\n' % (0, 13, 'cookie_is_encoded() will be removed soon.', 'Do not use this API directly.') if DEBUG == 'strict': raise DeprecationWarning(text) warnings.warn(text, DeprecationWarning, stacklevel=3) return DeprecationWarning(text) </DeepExtract> return bool(data.startswith(tob('!')) and tob('?') in data)
def cookie_is_encoded(data): """ Return True if the argument looks like a encoded cookie.""" text = 'Warning: Use of deprecated feature or API. (Deprecated in Bottle-%d.%d)\nCause: %s\nFix: %s\n' % (0, 13, 'cookie_is_encoded() will be removed soon.', 'Do not use this API directly.') if DEBUG == 'strict': raise DeprecationWarning(text) warnings.warn(text, DeprecationWarning, stacklevel=3) return DeprecationWarning(text) return bool(data.startswith(tob('!')) and tob('?') in data)
DrRepair
positive
def run_sstableverify_process(self, keyspace, cf, options=None): <DeepExtract> if self.__install_dir is None: cdir = self.cluster.get_install_dir() else: common.validate_install_dir(self.__install_dir) cdir = self.__install_dir </DeepExtract> sstableverify = common.join_bin(cdir, 'bin', 'sstableverify') <DeepExtract> update_conf = not self.__conf_updated if update_conf: self.__conf_updated = True env = common.make_cassandra_env(self.get_install_dir(), self.get_path(), update_conf) env = common.update_java_version(jvm_version=None, install_dir=self.get_install_dir(), cassandra_version=self.cluster.cassandra_version(), env=env, info_message=self.name) for (key, value) in self.__environment_variables.items(): env[key] = value env = env </DeepExtract> cmd = [sstableverify, keyspace, cf] if options is not None: cmd[1:1] = options return subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
def run_sstableverify_process(self, keyspace, cf, options=None): if self.__install_dir is None: cdir = self.cluster.get_install_dir() else: common.validate_install_dir(self.__install_dir) cdir = self.__install_dir sstableverify = common.join_bin(cdir, 'bin', 'sstableverify') update_conf = not self.__conf_updated if update_conf: self.__conf_updated = True env = common.make_cassandra_env(self.get_install_dir(), self.get_path(), update_conf) env = common.update_java_version(jvm_version=None, install_dir=self.get_install_dir(), cassandra_version=self.cluster.cassandra_version(), env=env, info_message=self.name) for (key, value) in self.__environment_variables.items(): env[key] = value env = env cmd = [sstableverify, keyspace, cf] if options is not None: cmd[1:1] = options return subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
ccm
positive
def childStateChanged(self, child, newState): if self.checkState(0) == Qt.Unchecked: QtGui.QTreeWidgetItem.setData(child, 0, Qt.CheckStateRole, newState) elif self.checkState(0) == Qt.PartiallyChecked: pass elif self.checkState(0) == Qt.Checked: QtGui.QTreeWidgetItem.setData(child, 0, Qt.CheckStateRole, newState) if self.noneSelected(): <DeepExtract> if self.lg: self.lg.delete() self.lg = None if self.allSelected(): self.setAllState(Qt.Unchecked) QtGui.QTreeWidgetItem.setData(self, 0, Qt.CheckStateRole, Qt.Unchecked) QtGui.QTreeWidgetItem.setData(self, 1, Qt.DisplayRole, 'Off') self.treeWidget().sig_logStatus.emit(self.name, -1) self.fm = None </DeepExtract> elif self.allSelected(): <DeepExtract> self.setAllState(Qt.PartiallyChecked, activeOnly=not self.noneSelected()) QtGui.QTreeWidgetItem.setData(self, 0, Qt.CheckStateRole, Qt.PartiallyChecked) QtGui.QTreeWidgetItem.setData(self, 1, Qt.DisplayRole, 'Requested') self.makeLog() </DeepExtract> else: <DeepExtract> self.setAllState(Qt.PartiallyChecked, activeOnly=not self.noneSelected()) QtGui.QTreeWidgetItem.setData(self, 0, Qt.CheckStateRole, Qt.PartiallyChecked) QtGui.QTreeWidgetItem.setData(self, 1, Qt.DisplayRole, 'Requested') self.makeLog() </DeepExtract>
def childStateChanged(self, child, newState): if self.checkState(0) == Qt.Unchecked: QtGui.QTreeWidgetItem.setData(child, 0, Qt.CheckStateRole, newState) elif self.checkState(0) == Qt.PartiallyChecked: pass elif self.checkState(0) == Qt.Checked: QtGui.QTreeWidgetItem.setData(child, 0, Qt.CheckStateRole, newState) if self.noneSelected(): if self.lg: self.lg.delete() self.lg = None if self.allSelected(): self.setAllState(Qt.Unchecked) QtGui.QTreeWidgetItem.setData(self, 0, Qt.CheckStateRole, Qt.Unchecked) QtGui.QTreeWidgetItem.setData(self, 1, Qt.DisplayRole, 'Off') self.treeWidget().sig_logStatus.emit(self.name, -1) self.fm = None elif self.allSelected(): self.setAllState(Qt.PartiallyChecked, activeOnly=not self.noneSelected()) QtGui.QTreeWidgetItem.setData(self, 0, Qt.CheckStateRole, Qt.PartiallyChecked) QtGui.QTreeWidgetItem.setData(self, 1, Qt.DisplayRole, 'Requested') self.makeLog() else: self.setAllState(Qt.PartiallyChecked, activeOnly=not self.noneSelected()) QtGui.QTreeWidgetItem.setData(self, 0, Qt.CheckStateRole, Qt.PartiallyChecked) QtGui.QTreeWidgetItem.setData(self, 1, Qt.DisplayRole, 'Requested') self.makeLog()
crazyflieROS
positive
def encode_emb(net, length): <DeepExtract> net = tf.layers.dropout(net, rate=cfg.embedding.dropout_rate, training=is_training) </DeepExtract> net = encoder_emb(net, length) <DeepExtract> net = tf.layers.dropout(net, rate=cfg.embedding.dropout_rate, training=is_training) </DeepExtract> return net
def encode_emb(net, length): net = tf.layers.dropout(net, rate=cfg.embedding.dropout_rate, training=is_training) net = encoder_emb(net, length) net = tf.layers.dropout(net, rate=cfg.embedding.dropout_rate, training=is_training) return net
class-balanced-loss
positive
def iou(boxlist1, boxlist2, scope=None): """Computes pairwise intersection-over-union between box collections. Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding M boxes scope: name scope. Returns: a tensor with shape [N, M] representing pairwise iou scores. """ with tf.name_scope(scope, 'IOU'): <DeepExtract> with tf.name_scope(scope, 'Intersection'): (y_min1, x_min1, y_max1, x_max1) = tf.split(value=boxlist1.get(), num_or_size_splits=4, axis=1) (y_min2, x_min2, y_max2, x_max2) = tf.split(value=boxlist2.get(), num_or_size_splits=4, axis=1) all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2)) all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2)) intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin) all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2)) all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2)) intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin) intersections = intersect_heights * intersect_widths </DeepExtract> <DeepExtract> with tf.name_scope(scope, 'Area'): (y_min, x_min, y_max, x_max) = tf.split(value=boxlist1.get(), num_or_size_splits=4, axis=1) areas1 = tf.squeeze((y_max - y_min) * (x_max - x_min), [1]) </DeepExtract> <DeepExtract> with tf.name_scope(scope, 'Area'): (y_min, x_min, y_max, x_max) = tf.split(value=boxlist2.get(), num_or_size_splits=4, axis=1) areas2 = tf.squeeze((y_max - y_min) * (x_max - x_min), [1]) </DeepExtract> unions = tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections return tf.where(tf.equal(intersections, 0.0), tf.zeros_like(intersections), tf.truediv(intersections, unions))
def iou(boxlist1, boxlist2, scope=None): """Computes pairwise intersection-over-union between box collections. Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding M boxes scope: name scope. Returns: a tensor with shape [N, M] representing pairwise iou scores. """ with tf.name_scope(scope, 'IOU'): with tf.name_scope(scope, 'Intersection'): (y_min1, x_min1, y_max1, x_max1) = tf.split(value=boxlist1.get(), num_or_size_splits=4, axis=1) (y_min2, x_min2, y_max2, x_max2) = tf.split(value=boxlist2.get(), num_or_size_splits=4, axis=1) all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2)) all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2)) intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin) all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2)) all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2)) intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin) intersections = intersect_heights * intersect_widths with tf.name_scope(scope, 'Area'): (y_min, x_min, y_max, x_max) = tf.split(value=boxlist1.get(), num_or_size_splits=4, axis=1) areas1 = tf.squeeze((y_max - y_min) * (x_max - x_min), [1]) with tf.name_scope(scope, 'Area'): (y_min, x_min, y_max, x_max) = tf.split(value=boxlist2.get(), num_or_size_splits=4, axis=1) areas2 = tf.squeeze((y_max - y_min) * (x_max - x_min), [1]) unions = tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections return tf.where(tf.equal(intersections, 0.0), tf.zeros_like(intersections), tf.truediv(intersections, unions))
avod-ssd
positive
def test_EfiSignatureList_Sort_and_Deduplication_sha256(self): owner1 = uuid.uuid4().hex owner2 = uuid.uuid4().hex subTestList = [('SubTest_Empty', [], [], []), ('SubTest_One_Entry', [(HASHSTR[1], owner1)], [(HASHSTR[1], owner1)], []), ('SubTest_321', [(HASHSTR[3], owner1), (HASHSTR[2], owner1), (HASHSTR[1], owner1)], [(HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[3], owner1)], []), ('SubTest_32123', [(HASHSTR[3], owner1), (HASHSTR[2], owner1), (HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[3], owner1)], [(HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[3], owner1)], [(HASHSTR[2], owner1), (HASHSTR[3], owner1)]), ('SubTest_122333221', [(HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[2], owner1), (HASHSTR[3], owner1), (HASHSTR[3], owner1), (HASHSTR[3], owner1), (HASHSTR[2], owner1), (HASHSTR[2], owner1), (HASHSTR[1], owner1)], [(HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[3], owner1)], [(HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[2], owner1), (HASHSTR[2], owner1), (HASHSTR[3], owner1), (HASHSTR[3], owner1)]), ('SubTest_8005551212', [(HASHSTR[8], owner1), (HASHSTR[0], owner1), (HASHSTR[0], owner1), (HASHSTR[5], owner1), (HASHSTR[5], owner1), (HASHSTR[5], owner1), (HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[1], owner1), (HASHSTR[2], owner1)], [(HASHSTR[0], owner1), (HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[5], owner1), (HASHSTR[8], owner1)], [(HASHSTR[0], owner1), (HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[5], owner1), (HASHSTR[5], owner1)]), ('SubTest_122333221_mixed_sigowner', [(HASHSTR[1], owner2), (HASHSTR[2], owner2), (HASHSTR[2], owner1), (HASHSTR[3], owner2), (HASHSTR[3], owner2), (HASHSTR[3], owner1), (HASHSTR[2], owner1), (HASHSTR[2], owner2), (HASHSTR[1], owner1)], [(HASHSTR[1], owner2), (HASHSTR[2], owner2), (HASHSTR[3], owner2)], [(HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[2], owner1), (HASHSTR[2], owner2), (HASHSTR[3], owner2), (HASHSTR[3], owner1)])] for subTest in subTestList: (testName, input, expected_sort, expected_dupes) = subTest with self.subTest(msg=testName): <DeepExtract> Esl = EfiSignatureList(typeguid=EfiSignatureDataFactory.EFI_CERT_SHA256_GUID) SignatureSize = EfiSignatureDataEfiCertSha256.STATIC_STRUCT_SIZE if input else 0 Esl.AddSignatureHeader(SigHeader=None, SigSize=SignatureSize) for entry in input: (hashStr, ownerGuidStr) = entry hashBytes = bytes.fromhex(hashStr) Esl.AddSignatureData(EfiSignatureDataEfiCertSha256(digest=hashBytes, sigowner=uuid.UUID(ownerGuidStr))) testEsl = Esl </DeepExtract> <DeepExtract> Esl = EfiSignatureList(typeguid=EfiSignatureDataFactory.EFI_CERT_SHA256_GUID) SignatureSize = EfiSignatureDataEfiCertSha256.STATIC_STRUCT_SIZE if expected_sort else 0 Esl.AddSignatureHeader(SigHeader=None, SigSize=SignatureSize) for entry in expected_sort: (hashStr, ownerGuidStr) = entry hashBytes = bytes.fromhex(hashStr) Esl.AddSignatureData(EfiSignatureDataEfiCertSha256(digest=hashBytes, sigowner=uuid.UUID(ownerGuidStr))) expected_sort_esl = Esl </DeepExtract> <DeepExtract> Esl = EfiSignatureList(typeguid=EfiSignatureDataFactory.EFI_CERT_SHA256_GUID) SignatureSize = EfiSignatureDataEfiCertSha256.STATIC_STRUCT_SIZE if expected_dupes else 0 Esl.AddSignatureHeader(SigHeader=None, SigSize=SignatureSize) for entry in expected_dupes: (hashStr, ownerGuidStr) = entry hashBytes = bytes.fromhex(hashStr) Esl.AddSignatureData(EfiSignatureDataEfiCertSha256(digest=hashBytes, sigowner=uuid.UUID(ownerGuidStr))) expected_dupes_esl = Esl </DeepExtract> output_dupes_esl = testEsl.SortBySignatureDataValue(deduplicate=True) self.assertEqual(testEsl.GetBytes(), expected_sort_esl.GetBytes()) self.assertEqual(output_dupes_esl.GetBytes(), expected_dupes_esl.GetBytes())
def test_EfiSignatureList_Sort_and_Deduplication_sha256(self): owner1 = uuid.uuid4().hex owner2 = uuid.uuid4().hex subTestList = [('SubTest_Empty', [], [], []), ('SubTest_One_Entry', [(HASHSTR[1], owner1)], [(HASHSTR[1], owner1)], []), ('SubTest_321', [(HASHSTR[3], owner1), (HASHSTR[2], owner1), (HASHSTR[1], owner1)], [(HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[3], owner1)], []), ('SubTest_32123', [(HASHSTR[3], owner1), (HASHSTR[2], owner1), (HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[3], owner1)], [(HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[3], owner1)], [(HASHSTR[2], owner1), (HASHSTR[3], owner1)]), ('SubTest_122333221', [(HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[2], owner1), (HASHSTR[3], owner1), (HASHSTR[3], owner1), (HASHSTR[3], owner1), (HASHSTR[2], owner1), (HASHSTR[2], owner1), (HASHSTR[1], owner1)], [(HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[3], owner1)], [(HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[2], owner1), (HASHSTR[2], owner1), (HASHSTR[3], owner1), (HASHSTR[3], owner1)]), ('SubTest_8005551212', [(HASHSTR[8], owner1), (HASHSTR[0], owner1), (HASHSTR[0], owner1), (HASHSTR[5], owner1), (HASHSTR[5], owner1), (HASHSTR[5], owner1), (HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[1], owner1), (HASHSTR[2], owner1)], [(HASHSTR[0], owner1), (HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[5], owner1), (HASHSTR[8], owner1)], [(HASHSTR[0], owner1), (HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[5], owner1), (HASHSTR[5], owner1)]), ('SubTest_122333221_mixed_sigowner', [(HASHSTR[1], owner2), (HASHSTR[2], owner2), (HASHSTR[2], owner1), (HASHSTR[3], owner2), (HASHSTR[3], owner2), (HASHSTR[3], owner1), (HASHSTR[2], owner1), (HASHSTR[2], owner2), (HASHSTR[1], owner1)], [(HASHSTR[1], owner2), (HASHSTR[2], owner2), (HASHSTR[3], owner2)], [(HASHSTR[1], owner1), (HASHSTR[2], owner1), (HASHSTR[2], owner1), (HASHSTR[2], owner2), (HASHSTR[3], owner2), (HASHSTR[3], owner1)])] for subTest in subTestList: (testName, input, expected_sort, expected_dupes) = subTest with self.subTest(msg=testName): Esl = EfiSignatureList(typeguid=EfiSignatureDataFactory.EFI_CERT_SHA256_GUID) SignatureSize = EfiSignatureDataEfiCertSha256.STATIC_STRUCT_SIZE if input else 0 Esl.AddSignatureHeader(SigHeader=None, SigSize=SignatureSize) for entry in input: (hashStr, ownerGuidStr) = entry hashBytes = bytes.fromhex(hashStr) Esl.AddSignatureData(EfiSignatureDataEfiCertSha256(digest=hashBytes, sigowner=uuid.UUID(ownerGuidStr))) testEsl = Esl Esl = EfiSignatureList(typeguid=EfiSignatureDataFactory.EFI_CERT_SHA256_GUID) SignatureSize = EfiSignatureDataEfiCertSha256.STATIC_STRUCT_SIZE if expected_sort else 0 Esl.AddSignatureHeader(SigHeader=None, SigSize=SignatureSize) for entry in expected_sort: (hashStr, ownerGuidStr) = entry hashBytes = bytes.fromhex(hashStr) Esl.AddSignatureData(EfiSignatureDataEfiCertSha256(digest=hashBytes, sigowner=uuid.UUID(ownerGuidStr))) expected_sort_esl = Esl Esl = EfiSignatureList(typeguid=EfiSignatureDataFactory.EFI_CERT_SHA256_GUID) SignatureSize = EfiSignatureDataEfiCertSha256.STATIC_STRUCT_SIZE if expected_dupes else 0 Esl.AddSignatureHeader(SigHeader=None, SigSize=SignatureSize) for entry in expected_dupes: (hashStr, ownerGuidStr) = entry hashBytes = bytes.fromhex(hashStr) Esl.AddSignatureData(EfiSignatureDataEfiCertSha256(digest=hashBytes, sigowner=uuid.UUID(ownerGuidStr))) expected_dupes_esl = Esl output_dupes_esl = testEsl.SortBySignatureDataValue(deduplicate=True) self.assertEqual(testEsl.GetBytes(), expected_sort_esl.GetBytes()) self.assertEqual(output_dupes_esl.GetBytes(), expected_dupes_esl.GetBytes())
edk2-pytool-library
positive
def parse(self, parser): lineno = parser.stream.current.lineno next(parser.stream) args = [] <DeepExtract> if isinstance(parser.parse_expression(), nodes.Name): kindarg = nodes.Const(parser.parse_expression().name) else: kindarg = parser.parse_expression() </DeepExtract> if kindarg.value in self.compressors: args.append(kindarg) else: raise TemplateSyntaxError('Compress kind may be one of: %r, got: %r' % (self.compressors.keys(), kindarg.value), parser.stream.current.lineno) parser.stream.skip_if('comma') namearg = nodes.Const(None) modearg = nodes.Const('file') if parser.stream.current.type != 'block_end': <DeepExtract> if isinstance(parser.parse_expression(), nodes.Name): modearg = nodes.Const(parser.parse_expression().name) else: modearg = parser.parse_expression() </DeepExtract> args.append(modearg) if modearg.value == compress.OUTPUT_FILE: if parser.stream.current.type != 'block_end': <DeepExtract> if isinstance(parser.parse_expression(), nodes.Name): namearg = nodes.Const(parser.parse_expression().name) else: namearg = parser.parse_expression() </DeepExtract> elif modearg.value == compress.OUTPUT_INLINE or modearg.value == compress.OUTPUT_PRELOAD: pass else: raise TemplateSyntaxError('Compress mode may be one of: %r, got %r' % (compress.OUTPUT_MODES, modearg.value), parser.stream.current.lineno) body = parser.parse_statements(['name:endcompress'], drop_needle=True) parser.stream.skip_if('name:' + kindarg.value) return nodes.CallBlock(self.call_method('_compress_normal', [kindarg, modearg, namearg]), [], [], body).set_lineno(lineno)
def parse(self, parser): lineno = parser.stream.current.lineno next(parser.stream) args = [] if isinstance(parser.parse_expression(), nodes.Name): kindarg = nodes.Const(parser.parse_expression().name) else: kindarg = parser.parse_expression() if kindarg.value in self.compressors: args.append(kindarg) else: raise TemplateSyntaxError('Compress kind may be one of: %r, got: %r' % (self.compressors.keys(), kindarg.value), parser.stream.current.lineno) parser.stream.skip_if('comma') namearg = nodes.Const(None) modearg = nodes.Const('file') if parser.stream.current.type != 'block_end': if isinstance(parser.parse_expression(), nodes.Name): modearg = nodes.Const(parser.parse_expression().name) else: modearg = parser.parse_expression() args.append(modearg) if modearg.value == compress.OUTPUT_FILE: if parser.stream.current.type != 'block_end': if isinstance(parser.parse_expression(), nodes.Name): namearg = nodes.Const(parser.parse_expression().name) else: namearg = parser.parse_expression() elif modearg.value == compress.OUTPUT_INLINE or modearg.value == compress.OUTPUT_PRELOAD: pass else: raise TemplateSyntaxError('Compress mode may be one of: %r, got %r' % (compress.OUTPUT_MODES, modearg.value), parser.stream.current.lineno) body = parser.parse_statements(['name:endcompress'], drop_needle=True) parser.stream.skip_if('name:' + kindarg.value) return nodes.CallBlock(self.call_method('_compress_normal', [kindarg, modearg, namearg]), [], [], body).set_lineno(lineno)
django-compressor
positive
def _start_data_server(self): """ For data_master environment: - configures backtrader REQ/REP server instance and starts server process. For others: - establishes network connection to existing data_server. """ self.data_server = None if self.data_context: self.data_context.destroy() self.data_socket = None if self.data_master: cmd = 'kill $( lsof -i:{} -t ) > /dev/null 2>&1'.format(self.data_port) os.system(cmd) self.data_server = BTgymDataFeedServer(dataset=self.dataset, network_address=self.data_network_address, log_level=self.log_level, task=self.task) self.data_server.daemon = False self.data_server.start() time.sleep(1) self.data_context = zmq.Context() self.data_socket = self.data_context.socket(zmq.REQ) self.data_socket.setsockopt(zmq.RCVTIMEO, self.connect_timeout * 1000) self.data_socket.setsockopt(zmq.SNDTIMEO, self.connect_timeout * 1000) self.data_socket.connect(self.data_network_address) self.log.debug('Pinging data_server at: {} ...'.format(self.data_network_address)) <DeepExtract> response = dict(status='ok', message=None) try: self.data_socket.send_pyobj({'ctrl': 'ping!'}) except zmq.ZMQError as e: if e.errno == zmq.EAGAIN: response['status'] = 'send_failed_due_to_connect_timeout' else: response['status'] = 'send_failed_for_unknown_reason' self.data_server_response = response start = time.time() try: response['message'] = self.data_socket.recv_pyobj() response['time'] = time.time() - start except zmq.ZMQError as e: if e.errno == zmq.EAGAIN: response['status'] = 'receive_failed_due_to_connect_timeout' else: response['status'] = 'receive_failed_for_unknown_reason' self.data_server_response = response self.data_server_response = response </DeepExtract> if self.data_server_response['status'] in 'ok': self.log.debug('Data_server seems ready with response: <{}>'.format(self.data_server_response['message'])) else: msg = 'Data_server unreachable with status: <{}>.'.format(self.data_server_response['status']) self.log.error(msg) raise ConnectionError(msg) <DeepExtract> self.data_socket.send_pyobj({'ctrl': '_get_info'}) self.data_server_response = self.data_socket.recv_pyobj() (self.dataset_stat, self.dataset_columns, self.data_server_pid, self.data_lines_names) = (self.data_server_response['dataset_stat'], self.data_server_response['dataset_columns'], self.data_server_response['pid'], self.data_server_response['data_names']) </DeepExtract>
def _start_data_server(self): """ For data_master environment: - configures backtrader REQ/REP server instance and starts server process. For others: - establishes network connection to existing data_server. """ self.data_server = None if self.data_context: self.data_context.destroy() self.data_socket = None if self.data_master: cmd = 'kill $( lsof -i:{} -t ) > /dev/null 2>&1'.format(self.data_port) os.system(cmd) self.data_server = BTgymDataFeedServer(dataset=self.dataset, network_address=self.data_network_address, log_level=self.log_level, task=self.task) self.data_server.daemon = False self.data_server.start() time.sleep(1) self.data_context = zmq.Context() self.data_socket = self.data_context.socket(zmq.REQ) self.data_socket.setsockopt(zmq.RCVTIMEO, self.connect_timeout * 1000) self.data_socket.setsockopt(zmq.SNDTIMEO, self.connect_timeout * 1000) self.data_socket.connect(self.data_network_address) self.log.debug('Pinging data_server at: {} ...'.format(self.data_network_address)) response = dict(status='ok', message=None) try: self.data_socket.send_pyobj({'ctrl': 'ping!'}) except zmq.ZMQError as e: if e.errno == zmq.EAGAIN: response['status'] = 'send_failed_due_to_connect_timeout' else: response['status'] = 'send_failed_for_unknown_reason' self.data_server_response = response start = time.time() try: response['message'] = self.data_socket.recv_pyobj() response['time'] = time.time() - start except zmq.ZMQError as e: if e.errno == zmq.EAGAIN: response['status'] = 'receive_failed_due_to_connect_timeout' else: response['status'] = 'receive_failed_for_unknown_reason' self.data_server_response = response self.data_server_response = response if self.data_server_response['status'] in 'ok': self.log.debug('Data_server seems ready with response: <{}>'.format(self.data_server_response['message'])) else: msg = 'Data_server unreachable with status: <{}>.'.format(self.data_server_response['status']) self.log.error(msg) raise ConnectionError(msg) self.data_socket.send_pyobj({'ctrl': '_get_info'}) self.data_server_response = self.data_socket.recv_pyobj() (self.dataset_stat, self.dataset_columns, self.data_server_pid, self.data_lines_names) = (self.data_server_response['dataset_stat'], self.data_server_response['dataset_columns'], self.data_server_response['pid'], self.data_server_response['data_names'])
btgym
positive
def _on_txt_changed(widget): <DeepExtract> path_type = widget.get_name()[widget.get_name().index('_') + 1:widget.get_name().rindex('_')] if path_type in PATH_TYPES: path_type = path_type path_type = None </DeepExtract> <DeepExtract> lbl_widget = self.__dict__['_lbl_%s_path' % path_type] lbl_widget.set_text(widget.get_text()) lbl_widget.set_tooltip_text(widget.get_text()) </DeepExtract>
def _on_txt_changed(widget): path_type = widget.get_name()[widget.get_name().index('_') + 1:widget.get_name().rindex('_')] if path_type in PATH_TYPES: path_type = path_type path_type = None lbl_widget = self.__dict__['_lbl_%s_path' % path_type] lbl_widget.set_text(widget.get_text()) lbl_widget.set_tooltip_text(widget.get_text())
deluge-labelplus
positive
def doc_save(docname): <DeepExtract> if docname in self.cache: doc = self.cache[docname] doc = Document({}) if docname in self: json_data = json.loads(self[docname]) doc.load(json_data) self.doc_set(docname, doc) self.doc_save(docname) doc = doc </DeepExtract> self[docname] = doc.serialize()
def doc_save(docname): if docname in self.cache: doc = self.cache[docname] doc = Document({}) if docname in self: json_data = json.loads(self[docname]) doc.load(json_data) self.doc_set(docname, doc) self.doc_save(docname) doc = doc self[docname] = doc.serialize()
ConcurrenTree
positive
def test_edit_link_folder_contents(self): """https://github.com/plonegovbr/brasil.gov.portal/issues/587""" <DeepExtract> self.folder = api.content.create(type='Folder', container=self.portal, id='test-folder-OGG') self.audio = api.content.create(type='Audio', container=self.folder, id='my-audio') self.setup_content_data() self.mp3_audio = api.content.create(type='MPEG Audio File', container=self.audio, id='file.mp3') self.mp3_audio.file = self.mp3 self.mp3_audio.reindexObject() </DeepExtract> login_browser(self.browser, self.portal) url = self.ogg_audio.absolute_url() self.browser.open('{0}/folder_contents'.format(url)) self.assertIn('file.ogg/view', self.browser.contents)
def test_edit_link_folder_contents(self): """https://github.com/plonegovbr/brasil.gov.portal/issues/587""" self.folder = api.content.create(type='Folder', container=self.portal, id='test-folder-OGG') self.audio = api.content.create(type='Audio', container=self.folder, id='my-audio') self.setup_content_data() self.mp3_audio = api.content.create(type='MPEG Audio File', container=self.audio, id='file.mp3') self.mp3_audio.file = self.mp3 self.mp3_audio.reindexObject() login_browser(self.browser, self.portal) url = self.ogg_audio.absolute_url() self.browser.open('{0}/folder_contents'.format(url)) self.assertIn('file.ogg/view', self.browser.contents)
brasil.gov.portal
positive
def fetch_currencies(self, params={}): <DeepExtract> if not self.has['fetchCurrencies']: raise NotImplementedError('{}: method not implemented: {}'.format(self.id, 'fetchCurrencies')) </DeepExtract> return super().fetch_currencies(params)
def fetch_currencies(self, params={}): if not self.has['fetchCurrencies']: raise NotImplementedError('{}: method not implemented: {}'.format(self.id, 'fetchCurrencies')) return super().fetch_currencies(params)
btrccts
positive
def quit_sequence(self): <DeepExtract> if self.args.save: if self.args.verbose > -1: print('output saved at %s' % self.log_filepath) if self.args.update_pm_by != 'NONE': torch.save(self.perceptual_model.state_dict(), self.pm_filepath) print('perceptual model saved at %s.' % self.pm_filepath) if self.args.update_rl: torch.save(self.policy_model.state_dict(), self.rl_filepath) print('RL model saved at %s.' % self.rl_filepath) if self.args.update_ir: torch.save(self.intri_model.state_dict(), self.ir_filepath) print('Intrinsic reward model saved at %s.' % self.ir_filepath) if self.args.verbose > -1: print('training took %s' % (time.time() - self.start_time)) </DeepExtract> if self.args.jay1 or self.args.gazebo: rospy.logwarn('Quit') rospy.signal_shutdown('Quit') exit()
def quit_sequence(self): if self.args.save: if self.args.verbose > -1: print('output saved at %s' % self.log_filepath) if self.args.update_pm_by != 'NONE': torch.save(self.perceptual_model.state_dict(), self.pm_filepath) print('perceptual model saved at %s.' % self.pm_filepath) if self.args.update_rl: torch.save(self.policy_model.state_dict(), self.rl_filepath) print('RL model saved at %s.' % self.rl_filepath) if self.args.update_ir: torch.save(self.intri_model.state_dict(), self.ir_filepath) print('Intrinsic reward model saved at %s.' % self.ir_filepath) if self.args.verbose > -1: print('training took %s' % (time.time() - self.start_time)) if self.args.jay1 or self.args.gazebo: rospy.logwarn('Quit') rospy.signal_shutdown('Quit') exit()
dal
positive
def plottingOfOvernightTestcasesOnFAVERAGE(fileName): <DeepExtract> fi = open(fileName, 'r') data = load(fi) fi.close() helpText = data[0] returnData = data[1:] (helpText, daten) = (helpText, returnData) </DeepExtract> titleString = 'NUMBA - using "faverage"\nnAverages=%s, nFreqbins=%s, nMics=%s, nTest=%s' % (daten[2], daten[3], daten[4], daten[5]) <DeepExtract> for cnt in range(len(daten[0])): if 'vectorized' in daten[0][cnt]: lineStyle = '--' elif 'faverage' in daten[0][cnt]: lineStyle = '--' else: lineStyle = '-' plt.semilogy(daten[1][cnt], label=daten[0][cnt], linestyle=lineStyle, marker='o') plt.xticks(range(len(daten[1][1]))) plt.xlabel('trials [1]') plt.ylabel('Time per Trial [s]') plt.grid(which='major') plt.grid(which='minor', linestyle='--') plt.title(titleString) (yMin, yMax) = plt.ylim() newYMin = 10 ** np.floor(np.log10(yMin)) plt.ylim(newYMin, yMax) plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.0) plt.show() </DeepExtract>
def plottingOfOvernightTestcasesOnFAVERAGE(fileName): fi = open(fileName, 'r') data = load(fi) fi.close() helpText = data[0] returnData = data[1:] (helpText, daten) = (helpText, returnData) titleString = 'NUMBA - using "faverage"\nnAverages=%s, nFreqbins=%s, nMics=%s, nTest=%s' % (daten[2], daten[3], daten[4], daten[5]) for cnt in range(len(daten[0])): if 'vectorized' in daten[0][cnt]: lineStyle = '--' elif 'faverage' in daten[0][cnt]: lineStyle = '--' else: lineStyle = '-' plt.semilogy(daten[1][cnt], label=daten[0][cnt], linestyle=lineStyle, marker='o') plt.xticks(range(len(daten[1][1]))) plt.xlabel('trials [1]') plt.ylabel('Time per Trial [s]') plt.grid(which='major') plt.grid(which='minor', linestyle='--') plt.title(titleString) (yMin, yMax) = plt.ylim() newYMin = 10 ** np.floor(np.log10(yMin)) plt.ylim(newYMin, yMax) plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.0) plt.show()
acoular
positive
def add_proposals(roidb, rois, scales, crowd_thresh): """Add proposal boxes (rois) to an roidb that has ground-truth annotations but no proposals. If the proposals are not at the original image scale, specify the scale factor that separate them in scales. """ box_list = [] for i in range(len(roidb)): inv_im_scale = 1.0 / scales[i] idx = np.where(rois[:, 0] == i)[0] box_list.append(rois[idx, 1:] * inv_im_scale) <DeepExtract> assert len(box_list) == len(roidb) for (i, entry) in enumerate(roidb): boxes = box_list[i] num_boxes = boxes.shape[0] gt_overlaps = np.zeros((num_boxes, entry['gt_overlaps'].shape[1]), dtype=entry['gt_overlaps'].dtype) box_to_gt_ind_map = -np.ones(num_boxes, dtype=entry['box_to_gt_ind_map'].dtype) gt_inds = np.where(entry['gt_classes'] > 0)[0] if len(gt_inds) > 0: gt_boxes = entry['boxes'][gt_inds, :] gt_classes = entry['gt_classes'][gt_inds] proposal_to_gt_overlaps = box_utils.bbox_overlaps(boxes.astype(dtype=np.float32, copy=False), gt_boxes.astype(dtype=np.float32, copy=False)) argmaxes = proposal_to_gt_overlaps.argmax(axis=1) maxes = proposal_to_gt_overlaps.max(axis=1) I = np.where(maxes > 0)[0] gt_overlaps[I, gt_classes[argmaxes[I]]] = maxes[I] box_to_gt_ind_map[I] = gt_inds[argmaxes[I]] entry['boxes'] = np.append(entry['boxes'], boxes.astype(entry['boxes'].dtype, copy=False), axis=0) entry['gt_classes'] = np.append(entry['gt_classes'], np.zeros(num_boxes, dtype=entry['gt_classes'].dtype)) entry['seg_areas'] = np.append(entry['seg_areas'], np.zeros(num_boxes, dtype=entry['seg_areas'].dtype)) entry['gt_overlaps'] = np.append(entry['gt_overlaps'].toarray(), gt_overlaps, axis=0) entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps']) entry['is_crowd'] = np.append(entry['is_crowd'], np.zeros(num_boxes, dtype=entry['is_crowd'].dtype)) entry['box_to_gt_ind_map'] = np.append(entry['box_to_gt_ind_map'], box_to_gt_ind_map.astype(entry['box_to_gt_ind_map'].dtype, copy=False)) </DeepExtract> if crowd_thresh > 0: <DeepExtract> for entry in roidb: gt_overlaps = entry['gt_overlaps'].toarray() crowd_inds = np.where(entry['is_crowd'] == 1)[0] non_gt_inds = np.where(entry['gt_classes'] == 0)[0] if len(crowd_inds) == 0 or len(non_gt_inds) == 0: continue crowd_boxes = box_utils.xyxy_to_xywh(entry['boxes'][crowd_inds, :]) non_gt_boxes = box_utils.xyxy_to_xywh(entry['boxes'][non_gt_inds, :]) iscrowd_flags = [int(True)] * len(crowd_inds) ious = COCOmask.iou(non_gt_boxes, crowd_boxes, iscrowd_flags) bad_inds = np.where(ious.max(axis=1) > crowd_thresh)[0] gt_overlaps[non_gt_inds[bad_inds], :] = -1 entry['gt_overlaps'] = scipy.sparse.csr_matrix(gt_overlaps) </DeepExtract> <DeepExtract> for entry in roidb: gt_overlaps = entry['gt_overlaps'].toarray() max_overlaps = gt_overlaps.max(axis=1) max_classes = gt_overlaps.argmax(axis=1) entry['max_classes'] = max_classes entry['max_overlaps'] = max_overlaps zero_inds = np.where(max_overlaps == 0)[0] assert all(max_classes[zero_inds] == 0) nonzero_inds = np.where(max_overlaps > 0)[0] assert all(max_classes[nonzero_inds] != 0) </DeepExtract>
def add_proposals(roidb, rois, scales, crowd_thresh): """Add proposal boxes (rois) to an roidb that has ground-truth annotations but no proposals. If the proposals are not at the original image scale, specify the scale factor that separate them in scales. """ box_list = [] for i in range(len(roidb)): inv_im_scale = 1.0 / scales[i] idx = np.where(rois[:, 0] == i)[0] box_list.append(rois[idx, 1:] * inv_im_scale) assert len(box_list) == len(roidb) for (i, entry) in enumerate(roidb): boxes = box_list[i] num_boxes = boxes.shape[0] gt_overlaps = np.zeros((num_boxes, entry['gt_overlaps'].shape[1]), dtype=entry['gt_overlaps'].dtype) box_to_gt_ind_map = -np.ones(num_boxes, dtype=entry['box_to_gt_ind_map'].dtype) gt_inds = np.where(entry['gt_classes'] > 0)[0] if len(gt_inds) > 0: gt_boxes = entry['boxes'][gt_inds, :] gt_classes = entry['gt_classes'][gt_inds] proposal_to_gt_overlaps = box_utils.bbox_overlaps(boxes.astype(dtype=np.float32, copy=False), gt_boxes.astype(dtype=np.float32, copy=False)) argmaxes = proposal_to_gt_overlaps.argmax(axis=1) maxes = proposal_to_gt_overlaps.max(axis=1) I = np.where(maxes > 0)[0] gt_overlaps[I, gt_classes[argmaxes[I]]] = maxes[I] box_to_gt_ind_map[I] = gt_inds[argmaxes[I]] entry['boxes'] = np.append(entry['boxes'], boxes.astype(entry['boxes'].dtype, copy=False), axis=0) entry['gt_classes'] = np.append(entry['gt_classes'], np.zeros(num_boxes, dtype=entry['gt_classes'].dtype)) entry['seg_areas'] = np.append(entry['seg_areas'], np.zeros(num_boxes, dtype=entry['seg_areas'].dtype)) entry['gt_overlaps'] = np.append(entry['gt_overlaps'].toarray(), gt_overlaps, axis=0) entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps']) entry['is_crowd'] = np.append(entry['is_crowd'], np.zeros(num_boxes, dtype=entry['is_crowd'].dtype)) entry['box_to_gt_ind_map'] = np.append(entry['box_to_gt_ind_map'], box_to_gt_ind_map.astype(entry['box_to_gt_ind_map'].dtype, copy=False)) if crowd_thresh > 0: for entry in roidb: gt_overlaps = entry['gt_overlaps'].toarray() crowd_inds = np.where(entry['is_crowd'] == 1)[0] non_gt_inds = np.where(entry['gt_classes'] == 0)[0] if len(crowd_inds) == 0 or len(non_gt_inds) == 0: continue crowd_boxes = box_utils.xyxy_to_xywh(entry['boxes'][crowd_inds, :]) non_gt_boxes = box_utils.xyxy_to_xywh(entry['boxes'][non_gt_inds, :]) iscrowd_flags = [int(True)] * len(crowd_inds) ious = COCOmask.iou(non_gt_boxes, crowd_boxes, iscrowd_flags) bad_inds = np.where(ious.max(axis=1) > crowd_thresh)[0] gt_overlaps[non_gt_inds[bad_inds], :] = -1 entry['gt_overlaps'] = scipy.sparse.csr_matrix(gt_overlaps) for entry in roidb: gt_overlaps = entry['gt_overlaps'].toarray() max_overlaps = gt_overlaps.max(axis=1) max_classes = gt_overlaps.argmax(axis=1) entry['max_classes'] = max_classes entry['max_overlaps'] = max_overlaps zero_inds = np.where(max_overlaps == 0)[0] assert all(max_classes[zero_inds] == 0) nonzero_inds = np.where(max_overlaps > 0)[0] assert all(max_classes[nonzero_inds] != 0)
Detectron
positive
def from_prediction(features: FeatureDict, result: ModelOutput, b_factors: Optional[np.ndarray]=None, remove_leading_feature_dimension: bool=True) -> Protein: """Assembles a protein from a prediction. Args: features: Dictionary holding model inputs. result: Dictionary holding model outputs. b_factors: (Optional) B-factors to use for the protein. remove_leading_feature_dimension: Whether to remove the leading dimension of the `features` values. Returns: A protein instance. """ fold_output = result['structure_module'] def _maybe_remove_leading_dim(arr: np.ndarray) -> np.ndarray: return arr[0] if remove_leading_feature_dimension else arr if 'asym_id' in features: <DeepExtract> chain_index = features['asym_id'][0] if remove_leading_feature_dimension else features['asym_id'] </DeepExtract> else: chain_index = np.zeros_like(_maybe_remove_leading_dim(features['aatype'])) if b_factors is None: b_factors = np.zeros_like(fold_output['final_atom_mask']) return Protein(aatype=_maybe_remove_leading_dim(features['aatype']), atom_positions=fold_output['final_atom_positions'], atom_mask=fold_output['final_atom_mask'], residue_index=_maybe_remove_leading_dim(features['residue_index']) + 1, chain_index=chain_index, b_factors=b_factors)
def from_prediction(features: FeatureDict, result: ModelOutput, b_factors: Optional[np.ndarray]=None, remove_leading_feature_dimension: bool=True) -> Protein: """Assembles a protein from a prediction. Args: features: Dictionary holding model inputs. result: Dictionary holding model outputs. b_factors: (Optional) B-factors to use for the protein. remove_leading_feature_dimension: Whether to remove the leading dimension of the `features` values. Returns: A protein instance. """ fold_output = result['structure_module'] def _maybe_remove_leading_dim(arr: np.ndarray) -> np.ndarray: return arr[0] if remove_leading_feature_dimension else arr if 'asym_id' in features: chain_index = features['asym_id'][0] if remove_leading_feature_dimension else features['asym_id'] else: chain_index = np.zeros_like(_maybe_remove_leading_dim(features['aatype'])) if b_factors is None: b_factors = np.zeros_like(fold_output['final_atom_mask']) return Protein(aatype=_maybe_remove_leading_dim(features['aatype']), atom_positions=fold_output['final_atom_positions'], atom_mask=fold_output['final_atom_mask'], residue_index=_maybe_remove_leading_dim(features['residue_index']) + 1, chain_index=chain_index, b_factors=b_factors)
alphafold
positive
def add_displacement(nodes: bpy.types.Nodes, links: bpy.types.NodeLinks, displacement_image_path: str, output_node: bpy.types.Node): """ Adds bump to the principled bsdf node. :param nodes: Nodes from the current material :param links: Links from the current material :param displacement_image_path: Path to the metal image :param output_node: Output node of the current material :return: bpy.types.Node: The newly constructed texture node """ if os.path.exists(displacement_image_path): <DeepExtract> image_node = nodes.new('ShaderNodeTexImage') if isinstance(displacement_image_path, bpy.types.Image): image_node.image = displacement_image_path else: image_node.image = bpy.data.images.load(displacement_image_path, check_existing=True) if True: image_node.image.colorspace_settings.name = 'Non-Color' image_node.location.x = _x_texture_node image_node.location.y = _y_texture_node * -4 displacement_texture = image_node </DeepExtract> displacement_node = nodes.new('ShaderNodeDisplacement') displacement_node.inputs['Midlevel'].default_value = 0.5 displacement_node.inputs['Scale'].default_value = 0.15 displacement_node.location.x = _x_texture_node * 0.5 displacement_node.location.y = _y_texture_node * -4 links.new(displacement_texture.outputs['Color'], displacement_node.inputs['Height']) links.new(displacement_node.outputs['Displacement'], output_node.inputs['Displacement']) return displacement_texture return None
def add_displacement(nodes: bpy.types.Nodes, links: bpy.types.NodeLinks, displacement_image_path: str, output_node: bpy.types.Node): """ Adds bump to the principled bsdf node. :param nodes: Nodes from the current material :param links: Links from the current material :param displacement_image_path: Path to the metal image :param output_node: Output node of the current material :return: bpy.types.Node: The newly constructed texture node """ if os.path.exists(displacement_image_path): image_node = nodes.new('ShaderNodeTexImage') if isinstance(displacement_image_path, bpy.types.Image): image_node.image = displacement_image_path else: image_node.image = bpy.data.images.load(displacement_image_path, check_existing=True) if True: image_node.image.colorspace_settings.name = 'Non-Color' image_node.location.x = _x_texture_node image_node.location.y = _y_texture_node * -4 displacement_texture = image_node displacement_node = nodes.new('ShaderNodeDisplacement') displacement_node.inputs['Midlevel'].default_value = 0.5 displacement_node.inputs['Scale'].default_value = 0.15 displacement_node.location.x = _x_texture_node * 0.5 displacement_node.location.y = _y_texture_node * -4 links.new(displacement_texture.outputs['Color'], displacement_node.inputs['Height']) links.new(displacement_node.outputs['Displacement'], output_node.inputs['Displacement']) return displacement_texture return None
BlenderProc
positive
def execute(self, context): ed = bpy.context.scene.mbdyn.elems nd = bpy.context.scene.mbdyn.nodes try: elem = ed['total_pin_joint_' + str(self.int_label)] <DeepExtract> mbs = context.scene.mbdyn nd = mbs.nodes if any((obj == elem.blender_object for obj in context.scene.objects.keys())): retval = {'OBJECT_EXISTS'} try: n1 = nd['node_' + str(elem.nodes[0].int_label)].blender_object except KeyError: retval = {'NODE1_NOTFOUND'} n1OBJ = bpy.data.objects[n1] try: set_active_collection('joints') elcol = bpy.data.collections.new(name=elem.name) bpy.data.collections['joints'].children.link(elcol) set_active_collection(elcol.name) lib_path = os.path.join(mbs.addon_path, 'library', 'joints.blend', 'Object') bpy.ops.wm.append(directory=lib_path, filename='total.pin') totjOBJ = bpy.context.selected_objects[0] totjOBJ.name = elem.name totjOBJ.location = elem.offsets[0].value totjOBJ.rotation_mode = 'QUATERNION' totjOBJ.rotation_quaternion = Quaternion(elem.rotoffsets[0].value) OBJs = list() OBJs.append(totjOBJ) pos = ['total.disp.x', 'total.disp.y', 'total.disp.z'] for kk in range(3): if not elem.offsets[1].value[kk]: app_retval = bpy.ops.wm.append(directory=lib_path, filename=pos[kk]) if app_retval != {'FINISHED'}: retval = {'LIBRARY_ERROR'} obj = bpy.context.selected_objects[0] OBJs.append(obj) obj.location = elem.offsets[0].value obj.rotation_mode = 'QUATERNION' obj.rotation_quaternion = Quaternion(elem.rotoffsets[0].value) rot = ['total.rot.x', 'total.rot.y', 'total.rot.z'] for kk in range(3): if not elem.offsets[3].value[kk]: app_retval = bpy.ops.wm.append(directory=lib_path, filename=rot[kk]) if app_retval != {'FINISHED'}: retval = {'LIBRARY_ERROR'} obj = bpy.context.selected_objects[0] OBJs.append(obj) obj.location = elem.offsets[0].value obj.rotation_mode = 'QUATERNION' obj.rotation_quaternion = Quaternion(elem.rotoffsets[0].value) ctx = bpy.context.copy() ctx['active_object'] = OBJs[0] ctx['selected_editable_objects'] = OBJs bpy.ops.object.join(ctx) s = 1.0 / sqrt(3.0) * n1OBJ.scale.magnitude totjOBJ.scale = Vector((s, s, s)) RF1p = bpy.data.objects.new(totjOBJ.name + '_RF1_pos', None) RF1p.empty_display_type = 'ARROWS' RF1p.rotation_mode = 'QUATERNION' RF1p.rotation_quaternion = Quaternion(elem.rotoffsets[0].value) RF1r = bpy.data.objects.new(totjOBJ.name + '_RF1_rot', None) RF1r.empty_display_type = 'ARROWS' RF1r.rotation_mode = 'QUATERNION' RF1r.rotation_quaternion = Quaternion(elem.rotoffsets[1].value) parenting(totjOBJ, n1OBJ) parenting(RF1p, n1OBJ) parenting(RF1r, n1OBJ) elem.blender_object = totjOBJ.name totjOBJ.mbdyn.dkey = elem.name totjOBJ.mbdyn.type = 'element' elcol.objects.link(n1OBJ) elcol.objects.link(RF1p) elcol.objects.link(RF1r) RF1p.hide_set(state=True) RF1r.hide_set(state=True) set_active_collection('Master Collection') retval = {'FINISHED'} except FileNotFoundError: retval = {'LIBRARY_ERROR'} except KeyError: retval = {'COLLECTION_ERROR'} </DeepExtract> if retval == {'OBJECT_EXISTS'}: eldbmsg(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'NODE1_NOTFOUND'}: eldbmsg(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'COLLECTION_ERROR'}: eldbmsf(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'LIBRARY_ERROR'}: eldbmsg(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'FINISHED'}: eldbmsg({'IMPORT_SUCCESS'}, type(self).__name__ + '::execute()', elem) return retval else: return retval except KeyError: eldbmsg({'DICT_ERROR'}, type(self).__name__ + '::execute()', elem) return {'CANCELLED'}
def execute(self, context): ed = bpy.context.scene.mbdyn.elems nd = bpy.context.scene.mbdyn.nodes try: elem = ed['total_pin_joint_' + str(self.int_label)] mbs = context.scene.mbdyn nd = mbs.nodes if any((obj == elem.blender_object for obj in context.scene.objects.keys())): retval = {'OBJECT_EXISTS'} try: n1 = nd['node_' + str(elem.nodes[0].int_label)].blender_object except KeyError: retval = {'NODE1_NOTFOUND'} n1OBJ = bpy.data.objects[n1] try: set_active_collection('joints') elcol = bpy.data.collections.new(name=elem.name) bpy.data.collections['joints'].children.link(elcol) set_active_collection(elcol.name) lib_path = os.path.join(mbs.addon_path, 'library', 'joints.blend', 'Object') bpy.ops.wm.append(directory=lib_path, filename='total.pin') totjOBJ = bpy.context.selected_objects[0] totjOBJ.name = elem.name totjOBJ.location = elem.offsets[0].value totjOBJ.rotation_mode = 'QUATERNION' totjOBJ.rotation_quaternion = Quaternion(elem.rotoffsets[0].value) OBJs = list() OBJs.append(totjOBJ) pos = ['total.disp.x', 'total.disp.y', 'total.disp.z'] for kk in range(3): if not elem.offsets[1].value[kk]: app_retval = bpy.ops.wm.append(directory=lib_path, filename=pos[kk]) if app_retval != {'FINISHED'}: retval = {'LIBRARY_ERROR'} obj = bpy.context.selected_objects[0] OBJs.append(obj) obj.location = elem.offsets[0].value obj.rotation_mode = 'QUATERNION' obj.rotation_quaternion = Quaternion(elem.rotoffsets[0].value) rot = ['total.rot.x', 'total.rot.y', 'total.rot.z'] for kk in range(3): if not elem.offsets[3].value[kk]: app_retval = bpy.ops.wm.append(directory=lib_path, filename=rot[kk]) if app_retval != {'FINISHED'}: retval = {'LIBRARY_ERROR'} obj = bpy.context.selected_objects[0] OBJs.append(obj) obj.location = elem.offsets[0].value obj.rotation_mode = 'QUATERNION' obj.rotation_quaternion = Quaternion(elem.rotoffsets[0].value) ctx = bpy.context.copy() ctx['active_object'] = OBJs[0] ctx['selected_editable_objects'] = OBJs bpy.ops.object.join(ctx) s = 1.0 / sqrt(3.0) * n1OBJ.scale.magnitude totjOBJ.scale = Vector((s, s, s)) RF1p = bpy.data.objects.new(totjOBJ.name + '_RF1_pos', None) RF1p.empty_display_type = 'ARROWS' RF1p.rotation_mode = 'QUATERNION' RF1p.rotation_quaternion = Quaternion(elem.rotoffsets[0].value) RF1r = bpy.data.objects.new(totjOBJ.name + '_RF1_rot', None) RF1r.empty_display_type = 'ARROWS' RF1r.rotation_mode = 'QUATERNION' RF1r.rotation_quaternion = Quaternion(elem.rotoffsets[1].value) parenting(totjOBJ, n1OBJ) parenting(RF1p, n1OBJ) parenting(RF1r, n1OBJ) elem.blender_object = totjOBJ.name totjOBJ.mbdyn.dkey = elem.name totjOBJ.mbdyn.type = 'element' elcol.objects.link(n1OBJ) elcol.objects.link(RF1p) elcol.objects.link(RF1r) RF1p.hide_set(state=True) RF1r.hide_set(state=True) set_active_collection('Master Collection') retval = {'FINISHED'} except FileNotFoundError: retval = {'LIBRARY_ERROR'} except KeyError: retval = {'COLLECTION_ERROR'} if retval == {'OBJECT_EXISTS'}: eldbmsg(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'NODE1_NOTFOUND'}: eldbmsg(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'COLLECTION_ERROR'}: eldbmsf(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'LIBRARY_ERROR'}: eldbmsg(retval, type(self).__name__ + '::execute()', elem) return {'CANCELLED'} elif retval == {'FINISHED'}: eldbmsg({'IMPORT_SUCCESS'}, type(self).__name__ + '::execute()', elem) return retval else: return retval except KeyError: eldbmsg({'DICT_ERROR'}, type(self).__name__ + '::execute()', elem) return {'CANCELLED'}
blendyn
positive
def _search(self, max_evals, timeout): num_evals_done = 0 <DeepExtract> batch = [] hp_values_samples = self._problem._hp_space._space.sample_configuration(self._evaluator.num_workers) if self._evaluator.num_workers == 1: hp_values_samples = [hp_values_samples] for i in range(self._evaluator.num_workers): arch_seq = self._gen_random_arch() hp_values = list(dict(hp_values_samples[i]).values()) config = self._problem.gen_config(arch_seq, hp_values) config = self._add_default_keys(config) batch.append(config) batch = batch </DeepExtract> self._evaluator.submit(batch) while max_evals < 0 or num_evals_done < max_evals: results = self._evaluator.gather('BATCH', 1) num_received = num_evals_done num_evals_done += len(results) num_received = num_evals_done - num_received if num_received > 0: self._evaluator.dump_evals(saved_keys=self._saved_keys, log_dir=self._log_dir) if max_evals < 0 or num_evals_done < max_evals: self._evaluator.submit(self._gen_random_batch(size=num_received))
def _search(self, max_evals, timeout): num_evals_done = 0 batch = [] hp_values_samples = self._problem._hp_space._space.sample_configuration(self._evaluator.num_workers) if self._evaluator.num_workers == 1: hp_values_samples = [hp_values_samples] for i in range(self._evaluator.num_workers): arch_seq = self._gen_random_arch() hp_values = list(dict(hp_values_samples[i]).values()) config = self._problem.gen_config(arch_seq, hp_values) config = self._add_default_keys(config) batch.append(config) batch = batch self._evaluator.submit(batch) while max_evals < 0 or num_evals_done < max_evals: results = self._evaluator.gather('BATCH', 1) num_received = num_evals_done num_evals_done += len(results) num_received = num_evals_done - num_received if num_received > 0: self._evaluator.dump_evals(saved_keys=self._saved_keys, log_dir=self._log_dir) if max_evals < 0 or num_evals_done < max_evals: self._evaluator.submit(self._gen_random_batch(size=num_received))
deephyper
positive
def lia(k, n): transition_probabilities = [[2 / 4, 1 / 4, 0], [2 / 4, 2 / 4, 2 / 4], [0, 1 / 4, 2 / 4]] k_probabilities = [0, 1, 0] for kk in range(k - 1): new_probabilities = [0, 0, 0] for j in range(3): for i in range(3): new_probabilities[j] += transition_probabilities[j][i] * k_probabilities[i] k_probabilities = new_probabilities <DeepExtract> coeffs = [[1, 0]] for i in range(1, 2 ** k + 1): new_coeffs = [1] for j in range(i): new_coeffs.append(coeffs[-1][j] + coeffs[-1][j + 1]) new_coeffs.append(0) coeffs.append(new_coeffs) counts = coeffs[-1][0:-1] </DeepExtract> probability = 0 prob_individual = k_probabilities[1] ** 2 for nn in range(n, 2 ** k + 1): n1 = 2 ** k - nn probability += counts[nn] * (1 - prob_individual) ** n1 * prob_individual ** nn return probability
def lia(k, n): transition_probabilities = [[2 / 4, 1 / 4, 0], [2 / 4, 2 / 4, 2 / 4], [0, 1 / 4, 2 / 4]] k_probabilities = [0, 1, 0] for kk in range(k - 1): new_probabilities = [0, 0, 0] for j in range(3): for i in range(3): new_probabilities[j] += transition_probabilities[j][i] * k_probabilities[i] k_probabilities = new_probabilities coeffs = [[1, 0]] for i in range(1, 2 ** k + 1): new_coeffs = [1] for j in range(i): new_coeffs.append(coeffs[-1][j] + coeffs[-1][j + 1]) new_coeffs.append(0) coeffs.append(new_coeffs) counts = coeffs[-1][0:-1] probability = 0 prob_individual = k_probabilities[1] ** 2 for nn in range(n, 2 ** k + 1): n1 = 2 ** k - nn probability += counts[nn] * (1 - prob_individual) ** n1 * prob_individual ** nn return probability
bioinformatics
positive
def __init__(self, runscontainer): super().__init__(runscontainer) self.output_dir = runscontainer.output_dir if self.runscontainer.file_format != 'APT': raise Deactivated('{} deactivated, only designed for file-format APT (but detected {})'.format(self.get_name(), self.runscontainer.file_format)) apt_warning(self.logger) <DeepExtract> runs = self.runscontainer.get_aggregated(keep_folders=True, keep_budgets=False) apt_config_dict = self._runspec_dict_apt_config(runs) results_fit_dict = self._runspec_dict_results_fit(runs) for (k, runspec_dict) in [('Auto-PyTorch Configuration', apt_config_dict), ('Results of the fit()-call', results_fit_dict)]: order_spec = list(list(runspec_dict.values())[0].keys()) html_table_specific = DataFrame(runspec_dict) html_table_specific = html_table_specific.reindex(order_spec) html_table_specific = html_table_specific.to_html(escape=False, justify='left') self.result[k] = {'table': html_table_specific} </DeepExtract> self.result['General'] = {'table': html_table, 'tooltip': 'AutoPyTorch configuration.'}
def __init__(self, runscontainer): super().__init__(runscontainer) self.output_dir = runscontainer.output_dir if self.runscontainer.file_format != 'APT': raise Deactivated('{} deactivated, only designed for file-format APT (but detected {})'.format(self.get_name(), self.runscontainer.file_format)) apt_warning(self.logger) runs = self.runscontainer.get_aggregated(keep_folders=True, keep_budgets=False) apt_config_dict = self._runspec_dict_apt_config(runs) results_fit_dict = self._runspec_dict_results_fit(runs) for (k, runspec_dict) in [('Auto-PyTorch Configuration', apt_config_dict), ('Results of the fit()-call', results_fit_dict)]: order_spec = list(list(runspec_dict.values())[0].keys()) html_table_specific = DataFrame(runspec_dict) html_table_specific = html_table_specific.reindex(order_spec) html_table_specific = html_table_specific.to_html(escape=False, justify='left') self.result[k] = {'table': html_table_specific} self.result['General'] = {'table': html_table, 'tooltip': 'AutoPyTorch configuration.'}
CAVE
positive
def __init__(self, concepts=None, preferred_formats=None): if preferred_formats is not None: self.preferred_formats = preferred_formats if concepts is None: concepts = () elif isinstance(concepts, DataView): node = concepts.parse() concepts = node.get_concepts_for_select() self.params = [] self.row_length = 0 self.concepts = concepts self._header = [] self._header_checked = False for concept in concepts: formatter_class = formatters.get(concept.formatter) <DeepExtract> formatter = formatter_class(concept=concept, keys=keys, formats=self.preferred_formats) length = len(formatter.field_names) params = (formatter, length) self.row_length += length if index is not None: self.params.insert(index, params) else: self.params.append(params) meta = formatter.get_meta(exporter=self.short_name.lower()) header = meta['header'] if index is not None: self._header.insert(index, header) else: self._header.append(header) </DeepExtract> self._format_cache = {}
def __init__(self, concepts=None, preferred_formats=None): if preferred_formats is not None: self.preferred_formats = preferred_formats if concepts is None: concepts = () elif isinstance(concepts, DataView): node = concepts.parse() concepts = node.get_concepts_for_select() self.params = [] self.row_length = 0 self.concepts = concepts self._header = [] self._header_checked = False for concept in concepts: formatter_class = formatters.get(concept.formatter) formatter = formatter_class(concept=concept, keys=keys, formats=self.preferred_formats) length = len(formatter.field_names) params = (formatter, length) self.row_length += length if index is not None: self.params.insert(index, params) else: self.params.append(params) meta = formatter.get_meta(exporter=self.short_name.lower()) header = meta['header'] if index is not None: self._header.insert(index, header) else: self._header.append(header) self._format_cache = {}
avocado
positive
def test_finds_pcr_product_across_circular_boundary(self): upstream = get_random_sequence(2000) p1_bs = 'catagcgcacaggacgcggag' middle = 'cggcacctgtgagccg' p2_bs = 'taatgaccccgaagcagg' downstream = get_random_sequence(2000) p1 = 'aaaaaaaaaa' + p1_bs p2 = 'aaaaaaaaaa' + str(Seq(p2_bs).reverse_complement()) template = ''.join([middle[10:], p2_bs, downstream, upstream, p1_bs, middle[0:10]]) <DeepExtract> g = Genome(name='Foo') g.save() for seq in templates: f = Fragment.create_with_sequence('Bar', seq, circular=True) Genome_Fragment(genome=g, fragment=f, inherited=False).save() try: os.unlink(fragment_fasta_fn(f)) except BaseException: pass build_all_genome_dbs(refresh=True) g = Genome.objects.get(pk=g.id) </DeepExtract> r = pcr_from_genome(g, p1, p2) self.assertEquals(r[0], ''.join([p1, middle, str(Seq(p2).reverse_complement())])) self.assertEquals(r[3]['fragment_name'], g.fragments.all()[0].name) self.assertEquals(r[3]['fragment_id'], g.fragments.all()[0].id) self.assertEquals(r[3]['region'], (len(template) - 10 - len(p1_bs) + 1, len(middle) - 10 + len(p2_bs)))
def test_finds_pcr_product_across_circular_boundary(self): upstream = get_random_sequence(2000) p1_bs = 'catagcgcacaggacgcggag' middle = 'cggcacctgtgagccg' p2_bs = 'taatgaccccgaagcagg' downstream = get_random_sequence(2000) p1 = 'aaaaaaaaaa' + p1_bs p2 = 'aaaaaaaaaa' + str(Seq(p2_bs).reverse_complement()) template = ''.join([middle[10:], p2_bs, downstream, upstream, p1_bs, middle[0:10]]) g = Genome(name='Foo') g.save() for seq in templates: f = Fragment.create_with_sequence('Bar', seq, circular=True) Genome_Fragment(genome=g, fragment=f, inherited=False).save() try: os.unlink(fragment_fasta_fn(f)) except BaseException: pass build_all_genome_dbs(refresh=True) g = Genome.objects.get(pk=g.id) r = pcr_from_genome(g, p1, p2) self.assertEquals(r[0], ''.join([p1, middle, str(Seq(p2).reverse_complement())])) self.assertEquals(r[3]['fragment_name'], g.fragments.all()[0].name) self.assertEquals(r[3]['fragment_id'], g.fragments.all()[0].id) self.assertEquals(r[3]['region'], (len(template) - 10 - len(p1_bs) + 1, len(middle) - 10 + len(p2_bs)))
edge
positive
def test_multipartite(): <DeepExtract> parser = parse_graph_argument('simple', 'gnp 5 1.0 3') G = obtain_graph(parser) </DeepExtract> assert G.number_of_vertices() == 5 * 3
def test_multipartite(): parser = parse_graph_argument('simple', 'gnp 5 1.0 3') G = obtain_graph(parser) assert G.number_of_vertices() == 5 * 3
cnfgen
positive
def main(cfg): model = create_model('res_50', cfg.MODEL.HEAD_CONV, cfg).cuda() weight_path = '/home/tensorboy/data/centerpose/trained_best_model/res_50_best_model.pth' state_dict = torch.load(weight_path, map_location=lambda storage, loc: storage)['state_dict'] model.load_state_dict(state_dict) onnx_file_path = './model/resnet50.onnx' image = cv2.imread('../images/image1.jpg') <DeepExtract> (height, width) = image.shape[0:2] new_height = int(height * 1) new_width = int(width * 1) mean = np.array(cfg.DATASET.MEAN, dtype=np.float32).reshape(1, 1, 3) std = np.array(cfg.DATASET.STD, dtype=np.float32).reshape(1, 1, 3) (inp_height, inp_width) = (cfg.MODEL.INPUT_H, cfg.MODEL.INPUT_W) c = np.array([new_width / 2.0, new_height / 2.0], dtype=np.float32) s = max(height, width) * 1.0 trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height]) resized_image = cv2.resize(image, (new_width, new_height)) inp_image = cv2.warpAffine(resized_image, trans_input, (inp_width, inp_height), flags=cv2.INTER_LINEAR) inp_image = ((inp_image / 255.0 - mean) / std).astype(np.float32) images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width) images = torch.from_numpy(images) meta = {'c': c, 's': s, 'out_height': inp_height // cfg.MODEL.DOWN_RATIO, 'out_width': inp_width // cfg.MODEL.DOWN_RATIO} (images, meta) = (images, meta) </DeepExtract> model.cuda() model.eval() model.float() torch_input = images.cuda() print(torch_input.shape) torch.onnx.export(model, torch_input, onnx_file_path, verbose=False) sess = nxrun.InferenceSession(onnx_file_path) input_name = sess.get_inputs()[0].name label_name = sess.get_outputs()[0].name print(input_name) print(sess.get_outputs()[0].name) print(sess.get_outputs()[1].name) print(sess.get_outputs()[2].name) output_onnx = sess.run(None, {input_name: images.cpu().data.numpy()}) (hm, wh, hps, reg, hm_hp, hp_offset) = output_onnx print(hm) print(len(output_onnx))
def main(cfg): model = create_model('res_50', cfg.MODEL.HEAD_CONV, cfg).cuda() weight_path = '/home/tensorboy/data/centerpose/trained_best_model/res_50_best_model.pth' state_dict = torch.load(weight_path, map_location=lambda storage, loc: storage)['state_dict'] model.load_state_dict(state_dict) onnx_file_path = './model/resnet50.onnx' image = cv2.imread('../images/image1.jpg') (height, width) = image.shape[0:2] new_height = int(height * 1) new_width = int(width * 1) mean = np.array(cfg.DATASET.MEAN, dtype=np.float32).reshape(1, 1, 3) std = np.array(cfg.DATASET.STD, dtype=np.float32).reshape(1, 1, 3) (inp_height, inp_width) = (cfg.MODEL.INPUT_H, cfg.MODEL.INPUT_W) c = np.array([new_width / 2.0, new_height / 2.0], dtype=np.float32) s = max(height, width) * 1.0 trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height]) resized_image = cv2.resize(image, (new_width, new_height)) inp_image = cv2.warpAffine(resized_image, trans_input, (inp_width, inp_height), flags=cv2.INTER_LINEAR) inp_image = ((inp_image / 255.0 - mean) / std).astype(np.float32) images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width) images = torch.from_numpy(images) meta = {'c': c, 's': s, 'out_height': inp_height // cfg.MODEL.DOWN_RATIO, 'out_width': inp_width // cfg.MODEL.DOWN_RATIO} (images, meta) = (images, meta) model.cuda() model.eval() model.float() torch_input = images.cuda() print(torch_input.shape) torch.onnx.export(model, torch_input, onnx_file_path, verbose=False) sess = nxrun.InferenceSession(onnx_file_path) input_name = sess.get_inputs()[0].name label_name = sess.get_outputs()[0].name print(input_name) print(sess.get_outputs()[0].name) print(sess.get_outputs()[1].name) print(sess.get_outputs()[2].name) output_onnx = sess.run(None, {input_name: images.cpu().data.numpy()}) (hm, wh, hps, reg, hm_hp, hp_offset) = output_onnx print(hm) print(len(output_onnx))
centerpose
positive
def get_robot_cor(self, use_world_map): """Get robot center of rotation and current heading""" <DeepExtract> if use_world_map: rx = self.robot.world.particle_filter.pose[0] ry = self.robot.world.particle_filter.pose[1] rtheta = self.robot.world.particle_filter.pose[2] else: rx = self.robot.pose.position.x ry = self.robot.pose.position.y rtheta = self.robot.pose.rotation.angle_z.radians (rx, ry, rtheta) = (rx, ry, rtheta) </DeepExtract> cx = rx + center_of_rotation_offset * cos(rtheta) cy = ry + center_of_rotation_offset * sin(rtheta) return (cx, cy, rtheta)
def get_robot_cor(self, use_world_map): """Get robot center of rotation and current heading""" if use_world_map: rx = self.robot.world.particle_filter.pose[0] ry = self.robot.world.particle_filter.pose[1] rtheta = self.robot.world.particle_filter.pose[2] else: rx = self.robot.pose.position.x ry = self.robot.pose.position.y rtheta = self.robot.pose.rotation.angle_z.radians (rx, ry, rtheta) = (rx, ry, rtheta) cx = rx + center_of_rotation_offset * cos(rtheta) cy = ry + center_of_rotation_offset * sin(rtheta) return (cx, cy, rtheta)
cozmo-tools
positive
def idx2model(self, idx, allow_none=False): """ Find model name for the given idx. Parameters ---------- idx : float, int, str, array-like idx or idx-es of devices. allow_none : bool If True, return `None` at the positions where idx is not found. Returns ------- If `idx` is a list, return a list of model instances. If `idx` is a single element, return a model instance. """ ret = [] <DeepExtract> single = False list_alike = (list, tuple, np.ndarray) if not isinstance(idx, list_alike): idx = [idx] single = True elif len(idx) > 0 and isinstance(idx[0], list_alike): idx = list_flatten(idx) (idx, single) = (idx, single) </DeepExtract> for i in idx: try: if i is None and allow_none: ret.append(None) else: ret.append(self._idx2model[i]) except KeyError: raise KeyError(f'Group <{self.class_name}> does not contain device with idx={i}') if single: ret = ret[0] return ret
def idx2model(self, idx, allow_none=False): """ Find model name for the given idx. Parameters ---------- idx : float, int, str, array-like idx or idx-es of devices. allow_none : bool If True, return `None` at the positions where idx is not found. Returns ------- If `idx` is a list, return a list of model instances. If `idx` is a single element, return a model instance. """ ret = [] single = False list_alike = (list, tuple, np.ndarray) if not isinstance(idx, list_alike): idx = [idx] single = True elif len(idx) > 0 and isinstance(idx[0], list_alike): idx = list_flatten(idx) (idx, single) = (idx, single) for i in idx: try: if i is None and allow_none: ret.append(None) else: ret.append(self._idx2model[i]) except KeyError: raise KeyError(f'Group <{self.class_name}> does not contain device with idx={i}') if single: ret = ret[0] return ret
andes
positive
def _stringify_py37(annotation: Any) -> str: """stringify() for py37+.""" module = getattr(annotation, '__module__', None) if module == 'typing': if getattr(annotation, '_name', None): qualname = annotation._name elif getattr(annotation, '__qualname__', None): qualname = annotation.__qualname__ elif getattr(annotation, '__forward_arg__', None): qualname = annotation.__forward_arg__ else: <DeepExtract> res = _stringify(annotation.__origin__) qualname = res </DeepExtract> elif hasattr(annotation, '__qualname__'): qualname = '%s.%s' % (module, annotation.__qualname__) else: qualname = repr(annotation) if getattr(annotation, '__args__', None): if qualname == 'Union': if len(annotation.__args__) == 2 and annotation.__args__[1] is NoneType: return 'Optional[%s]' % stringify(annotation.__args__[0]) else: args = ', '.join((stringify(a) for a in annotation.__args__)) return '%s[%s]' % (qualname, args) elif qualname == 'Callable': args = ', '.join((stringify(a) for a in annotation.__args__[:-1])) <DeepExtract> res = _stringify(annotation.__args__[-1]) returns = res </DeepExtract> return '%s[[%s], %s]' % (qualname, args, returns) elif annotation._special: return qualname else: args = ', '.join((stringify(a) for a in annotation.__args__)) return '%s[%s]' % (qualname, args) return qualname
def _stringify_py37(annotation: Any) -> str: """stringify() for py37+.""" module = getattr(annotation, '__module__', None) if module == 'typing': if getattr(annotation, '_name', None): qualname = annotation._name elif getattr(annotation, '__qualname__', None): qualname = annotation.__qualname__ elif getattr(annotation, '__forward_arg__', None): qualname = annotation.__forward_arg__ else: res = _stringify(annotation.__origin__) qualname = res elif hasattr(annotation, '__qualname__'): qualname = '%s.%s' % (module, annotation.__qualname__) else: qualname = repr(annotation) if getattr(annotation, '__args__', None): if qualname == 'Union': if len(annotation.__args__) == 2 and annotation.__args__[1] is NoneType: return 'Optional[%s]' % stringify(annotation.__args__[0]) else: args = ', '.join((stringify(a) for a in annotation.__args__)) return '%s[%s]' % (qualname, args) elif qualname == 'Callable': args = ', '.join((stringify(a) for a in annotation.__args__[:-1])) res = _stringify(annotation.__args__[-1]) returns = res return '%s[[%s], %s]' % (qualname, args, returns) elif annotation._special: return qualname else: args = ', '.join((stringify(a) for a in annotation.__args__)) return '%s[%s]' % (qualname, args) return qualname
cotk
positive
def _process_message_base(self, domain_id, ipv4, ipv6, port, msg): """Process received message (common process for any kind of network module) Args: domain_id (bytes): target domain_id ipv4 (str): IPv4 address of the sender node ipv6 (str): IPv6 address of the sender node port (int): Port number of the sender msg (dict): received message """ if KeyType.infra_msg_type not in msg: return self.logger.debug('[%s] process_message(type=%d)' % (self.domains[domain_id]['name'], int.from_bytes(msg[KeyType.infra_msg_type], 'big'))) if msg[KeyType.infra_msg_type] == InfraMessageCategory.CATEGORY_NETWORK: <DeepExtract> if not self.check_admin_signature(domain_id, msg): self.logger.error('Illegal access to domain %s' % domain_id.hex()) return source_node_id = msg[KeyType.source_node_id] if source_node_id in self.domains[domain_id]['neighbor'].nodeinfo_list: admin_msg_seq = msg[KeyType.message_seq] if self.domains[domain_id]['neighbor'].nodeinfo_list[source_node_id].admin_sequence_number >= admin_msg_seq: return self.domains[domain_id]['neighbor'].nodeinfo_list[source_node_id].admin_sequence_number = admin_msg_seq if KeyType.domain_ping in msg and port is not None: self._receive_domain_ping(domain_id, port, msg) elif msg[KeyType.command] == BBcNetwork.REQUEST_KEY_EXCHANGE: if KeyType.ecdh in msg and KeyType.hint in msg and (KeyType.nonce in msg) and (KeyType.random in msg): if source_node_id not in self.domains[domain_id]['neighbor'].nodeinfo_list: self.add_neighbor(domain_id, source_node_id, ipv4, ipv6, port) nodeinfo = self.domains[domain_id]['neighbor'].nodeinfo_list[source_node_id] if nodeinfo.key_manager is None: nodeinfo.key_manager = KeyExchangeManager(self, domain_id, source_node_id) nodeinfo.key_manager.receive_exchange_request(msg[KeyType.ecdh], msg[KeyType.nonce], msg[KeyType.random], msg[KeyType.hint]) elif msg[KeyType.command] == BBcNetwork.RESPONSE_KEY_EXCHANGE: if KeyType.ecdh in msg and KeyType.hint in msg and (KeyType.nonce in msg) and (KeyType.random in msg): nodeinfo = self.domains[domain_id]['neighbor'].nodeinfo_list[source_node_id] nodeinfo.key_manager.receive_exchange_response(msg[KeyType.ecdh], msg[KeyType.random], msg[KeyType.hint]) elif msg[KeyType.command] == BBcNetwork.CONFIRM_KEY_EXCHANGE: nodeinfo = self.domains[domain_id]['neighbor'].nodeinfo_list[source_node_id] nodeinfo.key_manager.receive_confirmation() elif msg[KeyType.command] == BBcNetwork.NOTIFY_LEAVE: if KeyType.source_node_id in msg: self.domains[domain_id]['topology'].notify_neighbor_update(source_node_id, is_new=False) self.domains[domain_id]['neighbor'].remove(source_node_id) </DeepExtract> elif msg[KeyType.infra_msg_type] == InfraMessageCategory.CATEGORY_USER: <DeepExtract> if domain_id not in self.domains or self.domains[domain_id]['neighbor'].my_node_id == msg[KeyType.source_node_id] or port is None: return None is_new = self.domains[domain_id]['neighbor'].add(node_id=msg[KeyType.source_node_id], ipv4=ipv4, ipv6=ipv6, port=port, is_static=is_static) if is_new is not None and is_new: nodelist = self.domains[domain_id]['neighbor'].nodeinfo_list self.domains[domain_id]['topology'].notify_neighbor_update(msg[KeyType.source_node_id], is_new=True) self.stats.update_stats('network', 'neighbor_nodes', len(nodelist)) return is_new </DeepExtract> self.domains[domain_id]['user'].process_message(msg) elif msg[KeyType.infra_msg_type] == InfraMessageCategory.CATEGORY_DATA: <DeepExtract> if domain_id not in self.domains or self.domains[domain_id]['neighbor'].my_node_id == msg[KeyType.source_node_id] or port is None: return None is_new = self.domains[domain_id]['neighbor'].add(node_id=msg[KeyType.source_node_id], ipv4=ipv4, ipv6=ipv6, port=port, is_static=is_static) if is_new is not None and is_new: nodelist = self.domains[domain_id]['neighbor'].nodeinfo_list self.domains[domain_id]['topology'].notify_neighbor_update(msg[KeyType.source_node_id], is_new=True) self.stats.update_stats('network', 'neighbor_nodes', len(nodelist)) return is_new </DeepExtract> self.domains[domain_id]['data'].process_message(msg) elif msg[KeyType.infra_msg_type] == InfraMessageCategory.CATEGORY_TOPOLOGY: <DeepExtract> if domain_id not in self.domains or self.domains[domain_id]['neighbor'].my_node_id == msg[KeyType.source_node_id] or port is None: return None is_new = self.domains[domain_id]['neighbor'].add(node_id=msg[KeyType.source_node_id], ipv4=ipv4, ipv6=ipv6, port=port, is_static=is_static) if is_new is not None and is_new: nodelist = self.domains[domain_id]['neighbor'].nodeinfo_list self.domains[domain_id]['topology'].notify_neighbor_update(msg[KeyType.source_node_id], is_new=True) self.stats.update_stats('network', 'neighbor_nodes', len(nodelist)) return is_new </DeepExtract> self.domains[domain_id]['topology'].process_message(msg) elif msg[KeyType.infra_msg_type] == InfraMessageCategory.CATEGORY_DOMAIN0: <DeepExtract> if domain_id not in self.domains or self.domains[domain_id]['neighbor'].my_node_id == msg[KeyType.source_node_id] or port is None: return None is_new = self.domains[domain_id]['neighbor'].add(node_id=msg[KeyType.source_node_id], ipv4=ipv4, ipv6=ipv6, port=port, is_static=is_static) if is_new is not None and is_new: nodelist = self.domains[domain_id]['neighbor'].nodeinfo_list self.domains[domain_id]['topology'].notify_neighbor_update(msg[KeyType.source_node_id], is_new=True) self.stats.update_stats('network', 'neighbor_nodes', len(nodelist)) return is_new </DeepExtract> self.domain0manager.process_message(msg)
def _process_message_base(self, domain_id, ipv4, ipv6, port, msg): """Process received message (common process for any kind of network module) Args: domain_id (bytes): target domain_id ipv4 (str): IPv4 address of the sender node ipv6 (str): IPv6 address of the sender node port (int): Port number of the sender msg (dict): received message """ if KeyType.infra_msg_type not in msg: return self.logger.debug('[%s] process_message(type=%d)' % (self.domains[domain_id]['name'], int.from_bytes(msg[KeyType.infra_msg_type], 'big'))) if msg[KeyType.infra_msg_type] == InfraMessageCategory.CATEGORY_NETWORK: if not self.check_admin_signature(domain_id, msg): self.logger.error('Illegal access to domain %s' % domain_id.hex()) return source_node_id = msg[KeyType.source_node_id] if source_node_id in self.domains[domain_id]['neighbor'].nodeinfo_list: admin_msg_seq = msg[KeyType.message_seq] if self.domains[domain_id]['neighbor'].nodeinfo_list[source_node_id].admin_sequence_number >= admin_msg_seq: return self.domains[domain_id]['neighbor'].nodeinfo_list[source_node_id].admin_sequence_number = admin_msg_seq if KeyType.domain_ping in msg and port is not None: self._receive_domain_ping(domain_id, port, msg) elif msg[KeyType.command] == BBcNetwork.REQUEST_KEY_EXCHANGE: if KeyType.ecdh in msg and KeyType.hint in msg and (KeyType.nonce in msg) and (KeyType.random in msg): if source_node_id not in self.domains[domain_id]['neighbor'].nodeinfo_list: self.add_neighbor(domain_id, source_node_id, ipv4, ipv6, port) nodeinfo = self.domains[domain_id]['neighbor'].nodeinfo_list[source_node_id] if nodeinfo.key_manager is None: nodeinfo.key_manager = KeyExchangeManager(self, domain_id, source_node_id) nodeinfo.key_manager.receive_exchange_request(msg[KeyType.ecdh], msg[KeyType.nonce], msg[KeyType.random], msg[KeyType.hint]) elif msg[KeyType.command] == BBcNetwork.RESPONSE_KEY_EXCHANGE: if KeyType.ecdh in msg and KeyType.hint in msg and (KeyType.nonce in msg) and (KeyType.random in msg): nodeinfo = self.domains[domain_id]['neighbor'].nodeinfo_list[source_node_id] nodeinfo.key_manager.receive_exchange_response(msg[KeyType.ecdh], msg[KeyType.random], msg[KeyType.hint]) elif msg[KeyType.command] == BBcNetwork.CONFIRM_KEY_EXCHANGE: nodeinfo = self.domains[domain_id]['neighbor'].nodeinfo_list[source_node_id] nodeinfo.key_manager.receive_confirmation() elif msg[KeyType.command] == BBcNetwork.NOTIFY_LEAVE: if KeyType.source_node_id in msg: self.domains[domain_id]['topology'].notify_neighbor_update(source_node_id, is_new=False) self.domains[domain_id]['neighbor'].remove(source_node_id) elif msg[KeyType.infra_msg_type] == InfraMessageCategory.CATEGORY_USER: if domain_id not in self.domains or self.domains[domain_id]['neighbor'].my_node_id == msg[KeyType.source_node_id] or port is None: return None is_new = self.domains[domain_id]['neighbor'].add(node_id=msg[KeyType.source_node_id], ipv4=ipv4, ipv6=ipv6, port=port, is_static=is_static) if is_new is not None and is_new: nodelist = self.domains[domain_id]['neighbor'].nodeinfo_list self.domains[domain_id]['topology'].notify_neighbor_update(msg[KeyType.source_node_id], is_new=True) self.stats.update_stats('network', 'neighbor_nodes', len(nodelist)) return is_new self.domains[domain_id]['user'].process_message(msg) elif msg[KeyType.infra_msg_type] == InfraMessageCategory.CATEGORY_DATA: if domain_id not in self.domains or self.domains[domain_id]['neighbor'].my_node_id == msg[KeyType.source_node_id] or port is None: return None is_new = self.domains[domain_id]['neighbor'].add(node_id=msg[KeyType.source_node_id], ipv4=ipv4, ipv6=ipv6, port=port, is_static=is_static) if is_new is not None and is_new: nodelist = self.domains[domain_id]['neighbor'].nodeinfo_list self.domains[domain_id]['topology'].notify_neighbor_update(msg[KeyType.source_node_id], is_new=True) self.stats.update_stats('network', 'neighbor_nodes', len(nodelist)) return is_new self.domains[domain_id]['data'].process_message(msg) elif msg[KeyType.infra_msg_type] == InfraMessageCategory.CATEGORY_TOPOLOGY: if domain_id not in self.domains or self.domains[domain_id]['neighbor'].my_node_id == msg[KeyType.source_node_id] or port is None: return None is_new = self.domains[domain_id]['neighbor'].add(node_id=msg[KeyType.source_node_id], ipv4=ipv4, ipv6=ipv6, port=port, is_static=is_static) if is_new is not None and is_new: nodelist = self.domains[domain_id]['neighbor'].nodeinfo_list self.domains[domain_id]['topology'].notify_neighbor_update(msg[KeyType.source_node_id], is_new=True) self.stats.update_stats('network', 'neighbor_nodes', len(nodelist)) return is_new self.domains[domain_id]['topology'].process_message(msg) elif msg[KeyType.infra_msg_type] == InfraMessageCategory.CATEGORY_DOMAIN0: if domain_id not in self.domains or self.domains[domain_id]['neighbor'].my_node_id == msg[KeyType.source_node_id] or port is None: return None is_new = self.domains[domain_id]['neighbor'].add(node_id=msg[KeyType.source_node_id], ipv4=ipv4, ipv6=ipv6, port=port, is_static=is_static) if is_new is not None and is_new: nodelist = self.domains[domain_id]['neighbor'].nodeinfo_list self.domains[domain_id]['topology'].notify_neighbor_update(msg[KeyType.source_node_id], is_new=True) self.stats.update_stats('network', 'neighbor_nodes', len(nodelist)) return is_new self.domain0manager.process_message(msg)
bbc1
positive
def _get_gremlin_connection(headers: Any=None) -> client.Client: if self.gremlin_connection is None: uri = f'{HTTP_PROTOCOL}://{self.host}:{self.port}/gremlin' <DeepExtract> request = requests.Request(method='GET', url=uri, data=data, params=params, headers=headers) if self.boto3_session is not None: aws_request = self._get_aws_request(method='GET', url=uri, data=data, params=params, headers=headers, service=service) request.headers = dict(aws_request.headers) request = request.prepare() </DeepExtract> ws_url = f'{WS_PROTOCOL}://{self.host}:{self.port}/gremlin' self.gremlin_connection = client.Client(ws_url, 'g', headers=dict(request.headers), call_from_event_loop=True) return self.gremlin_connection
def _get_gremlin_connection(headers: Any=None) -> client.Client: if self.gremlin_connection is None: uri = f'{HTTP_PROTOCOL}://{self.host}:{self.port}/gremlin' request = requests.Request(method='GET', url=uri, data=data, params=params, headers=headers) if self.boto3_session is not None: aws_request = self._get_aws_request(method='GET', url=uri, data=data, params=params, headers=headers, service=service) request.headers = dict(aws_request.headers) request = request.prepare() ws_url = f'{WS_PROTOCOL}://{self.host}:{self.port}/gremlin' self.gremlin_connection = client.Client(ws_url, 'g', headers=dict(request.headers), call_from_event_loop=True) return self.gremlin_connection
aws-data-wrangler
positive
def test_fp_celsius_conversion(self): <DeepExtract> mips_machine.re_init() mips_machine.base = 'hex' mips_machine.flavor = 'mips_asm' test_code = self.read_test_code(TEST_DIR_NAME + 'fp_cel_to_fah.asm') assemble(test_code, mips_machine) </DeepExtract> eight_string = mips_machine.registers['F12'] nine_string = mips_machine.registers['F13'] bin_string = eight_string + nine_string <DeepExtract> hx = hex(int(bin_string, 2)) result = struct.unpack('d', struct.pack('q', int(hx, 16)))[0] float_value = float(result) </DeepExtract> self.assertEqual(float_value, 1.8 * 10.0 + 32.0) self.assertEqual(mips_machine.memory['C'], 1.8 * 10.0 + 32.0)
def test_fp_celsius_conversion(self): mips_machine.re_init() mips_machine.base = 'hex' mips_machine.flavor = 'mips_asm' test_code = self.read_test_code(TEST_DIR_NAME + 'fp_cel_to_fah.asm') assemble(test_code, mips_machine) eight_string = mips_machine.registers['F12'] nine_string = mips_machine.registers['F13'] bin_string = eight_string + nine_string hx = hex(int(bin_string, 2)) result = struct.unpack('d', struct.pack('q', int(hx, 16)))[0] float_value = float(result) self.assertEqual(float_value, 1.8 * 10.0 + 32.0) self.assertEqual(mips_machine.memory['C'], 1.8 * 10.0 + 32.0)
Emu86
positive
def convert_image_processor(proc: dict): flat = [] def recurse_convert(p: dict): t = p.get('type', 'DEFAULT_NORMALIZER') if t == 'DEFAULT_NORMALIZER': flat.extend(default_data_normalizer()) elif t == 'MULTI_NORMALIZER': for c in p['children']: <DeepExtract> t = c.get('type', 'DEFAULT_NORMALIZER') if t == 'DEFAULT_NORMALIZER': flat.extend(default_data_normalizer()) elif t == 'MULTI_NORMALIZER': for c in c['children']: recurse_convert(c) elif t == 'NOOP_NORMALIZER': pass elif t == 'RANGE_NORMALIZER': flat.append(image_processor('DataRangeNormalizer')) elif t == 'CENTER_NORMALIZER': flat.append(image_processor('CenterNormalizer')) elif t == 'FINAL_PREPARATION': flat.append(image_processor('FinalPreparation', args={'normalize': not c.get('noNormalize', False), 'invert': not c.get('noInvert', False), 'transpose': not c.get('noTranspose', False), 'pad': c.get('pad', 0), 'pad_value': c.get('padValue', 0), 'as_uint8': True})) elif t == 'SCALE_TO_HEIGHT': flat.append(image_processor('ScaleToHeightProcessor')) else: raise ValueError(f'Unknown type {t}') </DeepExtract> elif t == 'NOOP_NORMALIZER': pass elif t == 'RANGE_NORMALIZER': flat.append(image_processor('DataRangeNormalizer')) elif t == 'CENTER_NORMALIZER': flat.append(image_processor('CenterNormalizer')) elif t == 'FINAL_PREPARATION': flat.append(image_processor('FinalPreparation', args={'normalize': not p.get('noNormalize', False), 'invert': not p.get('noInvert', False), 'transpose': not p.get('noTranspose', False), 'pad': p.get('pad', 0), 'pad_value': p.get('padValue', 0), 'as_uint8': True})) elif t == 'SCALE_TO_HEIGHT': flat.append(image_processor('ScaleToHeightProcessor')) else: raise ValueError(f'Unknown type {t}') <DeepExtract> t = proc.get('type', 'DEFAULT_NORMALIZER') if t == 'DEFAULT_NORMALIZER': flat.extend(default_data_normalizer()) elif t == 'MULTI_NORMALIZER': for c in proc['children']: recurse_convert(c) elif t == 'NOOP_NORMALIZER': pass elif t == 'RANGE_NORMALIZER': flat.append(image_processor('DataRangeNormalizer')) elif t == 'CENTER_NORMALIZER': flat.append(image_processor('CenterNormalizer')) elif t == 'FINAL_PREPARATION': flat.append(image_processor('FinalPreparation', args={'normalize': not proc.get('noNormalize', False), 'invert': not proc.get('noInvert', False), 'transpose': not proc.get('noTranspose', False), 'pad': proc.get('pad', 0), 'pad_value': proc.get('padValue', 0), 'as_uint8': True})) elif t == 'SCALE_TO_HEIGHT': flat.append(image_processor('ScaleToHeightProcessor')) else: raise ValueError(f'Unknown type {t}') </DeepExtract> return flat
def convert_image_processor(proc: dict): flat = [] def recurse_convert(p: dict): t = p.get('type', 'DEFAULT_NORMALIZER') if t == 'DEFAULT_NORMALIZER': flat.extend(default_data_normalizer()) elif t == 'MULTI_NORMALIZER': for c in p['children']: t = c.get('type', 'DEFAULT_NORMALIZER') if t == 'DEFAULT_NORMALIZER': flat.extend(default_data_normalizer()) elif t == 'MULTI_NORMALIZER': for c in c['children']: recurse_convert(c) elif t == 'NOOP_NORMALIZER': pass elif t == 'RANGE_NORMALIZER': flat.append(image_processor('DataRangeNormalizer')) elif t == 'CENTER_NORMALIZER': flat.append(image_processor('CenterNormalizer')) elif t == 'FINAL_PREPARATION': flat.append(image_processor('FinalPreparation', args={'normalize': not c.get('noNormalize', False), 'invert': not c.get('noInvert', False), 'transpose': not c.get('noTranspose', False), 'pad': c.get('pad', 0), 'pad_value': c.get('padValue', 0), 'as_uint8': True})) elif t == 'SCALE_TO_HEIGHT': flat.append(image_processor('ScaleToHeightProcessor')) else: raise ValueError(f'Unknown type {t}') elif t == 'NOOP_NORMALIZER': pass elif t == 'RANGE_NORMALIZER': flat.append(image_processor('DataRangeNormalizer')) elif t == 'CENTER_NORMALIZER': flat.append(image_processor('CenterNormalizer')) elif t == 'FINAL_PREPARATION': flat.append(image_processor('FinalPreparation', args={'normalize': not p.get('noNormalize', False), 'invert': not p.get('noInvert', False), 'transpose': not p.get('noTranspose', False), 'pad': p.get('pad', 0), 'pad_value': p.get('padValue', 0), 'as_uint8': True})) elif t == 'SCALE_TO_HEIGHT': flat.append(image_processor('ScaleToHeightProcessor')) else: raise ValueError(f'Unknown type {t}') t = proc.get('type', 'DEFAULT_NORMALIZER') if t == 'DEFAULT_NORMALIZER': flat.extend(default_data_normalizer()) elif t == 'MULTI_NORMALIZER': for c in proc['children']: recurse_convert(c) elif t == 'NOOP_NORMALIZER': pass elif t == 'RANGE_NORMALIZER': flat.append(image_processor('DataRangeNormalizer')) elif t == 'CENTER_NORMALIZER': flat.append(image_processor('CenterNormalizer')) elif t == 'FINAL_PREPARATION': flat.append(image_processor('FinalPreparation', args={'normalize': not proc.get('noNormalize', False), 'invert': not proc.get('noInvert', False), 'transpose': not proc.get('noTranspose', False), 'pad': proc.get('pad', 0), 'pad_value': proc.get('padValue', 0), 'as_uint8': True})) elif t == 'SCALE_TO_HEIGHT': flat.append(image_processor('ScaleToHeightProcessor')) else: raise ValueError(f'Unknown type {t}') return flat
calamari
positive
def vis_one_image(im, im_name, output_dir, boxes, segms=None, keypoints=None, thresh=0.9, kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False, ext='pdf'): """Visual debugging of detections.""" if not os.path.exists(output_dir): os.makedirs(output_dir) if isinstance(boxes, list): <DeepExtract> box_list = [b for b in boxes if len(b) > 0] if len(box_list) > 0: boxes = np.concatenate(box_list) else: boxes = None if segms is not None: segms = [s for slist in segms for s in slist] else: segms = None if keypoints is not None: keyps = [k for klist in keypoints for k in klist] else: keyps = None classes = [] for j in range(len(boxes)): classes += [j] * len(boxes[j]) (boxes, segms, keypoints, classes) = (boxes, segms, keyps, classes) </DeepExtract> if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh: return if segms is not None: masks = mask_util.decode(segms) color_list = colormap(rgb=True) / 255 (dataset_keypoints, _) = keypoint_utils.get_keypoints() <DeepExtract> kp_lines = [[dataset_keypoints.index('left_eye'), dataset_keypoints.index('right_eye')], [dataset_keypoints.index('left_eye'), dataset_keypoints.index('nose')], [dataset_keypoints.index('right_eye'), dataset_keypoints.index('nose')], [dataset_keypoints.index('right_eye'), dataset_keypoints.index('right_ear')], [dataset_keypoints.index('left_eye'), dataset_keypoints.index('left_ear')], [dataset_keypoints.index('right_shoulder'), dataset_keypoints.index('right_elbow')], [dataset_keypoints.index('right_elbow'), dataset_keypoints.index('right_wrist')], [dataset_keypoints.index('left_shoulder'), dataset_keypoints.index('left_elbow')], [dataset_keypoints.index('left_elbow'), dataset_keypoints.index('left_wrist')], [dataset_keypoints.index('right_hip'), dataset_keypoints.index('right_knee')], [dataset_keypoints.index('right_knee'), dataset_keypoints.index('right_ankle')], [dataset_keypoints.index('left_hip'), dataset_keypoints.index('left_knee')], [dataset_keypoints.index('left_knee'), dataset_keypoints.index('left_ankle')], [dataset_keypoints.index('right_shoulder'), dataset_keypoints.index('left_shoulder')], [dataset_keypoints.index('right_hip'), dataset_keypoints.index('left_hip')]] kp_lines = kp_lines </DeepExtract> cmap = plt.get_cmap('rainbow') colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)] fig = plt.figure(frameon=False) fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi) ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0]) ax.axis('off') fig.add_axes(ax) ax.imshow(im) areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) sorted_inds = np.argsort(-areas) mask_color_id = 0 for i in sorted_inds: bbox = boxes[i, :4] score = boxes[i, -1] if score < thresh: continue print(dataset.classes[classes[i]], score) ax.add_patch(plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1], fill=False, edgecolor='g', linewidth=0.5, alpha=box_alpha)) if show_class: ax.text(bbox[0], bbox[1] - 2, get_class_string(classes[i], score, dataset), fontsize=3, family='serif', bbox=dict(facecolor='g', alpha=0.4, pad=0, edgecolor='none'), color='white') if segms is not None and len(segms) > i: img = np.ones(im.shape) color_mask = color_list[mask_color_id % len(color_list), 0:3] mask_color_id += 1 w_ratio = 0.4 for c in range(3): color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio for c in range(3): img[:, :, c] = color_mask[c] e = masks[:, :, i] (_, contour, hier) = cv2.findContours(e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) for c in contour: polygon = Polygon(c.reshape((-1, 2)), 
fill=True, facecolor=color_mask, edgecolor='w', linewidth=1.2, alpha=0.5) ax.add_patch(polygon) if keypoints is not None and len(keypoints) > i: kps = keypoints[i] plt.autoscale(False) for l in range(len(kp_lines)): i1 = kp_lines[l][0] i2 = kp_lines[l][1] if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh: x = [kps[0, i1], kps[0, i2]] y = [kps[1, i1], kps[1, i2]] line = ax.plot(x, y) plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7) if kps[2, i1] > kp_thresh: ax.plot(kps[0, i1], kps[1, i1], '.', color=colors[l], markersize=3.0, alpha=0.7) if kps[2, i2] > kp_thresh: ax.plot(kps[0, i2], kps[1, i2], '.', color=colors[l], markersize=3.0, alpha=0.7) mid_shoulder = (kps[:2, dataset_keypoints.index('right_shoulder')] + kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0 sc_mid_shoulder = np.minimum(kps[2, dataset_keypoints.index('right_shoulder')], kps[2, dataset_keypoints.index('left_shoulder')]) mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] + kps[:2, dataset_keypoints.index('left_hip')]) / 2.0 sc_mid_hip = np.minimum(kps[2, dataset_keypoints.index('right_hip')], kps[2, dataset_keypoints.index('left_hip')]) if sc_mid_shoulder > kp_thresh and kps[2, dataset_keypoints.index('nose')] > kp_thresh: x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]] y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]] line = ax.plot(x, y) plt.setp(line, color=colors[len(kp_lines)], linewidth=1.0, alpha=0.7) if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh: x = [mid_shoulder[0], mid_hip[0]] y = [mid_shoulder[1], mid_hip[1]] line = ax.plot(x, y) plt.setp(line, color=colors[len(kp_lines) + 1], linewidth=1.0, alpha=0.7) output_name = os.path.basename(im_name) + '.' + ext fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi) plt.close('all')
def vis_one_image(im, im_name, output_dir, boxes, segms=None, keypoints=None, thresh=0.9, kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False, ext='pdf'): """Visual debugging of detections.""" if not os.path.exists(output_dir): os.makedirs(output_dir) if isinstance(boxes, list): box_list = [b for b in boxes if len(b) > 0] if len(box_list) > 0: boxes = np.concatenate(box_list) else: boxes = None if segms is not None: segms = [s for slist in segms for s in slist] else: segms = None if keypoints is not None: keyps = [k for klist in keypoints for k in klist] else: keyps = None classes = [] for j in range(len(boxes)): classes += [j] * len(boxes[j]) (boxes, segms, keypoints, classes) = (boxes, segms, keyps, classes) if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh: return if segms is not None: masks = mask_util.decode(segms) color_list = colormap(rgb=True) / 255 (dataset_keypoints, _) = keypoint_utils.get_keypoints() kp_lines = [[dataset_keypoints.index('left_eye'), dataset_keypoints.index('right_eye')], [dataset_keypoints.index('left_eye'), dataset_keypoints.index('nose')], [dataset_keypoints.index('right_eye'), dataset_keypoints.index('nose')], [dataset_keypoints.index('right_eye'), dataset_keypoints.index('right_ear')], [dataset_keypoints.index('left_eye'), dataset_keypoints.index('left_ear')], [dataset_keypoints.index('right_shoulder'), dataset_keypoints.index('right_elbow')], [dataset_keypoints.index('right_elbow'), dataset_keypoints.index('right_wrist')], [dataset_keypoints.index('left_shoulder'), dataset_keypoints.index('left_elbow')], [dataset_keypoints.index('left_elbow'), dataset_keypoints.index('left_wrist')], [dataset_keypoints.index('right_hip'), dataset_keypoints.index('right_knee')], [dataset_keypoints.index('right_knee'), dataset_keypoints.index('right_ankle')], [dataset_keypoints.index('left_hip'), dataset_keypoints.index('left_knee')], [dataset_keypoints.index('left_knee'), dataset_keypoints.index('left_ankle')], [dataset_keypoints.index('right_shoulder'), dataset_keypoints.index('left_shoulder')], [dataset_keypoints.index('right_hip'), dataset_keypoints.index('left_hip')]] kp_lines = kp_lines cmap = plt.get_cmap('rainbow') colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)] fig = plt.figure(frameon=False) fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi) ax = plt.Axes(fig, [0.0, 0.0, 1.0, 1.0]) ax.axis('off') fig.add_axes(ax) ax.imshow(im) areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) sorted_inds = np.argsort(-areas) mask_color_id = 0 for i in sorted_inds: bbox = boxes[i, :4] score = boxes[i, -1] if score < thresh: continue print(dataset.classes[classes[i]], score) ax.add_patch(plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1], fill=False, edgecolor='g', linewidth=0.5, alpha=box_alpha)) if show_class: ax.text(bbox[0], bbox[1] - 2, get_class_string(classes[i], score, dataset), fontsize=3, family='serif', bbox=dict(facecolor='g', alpha=0.4, pad=0, edgecolor='none'), color='white') if segms is not None and len(segms) > i: img = np.ones(im.shape) color_mask = color_list[mask_color_id % len(color_list), 0:3] mask_color_id += 1 w_ratio = 0.4 for c in range(3): color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio for c in range(3): img[:, :, c] = color_mask[c] e = masks[:, :, i] (_, contour, hier) = cv2.findContours(e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) for c in contour: polygon = Polygon(c.reshape((-1, 2)), fill=True, facecolor=color_mask, edgecolor='w', linewidth=1.2, 
alpha=0.5) ax.add_patch(polygon) if keypoints is not None and len(keypoints) > i: kps = keypoints[i] plt.autoscale(False) for l in range(len(kp_lines)): i1 = kp_lines[l][0] i2 = kp_lines[l][1] if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh: x = [kps[0, i1], kps[0, i2]] y = [kps[1, i1], kps[1, i2]] line = ax.plot(x, y) plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7) if kps[2, i1] > kp_thresh: ax.plot(kps[0, i1], kps[1, i1], '.', color=colors[l], markersize=3.0, alpha=0.7) if kps[2, i2] > kp_thresh: ax.plot(kps[0, i2], kps[1, i2], '.', color=colors[l], markersize=3.0, alpha=0.7) mid_shoulder = (kps[:2, dataset_keypoints.index('right_shoulder')] + kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0 sc_mid_shoulder = np.minimum(kps[2, dataset_keypoints.index('right_shoulder')], kps[2, dataset_keypoints.index('left_shoulder')]) mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] + kps[:2, dataset_keypoints.index('left_hip')]) / 2.0 sc_mid_hip = np.minimum(kps[2, dataset_keypoints.index('right_hip')], kps[2, dataset_keypoints.index('left_hip')]) if sc_mid_shoulder > kp_thresh and kps[2, dataset_keypoints.index('nose')] > kp_thresh: x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]] y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]] line = ax.plot(x, y) plt.setp(line, color=colors[len(kp_lines)], linewidth=1.0, alpha=0.7) if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh: x = [mid_shoulder[0], mid_hip[0]] y = [mid_shoulder[1], mid_hip[1]] line = ax.plot(x, y) plt.setp(line, color=colors[len(kp_lines) + 1], linewidth=1.0, alpha=0.7) output_name = os.path.basename(im_name) + '.' + ext fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi) plt.close('all')
Detectron.pytorch
positive
def search_lat_lon(self, BOX, nrows=None): is_indexbox(BOX) log.debug('Argo index searching for lat/lon in BOX=%s ...' % BOX) <DeepExtract> def read_csv(input_file, nrows=None): if nrows is not None: buf = io.BytesIO() n = 0 for line in input_file: n += 1 buf.write(line) if n >= nrows + 8 + 1: break buf.seek(0) return read_csv(buf, nrows=None) this_table = csv.read_csv(input_file, read_options=csv.ReadOptions(use_threads=True, skip_rows=8), convert_options=csv.ConvertOptions(column_types={'date': pa.timestamp('s'), 'date_update': pa.timestamp('s')}, timestamp_parsers=['%Y%m%d%H%M%S'])) return this_table def csv2index(obj, origin): index_file = origin.split(self.fs['src'].fs.sep)[-1] index = read_csv(obj, nrows=nrows) check_index_cols(index.column_names, convention=index_file.split('.')[0]) log.debug("Argo index file loaded with pyarrow read_csv. src='%s'" % origin) return index if not hasattr(self, 'index') or force: this_path = self.index_path if nrows is not None: this_path = this_path + '/local' + '#%i.%s' % (nrows, self.ext) else: this_path = this_path + '/local.%s' % self.ext if self.cache and self.fs['client'].exists(this_path): log.debug("Index already in memory as pyarrow table, loading... src='%s'" % this_path) self.index = self._read(self.fs['client'].fs, this_path, fmt=self.ext) self.index_path_cache = this_path else: log.debug('Load index from scratch (nrows=%s) ...' % nrows) if self.fs['src'].exists(self.index_path + '.gz'): with self.fs['src'].open(self.index_path + '.gz', 'rb') as fg: with gzip.open(fg) as f: self.index = csv2index(f, self.index_path + '.gz') else: with self.fs['src'].open(self.index_path, 'rb') as f: self.index = csv2index(f, self.index_path) if self.cache and self.index.shape[0] > 0: self._write(self.fs['client'], this_path, self.index, fmt=self.ext) self.index = self._read(self.fs['client'].fs, this_path) self.index_path_cache = this_path log.debug("Index saved in cache as pyarrow table. dest='%s'" % this_path) if self.N_RECORDS == 0: raise DataNotFound('No data found in the index') elif nrows is not None and self.N_RECORDS != nrows: self.index = self.index[0:nrows - 1] return self </DeepExtract> self.search_type = {'BOX': BOX} filt = [] filt.append(pa.compute.greater_equal(self.index['longitude'], BOX[0])) filt.append(pa.compute.less_equal(self.index['longitude'], BOX[1])) filt.append(pa.compute.greater_equal(self.index['latitude'], BOX[2])) filt.append(pa.compute.less_equal(self.index['latitude'], BOX[3])) <DeepExtract> if version.parse(pa.__version__) < version.parse('7.0'): filt = [i.to_pylist() for i in filt] if 'and' == 'or': self.search_filter = np.logical_or.reduce(filt) elif 'and' == 'and': self.search_filter = np.logical_and.reduce(filt) </DeepExtract> <DeepExtract> this_path = self.search_path if nrows is not None: this_path = this_path + '/local' + '#%i.%s' % (nrows, self.ext) else: this_path = this_path + '/local.%s' % self.ext if self.cache and self.fs['client'].exists(this_path): log.debug("Search results already in memory as pyarrow table, loading... src='%s'" % this_path) self.search = self._read(self.fs['client'].fs, this_path, fmt=self.ext) self.search_path_cache.commit(this_path) else: log.debug('Compute search from scratch (nrows=%s) ...' 
% nrows) this_filter = np.nonzero(self.search_filter)[0] n_match = this_filter.shape[0] if nrows is not None and n_match > 0: self.search = self.index.take(this_filter.take(range(np.min([nrows, n_match])))) else: self.search = self.index.filter(self.search_filter) log.debug('Found %i/%i matches' % (self.search.shape[0], self.index.shape[0])) if self.cache and self.search.shape[0] > 0: self._write(self.fs['client'], this_path, self.search, fmt=self.ext) self.search = self._read(self.fs['client'].fs, this_path) self.search_path_cache.commit(this_path) log.debug("Search results saved in cache as pyarrow table. dest='%s'" % this_path) return self </DeepExtract> return self
def search_lat_lon(self, BOX, nrows=None): is_indexbox(BOX) log.debug('Argo index searching for lat/lon in BOX=%s ...' % BOX) def read_csv(input_file, nrows=None): if nrows is not None: buf = io.BytesIO() n = 0 for line in input_file: n += 1 buf.write(line) if n >= nrows + 8 + 1: break buf.seek(0) return read_csv(buf, nrows=None) this_table = csv.read_csv(input_file, read_options=csv.ReadOptions(use_threads=True, skip_rows=8), convert_options=csv.ConvertOptions(column_types={'date': pa.timestamp('s'), 'date_update': pa.timestamp('s')}, timestamp_parsers=['%Y%m%d%H%M%S'])) return this_table def csv2index(obj, origin): index_file = origin.split(self.fs['src'].fs.sep)[-1] index = read_csv(obj, nrows=nrows) check_index_cols(index.column_names, convention=index_file.split('.')[0]) log.debug("Argo index file loaded with pyarrow read_csv. src='%s'" % origin) return index if not hasattr(self, 'index') or force: this_path = self.index_path if nrows is not None: this_path = this_path + '/local' + '#%i.%s' % (nrows, self.ext) else: this_path = this_path + '/local.%s' % self.ext if self.cache and self.fs['client'].exists(this_path): log.debug("Index already in memory as pyarrow table, loading... src='%s'" % this_path) self.index = self._read(self.fs['client'].fs, this_path, fmt=self.ext) self.index_path_cache = this_path else: log.debug('Load index from scratch (nrows=%s) ...' % nrows) if self.fs['src'].exists(self.index_path + '.gz'): with self.fs['src'].open(self.index_path + '.gz', 'rb') as fg: with gzip.open(fg) as f: self.index = csv2index(f, self.index_path + '.gz') else: with self.fs['src'].open(self.index_path, 'rb') as f: self.index = csv2index(f, self.index_path) if self.cache and self.index.shape[0] > 0: self._write(self.fs['client'], this_path, self.index, fmt=self.ext) self.index = self._read(self.fs['client'].fs, this_path) self.index_path_cache = this_path log.debug("Index saved in cache as pyarrow table. dest='%s'" % this_path) if self.N_RECORDS == 0: raise DataNotFound('No data found in the index') elif nrows is not None and self.N_RECORDS != nrows: self.index = self.index[0:nrows - 1] return self self.search_type = {'BOX': BOX} filt = [] filt.append(pa.compute.greater_equal(self.index['longitude'], BOX[0])) filt.append(pa.compute.less_equal(self.index['longitude'], BOX[1])) filt.append(pa.compute.greater_equal(self.index['latitude'], BOX[2])) filt.append(pa.compute.less_equal(self.index['latitude'], BOX[3])) if version.parse(pa.__version__) < version.parse('7.0'): filt = [i.to_pylist() for i in filt] if 'and' == 'or': self.search_filter = np.logical_or.reduce(filt) elif 'and' == 'and': self.search_filter = np.logical_and.reduce(filt) this_path = self.search_path if nrows is not None: this_path = this_path + '/local' + '#%i.%s' % (nrows, self.ext) else: this_path = this_path + '/local.%s' % self.ext if self.cache and self.fs['client'].exists(this_path): log.debug("Search results already in memory as pyarrow table, loading... src='%s'" % this_path) self.search = self._read(self.fs['client'].fs, this_path, fmt=self.ext) self.search_path_cache.commit(this_path) else: log.debug('Compute search from scratch (nrows=%s) ...' 
% nrows) this_filter = np.nonzero(self.search_filter)[0] n_match = this_filter.shape[0] if nrows is not None and n_match > 0: self.search = self.index.take(this_filter.take(range(np.min([nrows, n_match])))) else: self.search = self.index.filter(self.search_filter) log.debug('Found %i/%i matches' % (self.search.shape[0], self.index.shape[0])) if self.cache and self.search.shape[0] > 0: self._write(self.fs['client'], this_path, self.search, fmt=self.ext) self.search = self._read(self.fs['client'].fs, this_path) self.search_path_cache.commit(this_path) log.debug("Search results saved in cache as pyarrow table. dest='%s'" % this_path) return self return self
argopy
positive
def _get_elem_and_compare_to_old_value(self, obj, path_for_err_reporting, expected_old_value, elem=None, action=None): try: if action == GET: current_old_value = obj[elem] elif action == GETATTR: current_old_value = getattr(obj, elem) else: raise DeltaError(INVALID_ACTION_WHEN_CALLING_GET_ELEM.format(action)) except (KeyError, IndexError, AttributeError, IndexError, TypeError) as e: current_old_value = not_found if isinstance(path_for_err_reporting, (list, tuple)): path_for_err_reporting = '.'.join([i[0] for i in path_for_err_reporting]) if self.verify_symmetry: <DeepExtract> if self.log_errors: getattr(logger, level)(VERIFICATION_MSG.format(path_for_err_reporting, expected_old_value, current_old_value, e)) if self.raise_errors: raise DeltaError(VERIFICATION_MSG.format(path_for_err_reporting, expected_old_value, current_old_value, e)) </DeepExtract> else: <DeepExtract> if self.log_errors: getattr(logger, level)(UNABLE_TO_GET_PATH_MSG.format(path_for_err_reporting)) if self.raise_errors: raise DeltaError(UNABLE_TO_GET_PATH_MSG.format(path_for_err_reporting)) </DeepExtract> return current_old_value
def _get_elem_and_compare_to_old_value(self, obj, path_for_err_reporting, expected_old_value, elem=None, action=None): try: if action == GET: current_old_value = obj[elem] elif action == GETATTR: current_old_value = getattr(obj, elem) else: raise DeltaError(INVALID_ACTION_WHEN_CALLING_GET_ELEM.format(action)) except (KeyError, IndexError, AttributeError, IndexError, TypeError) as e: current_old_value = not_found if isinstance(path_for_err_reporting, (list, tuple)): path_for_err_reporting = '.'.join([i[0] for i in path_for_err_reporting]) if self.verify_symmetry: if self.log_errors: getattr(logger, level)(VERIFICATION_MSG.format(path_for_err_reporting, expected_old_value, current_old_value, e)) if self.raise_errors: raise DeltaError(VERIFICATION_MSG.format(path_for_err_reporting, expected_old_value, current_old_value, e)) else: if self.log_errors: getattr(logger, level)(UNABLE_TO_GET_PATH_MSG.format(path_for_err_reporting)) if self.raise_errors: raise DeltaError(UNABLE_TO_GET_PATH_MSG.format(path_for_err_reporting)) return current_old_value
deepdiff
positive
@pytest.mark.enabled_repositories def test_enabled_repositories(shell, system_release): """Verify, that the EUS repositories are enabled after conversion""" enabled_repos = shell('yum repolist').output try: if 'redhat-8.4' in system_release or 'redhat-8.6' in system_release: if os.path.exists('/non_eus_repos_used'): <DeepExtract> baseos_repo = 'rhel-8-for-x86_64-baseos-rpms' appstream_repo = 'rhel-8-for-x86_64-appstream-rpms' assert baseos_repo in enabled_repos assert appstream_repo in enabled_repos </DeepExtract> else: <DeepExtract> baseos_repo = 'rhel-8-for-x86_64-baseos-eus-rpms' appstream_repo = 'rhel-8-for-x86_64-appstream-eus-rpms' assert baseos_repo in enabled_repos assert appstream_repo in enabled_repos </DeepExtract> elif 'redhat-8.5' in system_release: <DeepExtract> baseos_repo = 'rhel-8-for-x86_64-baseos-rpms' appstream_repo = 'rhel-8-for-x86_64-appstream-rpms' assert baseos_repo in enabled_repos assert appstream_repo in enabled_repos </DeepExtract> elif 'redhat-7.9' in system_release: assert 'rhel-7-server-rpms/7Server/x86_64' in enabled_repos finally: shell('subscription-manager unregister')
@pytest.mark.enabled_repositories def test_enabled_repositories(shell, system_release): """Verify, that the EUS repositories are enabled after conversion""" enabled_repos = shell('yum repolist').output try: if 'redhat-8.4' in system_release or 'redhat-8.6' in system_release: if os.path.exists('/non_eus_repos_used'): baseos_repo = 'rhel-8-for-x86_64-baseos-rpms' appstream_repo = 'rhel-8-for-x86_64-appstream-rpms' assert baseos_repo in enabled_repos assert appstream_repo in enabled_repos else: baseos_repo = 'rhel-8-for-x86_64-baseos-eus-rpms' appstream_repo = 'rhel-8-for-x86_64-appstream-eus-rpms' assert baseos_repo in enabled_repos assert appstream_repo in enabled_repos elif 'redhat-8.5' in system_release: baseos_repo = 'rhel-8-for-x86_64-baseos-rpms' appstream_repo = 'rhel-8-for-x86_64-appstream-rpms' assert baseos_repo in enabled_repos assert appstream_repo in enabled_repos elif 'redhat-7.9' in system_release: assert 'rhel-7-server-rpms/7Server/x86_64' in enabled_repos finally: shell('subscription-manager unregister')
convert2rhel
positive
def parse_fence_header(header): <DeepExtract> fence_char = '`' if header[0] == '`' else '~' </DeepExtract> items = header.strip(f' {fence_char}').split(',') lang = items[0] def get_kv(item): vals = item.split('=') if len(vals) == 1: return (vals[0], 'yes') return vals args = dict((get_kv(item) for item in items[1:])) return (lang, args)
def parse_fence_header(header): fence_char = '`' if header[0] == '`' else '~' items = header.strip(f' {fence_char}').split(',') lang = items[0] def get_kv(item): vals = item.split('=') if len(vals) == 1: return (vals[0], 'yes') return vals args = dict((get_kv(item) for item in items[1:])) return (lang, args)
elsie
positive
def update_curve(c, dt): <DeepExtract> if dt == 0 and c.counter == 0: rate = self.velocity_factor * self.dt_calculate c.counter += 1 else: rate = dt * self.velocity_factor if dt > 0: c.counter = 0 rate = rate </DeepExtract> c.become(self.get_sin_graph(self.t_offset + rate)) self.t_offset += rate
def update_curve(c, dt): if dt == 0 and c.counter == 0: rate = self.velocity_factor * self.dt_calculate c.counter += 1 else: rate = dt * self.velocity_factor if dt > 0: c.counter = 0 rate = rate c.become(self.get_sin_graph(self.t_offset + rate)) self.t_offset += rate
AnimationsWithManim
positive
@pytest.fixture(params=['i', 'ii']) def data_add_points_until_main_model_fully_linear(request, criterion): <DeepExtract> with open(f"{TEST_FIXTURES_DIR / f'add_points_until_main_model_fully_linear_{request.param}.yaml'}") as file: data = yaml.full_load(file) test_data = data </DeepExtract> history = LeastSquaresHistory() n = 3 n_modelpoints = test_data['n_modelpoints'] history.add_entries(np.array(test_data['history_x'])[:-(n - n_modelpoints)], np.array(test_data['history_criterion'])[:-(n - n_modelpoints)]) MainModel = namedtuple('MainModel', ['linear_terms', 'square_terms']) main_model = MainModel(linear_terms=np.array(test_data['linear_terms']), square_terms=np.array(test_data['square_terms'])) index_best_x = test_data['index_best_x'] x_accepted = test_data['history_x'][index_best_x] inputs_dict = {'history': history, 'main_model': main_model, 'model_improving_points': np.array(test_data['model_improving_points']), 'model_indices': np.array(test_data['model_indices']), 'x_accepted': np.array(x_accepted), 'n_modelpoints': n_modelpoints, 'delta': test_data['delta'], 'criterion': criterion, 'lower_bounds': None, 'upper_bounds': None} expected_dict = {'model_indices': test_data['model_indices_expected'], 'history_x': test_data['history_x_expected']} return (inputs_dict, expected_dict)
@pytest.fixture(params=['i', 'ii']) def data_add_points_until_main_model_fully_linear(request, criterion): with open(f"{TEST_FIXTURES_DIR / f'add_points_until_main_model_fully_linear_{request.param}.yaml'}") as file: data = yaml.full_load(file) test_data = data history = LeastSquaresHistory() n = 3 n_modelpoints = test_data['n_modelpoints'] history.add_entries(np.array(test_data['history_x'])[:-(n - n_modelpoints)], np.array(test_data['history_criterion'])[:-(n - n_modelpoints)]) MainModel = namedtuple('MainModel', ['linear_terms', 'square_terms']) main_model = MainModel(linear_terms=np.array(test_data['linear_terms']), square_terms=np.array(test_data['square_terms'])) index_best_x = test_data['index_best_x'] x_accepted = test_data['history_x'][index_best_x] inputs_dict = {'history': history, 'main_model': main_model, 'model_improving_points': np.array(test_data['model_improving_points']), 'model_indices': np.array(test_data['model_indices']), 'x_accepted': np.array(x_accepted), 'n_modelpoints': n_modelpoints, 'delta': test_data['delta'], 'criterion': criterion, 'lower_bounds': None, 'upper_bounds': None} expected_dict = {'model_indices': test_data['model_indices_expected'], 'history_x': test_data['history_x_expected']} return (inputs_dict, expected_dict)
estimagic
positive
def response_header_is_equal_to(response, header_name, value): """ """ <DeepExtract> result = response.headers.get(header_name) if not result: fail('Match not found at <{headers}> for <{header_name}>'.format(headers=response.headers, header_name=header_name)) actual_value = result </DeepExtract> assert_that(actual_value).is_equal_to(value)
def response_header_is_equal_to(response, header_name, value): """ """ result = response.headers.get(header_name) if not result: fail('Match not found at <{headers}> for <{header_name}>'.format(headers=response.headers, header_name=header_name)) actual_value = result assert_that(actual_value).is_equal_to(value)
behave-restful
positive
def _unschedule(self, coro): """Unschedule a coroutine. Unprime any pending triggers""" if coro in self._pending_coros: assert not coro.has_started() self._pending_coros.remove(coro) coro.close() return trigger = coro._trigger if trigger is not None: coro._trigger = None if coro in self._trigger2coros.setdefault(trigger, []): self._trigger2coros[trigger].remove(coro) if not self._trigger2coros[trigger]: trigger.unprime() del self._trigger2coros[trigger] assert self._test is not None if coro is self._test: if _debug: self.log.debug(f'Unscheduling test {coro}') if not self._terminate: self._terminate = True <DeepExtract> items = list(self._trigger2coros.items()) for (trigger, waiting) in items[::-1]: for coro in waiting: if _debug: self.log.debug('Killing %s' % str(coro)) coro.kill() for coro in self._scheduling: if _debug: self.log.debug('Killing %s' % str(coro)) coro.kill() for task in self._pending_coros: task.kill() if self._main_thread is not threading.current_thread(): raise Exception('Cleanup() called outside of the main thread') for ext in self._pending_threads: self.log.warning('Waiting for %s to exit', ext.thread) </DeepExtract> elif Join(coro) in self._trigger2coros: <DeepExtract> if self._is_reacting: self._pending_triggers.append(Join(coro)) return if self._pending_triggers: raise InternalError('Expected all triggers to be handled but found {}'.format(self._pending_triggers)) self._is_reacting = True try: self._event_loop(Join(coro)) finally: self._is_reacting = False </DeepExtract> else: try: coro._outcome.get() except (TestComplete, AssertionError) as e: coro.log.info('Test stopped by this forked coroutine') e = remove_traceback_frames(e, ['_unschedule', 'get']) <DeepExtract> if self._test._outcome is not None: raise InternalError('Outcome already has a value, but is being set again.') outcome = outcomes.Error(e) if _debug: self._test.log.debug(f'outcome forced to {outcome}') self._test._outcome = outcome self._unschedule(self._test) </DeepExtract> except BaseException as e: coro.log.error('Exception raised by this forked coroutine') e = remove_traceback_frames(e, ['_unschedule', 'get']) warnings.warn('"Unwatched" tasks that throw exceptions will not cause the test to fail. See issue #2664 for more details.', FutureWarning) <DeepExtract> if self._test._outcome is not None: raise InternalError('Outcome already has a value, but is being set again.') outcome = outcomes.Error(e) if _debug: self._test.log.debug(f'outcome forced to {outcome}') self._test._outcome = outcome self._unschedule(self._test) </DeepExtract>
def _unschedule(self, coro): """Unschedule a coroutine. Unprime any pending triggers""" if coro in self._pending_coros: assert not coro.has_started() self._pending_coros.remove(coro) coro.close() return trigger = coro._trigger if trigger is not None: coro._trigger = None if coro in self._trigger2coros.setdefault(trigger, []): self._trigger2coros[trigger].remove(coro) if not self._trigger2coros[trigger]: trigger.unprime() del self._trigger2coros[trigger] assert self._test is not None if coro is self._test: if _debug: self.log.debug(f'Unscheduling test {coro}') if not self._terminate: self._terminate = True items = list(self._trigger2coros.items()) for (trigger, waiting) in items[::-1]: for coro in waiting: if _debug: self.log.debug('Killing %s' % str(coro)) coro.kill() for coro in self._scheduling: if _debug: self.log.debug('Killing %s' % str(coro)) coro.kill() for task in self._pending_coros: task.kill() if self._main_thread is not threading.current_thread(): raise Exception('Cleanup() called outside of the main thread') for ext in self._pending_threads: self.log.warning('Waiting for %s to exit', ext.thread) elif Join(coro) in self._trigger2coros: if self._is_reacting: self._pending_triggers.append(Join(coro)) return if self._pending_triggers: raise InternalError('Expected all triggers to be handled but found {}'.format(self._pending_triggers)) self._is_reacting = True try: self._event_loop(Join(coro)) finally: self._is_reacting = False else: try: coro._outcome.get() except (TestComplete, AssertionError) as e: coro.log.info('Test stopped by this forked coroutine') e = remove_traceback_frames(e, ['_unschedule', 'get']) if self._test._outcome is not None: raise InternalError('Outcome already has a value, but is being set again.') outcome = outcomes.Error(e) if _debug: self._test.log.debug(f'outcome forced to {outcome}') self._test._outcome = outcome self._unschedule(self._test) except BaseException as e: coro.log.error('Exception raised by this forked coroutine') e = remove_traceback_frames(e, ['_unschedule', 'get']) warnings.warn('"Unwatched" tasks that throw exceptions will not cause the test to fail. See issue #2664 for more details.', FutureWarning) if self._test._outcome is not None: raise InternalError('Outcome already has a value, but is being set again.') outcome = outcomes.Error(e) if _debug: self._test.log.debug(f'outcome forced to {outcome}') self._test._outcome = outcome self._unschedule(self._test)
cocotb
positive
def forward(self, x1, x2, out_size): assert x1.shape[:2] == x2.shape[:2] assert len(out_size) == 2 <DeepExtract> if x1.shape[-2:] == out_size: x1 = x1 elif x1.shape[-2:] < out_size: x1 = F.interpolate(x1, size=out_size, mode='nearest') else: assert x1.shape[-2] % out_size[-2] == 0 and x1.shape[-1] % out_size[-1] == 0 kernel_size = x1.shape[-1] // out_size[-1] x1 = F.max_pool2d(x1, kernel_size=kernel_size, stride=kernel_size) x1 = x1 </DeepExtract> <DeepExtract> if x2.shape[-2:] == out_size: x2 = x2 elif x2.shape[-2:] < out_size: x2 = F.interpolate(x2, size=out_size, mode='nearest') else: assert x2.shape[-2] % out_size[-2] == 0 and x2.shape[-1] % out_size[-1] == 0 kernel_size = x2.shape[-1] // out_size[-1] x2 = F.max_pool2d(x2, kernel_size=kernel_size, stride=kernel_size) x2 = x2 </DeepExtract> <DeepExtract> raise NotImplementedError </DeepExtract> if self.with_conv: x = self.conv_out(x) return x
def forward(self, x1, x2, out_size): assert x1.shape[:2] == x2.shape[:2] assert len(out_size) == 2 if x1.shape[-2:] == out_size: x1 = x1 elif x1.shape[-2:] < out_size: x1 = F.interpolate(x1, size=out_size, mode='nearest') else: assert x1.shape[-2] % out_size[-2] == 0 and x1.shape[-1] % out_size[-1] == 0 kernel_size = x1.shape[-1] // out_size[-1] x1 = F.max_pool2d(x1, kernel_size=kernel_size, stride=kernel_size) x1 = x1 if x2.shape[-2:] == out_size: x2 = x2 elif x2.shape[-2:] < out_size: x2 = F.interpolate(x2, size=out_size, mode='nearest') else: assert x2.shape[-2] % out_size[-2] == 0 and x2.shape[-1] % out_size[-1] == 0 kernel_size = x2.shape[-1] // out_size[-1] x2 = F.max_pool2d(x2, kernel_size=kernel_size, stride=kernel_size) x2 = x2 raise NotImplementedError if self.with_conv: x = self.conv_out(x) return x
DetectoRS
positive
def get_symbol(self, cfg, is_train=True): """ return a generated symbol, it also need to be assigned to self.sym """ num_classes = cfg.dataset.NUM_CLASSES if is_train: <DeepExtract> data = mx.symbol.Variable(name='data') seg_cls_gt = mx.symbol.Variable(name='label') conv_feat = self.get_resnet_conv(data) fc6_bias = mx.symbol.Variable('fc6_bias', lr_mult=2.0) fc6_weight = mx.symbol.Variable('fc6_weight', lr_mult=1.0) fc6 = mx.symbol.Convolution(data=conv_feat, kernel=(1, 1), pad=(0, 0), num_filter=1024, name='fc6', bias=fc6_bias, weight=fc6_weight, workspace=self.workspace) relu_fc6 = mx.sym.Activation(data=fc6, act_type='relu', name='relu_fc6') score_bias = mx.symbol.Variable('score_bias', lr_mult=2.0) score_weight = mx.symbol.Variable('score_weight', lr_mult=1.0) score = mx.symbol.Convolution(data=relu_fc6, kernel=(1, 1), pad=(0, 0), num_filter=num_classes, name='score', bias=score_bias, weight=score_weight, workspace=self.workspace) upsampling = mx.symbol.Deconvolution(data=score, num_filter=num_classes, kernel=(32, 32), stride=(16, 16), num_group=num_classes, no_bias=True, name='upsampling', attr={'lr_mult': '0.0'}, workspace=self.workspace) croped_score = mx.symbol.Crop(*[upsampling, data], offset=(8, 8), name='croped_score') softmax = mx.symbol.SoftmaxOutput(data=croped_score, label=seg_cls_gt, normalization='valid', multi_output=True, use_ignore=True, ignore_label=255, name='softmax') self.sym = softmax </DeepExtract> else: <DeepExtract> data = mx.symbol.Variable(name='data') conv_feat = self.get_resnet_conv(data) fc6_bias = mx.symbol.Variable('fc6_bias', lr_mult=2.0) fc6_weight = mx.symbol.Variable('fc6_weight', lr_mult=1.0) fc6 = mx.symbol.Convolution(data=conv_feat, kernel=(1, 1), pad=(0, 0), num_filter=1024, name='fc6', bias=fc6_bias, weight=fc6_weight, workspace=self.workspace) relu_fc6 = mx.sym.Activation(data=fc6, act_type='relu', name='relu_fc6') score_bias = mx.symbol.Variable('score_bias', lr_mult=2.0) score_weight = mx.symbol.Variable('score_weight', lr_mult=1.0) score = mx.symbol.Convolution(data=relu_fc6, kernel=(1, 1), pad=(0, 0), num_filter=num_classes, name='score', bias=score_bias, weight=score_weight, workspace=self.workspace) upsampling = mx.symbol.Deconvolution(data=score, num_filter=num_classes, kernel=(32, 32), stride=(16, 16), num_group=num_classes, no_bias=True, name='upsampling', attr={'lr_mult': '0.0'}, workspace=self.workspace) croped_score = mx.symbol.Crop(*[upsampling, data], offset=(8, 8), name='croped_score') softmax = mx.symbol.SoftmaxOutput(data=croped_score, normalization='valid', multi_output=True, use_ignore=True, ignore_label=255, name='softmax') self.sym = softmax </DeepExtract> return self.sym
def get_symbol(self, cfg, is_train=True): """ return a generated symbol, it also need to be assigned to self.sym """ num_classes = cfg.dataset.NUM_CLASSES if is_train: data = mx.symbol.Variable(name='data') seg_cls_gt = mx.symbol.Variable(name='label') conv_feat = self.get_resnet_conv(data) fc6_bias = mx.symbol.Variable('fc6_bias', lr_mult=2.0) fc6_weight = mx.symbol.Variable('fc6_weight', lr_mult=1.0) fc6 = mx.symbol.Convolution(data=conv_feat, kernel=(1, 1), pad=(0, 0), num_filter=1024, name='fc6', bias=fc6_bias, weight=fc6_weight, workspace=self.workspace) relu_fc6 = mx.sym.Activation(data=fc6, act_type='relu', name='relu_fc6') score_bias = mx.symbol.Variable('score_bias', lr_mult=2.0) score_weight = mx.symbol.Variable('score_weight', lr_mult=1.0) score = mx.symbol.Convolution(data=relu_fc6, kernel=(1, 1), pad=(0, 0), num_filter=num_classes, name='score', bias=score_bias, weight=score_weight, workspace=self.workspace) upsampling = mx.symbol.Deconvolution(data=score, num_filter=num_classes, kernel=(32, 32), stride=(16, 16), num_group=num_classes, no_bias=True, name='upsampling', attr={'lr_mult': '0.0'}, workspace=self.workspace) croped_score = mx.symbol.Crop(*[upsampling, data], offset=(8, 8), name='croped_score') softmax = mx.symbol.SoftmaxOutput(data=croped_score, label=seg_cls_gt, normalization='valid', multi_output=True, use_ignore=True, ignore_label=255, name='softmax') self.sym = softmax else: data = mx.symbol.Variable(name='data') conv_feat = self.get_resnet_conv(data) fc6_bias = mx.symbol.Variable('fc6_bias', lr_mult=2.0) fc6_weight = mx.symbol.Variable('fc6_weight', lr_mult=1.0) fc6 = mx.symbol.Convolution(data=conv_feat, kernel=(1, 1), pad=(0, 0), num_filter=1024, name='fc6', bias=fc6_bias, weight=fc6_weight, workspace=self.workspace) relu_fc6 = mx.sym.Activation(data=fc6, act_type='relu', name='relu_fc6') score_bias = mx.symbol.Variable('score_bias', lr_mult=2.0) score_weight = mx.symbol.Variable('score_weight', lr_mult=1.0) score = mx.symbol.Convolution(data=relu_fc6, kernel=(1, 1), pad=(0, 0), num_filter=num_classes, name='score', bias=score_bias, weight=score_weight, workspace=self.workspace) upsampling = mx.symbol.Deconvolution(data=score, num_filter=num_classes, kernel=(32, 32), stride=(16, 16), num_group=num_classes, no_bias=True, name='upsampling', attr={'lr_mult': '0.0'}, workspace=self.workspace) croped_score = mx.symbol.Crop(*[upsampling, data], offset=(8, 8), name='croped_score') softmax = mx.symbol.SoftmaxOutput(data=croped_score, normalization='valid', multi_output=True, use_ignore=True, ignore_label=255, name='softmax') self.sym = softmax return self.sym
Deformable-ConvNets
positive
def getMap(root, i, map): if not root: return if map[i][0] == None: map[i][0] = root map[i][1] = root <DeepExtract> if not root.left: return if map[i + 1][0] == None: map[i + 1][0] = root.left map[i + 1][1] = root.left getMap(root.left.left, i + 1 + 1, map) getMap(root.left.right, i + 1 + 1, map) </DeepExtract> <DeepExtract> if not root.right: return if map[i + 1][0] == None: map[i + 1][0] = root.right map[i + 1][1] = root.right getMap(root.right.left, i + 1 + 1, map) getMap(root.right.right, i + 1 + 1, map) </DeepExtract>
def getMap(root, i, map): if not root: return if map[i][0] == None: map[i][0] = root map[i][1] = root if not root.left: return if map[i + 1][0] == None: map[i + 1][0] = root.left map[i + 1][1] = root.left getMap(root.left.left, i + 1 + 1, map) getMap(root.left.right, i + 1 + 1, map) if not root.right: return if map[i + 1][0] == None: map[i + 1][0] = root.right map[i + 1][1] = root.right getMap(root.right.left, i + 1 + 1, map) getMap(root.right.right, i + 1 + 1, map)
Data_Structures_and_Algorithms
positive
def file2sentences(input_path, data_type, output_path, paragraphs_path, max_length=100, min_length=5, max_plength=400, min_plength=5): if data_type.lower() == 'wiki10000': <DeepExtract> outfile = open(output_path, 'w', encoding='utf8') outfile_p = open(paragraphs_path, 'w', encoding='utf8') with codecs.open(input_path, encoding='utf8') as infile: data = json.load(infile) pid = 0 sid = 0 for k in data: paragraph_list = data[k] for p in paragraph_list: len_p = len(p.split()) if len_p >= max_plength or len_p <= min_plength: continue p = normalize_text(p) outfile_p.write(str(pid) + '\t' + p.rstrip().replace('\n', '\\n') + '\n') sentences = nltk.sent_tokenize(p) for s in sentences: len_s = len(s.split()) if len_s >= max_length or len_s <= min_length: continue s = normalize_text(s) outfile.write(str(pid) + '\t' + str(sid) + '\t' + s.rstrip().replace('\n', '\\n') + '\n') sid += 1 pid += 1 infile.close() outfile.close() outfile_p.close() </DeepExtract> elif data_type.lower() == 'squad': <DeepExtract> outfile = open(output_path, 'w', encoding='utf8') outfile_p = open(paragraphs_path, 'w', encoding='utf8') with codecs.open(input_path, 'r', encoding='utf8') as infile: source = json.load(infile) pid = 0 sid = 0 for article in tqdm(source['data']): for para in article['paragraphs']: context = para['context'] p = context len_p = len(p.split()) if len_p >= max_plength or len_p <= min_plength: continue p = normalize_text(p) outfile_p.write(str(pid) + '\t' + p.rstrip().replace('\n', '\\n') + '\n') sentences = nltk.sent_tokenize(context) for s in sentences: len_s = len(s.split()) if len_s >= max_length or len_s <= min_length: continue s = normalize_text(s) outfile.write(str(pid) + '\t' + str(sid) + '\t' + s.rstrip().replace('\n', '\\n') + '\n') sid += 1 pid += 1 infile.close() outfile.close() outfile_p.close() </DeepExtract> else: print('The data_type must be wiki10000 or squad...')
def file2sentences(input_path, data_type, output_path, paragraphs_path, max_length=100, min_length=5, max_plength=400, min_plength=5): if data_type.lower() == 'wiki10000': outfile = open(output_path, 'w', encoding='utf8') outfile_p = open(paragraphs_path, 'w', encoding='utf8') with codecs.open(input_path, encoding='utf8') as infile: data = json.load(infile) pid = 0 sid = 0 for k in data: paragraph_list = data[k] for p in paragraph_list: len_p = len(p.split()) if len_p >= max_plength or len_p <= min_plength: continue p = normalize_text(p) outfile_p.write(str(pid) + '\t' + p.rstrip().replace('\n', '\\n') + '\n') sentences = nltk.sent_tokenize(p) for s in sentences: len_s = len(s.split()) if len_s >= max_length or len_s <= min_length: continue s = normalize_text(s) outfile.write(str(pid) + '\t' + str(sid) + '\t' + s.rstrip().replace('\n', '\\n') + '\n') sid += 1 pid += 1 infile.close() outfile.close() outfile_p.close() elif data_type.lower() == 'squad': outfile = open(output_path, 'w', encoding='utf8') outfile_p = open(paragraphs_path, 'w', encoding='utf8') with codecs.open(input_path, 'r', encoding='utf8') as infile: source = json.load(infile) pid = 0 sid = 0 for article in tqdm(source['data']): for para in article['paragraphs']: context = para['context'] p = context len_p = len(p.split()) if len_p >= max_plength or len_p <= min_plength: continue p = normalize_text(p) outfile_p.write(str(pid) + '\t' + p.rstrip().replace('\n', '\\n') + '\n') sentences = nltk.sent_tokenize(context) for s in sentences: len_s = len(s.split()) if len_s >= max_length or len_s <= min_length: continue s = normalize_text(s) outfile.write(str(pid) + '\t' + str(sid) + '\t' + s.rstrip().replace('\n', '\\n') + '\n') sid += 1 pid += 1 infile.close() outfile.close() outfile_p.close() else: print('The data_type must be wiki10000 or squad...')
ACS-QG
positive
def _readse(pos): """Return interpretation of next bits as a signed exponential-Golomb code. Advances position to after the read code. Raises ReadError if the end of the bitstring is encountered while reading the code. """ <DeepExtract> oldpos = pos try: while not self[pos]: pos += 1 except IndexError: raise ReadError('Read off end of bitstring trying to read code.') leadingzeros = pos - oldpos codenum = (1 << leadingzeros) - 1 if leadingzeros > 0: if pos + leadingzeros + 1 > self.len: raise ReadError('Read off end of bitstring trying to read code.') codenum += self._readuint(leadingzeros, pos + 1) pos += leadingzeros + 1 else: assert codenum == 0 pos += 1 (codenum, pos) = (codenum, pos) </DeepExtract> m = (codenum + 1) // 2 if not codenum % 2: return (-m, pos) else: return (m, pos)
def _readse(pos): """Return interpretation of next bits as a signed exponential-Golomb code. Advances position to after the read code. Raises ReadError if the end of the bitstring is encountered while reading the code. """ oldpos = pos try: while not self[pos]: pos += 1 except IndexError: raise ReadError('Read off end of bitstring trying to read code.') leadingzeros = pos - oldpos codenum = (1 << leadingzeros) - 1 if leadingzeros > 0: if pos + leadingzeros + 1 > self.len: raise ReadError('Read off end of bitstring trying to read code.') codenum += self._readuint(leadingzeros, pos + 1) pos += leadingzeros + 1 else: assert codenum == 0 pos += 1 (codenum, pos) = (codenum, pos) m = (codenum + 1) // 2 if not codenum % 2: return (-m, pos) else: return (m, pos)
Arduino-Telescope-Control
positive
def test_azure_oauth_based_authentication_payload_is_correct(mocker): <DeepExtract> rp: RedshiftProperty = RedshiftProperty() rp.user_name = 'AzureDiamond' rp.password = 'hunter2' acp: AzureCredentialsProvider = AzureCredentialsProvider() rp.idp_tenant = 'example.com' rp.client_secret = 'peanut butter' rp.client_id = '1234' acp.add_parameter(rp) (acp, rp) = (acp, rp) </DeepExtract> MockRequest: MagicMock = MagicMock() MockRequest.raise_for_status.return_value = None MockRequest.json.return_value = {'access_token': 'mocked_token'} mocker.patch('requests.post', return_value=MockRequest) spy = mocker.spy(requests, 'post') acp.azure_oauth_based_authentication() assert spy.called assert spy.call_count == 1 assert spy.call_args[0][0] == 'https://login.microsoftonline.com/{tenant}/oauth2/token'.format(tenant=acp.idp_tenant) assert spy.call_args[1]['data']['username'] == acp.user_name assert spy.call_args[1]['data']['password'] == acp.password assert spy.call_args[1]['data']['client_secret'] == acp.client_secret assert spy.call_args[1]['data']['client_id'] == acp.client_id assert spy.call_args[1]['data']['resource'] == acp.client_id assert spy.call_args[1]['headers'] == azure_headers
def test_azure_oauth_based_authentication_payload_is_correct(mocker): rp: RedshiftProperty = RedshiftProperty() rp.user_name = 'AzureDiamond' rp.password = 'hunter2' acp: AzureCredentialsProvider = AzureCredentialsProvider() rp.idp_tenant = 'example.com' rp.client_secret = 'peanut butter' rp.client_id = '1234' acp.add_parameter(rp) (acp, rp) = (acp, rp) MockRequest: MagicMock = MagicMock() MockRequest.raise_for_status.return_value = None MockRequest.json.return_value = {'access_token': 'mocked_token'} mocker.patch('requests.post', return_value=MockRequest) spy = mocker.spy(requests, 'post') acp.azure_oauth_based_authentication() assert spy.called assert spy.call_count == 1 assert spy.call_args[0][0] == 'https://login.microsoftonline.com/{tenant}/oauth2/token'.format(tenant=acp.idp_tenant) assert spy.call_args[1]['data']['username'] == acp.user_name assert spy.call_args[1]['data']['password'] == acp.password assert spy.call_args[1]['data']['client_secret'] == acp.client_secret assert spy.call_args[1]['data']['client_id'] == acp.client_id assert spy.call_args[1]['data']['resource'] == acp.client_id assert spy.call_args[1]['headers'] == azure_headers
amazon-redshift-python-driver
positive
def action_select_next_with_popup(self, widget=None, event=None, previous=False): self.popup.show() <DeepExtract> if not self.get_windows(): return if time() - self.nextlist_time > 1.5 or self.nextlist is None: windows_stacked = self.screen.get_windows_stacked() windows = self.get_windows() snula = self.globals.settings['select_next_use_lastest_active'] rwl = self.globals.settings['reorder_window_list'] if snula and (not rwl): self.nextlist = [] minimized_list = [] for window in windows_stacked: if window in windows: if window.is_minimized(): minimized_list.append(self[window]) else: self.nextlist.append(self[window]) self.nextlist.reverse() self.nextlist.extend(minimized_list) else: topwindow = None for i in range(1, len(windows_stacked) + 1): if windows_stacked[-i] in windows and (not windows_stacked[-i].is_minimized()): topwindow = self[windows_stacked[-i]] break self.nextlist = windows if topwindow: while self.nextlist[0] != topwindow: window = self.nextlist.pop(0) self.nextlist.append(window) if self.nextlist[0].wnck.is_active(): if previous: window = self.nextlist.pop(-1) self.nextlist.insert(0, window) else: window = self.nextlist.pop(0) self.nextlist.append(window) elif previous: window = self.nextlist.pop(-1) self.nextlist.insert(0, window) else: window = self.nextlist.pop(0) self.nextlist.append(window) window = self.nextlist[0] self.nextlist_time = time() if not window in self: return self.popup.show() if self.globals.settings['select_next_activate_immediately'] and (not self.globals.settings['reorder_window_list']): window.action_select_window(widget, event) else: if self.scrollpeak_window: self.scrollpeak_window.item.set_highlighted(False) self.scrollpeak_window = window self.scrollpeak_window.item.set_highlighted(True) if self.scrollpeak_sid is not None: gobject.source_remove(self.scrollpeak_sid) if not keyboard_select: self.scrollpeak_sid = gobject.timeout_add(1500, self.scrollpeak_select) while gtk.events_pending(): gtk.main_iteration(False) self.scrollpeak_window.opacify() </DeepExtract> self.popup.hide_if_not_hovered(1500)
def action_select_next_with_popup(self, widget=None, event=None, previous=False): self.popup.show() if not self.get_windows(): return if time() - self.nextlist_time > 1.5 or self.nextlist is None: windows_stacked = self.screen.get_windows_stacked() windows = self.get_windows() snula = self.globals.settings['select_next_use_lastest_active'] rwl = self.globals.settings['reorder_window_list'] if snula and (not rwl): self.nextlist = [] minimized_list = [] for window in windows_stacked: if window in windows: if window.is_minimized(): minimized_list.append(self[window]) else: self.nextlist.append(self[window]) self.nextlist.reverse() self.nextlist.extend(minimized_list) else: topwindow = None for i in range(1, len(windows_stacked) + 1): if windows_stacked[-i] in windows and (not windows_stacked[-i].is_minimized()): topwindow = self[windows_stacked[-i]] break self.nextlist = windows if topwindow: while self.nextlist[0] != topwindow: window = self.nextlist.pop(0) self.nextlist.append(window) if self.nextlist[0].wnck.is_active(): if previous: window = self.nextlist.pop(-1) self.nextlist.insert(0, window) else: window = self.nextlist.pop(0) self.nextlist.append(window) elif previous: window = self.nextlist.pop(-1) self.nextlist.insert(0, window) else: window = self.nextlist.pop(0) self.nextlist.append(window) window = self.nextlist[0] self.nextlist_time = time() if not window in self: return self.popup.show() if self.globals.settings['select_next_activate_immediately'] and (not self.globals.settings['reorder_window_list']): window.action_select_window(widget, event) else: if self.scrollpeak_window: self.scrollpeak_window.item.set_highlighted(False) self.scrollpeak_window = window self.scrollpeak_window.item.set_highlighted(True) if self.scrollpeak_sid is not None: gobject.source_remove(self.scrollpeak_sid) if not keyboard_select: self.scrollpeak_sid = gobject.timeout_add(1500, self.scrollpeak_select) while gtk.events_pending(): gtk.main_iteration(False) self.scrollpeak_window.opacify() self.popup.hide_if_not_hovered(1500)
dockbarx
positive