column  kind           values
before  stringlengths  0 to 955k
after   stringlengths  0 to 877k
repo    stringlengths  1 to 74
type    stringclasses  1 value
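Each row below pairs a `before` snippet, in which inlined helper bodies appear wrapped in <DeepExtract> ... </DeepExtract> markers, with an `after` snippet that is the same code with the markers stripped, plus the source `repo` and a `type` label (only "positive" appears here). A minimal sketch of iterating such rows follows; it assumes the rows are stored as JSON Lines, and the file name is an invented placeholder rather than anything stated in this dump.

import json

# Hypothetical path; this dump does not say how the rows are stored on disk.
ROWS_PATH = "rows.jsonl"

def iter_rows(path):
    """Yield one dict per row with the fields listed in the schema above."""
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)

if __name__ == "__main__":
    for row in iter_rows(ROWS_PATH):
        # 'before' keeps the <DeepExtract> markers around inlined helper bodies;
        # 'after' appears to be the same code with those markers removed.
        print(row["repo"], row["type"], len(row["before"]), len(row["after"]))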
def read_calib_from_video(calib_root_dir): """ Read calibration for camera 2 from video calib files. there are calib_cam_to_cam and calib_velo_to_cam under the calib_root_dir """ data = {} <DeepExtract> data = {} with open(os.path.join(calib_root_dir, 'calib_cam_to_cam.txt'), 'r') as f: for line in f.readlines(): line = line.rstrip() if len(line) == 0: continue (key, value) = line.split(':', 1) try: data[key] = np.array([float(x) for x in value.split()]) except ValueError: pass cam2cam = data </DeepExtract> <DeepExtract> data = {} with open(os.path.join(calib_root_dir, 'calib_velo_to_cam.txt'), 'r') as f: for line in f.readlines(): line = line.rstrip() if len(line) == 0: continue (key, value) = line.split(':', 1) try: data[key] = np.array([float(x) for x in value.split()]) except ValueError: pass velo2cam = data </DeepExtract> Tr_velo_to_cam = np.zeros((3, 4)) Tr_velo_to_cam[0:3, 0:3] = np.reshape(velo2cam['R'], [3, 3]) Tr_velo_to_cam[:, 3] = velo2cam['T'] data['Tr_velo_to_cam'] = np.reshape(Tr_velo_to_cam, [12]) data['R0_rect'] = cam2cam['R_rect_00'] data['P2'] = cam2cam['P_rect_02'] data['P3'] = cam2cam['P_rect_03'] return data
def read_calib_from_video(calib_root_dir): """ Read calibration for camera 2 from video calib files. there are calib_cam_to_cam and calib_velo_to_cam under the calib_root_dir """ data = {} data = {} with open(os.path.join(calib_root_dir, 'calib_cam_to_cam.txt'), 'r') as f: for line in f.readlines(): line = line.rstrip() if len(line) == 0: continue (key, value) = line.split(':', 1) try: data[key] = np.array([float(x) for x in value.split()]) except ValueError: pass cam2cam = data data = {} with open(os.path.join(calib_root_dir, 'calib_velo_to_cam.txt'), 'r') as f: for line in f.readlines(): line = line.rstrip() if len(line) == 0: continue (key, value) = line.split(':', 1) try: data[key] = np.array([float(x) for x in value.split()]) except ValueError: pass velo2cam = data Tr_velo_to_cam = np.zeros((3, 4)) Tr_velo_to_cam[0:3, 0:3] = np.reshape(velo2cam['R'], [3, 3]) Tr_velo_to_cam[:, 3] = velo2cam['T'] data['Tr_velo_to_cam'] = np.reshape(Tr_velo_to_cam, [12]) data['R0_rect'] = cam2cam['R_rect_00'] data['P2'] = cam2cam['P_rect_02'] data['P3'] = cam2cam['P_rect_03'] return data
ebms_3dod
positive
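For orientation, here is a small synthetic illustration of the before/after convention seen in the row above. The function is invented for illustration, and it assumes the markers sit on their own lines in the stored (non-flattened) code.

import re

# Invented 'before' snippet: an inlined helper body wrapped in DeepExtract markers.
before = """def area(r):
    <DeepExtract>
    result = 3.14159 * r * r
    </DeepExtract>
    return result
"""

def strip_markers(code):
    # Drop lines holding only an opening or closing <DeepExtract> tag, which
    # appears to be the only difference between the 'before' and 'after' columns.
    return re.sub(r"^[ \t]*</?DeepExtract>[ \t]*\n", "", code, flags=re.M)

after = strip_markers(before)
print(after)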
def read(sock): try: if sock == self.server: <DeepExtract> (new_sock, address) = sock.accept() self.peers[new_sock] = Peer(new_sock, address) serve_address = self.peers[new_sock].address log.info('Accepted incoming connection from «%s»', serve_address) </DeepExtract> elif self.peers[sock].bolt_version: <DeepExtract> v = self.peers[sock].bolt_version chunked_data = b'' message_data = b'' chunk_size = -1 debug = [] while chunk_size != 0: chunk_header = sock.recv(2) if len(chunk_header) == 0: self.stop() return chunked_data += chunk_header (chunk_size,) = raw_unpack(UINT_16, chunk_header) if chunk_size > 0: chunk = sock.recv(chunk_size) chunked_data += chunk message_data += chunk else: chunk = b'' debug.append(' [%s] %s' % (h(chunk_header), h(chunk))) request = unpack(message_data) if self.script.match_request(request): log.debug('C: %s', message_repr(v, request)) elif self.script.match_auto_request(request): log.debug('C! %s', message_repr(v, request)) else: if self.script.lines: expected = message_repr(v, self.script.lines[0].message) else: expected = 'END OF SCRIPT' log.debug('C: %s', message_repr(v, request)) log.error('Message mismatch (expected <%s>, received <%s>)', expected, message_repr(v, request)) self.stop() raise SystemExit(EXIT_OFF_SCRIPT) responses = self.script.match_responses() if not responses and self.script.match_auto_request(request): if request.tag in (CLIENT[v].get('HELLO'), CLIENT[v].get('INIT')): responses = [Structure(SERVER[v]['SUCCESS'], {'connection_id': str(uuid4()), 'server': server_agents.get(v, 'Neo4j/9.99.999')})] elif request.tag == CLIENT[v].get('GOODBYE'): log.debug('S: <EXIT>') self.stop() raise SystemExit(EXIT_OK) elif request.tag == CLIENT[v]['RUN']: responses = [Structure(SERVER[v]['SUCCESS'], {'fields': []})] else: responses = [Structure(SERVER[v]['SUCCESS'], {})] for response in responses: if isinstance(response, Structure): data = pack(response) self.send_chunk(sock, data) self.send_chunk(sock) log.debug('S: %s', message_repr(v, Structure(response.tag, *response.fields))) elif isinstance(response, ExitCommand): self.stop() raise SystemExit(EXIT_OK) else: raise RuntimeError('Unknown response type %r' % (response,)) </DeepExtract> else: <DeepExtract> data = sock.recv(4) if data == BOLT: log.debug('C: <BOLT>') else: if data: log.error('C: <#?@!>') self.stop() return raw_data = sock.recv(16) (suggested_version_1,) = raw_unpack(INT_32, raw_data, 0) (suggested_version_2,) = raw_unpack(INT_32, raw_data, 4) (suggested_version_3,) = raw_unpack(INT_32, raw_data, 8) (suggested_version_4,) = raw_unpack(INT_32, raw_data, 12) client_requested_versions = [suggested_version_1, suggested_version_2, suggested_version_3, suggested_version_4] log.debug('C: <VERSION> [0x%08x, 0x%08x, 0x%08x, 0x%08x]' % tuple(client_requested_versions)) v = self.script.bolt_version if v not in client_requested_versions: raise RuntimeError('Script protocol version %r not offered by client' % v) response = raw_pack(INT_32, v) log.debug('S: <VERSION> 0x%08x' % v) self.peers[sock].bolt_version = v sock.send(response) </DeepExtract> except (KeyError, OSError): if self.running: raise
def read(sock): try: if sock == self.server: (new_sock, address) = sock.accept() self.peers[new_sock] = Peer(new_sock, address) serve_address = self.peers[new_sock].address log.info('Accepted incoming connection from «%s»', serve_address) elif self.peers[sock].bolt_version: v = self.peers[sock].bolt_version chunked_data = b'' message_data = b'' chunk_size = -1 debug = [] while chunk_size != 0: chunk_header = sock.recv(2) if len(chunk_header) == 0: self.stop() return chunked_data += chunk_header (chunk_size,) = raw_unpack(UINT_16, chunk_header) if chunk_size > 0: chunk = sock.recv(chunk_size) chunked_data += chunk message_data += chunk else: chunk = b'' debug.append(' [%s] %s' % (h(chunk_header), h(chunk))) request = unpack(message_data) if self.script.match_request(request): log.debug('C: %s', message_repr(v, request)) elif self.script.match_auto_request(request): log.debug('C! %s', message_repr(v, request)) else: if self.script.lines: expected = message_repr(v, self.script.lines[0].message) else: expected = 'END OF SCRIPT' log.debug('C: %s', message_repr(v, request)) log.error('Message mismatch (expected <%s>, received <%s>)', expected, message_repr(v, request)) self.stop() raise SystemExit(EXIT_OFF_SCRIPT) responses = self.script.match_responses() if not responses and self.script.match_auto_request(request): if request.tag in (CLIENT[v].get('HELLO'), CLIENT[v].get('INIT')): responses = [Structure(SERVER[v]['SUCCESS'], {'connection_id': str(uuid4()), 'server': server_agents.get(v, 'Neo4j/9.99.999')})] elif request.tag == CLIENT[v].get('GOODBYE'): log.debug('S: <EXIT>') self.stop() raise SystemExit(EXIT_OK) elif request.tag == CLIENT[v]['RUN']: responses = [Structure(SERVER[v]['SUCCESS'], {'fields': []})] else: responses = [Structure(SERVER[v]['SUCCESS'], {})] for response in responses: if isinstance(response, Structure): data = pack(response) self.send_chunk(sock, data) self.send_chunk(sock) log.debug('S: %s', message_repr(v, Structure(response.tag, *response.fields))) elif isinstance(response, ExitCommand): self.stop() raise SystemExit(EXIT_OK) else: raise RuntimeError('Unknown response type %r' % (response,)) else: data = sock.recv(4) if data == BOLT: log.debug('C: <BOLT>') else: if data: log.error('C: <#?@!>') self.stop() return raw_data = sock.recv(16) (suggested_version_1,) = raw_unpack(INT_32, raw_data, 0) (suggested_version_2,) = raw_unpack(INT_32, raw_data, 4) (suggested_version_3,) = raw_unpack(INT_32, raw_data, 8) (suggested_version_4,) = raw_unpack(INT_32, raw_data, 12) client_requested_versions = [suggested_version_1, suggested_version_2, suggested_version_3, suggested_version_4] log.debug('C: <VERSION> [0x%08x, 0x%08x, 0x%08x, 0x%08x]' % tuple(client_requested_versions)) v = self.script.bolt_version if v not in client_requested_versions: raise RuntimeError('Script protocol version %r not offered by client' % v) response = raw_pack(INT_32, v) log.debug('S: <VERSION> 0x%08x' % v) self.peers[sock].bolt_version = v sock.send(response) except (KeyError, OSError): if self.running: raise
boltkit
positive
def auto_cancel_all_orders(self, symbol, countdownTime): check_should_not_none(symbol, 'symbol') check_should_not_none(symbol, 'countdownTime') builder = UrlParamsBuilder() builder.put_url('symbol', symbol) builder.put_url('countdownTime', countdownTime) <DeepExtract> request = RestApiRequest() request.method = 'POST' request.host = self.__server_url builder.put_url('recvWindow', 60000) builder.put_url('timestamp', str(get_current_timestamp() - 1000)) create_signature(self.__secret_key, builder) request.header.update({'Content-Type': 'application/json'}) request.header.update({'X-MBX-APIKEY': self.__api_key}) request.post_body = builder.post_map request.url = '/dapi/v1/countdownCancelAll' + '?' + builder.build_url() print('====== Request ======') print(request) PrintMix.print_data(request) print('=====================') request = request </DeepExtract> def parse(json_wrapper): result = CountdownCancelAll.json_parse(json_wrapper) return result request.json_parser = parse return request
def auto_cancel_all_orders(self, symbol, countdownTime): check_should_not_none(symbol, 'symbol') check_should_not_none(symbol, 'countdownTime') builder = UrlParamsBuilder() builder.put_url('symbol', symbol) builder.put_url('countdownTime', countdownTime) request = RestApiRequest() request.method = 'POST' request.host = self.__server_url builder.put_url('recvWindow', 60000) builder.put_url('timestamp', str(get_current_timestamp() - 1000)) create_signature(self.__secret_key, builder) request.header.update({'Content-Type': 'application/json'}) request.header.update({'X-MBX-APIKEY': self.__api_key}) request.post_body = builder.post_map request.url = '/dapi/v1/countdownCancelAll' + '?' + builder.build_url() print('====== Request ======') print(request) PrintMix.print_data(request) print('=====================') request = request def parse(json_wrapper): result = CountdownCancelAll.json_parse(json_wrapper) return result request.json_parser = parse return request
Binance_Futures_python
positive
def model_fn(self, features, labels, mode, params): """Build the model based on features, labels, and mode. Args: features: The features dictionary containing the data Tensor and the number of examples. labels: The labels Tensor resulting from calling the model. mode: A string indicating the training mode. params: A dictionary of hyperparameters. Returns: A tf.estimator.EstimatorSpec. """ del params is_training = mode == tf.estimator.ModeKeys.TRAIN eval_active = mode == tf.estimator.ModeKeys.EVAL is_predict = mode == tf.estimator.ModeKeys.PREDICT features = tf.transpose(features, [3, 0, 1, 2]) <DeepExtract> is_training = mode == tf.estimator.ModeKeys.TRAIN is_predict = mode == tf.estimator.ModeKeys.PREDICT steps_per_epoch = float(NUM_TRAIN_IMAGES) / self.hparams.train_batch_size num_total_steps = int(steps_per_epoch * self.hparams.num_epochs) if getattr(self.hparams, 'num_total_steps', None) is None: self.hparams.add_hparam('num_total_steps', num_total_steps) else: self.hparams.set_hparam('num_total_steps', num_total_steps) hparams = copy.deepcopy(self.hparams) if not is_training: hparams.set_hparam('use_aux_head', False) tf.logging.info('Amoeba net received hparams for {}:\n{}'.format('training' if is_training else 'eval', formatted_hparams(hparams))) (logits, end_points) = model_builder.build_network(features, LABEL_CLASSES, is_training, hparams) if not is_predict: labels = tf.one_hot(labels, LABEL_CLASSES) loss = model_builder.build_softmax_loss(logits, end_points, labels, label_smoothing=hparams.label_smoothing, add_summary=False) if is_training: flops = model_builder.compute_flops_per_example(hparams.train_batch_size) else: flops = model_builder.compute_flops_per_example(hparams.eval_batch_size) tf.logging.info('number of flops: {}'.format(flops)) self._calc_num_trainable_params() if is_predict: (loss, logits) = (None, logits) (loss, logits) = (loss, logits) </DeepExtract> if is_predict: predictions = {'logits': logits} if self.hparams.use_tpu: return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions) else: return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) host_call = None train_op = None if is_training: global_step = tf.train.get_or_create_global_step() gs_t = tf.reshape(tf.cast(global_step, tf.int32), [1]) <DeepExtract> steps_per_epoch = NUM_TRAIN_IMAGES // self.hparams.train_batch_size lr_warmup_epochs = 0 if self.hparams.lr_decay_method == 'exponential': lr_warmup_epochs = self.hparams.lr_warmup_epochs learning_rate = model_builder.build_learning_rate(self.hparams.lr, self.hparams.lr_decay_method, global_step, total_steps=steps_per_epoch * self.hparams.num_epochs, decay_steps=steps_per_epoch * self.hparams.lr_num_epochs_per_decay, decay_factor=self.hparams.lr_decay_value, add_summary=False, warmup_steps=int(lr_warmup_epochs * steps_per_epoch)) learning_rate = tf.maximum(learning_rate, 0.0001 * self.hparams.lr, name='learning_rate') learning_rate = learning_rate </DeepExtract> <DeepExtract> if self.hparams.optimizer == 'sgd': tf.logging.info('Using SGD optimizer') optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) elif self.hparams.optimizer == 'momentum': tf.logging.info('Using Momentum optimizer') optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=self.hparams.momentum_rate) elif self.hparams.optimizer == 'rmsprop': tf.logging.info('Using RMSProp optimizer') optimizer = tf.train.RMSPropOptimizer(learning_rate, RMSPROP_DECAY, momentum=RMSPROP_MOMENTUM, epsilon=RMSPROP_EPSILON) else: 
tf.logging.fatal('Unknown optimizer:', self.hparams.optimizer) if self.hparams.use_tpu: optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer) optimizer = optimizer </DeepExtract> update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): <DeepExtract> grads_and_vars = optimizer.compute_gradients(loss) if self.hparams.gradient_clipping_by_global_norm > 0.0: (g, v) = zip(*grads_and_vars) (g, _) = tf.clip_by_global_norm(g, self.hparams.gradient_clipping_by_global_norm) grads_and_vars = zip(g, v) train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step) </DeepExtract> if self.hparams.moving_average_decay > 0: ema = tf.train.ExponentialMovingAverage(decay=self.hparams.moving_average_decay, num_updates=global_step) variables_to_average = tf.trainable_variables() + tf.moving_average_variables() with tf.control_dependencies([train_op]): with tf.name_scope('moving_average'): train_op = ema.apply(variables_to_average) lr_t = tf.reshape(learning_rate, [1]) host_call = None if self.hparams.enable_hostcall: def host_call_fn(gs, lr): gs = tf.cast(tf.reduce_mean(gs), tf.int64) with tf.contrib.summary.create_file_writer(self.model_dir).as_default(): with tf.contrib.summary.always_record_summaries(): tf.contrib.summary.scalar('learning_rate', tf.reduce_mean(lr), step=gs) return tf.contrib.summary.all_summary_ops() host_call = (host_call_fn, [gs_t, lr_t]) eval_metrics = None eval_metric_ops = None if eval_active: def metric_fn(labels, logits): """Evaluation metric fn. Performed on CPU, do not reference TPU ops.""" predictions = tf.argmax(logits, axis=1) categorical_labels = labels top_1_accuracy = tf.metrics.accuracy(categorical_labels, predictions) in_top_5 = tf.cast(tf.nn.in_top_k(logits, categorical_labels, 5), tf.float32) top_5_accuracy = tf.metrics.mean(in_top_5) return {'top_1_accuracy': top_1_accuracy, 'top_5_accuracy': top_5_accuracy} eval_metrics = (metric_fn, [labels, logits]) <DeepExtract> predictions = tf.argmax(logits, axis=1) categorical_labels = labels top_1_accuracy = tf.metrics.accuracy(categorical_labels, predictions) in_top_5 = tf.cast(tf.nn.in_top_k(logits, categorical_labels, 5), tf.float32) top_5_accuracy = tf.metrics.mean(in_top_5) eval_metric_ops = {'top_1_accuracy': top_1_accuracy, 'top_5_accuracy': top_5_accuracy} </DeepExtract> if self.hparams.use_tpu: return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op, host_call=host_call, eval_metrics=eval_metrics) return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op, eval_metric_ops=eval_metric_ops)
def model_fn(self, features, labels, mode, params): """Build the model based on features, labels, and mode. Args: features: The features dictionary containing the data Tensor and the number of examples. labels: The labels Tensor resulting from calling the model. mode: A string indicating the training mode. params: A dictionary of hyperparameters. Returns: A tf.estimator.EstimatorSpec. """ del params is_training = mode == tf.estimator.ModeKeys.TRAIN eval_active = mode == tf.estimator.ModeKeys.EVAL is_predict = mode == tf.estimator.ModeKeys.PREDICT features = tf.transpose(features, [3, 0, 1, 2]) is_training = mode == tf.estimator.ModeKeys.TRAIN is_predict = mode == tf.estimator.ModeKeys.PREDICT steps_per_epoch = float(NUM_TRAIN_IMAGES) / self.hparams.train_batch_size num_total_steps = int(steps_per_epoch * self.hparams.num_epochs) if getattr(self.hparams, 'num_total_steps', None) is None: self.hparams.add_hparam('num_total_steps', num_total_steps) else: self.hparams.set_hparam('num_total_steps', num_total_steps) hparams = copy.deepcopy(self.hparams) if not is_training: hparams.set_hparam('use_aux_head', False) tf.logging.info('Amoeba net received hparams for {}:\n{}'.format('training' if is_training else 'eval', formatted_hparams(hparams))) (logits, end_points) = model_builder.build_network(features, LABEL_CLASSES, is_training, hparams) if not is_predict: labels = tf.one_hot(labels, LABEL_CLASSES) loss = model_builder.build_softmax_loss(logits, end_points, labels, label_smoothing=hparams.label_smoothing, add_summary=False) if is_training: flops = model_builder.compute_flops_per_example(hparams.train_batch_size) else: flops = model_builder.compute_flops_per_example(hparams.eval_batch_size) tf.logging.info('number of flops: {}'.format(flops)) self._calc_num_trainable_params() if is_predict: (loss, logits) = (None, logits) (loss, logits) = (loss, logits) if is_predict: predictions = {'logits': logits} if self.hparams.use_tpu: return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions) else: return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) host_call = None train_op = None if is_training: global_step = tf.train.get_or_create_global_step() gs_t = tf.reshape(tf.cast(global_step, tf.int32), [1]) steps_per_epoch = NUM_TRAIN_IMAGES // self.hparams.train_batch_size lr_warmup_epochs = 0 if self.hparams.lr_decay_method == 'exponential': lr_warmup_epochs = self.hparams.lr_warmup_epochs learning_rate = model_builder.build_learning_rate(self.hparams.lr, self.hparams.lr_decay_method, global_step, total_steps=steps_per_epoch * self.hparams.num_epochs, decay_steps=steps_per_epoch * self.hparams.lr_num_epochs_per_decay, decay_factor=self.hparams.lr_decay_value, add_summary=False, warmup_steps=int(lr_warmup_epochs * steps_per_epoch)) learning_rate = tf.maximum(learning_rate, 0.0001 * self.hparams.lr, name='learning_rate') learning_rate = learning_rate if self.hparams.optimizer == 'sgd': tf.logging.info('Using SGD optimizer') optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) elif self.hparams.optimizer == 'momentum': tf.logging.info('Using Momentum optimizer') optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=self.hparams.momentum_rate) elif self.hparams.optimizer == 'rmsprop': tf.logging.info('Using RMSProp optimizer') optimizer = tf.train.RMSPropOptimizer(learning_rate, RMSPROP_DECAY, momentum=RMSPROP_MOMENTUM, epsilon=RMSPROP_EPSILON) else: tf.logging.fatal('Unknown optimizer:', self.hparams.optimizer) if 
self.hparams.use_tpu: optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer) optimizer = optimizer update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): grads_and_vars = optimizer.compute_gradients(loss) if self.hparams.gradient_clipping_by_global_norm > 0.0: (g, v) = zip(*grads_and_vars) (g, _) = tf.clip_by_global_norm(g, self.hparams.gradient_clipping_by_global_norm) grads_and_vars = zip(g, v) train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step) if self.hparams.moving_average_decay > 0: ema = tf.train.ExponentialMovingAverage(decay=self.hparams.moving_average_decay, num_updates=global_step) variables_to_average = tf.trainable_variables() + tf.moving_average_variables() with tf.control_dependencies([train_op]): with tf.name_scope('moving_average'): train_op = ema.apply(variables_to_average) lr_t = tf.reshape(learning_rate, [1]) host_call = None if self.hparams.enable_hostcall: def host_call_fn(gs, lr): gs = tf.cast(tf.reduce_mean(gs), tf.int64) with tf.contrib.summary.create_file_writer(self.model_dir).as_default(): with tf.contrib.summary.always_record_summaries(): tf.contrib.summary.scalar('learning_rate', tf.reduce_mean(lr), step=gs) return tf.contrib.summary.all_summary_ops() host_call = (host_call_fn, [gs_t, lr_t]) eval_metrics = None eval_metric_ops = None if eval_active: def metric_fn(labels, logits): """Evaluation metric fn. Performed on CPU, do not reference TPU ops.""" predictions = tf.argmax(logits, axis=1) categorical_labels = labels top_1_accuracy = tf.metrics.accuracy(categorical_labels, predictions) in_top_5 = tf.cast(tf.nn.in_top_k(logits, categorical_labels, 5), tf.float32) top_5_accuracy = tf.metrics.mean(in_top_5) return {'top_1_accuracy': top_1_accuracy, 'top_5_accuracy': top_5_accuracy} eval_metrics = (metric_fn, [labels, logits]) predictions = tf.argmax(logits, axis=1) categorical_labels = labels top_1_accuracy = tf.metrics.accuracy(categorical_labels, predictions) in_top_5 = tf.cast(tf.nn.in_top_k(logits, categorical_labels, 5), tf.float32) top_5_accuracy = tf.metrics.mean(in_top_5) eval_metric_ops = {'top_1_accuracy': top_1_accuracy, 'top_5_accuracy': top_5_accuracy} if self.hparams.use_tpu: return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op, host_call=host_call, eval_metrics=eval_metrics) return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op, eval_metric_ops=eval_metric_ops)
class-balanced-loss
positive
def sentence_to_token_ids(sentence, vocabulary, tokenizer=None, normalize_digits=False): """Convert a string to list of integers representing token-ids. For example, a sentence "I have a dog" may become tokenized into ["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2, "a": 4, "dog": 7"} this function will return [1, 2, 4, 7]. Args: sentence: the sentence in bytes format to convert to token-ids. vocabulary: a dictionary mapping tokens to integers. tokenizer: a function to use to tokenize each sentence; if None, basic_tokenizer will be used. normalize_digits: Boolean; if true, all digits are replaced by 0s. Returns: a list of integers, the token-ids for the sentence. """ if tokenizer: words = tokenizer(sentence) else: <DeepExtract> words = [] for space_separated_fragment in sentence.strip().split(): l = _WORD_SPLIT.split(space_separated_fragment) words.extend(l) words = [w for w in words if w] </DeepExtract> if not normalize_digits: return [vocabulary.get(w, UNK_ID) for w in words] return [vocabulary.get(_DIGIT_RE.sub(b'0', w), UNK_ID) for w in words]
def sentence_to_token_ids(sentence, vocabulary, tokenizer=None, normalize_digits=False): """Convert a string to list of integers representing token-ids. For example, a sentence "I have a dog" may become tokenized into ["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2, "a": 4, "dog": 7"} this function will return [1, 2, 4, 7]. Args: sentence: the sentence in bytes format to convert to token-ids. vocabulary: a dictionary mapping tokens to integers. tokenizer: a function to use to tokenize each sentence; if None, basic_tokenizer will be used. normalize_digits: Boolean; if true, all digits are replaced by 0s. Returns: a list of integers, the token-ids for the sentence. """ if tokenizer: words = tokenizer(sentence) else: words = [] for space_separated_fragment in sentence.strip().split(): l = _WORD_SPLIT.split(space_separated_fragment) words.extend(l) words = [w for w in words if w] if not normalize_digits: return [vocabulary.get(w, UNK_ID) for w in words] return [vocabulary.get(_DIGIT_RE.sub(b'0', w), UNK_ID) for w in words]
DeepAffinity
positive
def test(self): with open(self._join_data_path('test_file1.py')) as f: text1 = f.read() with open(self._join_data_path('test_file2.py')) as f: text2 = f.read() lexer = get_lexer_for_filename('data/test_file1.py', stripnl=False) raw_text_document1 = RawTextDocument(text1) raw_text_document2 = RawTextDocument(text2) file_diff = TwoWayFileDiffFactory().process(raw_text_document1, raw_text_document2) (document_model1, document_model2) = TextDocumentDiffModelFactory().process(file_diff) highlighted_text1 = HighlightedText(raw_text_document1, lexer) print('Document 1:') <DeepExtract> for text_block in document_model1: print('=' * 100) print(text_block) for text_fragment in text_block: margin = ' ' * 2 print(margin + '-' * 48) print(margin + ('\n' + margin).join(repr(text_fragment).splitlines())) if bool(text_fragment): line = '#' * 100 print(line) print(str(text_fragment).rstrip()) print(line) </DeepExtract> print('\nHighlighted Document 1:') highlighted_document1 = highlight_document(document_model1, highlighted_text1) <DeepExtract> for text_block in highlighted_document1: print('=' * 100) print(text_block) for text_fragment in text_block: margin = ' ' * 2 print(margin + '-' * 48) print(margin + ('\n' + margin).join(repr(text_fragment).splitlines())) if bool(text_fragment): line = '#' * 100 print(line) print(str(text_fragment).rstrip()) print(line) </DeepExtract>
def test(self): with open(self._join_data_path('test_file1.py')) as f: text1 = f.read() with open(self._join_data_path('test_file2.py')) as f: text2 = f.read() lexer = get_lexer_for_filename('data/test_file1.py', stripnl=False) raw_text_document1 = RawTextDocument(text1) raw_text_document2 = RawTextDocument(text2) file_diff = TwoWayFileDiffFactory().process(raw_text_document1, raw_text_document2) (document_model1, document_model2) = TextDocumentDiffModelFactory().process(file_diff) highlighted_text1 = HighlightedText(raw_text_document1, lexer) print('Document 1:') for text_block in document_model1: print('=' * 100) print(text_block) for text_fragment in text_block: margin = ' ' * 2 print(margin + '-' * 48) print(margin + ('\n' + margin).join(repr(text_fragment).splitlines())) if bool(text_fragment): line = '#' * 100 print(line) print(str(text_fragment).rstrip()) print(line) print('\nHighlighted Document 1:') highlighted_document1 = highlight_document(document_model1, highlighted_text1) for text_block in highlighted_document1: print('=' * 100) print(text_block) for text_fragment in text_block: margin = ' ' * 2 print(margin + '-' * 48) print(margin + ('\n' + margin).join(repr(text_fragment).splitlines())) if bool(text_fragment): line = '#' * 100 print(line) print(str(text_fragment).rstrip()) print(line)
CodeReview
positive
def print_summary(self, verbose=False, no_color=False): """Prints a summary of the validation process so far.""" buffer = StringIO() self.handler = OutputHandler(buffer, no_color) self.handler.write('\n<<GREEN>>Summary:').write('-' * 30) self.handler.write('%s Errors, %s Warnings, %s Notices' % (len(self.errors), len(self.warnings), len(self.notices))) if self.failed(): self.handler.write('<<BLUE>>Test failed! Errors:') for error in self.errors: <DeepExtract> output = ['\n', '<<RED>>Error:<<NORMAL>>\t', error['message']] if verbose: verbose_output = [] if error['description']: verbose_output.append(self._flatten_list(error['description'])) verbose_output.append('\tTier:\t%d' % error['tier']) files = error['file'] if files is not None and files != '': fmsg = '\tFile:\t%s' if type(files) is list: if files[-1] == '': files[-1] = '(none)' verbose_output.append(fmsg % ' > '.join(files)) else: verbose_output.append(fmsg % files) if error['line']: verbose_output.append('\tLine:\t%s' % error['line']) if error['column'] and error['column'] != 0: verbose_output.append('\tColumn:\t%d' % error['column']) if 'context' in error and error['context']: verbose_output.append('\tContext:') verbose_output.extend(['\t> %s' % ('-' * 20 if x is None else x) for x in error.get('context', [])]) output.append('\n') output.append('\n'.join(verbose_output)) self.handler.write(u''.join(map(unicodehelper.decode, output))) </DeepExtract> for warning in self.warnings: <DeepExtract> output = ['\n', '<<YELLOW>>Warning:<<NORMAL>> ', warning['message']] if verbose: verbose_output = [] if warning['description']: verbose_output.append(self._flatten_list(warning['description'])) verbose_output.append('\tTier:\t%d' % warning['tier']) files = warning['file'] if files is not None and files != '': fmsg = '\tFile:\t%s' if type(files) is list: if files[-1] == '': files[-1] = '(none)' verbose_output.append(fmsg % ' > '.join(files)) else: verbose_output.append(fmsg % files) if warning['line']: verbose_output.append('\tLine:\t%s' % warning['line']) if warning['column'] and warning['column'] != 0: verbose_output.append('\tColumn:\t%d' % warning['column']) if 'context' in warning and warning['context']: verbose_output.append('\tContext:') verbose_output.extend(['\t> %s' % ('-' * 20 if x is None else x) for x in warning.get('context', [])]) output.append('\n') output.append('\n'.join(verbose_output)) self.handler.write(u''.join(map(unicodehelper.decode, output))) </DeepExtract> else: self.handler.write('<<GREEN>>All tests succeeded!') if self.notices: for notice in self.notices: <DeepExtract> output = ['\n', '<<WHITE>>Notice:<<NORMAL>>\t', notice['message']] if verbose: verbose_output = [] if notice['description']: verbose_output.append(self._flatten_list(notice['description'])) verbose_output.append('\tTier:\t%d' % notice['tier']) files = notice['file'] if files is not None and files != '': fmsg = '\tFile:\t%s' if type(files) is list: if files[-1] == '': files[-1] = '(none)' verbose_output.append(fmsg % ' > '.join(files)) else: verbose_output.append(fmsg % files) if notice['line']: verbose_output.append('\tLine:\t%s' % notice['line']) if notice['column'] and notice['column'] != 0: verbose_output.append('\tColumn:\t%d' % notice['column']) if 'context' in notice and notice['context']: verbose_output.append('\tContext:') verbose_output.extend(['\t> %s' % ('-' * 20 if x is None else x) for x in notice.get('context', [])]) output.append('\n') output.append('\n'.join(verbose_output)) self.handler.write(u''.join(map(unicodehelper.decode, 
output))) </DeepExtract> self.handler.write('\n') if self.unfinished: self.handler.write('<<RED>>Validation terminated early') self.handler.write('Errors during validation are preventingthe validation proecss from completing.') self.handler.write('Use the <<YELLOW>>--determined<<NORMAL>> flag to ignore these errors.') self.handler.write('\n') return buffer.getvalue()
def print_summary(self, verbose=False, no_color=False): """Prints a summary of the validation process so far.""" buffer = StringIO() self.handler = OutputHandler(buffer, no_color) self.handler.write('\n<<GREEN>>Summary:').write('-' * 30) self.handler.write('%s Errors, %s Warnings, %s Notices' % (len(self.errors), len(self.warnings), len(self.notices))) if self.failed(): self.handler.write('<<BLUE>>Test failed! Errors:') for error in self.errors: output = ['\n', '<<RED>>Error:<<NORMAL>>\t', error['message']] if verbose: verbose_output = [] if error['description']: verbose_output.append(self._flatten_list(error['description'])) verbose_output.append('\tTier:\t%d' % error['tier']) files = error['file'] if files is not None and files != '': fmsg = '\tFile:\t%s' if type(files) is list: if files[-1] == '': files[-1] = '(none)' verbose_output.append(fmsg % ' > '.join(files)) else: verbose_output.append(fmsg % files) if error['line']: verbose_output.append('\tLine:\t%s' % error['line']) if error['column'] and error['column'] != 0: verbose_output.append('\tColumn:\t%d' % error['column']) if 'context' in error and error['context']: verbose_output.append('\tContext:') verbose_output.extend(['\t> %s' % ('-' * 20 if x is None else x) for x in error.get('context', [])]) output.append('\n') output.append('\n'.join(verbose_output)) self.handler.write(u''.join(map(unicodehelper.decode, output))) for warning in self.warnings: output = ['\n', '<<YELLOW>>Warning:<<NORMAL>> ', warning['message']] if verbose: verbose_output = [] if warning['description']: verbose_output.append(self._flatten_list(warning['description'])) verbose_output.append('\tTier:\t%d' % warning['tier']) files = warning['file'] if files is not None and files != '': fmsg = '\tFile:\t%s' if type(files) is list: if files[-1] == '': files[-1] = '(none)' verbose_output.append(fmsg % ' > '.join(files)) else: verbose_output.append(fmsg % files) if warning['line']: verbose_output.append('\tLine:\t%s' % warning['line']) if warning['column'] and warning['column'] != 0: verbose_output.append('\tColumn:\t%d' % warning['column']) if 'context' in warning and warning['context']: verbose_output.append('\tContext:') verbose_output.extend(['\t> %s' % ('-' * 20 if x is None else x) for x in warning.get('context', [])]) output.append('\n') output.append('\n'.join(verbose_output)) self.handler.write(u''.join(map(unicodehelper.decode, output))) else: self.handler.write('<<GREEN>>All tests succeeded!') if self.notices: for notice in self.notices: output = ['\n', '<<WHITE>>Notice:<<NORMAL>>\t', notice['message']] if verbose: verbose_output = [] if notice['description']: verbose_output.append(self._flatten_list(notice['description'])) verbose_output.append('\tTier:\t%d' % notice['tier']) files = notice['file'] if files is not None and files != '': fmsg = '\tFile:\t%s' if type(files) is list: if files[-1] == '': files[-1] = '(none)' verbose_output.append(fmsg % ' > '.join(files)) else: verbose_output.append(fmsg % files) if notice['line']: verbose_output.append('\tLine:\t%s' % notice['line']) if notice['column'] and notice['column'] != 0: verbose_output.append('\tColumn:\t%d' % notice['column']) if 'context' in notice and notice['context']: verbose_output.append('\tContext:') verbose_output.extend(['\t> %s' % ('-' * 20 if x is None else x) for x in notice.get('context', [])]) output.append('\n') output.append('\n'.join(verbose_output)) self.handler.write(u''.join(map(unicodehelper.decode, output))) self.handler.write('\n') if self.unfinished: 
self.handler.write('<<RED>>Validation terminated early') self.handler.write('Errors during validation are preventingthe validation proecss from completing.') self.handler.write('Use the <<YELLOW>>--determined<<NORMAL>> flag to ignore these errors.') self.handler.write('\n') return buffer.getvalue()
app-validator
positive
def get_nouns(text): <DeepExtract> tagged = pos_tag_text(text) words = [w for (w, pos) in tagged if pos == 'NN'] nouns = words </DeepExtract> return nouns
def get_nouns(text): tagged = pos_tag_text(text) words = [w for (w, pos) in tagged if pos == 'NN'] nouns = words return nouns
aurum-datadiscovery
positive
def run_strategy(context, data): """ A function to define core strategy steps """ <DeepExtract> try: price_data = data.history(context.securities, 'close', context.params['indicator_lookback'], context.params['indicator_freq']) except: return for security in context.securities: px = price_data.loc[:, security].values context.signals[security] = signal_function(px, context.params) </DeepExtract> <DeepExtract> num_secs = len(context.securities) weight = round(1.0 / num_secs, 2) * context.params['leverage'] for security in context.securities: if context.signals[security] > context.params['buy_signal_threshold']: context.target_position[security] = weight elif context.signals[security] < context.params['sell_signal_threshold']: context.target_position[security] = -weight else: context.target_position[security] = 0 </DeepExtract> <DeepExtract> for security in context.securities: order_target_percent(security, context.target_position[security]) </DeepExtract>
def run_strategy(context, data): """ A function to define core strategy steps """ try: price_data = data.history(context.securities, 'close', context.params['indicator_lookback'], context.params['indicator_freq']) except: return for security in context.securities: px = price_data.loc[:, security].values context.signals[security] = signal_function(px, context.params) num_secs = len(context.securities) weight = round(1.0 / num_secs, 2) * context.params['leverage'] for security in context.securities: if context.signals[security] > context.params['buy_signal_threshold']: context.target_position[security] = weight elif context.signals[security] < context.params['sell_signal_threshold']: context.target_position[security] = -weight else: context.target_position[security] = 0 for security in context.securities: order_target_percent(security, context.target_position[security])
blueshift-demo-strategies
positive
def filter_subscan_dropouts(self, perc=0, return_type='rec'): """Filtration to drop data and ensure that we only average parts with same timestamp. Potentially this could reduce risk of non-closing errors. Args: perc (float): drop baseline from scan if it has less than this fraction of median baseline observation time during the scan return_type (str): data frame ('df') or recarray ('rec') Returns: (Obsdata): a observation object with flagged data points removed """ if not isinstance(self.scans, np.ndarray): print('List of scans in ndarray format required! Add it with add_scans') else: df = ehdf.make_df(self) tot_points = np.shape(df)[0] (bins, labs) = ehdf.get_bins_labels(self.scans) df['scan_id'] = list(pd.cut(df.time, bins, labels=labs)) df['count_samples'] = 1 hm1 = df.groupby(['scan_id', 'baseline', 'polarization']) hm1 = hm1.agg({'count_samples': np.sum}).reset_index() hm1['count_baselines_before'] = 1 hm2 = hm1.groupby(['scan_id', 'polarization']) hm2 = hm2.agg({'count_samples': lambda x: perc * np.median(x), 'count_baselines_before': np.sum}).reset_index() dict_elem_in_scan = dict(zip(hm2.scan_id, hm2.count_samples)) hm1 = hm1[list(map(lambda x: x[1] >= dict_elem_in_scan[x[0]], list(zip(hm1.scan_id, hm1.count_samples))))] list_good_scans_baselines = list(zip(hm1.scan_id, hm1.baseline)) df_filtered = df[list(map(lambda x: x in list_good_scans_baselines, list(zip(df.scan_id, df.baseline))))] df_filtered['count_samples'] = 1 hm3 = df_filtered.groupby(['scan_id', 'baseline', 'polarization']) hm3 = hm3.agg({'count_samples': np.sum}).reset_index() hm3['count_baselines_after'] = 1 hm4 = hm3.groupby(['scan_id', 'polarization']) hm4 = hm4.agg({'count_baselines_after': np.sum}).reset_index() dict_how_many_baselines = dict(zip(hm4.scan_id, hm4.count_baselines_after)) df_filtered['count_baselines_per_time'] = 1 hm5 = df_filtered.groupby(['datetime', 'scan_id', 'polarization']) hm5 = hm5.agg({'count_baselines_per_time': np.sum}).reset_index() dict_datetime_num_baselines = dict(zip(hm5.datetime, hm5.count_baselines_per_time)) df_filtered2 = df_filtered[list(map(lambda x: dict_datetime_num_baselines[x[1]] == dict_how_many_baselines[x[0]], list(zip(df_filtered.scan_id, df_filtered.datetime))))] remaining_points = np.shape(df_filtered2)[0] print('Flagged out {} of {} datapoints'.format(tot_points - remaining_points, tot_points)) if return_type == 'rec': out_vis = ehdf.df_to_rec(df_filtered2, 'vis') <DeepExtract> arglist = [self.ra, self.dec, self.rf, self.bw, self.data, self.tarr] argdict = {'scantable': self.scans, 'polrep': self.polrep, 'source': self.source, 'mjd': self.mjd, 'timetype': self.timetype, 'ampcal': self.ampcal, 'phasecal': self.phasecal, 'opacitycal': self.opacitycal, 'dcal': self.dcal, 'frcal': self.frcal} (arglist, argdict) = (arglist, argdict) </DeepExtract> arglist[DATPOS] = out_vis out = Obsdata(*arglist, **argdict) return out
def filter_subscan_dropouts(self, perc=0, return_type='rec'): """Filtration to drop data and ensure that we only average parts with same timestamp. Potentially this could reduce risk of non-closing errors. Args: perc (float): drop baseline from scan if it has less than this fraction of median baseline observation time during the scan return_type (str): data frame ('df') or recarray ('rec') Returns: (Obsdata): a observation object with flagged data points removed """ if not isinstance(self.scans, np.ndarray): print('List of scans in ndarray format required! Add it with add_scans') else: df = ehdf.make_df(self) tot_points = np.shape(df)[0] (bins, labs) = ehdf.get_bins_labels(self.scans) df['scan_id'] = list(pd.cut(df.time, bins, labels=labs)) df['count_samples'] = 1 hm1 = df.groupby(['scan_id', 'baseline', 'polarization']) hm1 = hm1.agg({'count_samples': np.sum}).reset_index() hm1['count_baselines_before'] = 1 hm2 = hm1.groupby(['scan_id', 'polarization']) hm2 = hm2.agg({'count_samples': lambda x: perc * np.median(x), 'count_baselines_before': np.sum}).reset_index() dict_elem_in_scan = dict(zip(hm2.scan_id, hm2.count_samples)) hm1 = hm1[list(map(lambda x: x[1] >= dict_elem_in_scan[x[0]], list(zip(hm1.scan_id, hm1.count_samples))))] list_good_scans_baselines = list(zip(hm1.scan_id, hm1.baseline)) df_filtered = df[list(map(lambda x: x in list_good_scans_baselines, list(zip(df.scan_id, df.baseline))))] df_filtered['count_samples'] = 1 hm3 = df_filtered.groupby(['scan_id', 'baseline', 'polarization']) hm3 = hm3.agg({'count_samples': np.sum}).reset_index() hm3['count_baselines_after'] = 1 hm4 = hm3.groupby(['scan_id', 'polarization']) hm4 = hm4.agg({'count_baselines_after': np.sum}).reset_index() dict_how_many_baselines = dict(zip(hm4.scan_id, hm4.count_baselines_after)) df_filtered['count_baselines_per_time'] = 1 hm5 = df_filtered.groupby(['datetime', 'scan_id', 'polarization']) hm5 = hm5.agg({'count_baselines_per_time': np.sum}).reset_index() dict_datetime_num_baselines = dict(zip(hm5.datetime, hm5.count_baselines_per_time)) df_filtered2 = df_filtered[list(map(lambda x: dict_datetime_num_baselines[x[1]] == dict_how_many_baselines[x[0]], list(zip(df_filtered.scan_id, df_filtered.datetime))))] remaining_points = np.shape(df_filtered2)[0] print('Flagged out {} of {} datapoints'.format(tot_points - remaining_points, tot_points)) if return_type == 'rec': out_vis = ehdf.df_to_rec(df_filtered2, 'vis') arglist = [self.ra, self.dec, self.rf, self.bw, self.data, self.tarr] argdict = {'scantable': self.scans, 'polrep': self.polrep, 'source': self.source, 'mjd': self.mjd, 'timetype': self.timetype, 'ampcal': self.ampcal, 'phasecal': self.phasecal, 'opacitycal': self.opacitycal, 'dcal': self.dcal, 'frcal': self.frcal} (arglist, argdict) = (arglist, argdict) arglist[DATPOS] = out_vis out = Obsdata(*arglist, **argdict) return out
eht-imaging
positive
def help(request): <DeepExtract> intel_machine.re_init() mips_machine.re_init() riscv_machine.re_init() if False: wasm_machine.re_init() </DeepExtract> <DeepExtract> intel_machine.flavor = None riscv_machine.flavor = None mips_machine.flavor = None if False: wasm_machine.flavor = None </DeepExtract> <DeepExtract> site_hdr = 'Emu: a multi-language assembly emulator' site_list = Site.objects.all() for site in site_list: site_hdr = site.header break site_hdr = site_hdr </DeepExtract> return render(request, 'help.html', {HEADER: site_hdr})
def help(request): intel_machine.re_init() mips_machine.re_init() riscv_machine.re_init() if False: wasm_machine.re_init() intel_machine.flavor = None riscv_machine.flavor = None mips_machine.flavor = None if False: wasm_machine.flavor = None site_hdr = 'Emu: a multi-language assembly emulator' site_list = Site.objects.all() for site in site_list: site_hdr = site.header break site_hdr = site_hdr return render(request, 'help.html', {HEADER: site_hdr})
Emu86
positive
def get_config(self) -> Dict[str, Any]: config = super().get_config() def try_serialize(parameter, name): if isinstance(parameter, str): return parameter try: return tf.keras.utils.legacy.serialize_keras_object(parameter) except (ValueError, TypeError): if isinstance(parameter, tf.Variable): return 'variable' raise TypeError(f"Can't serialize {name} of type {type(parameter)}.") <DeepExtract> if isinstance(self.kernel_parameter, str): kernel_parameter = self.kernel_parameter try: kernel_parameter = tf.keras.utils.legacy.serialize_keras_object(self.kernel_parameter) except (ValueError, TypeError): if isinstance(self.kernel_parameter, tf.Variable): kernel_parameter = 'variable' raise TypeError(f"Can't serialize {'kernel'} of type {type(self.kernel_parameter)}.") </DeepExtract> <DeepExtract> if isinstance(self.bias_parameter, str): bias_parameter = self.bias_parameter try: bias_parameter = tf.keras.utils.legacy.serialize_keras_object(self.bias_parameter) except (ValueError, TypeError): if isinstance(self.bias_parameter, tf.Variable): bias_parameter = 'variable' raise TypeError(f"Can't serialize {'bias'} of type {type(self.bias_parameter)}.") </DeepExtract> config.update(filters=self.filters, kernel_support=self.kernel_support, corr=self.corr, strides_down=self.strides_down, strides_up=self.strides_up, padding=self.padding, extra_pad_end=self.extra_pad_end, channel_separable=self.channel_separable, data_format=self.data_format, activation=tf.keras.activations.serialize(self.activation), use_bias=self.use_bias, use_explicit=self.use_explicit, kernel_parameter=kernel_parameter, bias_parameter=bias_parameter, kernel_initializer=tf.keras.initializers.serialize(self.kernel_initializer), bias_initializer=tf.keras.initializers.serialize(self.bias_initializer), kernel_regularizer=tf.keras.regularizers.serialize(self.kernel_regularizer), bias_regularizer=tf.keras.regularizers.serialize(self.bias_regularizer)) return config
def get_config(self) -> Dict[str, Any]: config = super().get_config() def try_serialize(parameter, name): if isinstance(parameter, str): return parameter try: return tf.keras.utils.legacy.serialize_keras_object(parameter) except (ValueError, TypeError): if isinstance(parameter, tf.Variable): return 'variable' raise TypeError(f"Can't serialize {name} of type {type(parameter)}.") if isinstance(self.kernel_parameter, str): kernel_parameter = self.kernel_parameter try: kernel_parameter = tf.keras.utils.legacy.serialize_keras_object(self.kernel_parameter) except (ValueError, TypeError): if isinstance(self.kernel_parameter, tf.Variable): kernel_parameter = 'variable' raise TypeError(f"Can't serialize {'kernel'} of type {type(self.kernel_parameter)}.") if isinstance(self.bias_parameter, str): bias_parameter = self.bias_parameter try: bias_parameter = tf.keras.utils.legacy.serialize_keras_object(self.bias_parameter) except (ValueError, TypeError): if isinstance(self.bias_parameter, tf.Variable): bias_parameter = 'variable' raise TypeError(f"Can't serialize {'bias'} of type {type(self.bias_parameter)}.") config.update(filters=self.filters, kernel_support=self.kernel_support, corr=self.corr, strides_down=self.strides_down, strides_up=self.strides_up, padding=self.padding, extra_pad_end=self.extra_pad_end, channel_separable=self.channel_separable, data_format=self.data_format, activation=tf.keras.activations.serialize(self.activation), use_bias=self.use_bias, use_explicit=self.use_explicit, kernel_parameter=kernel_parameter, bias_parameter=bias_parameter, kernel_initializer=tf.keras.initializers.serialize(self.kernel_initializer), bias_initializer=tf.keras.initializers.serialize(self.bias_initializer), kernel_regularizer=tf.keras.regularizers.serialize(self.kernel_regularizer), bias_regularizer=tf.keras.regularizers.serialize(self.bias_regularizer)) return config
compression
positive
def set_url_config(self, function_name: str, qualifier: str=None, auth_type: str=IAM_AUTH_TYPE, cors: dict=None, principal: str=None, source_arn: str=None): _LOG.info(f'Setting url config for lambda: {function_name} with alias: {qualifier}') <DeepExtract> params = dict(FunctionName=function_name) if qualifier: params['Qualifier'] = qualifier try: existing_url = self.client.get_function_url_config(**params) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': existing_url = None raise e </DeepExtract> if cors: if not cors.get('allow_origins'): cors['allow_origins'] = ['*'] if not existing_url: _LOG.info('Existing url config was not found. Creating...') function_url = self.create_url_config(function_name=function_name, qualifier=qualifier, auth_type=auth_type, cors=cors)['FunctionUrl'] else: _LOG.info('Existing url config was found. Updating...') existing_type = existing_url['AuthType'] if existing_type != auth_type or existing_type == IAM_AUTH_TYPE: _LOG.warning('User has changed auth type or may have changed principal or source arn. Removing old permission') <DeepExtract> params = dict(FunctionName=function_name, StatementId=AUTH_TYPE_TO_STATEMENT_ID[existing_type]) if qualifier: params['Qualifier'] = qualifier try: self.client.remove_permission(**params) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException' and soft: return None raise e </DeepExtract> function_url = self.create_url_config(function_name=function_name, qualifier=qualifier, auth_type=auth_type, cors=cors, update=True)['FunctionUrl'] if auth_type == NONE_AUTH_TYPE: _LOG.warning(f'Auth type is {NONE_AUTH_TYPE}. Setting the necessary resource-based policy') <DeepExtract> action = 'lambda:InvokeFunctionUrl' if auth_type else 'lambda:InvokeFunction' if not AUTH_TYPE_TO_STATEMENT_ID[auth_type]: AUTH_TYPE_TO_STATEMENT_ID[auth_type] = str(uuid.uuid1()) params = dict(FunctionName=function_name, StatementId=AUTH_TYPE_TO_STATEMENT_ID[auth_type], Action=action, Principal='*') if auth_type: params['FunctionUrlAuthType'] = auth_type if source_arn: params['SourceArn'] = source_arn if qualifier: params['Qualifier'] = qualifier try: return self.client.add_permission(**params) except ClientError as e: if e.response['Error']['Code'] == 'ResourceConflictException' and True: return None raise e </DeepExtract> elif auth_type == IAM_AUTH_TYPE and principal: _LOG.warning(f'Auth type is {IAM_AUTH_TYPE}. Setting the necessary resource-based policy') <DeepExtract> action = 'lambda:InvokeFunctionUrl' if auth_type else 'lambda:InvokeFunction' if not AUTH_TYPE_TO_STATEMENT_ID[auth_type]: AUTH_TYPE_TO_STATEMENT_ID[auth_type] = str(uuid.uuid1()) params = dict(FunctionName=function_name, StatementId=AUTH_TYPE_TO_STATEMENT_ID[auth_type], Action=action, Principal=principal) if auth_type: params['FunctionUrlAuthType'] = auth_type if source_arn: params['SourceArn'] = source_arn if qualifier: params['Qualifier'] = qualifier try: return self.client.add_permission(**params) except ClientError as e: if e.response['Error']['Code'] == 'ResourceConflictException' and exists_ok: return None raise e </DeepExtract> return function_url
def set_url_config(self, function_name: str, qualifier: str=None, auth_type: str=IAM_AUTH_TYPE, cors: dict=None, principal: str=None, source_arn: str=None): _LOG.info(f'Setting url config for lambda: {function_name} with alias: {qualifier}') params = dict(FunctionName=function_name) if qualifier: params['Qualifier'] = qualifier try: existing_url = self.client.get_function_url_config(**params) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException': existing_url = None raise e if cors: if not cors.get('allow_origins'): cors['allow_origins'] = ['*'] if not existing_url: _LOG.info('Existing url config was not found. Creating...') function_url = self.create_url_config(function_name=function_name, qualifier=qualifier, auth_type=auth_type, cors=cors)['FunctionUrl'] else: _LOG.info('Existing url config was found. Updating...') existing_type = existing_url['AuthType'] if existing_type != auth_type or existing_type == IAM_AUTH_TYPE: _LOG.warning('User has changed auth type or may have changed principal or source arn. Removing old permission') params = dict(FunctionName=function_name, StatementId=AUTH_TYPE_TO_STATEMENT_ID[existing_type]) if qualifier: params['Qualifier'] = qualifier try: self.client.remove_permission(**params) except ClientError as e: if e.response['Error']['Code'] == 'ResourceNotFoundException' and soft: return None raise e function_url = self.create_url_config(function_name=function_name, qualifier=qualifier, auth_type=auth_type, cors=cors, update=True)['FunctionUrl'] if auth_type == NONE_AUTH_TYPE: _LOG.warning(f'Auth type is {NONE_AUTH_TYPE}. Setting the necessary resource-based policy') action = 'lambda:InvokeFunctionUrl' if auth_type else 'lambda:InvokeFunction' if not AUTH_TYPE_TO_STATEMENT_ID[auth_type]: AUTH_TYPE_TO_STATEMENT_ID[auth_type] = str(uuid.uuid1()) params = dict(FunctionName=function_name, StatementId=AUTH_TYPE_TO_STATEMENT_ID[auth_type], Action=action, Principal='*') if auth_type: params['FunctionUrlAuthType'] = auth_type if source_arn: params['SourceArn'] = source_arn if qualifier: params['Qualifier'] = qualifier try: return self.client.add_permission(**params) except ClientError as e: if e.response['Error']['Code'] == 'ResourceConflictException' and True: return None raise e elif auth_type == IAM_AUTH_TYPE and principal: _LOG.warning(f'Auth type is {IAM_AUTH_TYPE}. Setting the necessary resource-based policy') action = 'lambda:InvokeFunctionUrl' if auth_type else 'lambda:InvokeFunction' if not AUTH_TYPE_TO_STATEMENT_ID[auth_type]: AUTH_TYPE_TO_STATEMENT_ID[auth_type] = str(uuid.uuid1()) params = dict(FunctionName=function_name, StatementId=AUTH_TYPE_TO_STATEMENT_ID[auth_type], Action=action, Principal=principal) if auth_type: params['FunctionUrlAuthType'] = auth_type if source_arn: params['SourceArn'] = source_arn if qualifier: params['Qualifier'] = qualifier try: return self.client.add_permission(**params) except ClientError as e: if e.response['Error']['Code'] == 'ResourceConflictException' and exists_ok: return None raise e return function_url
aws-syndicate
positive
def __getattr__(self, key, load_if_lazy=True): try: return super(Document, self).__getattr__(key) except AttributeError: pass try: if key in self._properties: return self._properties[key] if key in self._attributes: return self._attributes[key] if self._lazy: <DeepExtract> if True and (not self._autoload): logger.debug('Autoloading is disabled, not reverting the document implicitly...') return self._lazy = False logger.debug('Reverting to database state (%s, %s)' % (self.__class__.__name__, str(self.pk))) if self._db_loader: obj = self._db_loader() else: backend = backend or self._backend if not backend: raise AttributeError('No backend given!') if self.pk is None: return obj = backend.get(self.__class__, {self.get_pk_name(): self.pk}) self._attributes = obj.attributes self.initialize() </DeepExtract> return self._attributes[key] except KeyError: raise AttributeError(key)
def __getattr__(self, key, load_if_lazy=True): try: return super(Document, self).__getattr__(key) except AttributeError: pass try: if key in self._properties: return self._properties[key] if key in self._attributes: return self._attributes[key] if self._lazy: if True and (not self._autoload): logger.debug('Autoloading is disabled, not reverting the document implicitly...') return self._lazy = False logger.debug('Reverting to database state (%s, %s)' % (self.__class__.__name__, str(self.pk))) if self._db_loader: obj = self._db_loader() else: backend = backend or self._backend if not backend: raise AttributeError('No backend given!') if self.pk is None: return obj = backend.get(self.__class__, {self.get_pk_name(): self.pk}) self._attributes = obj.attributes self.initialize() return self._attributes[key] except KeyError: raise AttributeError(key)
blitzdb
positive
def reset(self): timestep = self._env.reset() <DeepExtract> if not timestep.first(): self._steps += 1 self._episode_len += 1 if timestep.last(): self._episode += 1 self._episode_return += timestep.reward or 0.0 self._total_return += timestep.reward or 0.0 if self._log_by_step: if _logarithmic_logging(self._steps) or self._log_every: self._log_bsuite_data() elif timestep.last(): if _logarithmic_logging(self._episode) or self._log_every: self._log_bsuite_data() if timestep.last(): self._episode_len = 0 self._episode_return = 0.0 if self._episode == self._env.bsuite_num_episodes: self.flush() </DeepExtract> return timestep
def reset(self): timestep = self._env.reset() if not timestep.first(): self._steps += 1 self._episode_len += 1 if timestep.last(): self._episode += 1 self._episode_return += timestep.reward or 0.0 self._total_return += timestep.reward or 0.0 if self._log_by_step: if _logarithmic_logging(self._steps) or self._log_every: self._log_bsuite_data() elif timestep.last(): if _logarithmic_logging(self._episode) or self._log_every: self._log_bsuite_data() if timestep.last(): self._episode_len = 0 self._episode_return = 0.0 if self._episode == self._env.bsuite_num_episodes: self.flush() return timestep
bsuite
positive
def tokenize(self): token = '' EOF = False while not EOF: nextchar = self.instream.read(1) if not nextchar: EOF = True <DeepExtract> if self._isdigit(token[-1]): (type, _) = (0, not self._isdigit(nextchar)) if self._isletter(token[-1]): (type, _) = (1, not self._isletter(nextchar)) (type, _) = (2, self._isdigit(nextchar) or self._isletter(nextchar)) </DeepExtract> yield (token, type) return if token: <DeepExtract> if self._isdigit(token[-1]): (type, switch) = (0, not self._isdigit(nextchar)) if self._isletter(token[-1]): (type, switch) = (1, not self._isletter(nextchar)) (type, switch) = (2, self._isdigit(nextchar) or self._isletter(nextchar)) </DeepExtract> if not switch: token += nextchar else: yield (token, type) token = nextchar else: token += nextchar
def tokenize(self): token = '' EOF = False while not EOF: nextchar = self.instream.read(1) if not nextchar: EOF = True if self._isdigit(token[-1]): (type, _) = (0, not self._isdigit(nextchar)) if self._isletter(token[-1]): (type, _) = (1, not self._isletter(nextchar)) (type, _) = (2, self._isdigit(nextchar) or self._isletter(nextchar)) yield (token, type) return if token: if self._isdigit(token[-1]): (type, switch) = (0, not self._isdigit(nextchar)) if self._isletter(token[-1]): (type, switch) = (1, not self._isletter(nextchar)) (type, switch) = (2, self._isdigit(nextchar) or self._isletter(nextchar)) if not switch: token += nextchar else: yield (token, type) token = nextchar else: token += nextchar
dateparser
positive
def update_cost_estimate(self, itemtxs_qs: Optional[ItemTransactionModelQuerySet]=None, commit: bool=False): """ Updates the cost estimate of the EstimateModel instance. Parameters ---------- itemtxs_qs: ItemTransactionModelQuerySet Prefetched ItemTransactionModelQuerySet. A new ItemTransactionModelQuerySet will be fetched from DB if not provided. If provided will be validated. commit: bool If True, the new revenue estimate will be committed into the DB. """ <DeepExtract> if not itemtxs_qs: itemtxs_qs = self.itemtransactionmodel_set.select_related('item_model').all() else: self.validate_item_transaction_qs(itemtxs_qs) itemtxs_qs = itemtxs_qs </DeepExtract> estimates = {'labor': sum((a.ce_cost_estimate for a in itemtxs_qs if a.item_model.is_labor())), 'material': sum((a.ce_cost_estimate for a in itemtxs_qs if a.item_model.is_material())), 'equipment': sum((a.ce_cost_estimate for a in itemtxs_qs if a.item_model.is_equipment())), 'other': sum((a.ce_cost_estimate for a in itemtxs_qs if a.item_model.is_other() or not a.item_model_id or (not a.item_model.item_type) or a.item_model.is_lump_sum()))} self.labor_estimate = estimates['labor'] self.material_estimate = estimates['material'] self.equipment_estimate = estimates['equipment'] self.other_estimate = estimates['other'] if commit: <DeepExtract> if self.can_generate_estimate_number(): self.generate_estimate_number(commit=False) super(EstimateModelAbstract, self).save(**kwargs) </DeepExtract>
def update_cost_estimate(self, itemtxs_qs: Optional[ItemTransactionModelQuerySet]=None, commit: bool=False): """ Updates the cost estimate of the EstimateModel instance. Parameters ---------- itemtxs_qs: ItemTransactionModelQuerySet Prefetched ItemTransactionModelQuerySet. A new ItemTransactionModelQuerySet will be fetched from DB if not provided. If provided will be validated. commit: bool If True, the new revenue estimate will be committed into the DB. """ if not itemtxs_qs: itemtxs_qs = self.itemtransactionmodel_set.select_related('item_model').all() else: self.validate_item_transaction_qs(itemtxs_qs) itemtxs_qs = itemtxs_qs estimates = {'labor': sum((a.ce_cost_estimate for a in itemtxs_qs if a.item_model.is_labor())), 'material': sum((a.ce_cost_estimate for a in itemtxs_qs if a.item_model.is_material())), 'equipment': sum((a.ce_cost_estimate for a in itemtxs_qs if a.item_model.is_equipment())), 'other': sum((a.ce_cost_estimate for a in itemtxs_qs if a.item_model.is_other() or not a.item_model_id or (not a.item_model.item_type) or a.item_model.is_lump_sum()))} self.labor_estimate = estimates['labor'] self.material_estimate = estimates['material'] self.equipment_estimate = estimates['equipment'] self.other_estimate = estimates['other'] if commit: if self.can_generate_estimate_number(): self.generate_estimate_number(commit=False) super(EstimateModelAbstract, self).save(**kwargs)
django-ledger
positive
def merge_concepts(self, concept_ids, overwrite=False): """ merge concepts in a model When overwrite is False, if the concept does not exist in the model it will be appended. Otherwise, the original one will be kept. Args: concept_ids: a list of concept id overwrite: True or False. If True, the existing concepts will be replaced Returns: the Model object """ if overwrite is True: action = 'overwrite' else: action = 'merge' <DeepExtract> res = self.api.patch_inputs(action=action, inputs=[image]) one = res['inputs'][0] model = self._to_obj(one) </DeepExtract> return model
def merge_concepts(self, concept_ids, overwrite=False): """ merge concepts in a model When overwrite is False, if the concept does not exist in the model it will be appended. Otherwise, the original one will be kept. Args: concept_ids: a list of concept id overwrite: True or False. If True, the existing concepts will be replaced Returns: the Model object """ if overwrite is True: action = 'overwrite' else: action = 'merge' res = self.api.patch_inputs(action=action, inputs=[image]) one = res['inputs'][0] model = self._to_obj(one) return model
clarifai-python
positive
def forward(self, x): identity = x out = self.conv1(x) <DeepExtract> out = getattr(self, self.norm1_name) </DeepExtract> out = self.relu(out) out = self.conv2(out) <DeepExtract> out = getattr(self, self.norm2_name) </DeepExtract> if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out
def forward(self, x): identity = x out = self.conv1(x) out = getattr(self, self.norm1_name) out = self.relu(out) out = self.conv2(out) out = getattr(self, self.norm2_name) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out
Cross-iterationBatchNorm
positive
def __init__(self, cfg): super().__init__() <DeepExtract> pass </DeepExtract> if torch.cuda.is_available() and cfg.USE_CUDA: self.device = torch.device('cuda') else: self.device = torch.device('cpu') self.start_epoch = self.epoch = 0 self.max_epoch = cfg.OPTIM.MAX_EPOCH self.output_dir = cfg.OUTPUT_DIR self.cfg = cfg <DeepExtract> dm = DataManager(self.cfg) self.train_loader_x = dm.train_loader_x self.train_loader_u = dm.train_loader_u self.val_loader = dm.val_loader self.test_loader = dm.test_loader self.num_classes = dm.num_classes self.num_source_domains = dm.num_source_domains self.lab2cname = dm.lab2cname self.dm = dm </DeepExtract> <DeepExtract> cfg = self.cfg print('Building model') self.model = SimpleNet(cfg, cfg.MODEL, self.num_classes) if cfg.MODEL.INIT_WEIGHTS: load_pretrained_weights(self.model, cfg.MODEL.INIT_WEIGHTS) self.model.to(self.device) print(f'# params: {count_num_param(self.model):,}') self.optim = build_optimizer(self.model, cfg.OPTIM) self.sched = build_lr_scheduler(self.optim, cfg.OPTIM) self.register_model('model', self.model, self.optim, self.sched) device_count = torch.cuda.device_count() if device_count > 1: print(f'Detected {device_count} GPUs (use nn.DataParallel)') self.model = nn.DataParallel(self.model) </DeepExtract> self.evaluator = build_evaluator(cfg, lab2cname=self.lab2cname) self.best_result = -np.inf
def __init__(self, cfg): super().__init__() pass if torch.cuda.is_available() and cfg.USE_CUDA: self.device = torch.device('cuda') else: self.device = torch.device('cpu') self.start_epoch = self.epoch = 0 self.max_epoch = cfg.OPTIM.MAX_EPOCH self.output_dir = cfg.OUTPUT_DIR self.cfg = cfg dm = DataManager(self.cfg) self.train_loader_x = dm.train_loader_x self.train_loader_u = dm.train_loader_u self.val_loader = dm.val_loader self.test_loader = dm.test_loader self.num_classes = dm.num_classes self.num_source_domains = dm.num_source_domains self.lab2cname = dm.lab2cname self.dm = dm cfg = self.cfg print('Building model') self.model = SimpleNet(cfg, cfg.MODEL, self.num_classes) if cfg.MODEL.INIT_WEIGHTS: load_pretrained_weights(self.model, cfg.MODEL.INIT_WEIGHTS) self.model.to(self.device) print(f'# params: {count_num_param(self.model):,}') self.optim = build_optimizer(self.model, cfg.OPTIM) self.sched = build_lr_scheduler(self.optim, cfg.OPTIM) self.register_model('model', self.model, self.optim, self.sched) device_count = torch.cuda.device_count() if device_count > 1: print(f'Detected {device_count} GPUs (use nn.DataParallel)') self.model = nn.DataParallel(self.model) self.evaluator = build_evaluator(cfg, lab2cname=self.lab2cname) self.best_result = -np.inf
Dassl.pytorch
positive
def implementation(self, for_type=None, for_types=None): """Return a decorator that will register the implementation. Example: @multimethod def add(x, y): pass @add.implementation(for_type=int) def add(x, y): return x + y @add.implementation(for_type=SomeType) def add(x, y): return int(x) + int(y) """ <DeepExtract> if for_type: if for_types: raise ValueError('Cannot pass both for_type and for_types.') for_types = (for_type,) elif for_types: if not isinstance(for_types, tuple): raise TypeError('for_types must be passed as a tuple of types (classes).') else: raise ValueError('Must pass either for_type or for_types.') for_types = for_types </DeepExtract> def _decorator(implementation): <DeepExtract> unbound_implementation = self.__get_unbound_function(implementation) for_types = self.__get_types(for_type, for_types) for t in for_types: self._write_lock.acquire() try: self.implementations.append((t, unbound_implementation)) finally: self._write_lock.release() </DeepExtract> return self return _decorator
def implementation(self, for_type=None, for_types=None): """Return a decorator that will register the implementation. Example: @multimethod def add(x, y): pass @add.implementation(for_type=int) def add(x, y): return x + y @add.implementation(for_type=SomeType) def add(x, y): return int(x) + int(y) """ if for_type: if for_types: raise ValueError('Cannot pass both for_type and for_types.') for_types = (for_type,) elif for_types: if not isinstance(for_types, tuple): raise TypeError('for_types must be passed as a tuple of types (classes).') else: raise ValueError('Must pass either for_type or for_types.') for_types = for_types def _decorator(implementation): unbound_implementation = self.__get_unbound_function(implementation) for_types = self.__get_types(for_type, for_types) for t in for_types: self._write_lock.acquire() try: self.implementations.append((t, unbound_implementation)) finally: self._write_lock.release() return self return _decorator
dotty
positive
def testSeparableConv2DSameWithInputOddSize(self): (n, n2) = (5, 3) <DeepExtract> if None in [1, n, n, 1]: x = tf.placeholder(tf.float32, (1, n, n, 1)) else: x = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(n), [n, 1]) + np.reshape(np.arange(n), [1, n]), [1, n, n, 1]), [1, 1, 1, 1])) </DeepExtract> <DeepExtract> if None in [1, 3, 3, 1]: dw = tf.placeholder(tf.float32, (1, 3, 3, 1)) else: dw = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(3), [3, 1]) + np.reshape(np.arange(3), [1, 3]), [1, 3, 3, 1]), [1, 1, 1, 1])) </DeepExtract> dw = tf.reshape(dw, [3, 3, 1, 1]) tf.get_variable('Conv/depthwise_weights', initializer=dw) tf.get_variable('Conv/pointwise_weights', initializer=tf.ones([1, 1, 1, 1])) tf.get_variable('Conv/biases', initializer=tf.zeros([1])) tf.get_variable_scope().reuse_variables() y1 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1, stride=1, scope='Conv') y1_expected = tf.to_float([[14, 28, 43, 58, 34], [28, 48, 66, 84, 46], [43, 66, 84, 102, 55], [58, 84, 102, 120, 64], [34, 46, 55, 64, 30]]) y1_expected = tf.reshape(y1_expected, [1, n, n, 1]) y2 = resnet_utils.subsample(y1, 2) y2_expected = tf.to_float([[14, 43, 34], [43, 84, 55], [34, 55, 30]]) y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1]) y3 = xception.separable_conv2d_same(x, 1, 3, depth_multiplier=1, regularize_depthwise=True, stride=2, scope='Conv') y3_expected = y2_expected y4 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1, stride=2, scope='Conv') y4_expected = y2_expected with self.test_session() as sess: sess.run(tf.global_variables_initializer()) self.assertAllClose(y1.eval(), y1_expected.eval()) self.assertAllClose(y2.eval(), y2_expected.eval()) self.assertAllClose(y3.eval(), y3_expected.eval()) self.assertAllClose(y4.eval(), y4_expected.eval())
def testSeparableConv2DSameWithInputOddSize(self): (n, n2) = (5, 3) if None in [1, n, n, 1]: x = tf.placeholder(tf.float32, (1, n, n, 1)) else: x = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(n), [n, 1]) + np.reshape(np.arange(n), [1, n]), [1, n, n, 1]), [1, 1, 1, 1])) if None in [1, 3, 3, 1]: dw = tf.placeholder(tf.float32, (1, 3, 3, 1)) else: dw = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(3), [3, 1]) + np.reshape(np.arange(3), [1, 3]), [1, 3, 3, 1]), [1, 1, 1, 1])) dw = tf.reshape(dw, [3, 3, 1, 1]) tf.get_variable('Conv/depthwise_weights', initializer=dw) tf.get_variable('Conv/pointwise_weights', initializer=tf.ones([1, 1, 1, 1])) tf.get_variable('Conv/biases', initializer=tf.zeros([1])) tf.get_variable_scope().reuse_variables() y1 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1, stride=1, scope='Conv') y1_expected = tf.to_float([[14, 28, 43, 58, 34], [28, 48, 66, 84, 46], [43, 66, 84, 102, 55], [58, 84, 102, 120, 64], [34, 46, 55, 64, 30]]) y1_expected = tf.reshape(y1_expected, [1, n, n, 1]) y2 = resnet_utils.subsample(y1, 2) y2_expected = tf.to_float([[14, 43, 34], [43, 84, 55], [34, 55, 30]]) y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1]) y3 = xception.separable_conv2d_same(x, 1, 3, depth_multiplier=1, regularize_depthwise=True, stride=2, scope='Conv') y3_expected = y2_expected y4 = slim.separable_conv2d(x, 1, [3, 3], depth_multiplier=1, stride=2, scope='Conv') y4_expected = y2_expected with self.test_session() as sess: sess.run(tf.global_variables_initializer()) self.assertAllClose(y1.eval(), y1_expected.eval()) self.assertAllClose(y2.eval(), y2_expected.eval()) self.assertAllClose(y3.eval(), y3_expected.eval()) self.assertAllClose(y4.eval(), y4_expected.eval())
data-science-bowl-2018
positive
def forward(self, X): assert X.size(1) == self.D if X.dim() == 3: (B, D) = (X.size(0), self.D) X = X.transpose(1, 2).contiguous() elif X.dim() == 4: (B, D) = (X.size(0), self.D) X = X.view(B, D, -1).transpose(1, 2).contiguous() else: raise RuntimeError('Encoding Layer unknown input dims!') <DeepExtract> if self.training: self.scale.data.uniform_(-1, 0) else: self.scale.data.zero_().add_(-0.5) </DeepExtract> A = F.softmax(scaled_l2(X, self.codewords, self.scale), dim=2) E = aggregate(A, X, self.codewords) <DeepExtract> if self.training: self.scale.data.uniform_(-1, 0) else: self.scale.data.zero_().add_(-0.5) </DeepExtract> return E
def forward(self, X): assert X.size(1) == self.D if X.dim() == 3: (B, D) = (X.size(0), self.D) X = X.transpose(1, 2).contiguous() elif X.dim() == 4: (B, D) = (X.size(0), self.D) X = X.view(B, D, -1).transpose(1, 2).contiguous() else: raise RuntimeError('Encoding Layer unknown input dims!') if self.training: self.scale.data.uniform_(-1, 0) else: self.scale.data.zero_().add_(-0.5) A = F.softmax(scaled_l2(X, self.codewords, self.scale), dim=2) E = aggregate(A, X, self.codewords) if self.training: self.scale.data.uniform_(-1, 0) else: self.scale.data.zero_().add_(-0.5) return E
DANet
positive
def _start_reset_password(self): <DeepExtract> return self.assertContains(response=self.reset_password(self.email), text='Please check your mailbox for further password reset instructions.', status_code=status.HTTP_202_ACCEPTED) </DeepExtract> return self.assertResetPasswordEmail(self.email)
def _start_reset_password(self): return self.assertContains(response=self.reset_password(self.email), text='Please check your mailbox for further password reset instructions.', status_code=status.HTTP_202_ACCEPTED) return self.assertResetPasswordEmail(self.email)
desec-stack
positive
def get_size(self, filename): """ Return size of a stream in the OLE container, in bytes. :param filename: path of stream in storage tree (see openstream for syntax) :returns: size in bytes (long integer) :exception IOError: if file not found :exception TypeError: if this is not a stream. """ <DeepExtract> if isinstance(filename, basestring): filename = filename.split('/') node = self.root for name in filename: for kid in node.kids: if kid.name.lower() == name.lower(): break else: raise IOError('file not found') node = kid sid = node.sid </DeepExtract> entry = self.direntries[sid] if entry.entry_type != STGTY_STREAM: raise TypeError('object is not an OLE stream') return entry.size
def get_size(self, filename): """ Return size of a stream in the OLE container, in bytes. :param filename: path of stream in storage tree (see openstream for syntax) :returns: size in bytes (long integer) :exception IOError: if file not found :exception TypeError: if this is not a stream. """ if isinstance(filename, basestring): filename = filename.split('/') node = self.root for name in filename: for kid in node.kids: if kid.name.lower() == name.lower(): break else: raise IOError('file not found') node = kid sid = node.sid entry = self.direntries[sid] if entry.entry_type != STGTY_STREAM: raise TypeError('object is not an OLE stream') return entry.size
cyberstakes-writeps-2018
positive
def cp_download(self, *, src, bucket_name, dest, max_retries=5): job_id = str(uuid.uuid4()) awscli_output = '/tmp/awscli_{0}.output'.format(job_id) objects = [] <DeepExtract> cmd = self._aws_cli_cmd.copy() if self.endpoint_url is not None: cmd.extend(['--endpoint-url', self.endpoint_url]) cmd = cmd </DeepExtract> cmd.extend(['s3', 'cp']) if self._config.kms_id is not None: cmd.extend(['--sse', 'aws:kms', '--sse-kms-key-id', self._config.kms_id]) cmd.extend(['s3://{}/{}'.format(bucket_name, src), dest]) <DeepExtract> logging.debug(' '.join(cmd)) with open(awscli_output, 'w') as output: process = subprocess.Popen(cmd, env=self._env, bufsize=0, stdout=output, stderr=subprocess.STDOUT, universal_newlines=True) if process.wait() == 0: os.remove(awscli_output) return raise IOError('awscli cp failed. Max attempts exceeded. Check {} for more informations.'.format(awscli_output)) </DeepExtract> return objects
def cp_download(self, *, src, bucket_name, dest, max_retries=5): job_id = str(uuid.uuid4()) awscli_output = '/tmp/awscli_{0}.output'.format(job_id) objects = [] cmd = self._aws_cli_cmd.copy() if self.endpoint_url is not None: cmd.extend(['--endpoint-url', self.endpoint_url]) cmd = cmd cmd.extend(['s3', 'cp']) if self._config.kms_id is not None: cmd.extend(['--sse', 'aws:kms', '--sse-kms-key-id', self._config.kms_id]) cmd.extend(['s3://{}/{}'.format(bucket_name, src), dest]) logging.debug(' '.join(cmd)) with open(awscli_output, 'w') as output: process = subprocess.Popen(cmd, env=self._env, bufsize=0, stdout=output, stderr=subprocess.STDOUT, universal_newlines=True) if process.wait() == 0: os.remove(awscli_output) return raise IOError('awscli cp failed. Max attempts exceeded. Check {} for more informations.'.format(awscli_output)) return objects
cassandra-medusa
positive
def _get_render_func(optimize=True, ret_str=False): """ Uses `optimize=True` by default because it makes the output easier to read and more like dust's docs split `ret_str=True` into `_get_render_string()` Note that this doesn't save the render_code/render_func. It is compiled as needed. """ <DeepExtract> if not self.source: ast = None try: dast = ParseTree.from_source(self.source).to_dust_ast() except ParseError as pe: pe.source_file = self.source_file raise if raw: ast = dast ast = self.env.filter_ast(dast, optimize) </DeepExtract> if not ast: return (None, None) <DeepExtract> compiler = Compiler(self.env) (python_code, python_func) = compiler.compile(ast) (render_code, render_func) = (python_code, python_func) </DeepExtract> return (render_code, render_func)
def _get_render_func(optimize=True, ret_str=False): """ Uses `optimize=True` by default because it makes the output easier to read and more like dust's docs split `ret_str=True` into `_get_render_string()` Note that this doesn't save the render_code/render_func. It is compiled as needed. """ if not self.source: ast = None try: dast = ParseTree.from_source(self.source).to_dust_ast() except ParseError as pe: pe.source_file = self.source_file raise if raw: ast = dast ast = self.env.filter_ast(dast, optimize) if not ast: return (None, None) compiler = Compiler(self.env) (python_code, python_func) = compiler.compile(ast) (render_code, render_func) = (python_code, python_func) return (render_code, render_func)
ashes
positive
def build_verifier(password: str, *, salt: typing.Optional[bytes]=None, iterations: int=DEFAULT_ITERATIONS) -> str: """Build the SCRAM verifier for the given password. Returns a string in the following format: "<MECHANISM>$<iterations>:<salt>$<StoredKey>:<ServerKey>" The salt and keys are base64-encoded values. """ password = saslprep(password).encode('utf-8') if salt is None: <DeepExtract> salt = os.urandom(length) </DeepExtract> <DeepExtract> H_i = U_i = HMAC(password, salt + b'\x00\x00\x00\x01') for _ in range(iterations - 1): U_i = HMAC(password, U_i) H_i = XOR(H_i, U_i) salted_password = H_i </DeepExtract> <DeepExtract> client_key = HMAC(salted_password, b'Client Key') </DeepExtract> <DeepExtract> stored_key = hashlib.sha256(client_key).digest() </DeepExtract> <DeepExtract> server_key = HMAC(salted_password, b'Server Key') </DeepExtract> return f'SCRAM-SHA-256${iterations}:{B64(salt)}${B64(stored_key)}:{B64(server_key)}'
def build_verifier(password: str, *, salt: typing.Optional[bytes]=None, iterations: int=DEFAULT_ITERATIONS) -> str: """Build the SCRAM verifier for the given password. Returns a string in the following format: "<MECHANISM>$<iterations>:<salt>$<StoredKey>:<ServerKey>" The salt and keys are base64-encoded values. """ password = saslprep(password).encode('utf-8') if salt is None: salt = os.urandom(length) H_i = U_i = HMAC(password, salt + b'\x00\x00\x00\x01') for _ in range(iterations - 1): U_i = HMAC(password, U_i) H_i = XOR(H_i, U_i) salted_password = H_i client_key = HMAC(salted_password, b'Client Key') stored_key = hashlib.sha256(client_key).digest() server_key = HMAC(salted_password, b'Server Key') return f'SCRAM-SHA-256${iterations}:{B64(salt)}${B64(stored_key)}:{B64(server_key)}'
edgedb-python
positive
def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name='token_type_embeddings', use_position_embeddings=True, position_embedding_name='position_embeddings', initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1): """Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid. """ <DeepExtract> if name is None: name = input_tensor.name if 3 is not None: assert_rank(input_tensor, 3, name) shape = input_tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: input_shape = shape dyn_shape = tf.shape(input_tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] input_shape = shape </DeepExtract> batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] output = input_tensor if use_token_type: if token_type_ids is None: raise ValueError('`token_type_ids` must be specified if`use_token_type` is True.') token_type_table = tf.get_variable(name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range)) flat_token_type_ids = tf.reshape(token_type_ids, [-1]) one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size) token_type_embeddings = tf.matmul(one_hot_ids, token_type_table) token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width]) output += token_type_embeddings if use_position_embeddings: assert_op = tf.assert_less_equal(seq_length, max_position_embeddings) with tf.control_dependencies([assert_op]): full_position_embeddings = tf.get_variable(name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range)) position_embeddings = tf.slice(full_position_embeddings, [0, 0], [seq_length, -1]) num_dims = len(output.shape.as_list()) position_broadcast_shape = [] for _ in range(num_dims - 2): position_broadcast_shape.append(1) position_broadcast_shape.extend([seq_length, width]) position_embeddings = tf.reshape(position_embeddings, position_broadcast_shape) output += position_embeddings <DeepExtract> output_tensor = layer_norm(output, name) output_tensor = dropout(output_tensor, dropout_prob) output = output_tensor </DeepExtract> return output
def embedding_postprocessor(input_tensor, use_token_type=False, token_type_ids=None, token_type_vocab_size=16, token_type_embedding_name='token_type_embeddings', use_position_embeddings=True, position_embedding_name='position_embeddings', initializer_range=0.02, max_position_embeddings=512, dropout_prob=0.1): """Performs various post-processing on a word embedding tensor. Args: input_tensor: float Tensor of shape [batch_size, seq_length, embedding_size]. use_token_type: bool. Whether to add embeddings for `token_type_ids`. token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. Must be specified if `use_token_type` is True. token_type_vocab_size: int. The vocabulary size of `token_type_ids`. token_type_embedding_name: string. The name of the embedding table variable for token type ids. use_position_embeddings: bool. Whether to add position embeddings for the position of each token in the sequence. position_embedding_name: string. The name of the embedding table variable for positional embeddings. initializer_range: float. Range of the weight initialization. max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. dropout_prob: float. Dropout probability applied to the final output tensor. Returns: float tensor with same shape as `input_tensor`. Raises: ValueError: One of the tensor shapes or input values is invalid. """ if name is None: name = input_tensor.name if 3 is not None: assert_rank(input_tensor, 3, name) shape = input_tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: input_shape = shape dyn_shape = tf.shape(input_tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] input_shape = shape batch_size = input_shape[0] seq_length = input_shape[1] width = input_shape[2] output = input_tensor if use_token_type: if token_type_ids is None: raise ValueError('`token_type_ids` must be specified if`use_token_type` is True.') token_type_table = tf.get_variable(name=token_type_embedding_name, shape=[token_type_vocab_size, width], initializer=create_initializer(initializer_range)) flat_token_type_ids = tf.reshape(token_type_ids, [-1]) one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size) token_type_embeddings = tf.matmul(one_hot_ids, token_type_table) token_type_embeddings = tf.reshape(token_type_embeddings, [batch_size, seq_length, width]) output += token_type_embeddings if use_position_embeddings: assert_op = tf.assert_less_equal(seq_length, max_position_embeddings) with tf.control_dependencies([assert_op]): full_position_embeddings = tf.get_variable(name=position_embedding_name, shape=[max_position_embeddings, width], initializer=create_initializer(initializer_range)) position_embeddings = tf.slice(full_position_embeddings, [0, 0], [seq_length, -1]) num_dims = len(output.shape.as_list()) position_broadcast_shape = [] for _ in range(num_dims - 2): position_broadcast_shape.append(1) position_broadcast_shape.extend([seq_length, width]) position_embeddings = tf.reshape(position_embeddings, position_broadcast_shape) output += position_embeddings output_tensor = layer_norm(output, name) output_tensor = dropout(output_tensor, dropout_prob) output = output_tensor return output
ChineseEHRBert
positive
def _build_and_fit_model(self, trial, *args, **kwargs): model = self._try_build(trial.hyperparameters) <DeepExtract> dataset = kwargs['x'] pipeline = self.hyper_pipeline.build(trial.hyperparameters, dataset) pipeline.fit(dataset) dataset = pipeline.transform(dataset) self.hypermodel.set_io_shapes(data_utils.dataset_shape(dataset)) if 'validation_data' in kwargs: validation_data = pipeline.transform(kwargs['validation_data']) else: validation_data = None (pipeline, kwargs['x'], kwargs['validation_data']) = (pipeline, dataset, validation_data) </DeepExtract> pipeline.save(self._pipeline_path(trial.trial_id)) <DeepExtract> x = kwargs['x'].map(lambda x, y: x) def get_output_layers(tensor): output_layers = [] tensor = nest.flatten(tensor)[0] for layer in model.layers: if isinstance(layer, keras.layers.InputLayer): continue input_node = nest.flatten(layer.input)[0] if input_node is tensor: if isinstance(layer, preprocessing.PreprocessingLayer): output_layers.append(layer) return output_layers dq = collections.deque() for (index, input_node) in enumerate(nest.flatten(model.input)): in_x = x.map(lambda *args: nest.flatten(args)[index]) for layer in get_output_layers(input_node): dq.append((layer, in_x)) while len(dq): (layer, in_x) = dq.popleft() layer.adapt(in_x) out_x = in_x.map(layer) for next_layer in get_output_layers(layer.output): dq.append((next_layer, out_x)) return model </DeepExtract> (_, history) = utils.fit_with_adaptive_batch_size(model, self.hypermodel.batch_size, **kwargs) return history
def _build_and_fit_model(self, trial, *args, **kwargs): model = self._try_build(trial.hyperparameters) dataset = kwargs['x'] pipeline = self.hyper_pipeline.build(trial.hyperparameters, dataset) pipeline.fit(dataset) dataset = pipeline.transform(dataset) self.hypermodel.set_io_shapes(data_utils.dataset_shape(dataset)) if 'validation_data' in kwargs: validation_data = pipeline.transform(kwargs['validation_data']) else: validation_data = None (pipeline, kwargs['x'], kwargs['validation_data']) = (pipeline, dataset, validation_data) pipeline.save(self._pipeline_path(trial.trial_id)) x = kwargs['x'].map(lambda x, y: x) def get_output_layers(tensor): output_layers = [] tensor = nest.flatten(tensor)[0] for layer in model.layers: if isinstance(layer, keras.layers.InputLayer): continue input_node = nest.flatten(layer.input)[0] if input_node is tensor: if isinstance(layer, preprocessing.PreprocessingLayer): output_layers.append(layer) return output_layers dq = collections.deque() for (index, input_node) in enumerate(nest.flatten(model.input)): in_x = x.map(lambda *args: nest.flatten(args)[index]) for layer in get_output_layers(input_node): dq.append((layer, in_x)) while len(dq): (layer, in_x) = dq.popleft() layer.adapt(in_x) out_x = in_x.map(layer) for next_layer in get_output_layers(layer.output): dq.append((next_layer, out_x)) return model (_, history) = utils.fit_with_adaptive_batch_size(model, self.hypermodel.batch_size, **kwargs) return history
autokeras
positive
def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False): diameter = 2 * radius + 1 <DeepExtract> (m, n) = [(ss - 1.0) / 2.0 for ss in (diameter, diameter)] (y, x) = np.ogrid[-m:m + 1, -n:n + 1] h = np.exp(-(x * x + y * y) / (2 * diameter / 6 * diameter / 6)) h[h < np.finfo(h.dtype).eps * h.max()] = 0 gaussian = h </DeepExtract> value = np.array(value, dtype=np.float32).reshape(-1, 1, 1) dim = value.shape[0] reg = np.ones((dim, diameter * 2 + 1, diameter * 2 + 1), dtype=np.float32) * value if is_offset and dim == 2: delta = np.arange(diameter * 2 + 1) - radius reg[0] = reg[0] - delta.reshape(1, -1) reg[1] = reg[1] - delta.reshape(-1, 1) (x, y) = (int(center[0]), int(center[1])) (height, width) = heatmap.shape[0:2] (left, right) = (min(x, radius), min(width - x, radius + 1)) (top, bottom) = (min(y, radius), min(height - y, radius + 1)) masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] masked_regmap = regmap[:, y - top:y + bottom, x - left:x + right] masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right] masked_reg = reg[:, radius - top:radius + bottom, radius - left:radius + right] if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: idx = (masked_gaussian >= masked_heatmap).reshape(1, masked_gaussian.shape[0], masked_gaussian.shape[1]) masked_regmap = (1 - idx) * masked_regmap + idx * masked_reg regmap[:, y - top:y + bottom, x - left:x + right] = masked_regmap return regmap
def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False): diameter = 2 * radius + 1 (m, n) = [(ss - 1.0) / 2.0 for ss in (diameter, diameter)] (y, x) = np.ogrid[-m:m + 1, -n:n + 1] h = np.exp(-(x * x + y * y) / (2 * diameter / 6 * diameter / 6)) h[h < np.finfo(h.dtype).eps * h.max()] = 0 gaussian = h value = np.array(value, dtype=np.float32).reshape(-1, 1, 1) dim = value.shape[0] reg = np.ones((dim, diameter * 2 + 1, diameter * 2 + 1), dtype=np.float32) * value if is_offset and dim == 2: delta = np.arange(diameter * 2 + 1) - radius reg[0] = reg[0] - delta.reshape(1, -1) reg[1] = reg[1] - delta.reshape(-1, 1) (x, y) = (int(center[0]), int(center[1])) (height, width) = heatmap.shape[0:2] (left, right) = (min(x, radius), min(width - x, radius + 1)) (top, bottom) = (min(y, radius), min(height - y, radius + 1)) masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] masked_regmap = regmap[:, y - top:y + bottom, x - left:x + right] masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right] masked_reg = reg[:, radius - top:radius + bottom, radius - left:radius + right] if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: idx = (masked_gaussian >= masked_heatmap).reshape(1, masked_gaussian.shape[0], masked_gaussian.shape[1]) masked_regmap = (1 - idx) * masked_regmap + idx * masked_reg regmap[:, y - top:y + bottom, x - left:x + right] = masked_regmap return regmap
CenterTrack
positive
def addNewMapping(self, remotePath, localPath): printl('', self, 'S') tree = getXmlContent(self.location) newId = int(self.lastMappingId) + 1 printl('newId: ' + str(newId), self, 'D') printl('remotePath: ' + str(remotePath), self, 'D') printl('localPath: ' + str(localPath), self, 'D') existingServer = False for server in tree.findall('server'): printl('servername: ' + str(server.get('id')), self, 'D') if str(server.get('id')) == str(self.serverID): existingServer = True server.append(etree.Element('mapping id="' + str(newId) + '" remotePathPart="' + remotePath + '" localPathPart="' + localPath + '"')) writeXmlContent(tree, self.location) if not existingServer: printl('expanding server list', self, 'D') tree.append(etree.Element('server id="' + str(self.serverID) + '"')) writeXmlContent(tree, self.location) <DeepExtract> printl('', self, 'S') tree = getXmlContent(self.location) newId = int(self.lastMappingId) + 1 printl('newId: ' + str(newId), self, 'D') printl('remotePath: ' + str(remotePath), self, 'D') printl('localPath: ' + str(localPath), self, 'D') existingServer = False for server in tree.findall('server'): printl('servername: ' + str(server.get('id')), self, 'D') if str(server.get('id')) == str(self.serverID): existingServer = True server.append(etree.Element('mapping id="' + str(newId) + '" remotePathPart="' + remotePath + '" localPathPart="' + localPath + '"')) writeXmlContent(tree, self.location) if not existingServer: printl('expanding server list', self, 'D') tree.append(etree.Element('server id="' + str(self.serverID) + '"')) writeXmlContent(tree, self.location) self.addNewMapping(remotePath, localPath) printl('', self, 'C') </DeepExtract> printl('', self, 'C')
def addNewMapping(self, remotePath, localPath): printl('', self, 'S') tree = getXmlContent(self.location) newId = int(self.lastMappingId) + 1 printl('newId: ' + str(newId), self, 'D') printl('remotePath: ' + str(remotePath), self, 'D') printl('localPath: ' + str(localPath), self, 'D') existingServer = False for server in tree.findall('server'): printl('servername: ' + str(server.get('id')), self, 'D') if str(server.get('id')) == str(self.serverID): existingServer = True server.append(etree.Element('mapping id="' + str(newId) + '" remotePathPart="' + remotePath + '" localPathPart="' + localPath + '"')) writeXmlContent(tree, self.location) if not existingServer: printl('expanding server list', self, 'D') tree.append(etree.Element('server id="' + str(self.serverID) + '"')) writeXmlContent(tree, self.location) printl('', self, 'S') tree = getXmlContent(self.location) newId = int(self.lastMappingId) + 1 printl('newId: ' + str(newId), self, 'D') printl('remotePath: ' + str(remotePath), self, 'D') printl('localPath: ' + str(localPath), self, 'D') existingServer = False for server in tree.findall('server'): printl('servername: ' + str(server.get('id')), self, 'D') if str(server.get('id')) == str(self.serverID): existingServer = True server.append(etree.Element('mapping id="' + str(newId) + '" remotePathPart="' + remotePath + '" localPathPart="' + localPath + '"')) writeXmlContent(tree, self.location) if not existingServer: printl('expanding server list', self, 'D') tree.append(etree.Element('server id="' + str(self.serverID) + '"')) writeXmlContent(tree, self.location) self.addNewMapping(remotePath, localPath) printl('', self, 'C') printl('', self, 'C')
DreamPlex
positive
def assertNsEqual(self, proto, ns): pytd = textwrap.dedent(proto) <DeepExtract> with open(TMP_FILE, 'w') as pytd_file: pytd_file.write(pytd) p = pytd2proto.Postprocessor(config_headers=['clif/python/types.h'], include_paths=[os.environ['CLIF_DIR']]) with open(TMP_FILE, 'r') as pytd_file: pb = p.Translate(pytd_file) ast = pb </DeepExtract> m = pyext.Module('my.path.py.ext') for d in ast.decls: list(m.WrapDecl(d)) if m.types: different_ns = set((types.Namespace(t) for t in m.types)) self.assertEqual(set(ns), different_ns) else: self.assertFalse(ns)
def assertNsEqual(self, proto, ns): pytd = textwrap.dedent(proto) with open(TMP_FILE, 'w') as pytd_file: pytd_file.write(pytd) p = pytd2proto.Postprocessor(config_headers=['clif/python/types.h'], include_paths=[os.environ['CLIF_DIR']]) with open(TMP_FILE, 'r') as pytd_file: pb = p.Translate(pytd_file) ast = pb m = pyext.Module('my.path.py.ext') for d in ast.decls: list(m.WrapDecl(d)) if m.types: different_ns = set((types.Namespace(t) for t in m.types)) self.assertEqual(set(ns), different_ns) else: self.assertFalse(ns)
clif
positive
def __init__(self, graph): self._marked = defaultdict(bool) self._id = {} self._count = 0 order = DepthFirstOrder(graph.reverse()) for v in order.reverse_postfix(): if not self._marked[v]: <DeepExtract> self._marked[v] = True for adjacent_vertex in graph.get_adjacent_vertices(v): if not self._marked[adjacent_vertex]: self.dfs(graph, adjacent_vertex) </DeepExtract> self._count += 1
def __init__(self, graph): self._marked = defaultdict(bool) self._id = {} self._count = 0 order = DepthFirstOrder(graph.reverse()) for v in order.reverse_postfix(): if not self._marked[v]: self._marked[v] = True for adjacent_vertex in graph.get_adjacent_vertices(v): if not self._marked[adjacent_vertex]: self.dfs(graph, adjacent_vertex) self._count += 1
algorithms-sedgewick-python
positive
def test_reflect_xdown(self): <DeepExtract> profile = pb.ProfileBase() profile.xup_coordinates = np.linspace(-1.0, 1.0, 5) profile.yup_coordinates = np.array([0.0, 0.75, 1.0, 0.75, 0.0]) profile.xdown_coordinates = np.linspace(-1.0, 1.0, 5) profile.ydown_coordinates = np.zeros(5) profile = profile </DeepExtract> profile.reflect() flipped_xdown_coordinates = np.array([1.0, 0.5, 0.0, -0.5, -1.0]) np.testing.assert_equal(profile.xdown_coordinates, flipped_xdown_coordinates)
def test_reflect_xdown(self): profile = pb.ProfileBase() profile.xup_coordinates = np.linspace(-1.0, 1.0, 5) profile.yup_coordinates = np.array([0.0, 0.75, 1.0, 0.75, 0.0]) profile.xdown_coordinates = np.linspace(-1.0, 1.0, 5) profile.ydown_coordinates = np.zeros(5) profile = profile profile.reflect() flipped_xdown_coordinates = np.array([1.0, 0.5, 0.0, -0.5, -1.0]) np.testing.assert_equal(profile.xdown_coordinates, flipped_xdown_coordinates)
BladeX
positive
def parse_module(module, inputs, param_exclude='.*AuxLogits.*', param_include=None): <DeepExtract> state_dict = torch.jit._unique_state_dict(module, keep_vars=True) if param_exclude is not None: param_exclude = re.compile(param_exclude) if param_include is not None: param_include = re.compile(param_include) new_state_dict = OrderedDict() for (k, v) in state_dict.items(): if param_exclude is not None and param_exclude.match(k) is not None: print('\nremove input element {} from nodesIn\n'.format(k)) continue if param_include is not None and param_include.match(k) is None: continue new_state_dict[k] = v params = zip(new_state_dict.keys(), new_state_dict.values()) params = params </DeepExtract> (trace, out) = torch.jit._get_trace_graph(module, inputs) _set_opset_version(12) trace_graph = _optimize_graph(trace, torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK, params_dict={}) logger.debug('trace_graph: {}'.format(trace_graph)) if int(os.environ.get('AUTOLIRPA_DEBUG_GRAPH', 0)) > 0: print('Graph before ONNX convertion:') print(trace) print('ONNX graph:') print(trace_graph) if not isinstance(inputs, tuple): inputs = (inputs,) <DeepExtract> input_all = [] input_used = [] scope = {} for n in trace_graph.inputs(): input_all.append(n.debugName()) for n in trace_graph.nodes(): n_inputs = [get_node_name(i) for i in n.inputs()] for inp in n.inputs(): input_used.append(inp.debugName()) for out in n.outputs(): scope[get_node_name(out)] = n.scopeName() for node in trace_graph.inputs(): name = get_node_name(node) scope[name] = '' for n in trace_graph.outputs(): name = get_node_name(n) if name in input_all: input_used.append(n.debugName()) def name_with_scope(node): name = get_node_name(node) (nodesOP, nodesIn, nodesOut) = '/'.join([scope[name], name]) nodesOP = [] for n in trace_graph.nodes(): attrs = {k: n[k] for k in n.attributeNames()} n_inputs = [name_with_scope(i) for i in n.inputs()] for (i, out) in enumerate(list(n.outputs())): nodesOP.append(Node(**{'name': name_with_scope(out), 'op': n.kind(), 'inputs': n_inputs, 'attr': attrs, 'output_index': i})) nodesIn = [] used_by_index = [] for (i, n) in enumerate(trace_graph.inputs()): name = get_node_name(n) used = name in input_used used_by_index.append(used) if used: nodesIn.append(n) inputs_unpacked = unpack_inputs(tuple(inputs)) assert len(list(trace_graph.inputs())) == len(inputs_unpacked) + len(tuple(params)) tuple(inputs) = [inputs_unpacked[i] for i in range(len(inputs_unpacked)) if used_by_index[i]] input_index = [i for i in range(len(inputs_unpacked)) if used_by_index[i]] tuple(inputs) = list(zip(['input_{}'.format(input_index[i]) for i in range(len(tuple(inputs)))], tuple(inputs))) tuple(params) = [tuple(params)[i] for i in range(len(tuple(params))) if used_by_index[i + len(inputs_unpacked)]] inputs_and_params = tuple(inputs) + tuple(params) assert len(nodesIn) == len(inputs_and_params) nodesOut = [] for n in trace_graph.outputs(): nodesOut.append(name_with_scope(n)) for (i, n) in enumerate(nodesIn): if isinstance(inputs_and_params[i][1], BoundedTensor) or isinstance(inputs_and_params[i][1], BoundedParameter): perturbation = inputs_and_params[i][1].ptb else: perturbation = None if n.type().sizes() != list(inputs_and_params[i][1].size()): raise RuntimeError('Input tensor shapes do not much: {} != {}'.format(n.type().sizes(), list(inputs_and_params[i][1].size()))) nodesIn[i] = Node(**{'name': name_with_scope(n), 'ori_name': inputs_and_params[i][0], 'op': 'Parameter', 'inputs': [], 'attr': str(n.type()), 'param': inputs_and_params[i][1] if i >= len(tuple(inputs)) else None, 'input_index': input_index[i] if i < len(tuple(inputs)) else None, 'perturbation': perturbation}) (nodesOP, nodesIn, nodesOut) = (nodesOP, nodesIn, nodesOut) </DeepExtract> for i in range(len(nodesOP)): param_in = OrderedDict() for inp in nodesOP[i].inputs: for n in nodesIn: if inp == n.name: param_in.update({inp: n.param}) nodesOP[i] = nodesOP[i]._replace(param=param_in) <DeepExtract> if isinstance(out, torch.Tensor): template = None elif isinstance(out, list): template = list([get_output_template(o) for o in out]) elif isinstance(out, tuple): template = tuple([get_output_template(o) for o in out]) elif isinstance(out, dict): template = {} for key in out: template[key] = get_output_template(out[key]) template = template else: raise NotImplementedError </DeepExtract> return (nodesOP, nodesIn, nodesOut, template)
def parse_module(module, inputs, param_exclude='.*AuxLogits.*', param_include=None): state_dict = torch.jit._unique_state_dict(module, keep_vars=True) if param_exclude is not None: param_exclude = re.compile(param_exclude) if param_include is not None: param_include = re.compile(param_include) new_state_dict = OrderedDict() for (k, v) in state_dict.items(): if param_exclude is not None and param_exclude.match(k) is not None: print('\nremove input element {} from nodesIn\n'.format(k)) continue if param_include is not None and param_include.match(k) is None: continue new_state_dict[k] = v params = zip(new_state_dict.keys(), new_state_dict.values()) params = params (trace, out) = torch.jit._get_trace_graph(module, inputs) _set_opset_version(12) trace_graph = _optimize_graph(trace, torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK, params_dict={}) logger.debug('trace_graph: {}'.format(trace_graph)) if int(os.environ.get('AUTOLIRPA_DEBUG_GRAPH', 0)) > 0: print('Graph before ONNX convertion:') print(trace) print('ONNX graph:') print(trace_graph) if not isinstance(inputs, tuple): inputs = (inputs,) input_all = [] input_used = [] scope = {} for n in trace_graph.inputs(): input_all.append(n.debugName()) for n in trace_graph.nodes(): n_inputs = [get_node_name(i) for i in n.inputs()] for inp in n.inputs(): input_used.append(inp.debugName()) for out in n.outputs(): scope[get_node_name(out)] = n.scopeName() for node in trace_graph.inputs(): name = get_node_name(node) scope[name] = '' for n in trace_graph.outputs(): name = get_node_name(n) if name in input_all: input_used.append(n.debugName()) def name_with_scope(node): name = get_node_name(node) (nodesOP, nodesIn, nodesOut) = '/'.join([scope[name], name]) nodesOP = [] for n in trace_graph.nodes(): attrs = {k: n[k] for k in n.attributeNames()} n_inputs = [name_with_scope(i) for i in n.inputs()] for (i, out) in enumerate(list(n.outputs())): nodesOP.append(Node(**{'name': name_with_scope(out), 'op': n.kind(), 'inputs': n_inputs, 'attr': attrs, 'output_index': i})) nodesIn = [] used_by_index = [] for (i, n) in enumerate(trace_graph.inputs()): name = get_node_name(n) used = name in input_used used_by_index.append(used) if used: nodesIn.append(n) inputs_unpacked = unpack_inputs(tuple(inputs)) assert len(list(trace_graph.inputs())) == len(inputs_unpacked) + len(tuple(params)) tuple(inputs) = [inputs_unpacked[i] for i in range(len(inputs_unpacked)) if used_by_index[i]] input_index = [i for i in range(len(inputs_unpacked)) if used_by_index[i]] tuple(inputs) = list(zip(['input_{}'.format(input_index[i]) for i in range(len(tuple(inputs)))], tuple(inputs))) tuple(params) = [tuple(params)[i] for i in range(len(tuple(params))) if used_by_index[i + len(inputs_unpacked)]] inputs_and_params = tuple(inputs) + tuple(params) assert len(nodesIn) == len(inputs_and_params) nodesOut = [] for n in trace_graph.outputs(): nodesOut.append(name_with_scope(n)) for (i, n) in enumerate(nodesIn): if isinstance(inputs_and_params[i][1], BoundedTensor) or isinstance(inputs_and_params[i][1], BoundedParameter): perturbation = inputs_and_params[i][1].ptb else: perturbation = None if n.type().sizes() != list(inputs_and_params[i][1].size()): raise RuntimeError('Input tensor shapes do not much: {} != {}'.format(n.type().sizes(), list(inputs_and_params[i][1].size()))) nodesIn[i] = Node(**{'name': name_with_scope(n), 'ori_name': inputs_and_params[i][0], 'op': 'Parameter', 'inputs': [], 'attr': str(n.type()), 'param': inputs_and_params[i][1] if i >= len(tuple(inputs)) else None, 'input_index': input_index[i] if i < len(tuple(inputs)) else None, 'perturbation': perturbation}) (nodesOP, nodesIn, nodesOut) = (nodesOP, nodesIn, nodesOut) for i in range(len(nodesOP)): param_in = OrderedDict() for inp in nodesOP[i].inputs: for n in nodesIn: if inp == n.name: param_in.update({inp: n.param}) nodesOP[i] = nodesOP[i]._replace(param=param_in) if isinstance(out, torch.Tensor): template = None elif isinstance(out, list): template = list([get_output_template(o) for o in out]) elif isinstance(out, tuple): template = tuple([get_output_template(o) for o in out]) elif isinstance(out, dict): template = {} for key in out: template[key] = get_output_template(out[key]) template = template else: raise NotImplementedError return (nodesOP, nodesIn, nodesOut, template)
auto_LiRPA
positive
def forecast(self, iInputDS, iHorizon): logger = tsutil.get_pyaf_logger() logger.info('START_HIERARCHICAL_FORECASTING') start_time = time.time() <DeepExtract> lAllLevelsDataset = self.create_all_levels_dataset(iInputDS) lForecast_DF = self.forecastAllModels(lAllLevelsDataset, iHorizon, self.mDateColumn) lCombinationMethods = self.mOptions.mHierarchicalCombinationMethod if type(lCombinationMethods) is not list: lCombinationMethods = [lCombinationMethods] for lMethod in lCombinationMethods: if lMethod == 'BU': lForecast_DF_BU = self.computeBottomUpForecasts(lForecast_DF) lForecast_DF = lForecast_DF_BU if lMethod == 'TD': lForecast_DF_TD_AHP = self.computeTopDownForecasts(lForecast_DF, self.mAvgHistProp, 'AHP_TD') lForecast_DF = lForecast_DF_TD_AHP lForecast_DF_TD_PHA = self.computeTopDownForecasts(lForecast_DF, self.mPropHistAvg, 'PHA_TD') lForecast_DF = lForecast_DF_TD_PHA if lMethod == 'MO': lForecast_DF_MO = self.computeMiddleOutForecasts(lForecast_DF, self.mPropHistAvg, 'MO') lForecast_DF = lForecast_DF_MO if lMethod == 'OC': lForecast_DF_OC = self.computeOptimalCombination(lForecast_DF) lForecast_DF = lForecast_DF_OC lForecast_DF = lForecast_DF </DeepExtract> lForecastTime = time.time() - start_time logger.info('END_HIERARCHICAL_FORECAST_TIME_IN_SECONDS ' + str(lForecastTime)) return lForecast_DF
def forecast(self, iInputDS, iHorizon): logger = tsutil.get_pyaf_logger() logger.info('START_HIERARCHICAL_FORECASTING') start_time = time.time() lAllLevelsDataset = self.create_all_levels_dataset(iInputDS) lForecast_DF = self.forecastAllModels(lAllLevelsDataset, iHorizon, self.mDateColumn) lCombinationMethods = self.mOptions.mHierarchicalCombinationMethod if type(lCombinationMethods) is not list: lCombinationMethods = [lCombinationMethods] for lMethod in lCombinationMethods: if lMethod == 'BU': lForecast_DF_BU = self.computeBottomUpForecasts(lForecast_DF) lForecast_DF = lForecast_DF_BU if lMethod == 'TD': lForecast_DF_TD_AHP = self.computeTopDownForecasts(lForecast_DF, self.mAvgHistProp, 'AHP_TD') lForecast_DF = lForecast_DF_TD_AHP lForecast_DF_TD_PHA = self.computeTopDownForecasts(lForecast_DF, self.mPropHistAvg, 'PHA_TD') lForecast_DF = lForecast_DF_TD_PHA if lMethod == 'MO': lForecast_DF_MO = self.computeMiddleOutForecasts(lForecast_DF, self.mPropHistAvg, 'MO') lForecast_DF = lForecast_DF_MO if lMethod == 'OC': lForecast_DF_OC = self.computeOptimalCombination(lForecast_DF) lForecast_DF = lForecast_DF_OC lForecast_DF = lForecast_DF lForecastTime = time.time() - start_time logger.info('END_HIERARCHICAL_FORECAST_TIME_IN_SECONDS ' + str(lForecastTime)) return lForecast_DF
atspy
positive
def dry_run(self, sql: str) -> str: from google.cloud import bigquery job_config = bigquery.QueryJobConfig() job_config.dry_run = True <DeepExtract> self.logger.info('COLLECTING DATA: %s', sql) if job_config: query_job = self.bigquery_client.query(sql, job_config=job_config) else: query_job = self.bigquery_client.query(sql) </DeepExtract> <DeepExtract> size = float(query_job.total_bytes_processed) power = 2 ** 10 tera = 2 ** 40 tb_cost = 5 n = 0 power_labels = {0: ' B', 1: ' KB', 2: ' MB', 3: ' GB', 4: ' TB', 5: ' PB'} cost = str(round(size / tera * tb_cost, 2)) + ' USD' while size > power: size /= power n += 1 billed = {'size': str(round(size, 2)) + power_labels[n], 'cost': cost} </DeepExtract> return 'This query will process {} and cost {}.'.format(billed['size'], billed['cost'])
def dry_run(self, sql: str) -> str: from google.cloud import bigquery job_config = bigquery.QueryJobConfig() job_config.dry_run = True self.logger.info('COLLECTING DATA: %s', sql) if job_config: query_job = self.bigquery_client.query(sql, job_config=job_config) else: query_job = self.bigquery_client.query(sql) size = float(query_job.total_bytes_processed) power = 2 ** 10 tera = 2 ** 40 tb_cost = 5 n = 0 power_labels = {0: ' B', 1: ' KB', 2: ' MB', 3: ' GB', 4: ' TB', 5: ' PB'} cost = str(round(size / tera * tb_cost, 2)) + ' USD' while size > power: size /= power n += 1 billed = {'size': str(round(size, 2)) + power_labels[n], 'cost': cost} return 'This query will process {} and cost {}.'.format(billed['size'], billed['cost'])
bigflow
positive
def _attach_job_callbacks(job): def start_callback(task): <DeepExtract> with self._jobs_condition: self._jobs[job.id] = job with self._clients_condition: clients = self._clients.copy() job_info = job.get_info() _Client.job_added(clients, job_info) </DeepExtract> job.log_job_started(self._log) job.task.startevent.attach(start_callback) def heartbeat_callback(task): <DeepExtract> job_info = job.get_info() with self._clients_condition: clients = self._clients.copy() _Client.job_changed(clients, job_info) </DeepExtract> job.log_job_heartbeat(self._log) job.task.heartbeatevent.attach(heartbeat_callback) def stopped_callback(task): <DeepExtract> job_info = job.get_info() with self._clients_condition: clients = self._clients.copy() _Client.job_changed(clients, job_info) </DeepExtract> job.log_job_stopped(self._log) job.task.stoppedevent.attach(stopped_callback)
def _attach_job_callbacks(job): def start_callback(task): with self._jobs_condition: self._jobs[job.id] = job with self._clients_condition: clients = self._clients.copy() job_info = job.get_info() _Client.job_added(clients, job_info) job.log_job_started(self._log) job.task.startevent.attach(start_callback) def heartbeat_callback(task): job_info = job.get_info() with self._clients_condition: clients = self._clients.copy() _Client.job_changed(clients, job_info) job.log_job_heartbeat(self._log) job.task.heartbeatevent.attach(heartbeat_callback) def stopped_callback(task): job_info = job.get_info() with self._clients_condition: clients = self._clients.copy() _Client.job_changed(clients, job_info) job.log_job_stopped(self._log) job.task.stoppedevent.attach(stopped_callback)
conveyor
positive
def _prep_pickling(args, kwargs): """Prepares the positional and keyword arguments for pickling.""" if args: args = [_pickling_value(v) for v in args] else: args = None _kwargs = {} if kwargs: for (k, v) in kwargs.items(): if v is not None: <DeepExtract> if isinstance(v, QuerySet): (s, p) = v.query.get_compiler(v.db).as_sql() _kwargs[k] = s % p if inspect.isclass(v) and (not hasattr(v, '__getstate__')): logger.warn('type "{0}" does not implement __getstate__'.format(type(v))) _kwargs[k] = v </DeepExtract> kwargs = _kwargs or None else: kwargs = None return (args, kwargs)
def _prep_pickling(args, kwargs): """Prepares the positional and keyword arguments for pickling.""" if args: args = [_pickling_value(v) for v in args] else: args = None _kwargs = {} if kwargs: for (k, v) in kwargs.items(): if v is not None: if isinstance(v, QuerySet): (s, p) = v.query.get_compiler(v.db).as_sql() _kwargs[k] = s % p if inspect.isclass(v) and (not hasattr(v, '__getstate__')): logger.warn('type "{0}" does not implement __getstate__'.format(type(v))) _kwargs[k] = v kwargs = _kwargs or None else: kwargs = None return (args, kwargs)
avocado
positive
def __init__(self, *, config): self._config = config self._k8s_mode = evaluate_boolean(config.k8s_mode) if config.k8s_mode else False self._prefix = pathlib.Path(config.prefix or '.') self.prefix_path = str(self._prefix) + '/' if len(str(self._prefix)) > 1 else '' <DeepExtract> logging.debug('Loading storage_provider: {}'.format(self._config.storage_provider)) if self._config.storage_provider == Provider.GOOGLE_STORAGE: google_storage = GoogleStorage(self._config) if not self._k8s_mode: google_storage.check_dependencies() self.storage_driver = google_storage elif self._config.storage_provider == Provider.AZURE_BLOBS: azure_storage = AzureStorage(self._config) if not self._k8s_mode: azure_storage.check_dependencies() self.storage_driver = azure_storage elif self._config.storage_provider == Provider.S3_RGW: self.storage_driver = S3RGWStorage(self._config) elif self._config.storage_provider.lower() == 's3_compatible': s3_storage = S3BaseStorage(self._config) if not self._k8s_mode: s3_storage.check_dependencies() self.storage_driver = s3_storage elif self._config.storage_provider.startswith(Provider.S3): s3_storage = S3Storage(self._config) if not self._k8s_mode: s3_storage.check_dependencies() self.storage_driver = s3_storage elif self._config.storage_provider == Provider.LOCAL: self.storage_driver = LocalStorage(self._config) elif self._config.storage_provider.lower() == 'ibm_storage': s3_storage = S3BaseStorage(self._config) if not self._k8s_mode: s3_storage.check_dependencies() self.storage_driver = s3_storage raise NotImplementedError('Unsupported storage provider') </DeepExtract> self.storage_provider = self._config.storage_provider
def __init__(self, *, config): self._config = config self._k8s_mode = evaluate_boolean(config.k8s_mode) if config.k8s_mode else False self._prefix = pathlib.Path(config.prefix or '.') self.prefix_path = str(self._prefix) + '/' if len(str(self._prefix)) > 1 else '' logging.debug('Loading storage_provider: {}'.format(self._config.storage_provider)) if self._config.storage_provider == Provider.GOOGLE_STORAGE: google_storage = GoogleStorage(self._config) if not self._k8s_mode: google_storage.check_dependencies() self.storage_driver = google_storage elif self._config.storage_provider == Provider.AZURE_BLOBS: azure_storage = AzureStorage(self._config) if not self._k8s_mode: azure_storage.check_dependencies() self.storage_driver = azure_storage elif self._config.storage_provider == Provider.S3_RGW: self.storage_driver = S3RGWStorage(self._config) elif self._config.storage_provider.lower() == 's3_compatible': s3_storage = S3BaseStorage(self._config) if not self._k8s_mode: s3_storage.check_dependencies() self.storage_driver = s3_storage elif self._config.storage_provider.startswith(Provider.S3): s3_storage = S3Storage(self._config) if not self._k8s_mode: s3_storage.check_dependencies() self.storage_driver = s3_storage elif self._config.storage_provider == Provider.LOCAL: self.storage_driver = LocalStorage(self._config) elif self._config.storage_provider.lower() == 'ibm_storage': s3_storage = S3BaseStorage(self._config) if not self._k8s_mode: s3_storage.check_dependencies() self.storage_driver = s3_storage raise NotImplementedError('Unsupported storage provider') self.storage_provider = self._config.storage_provider
cassandra-medusa
positive
def get_quad_index(polys): """ Estimate the corner points indexes and to make the top-left point as the index 0. For vertical instances, make the right-left point as the index 0. e.g., for quadrangle, the order is top-lef, top-right, bottom-right, bottom-left, respectively. Args: polys (list(list(float)): points of the polygon boxes. [ [x1, y1], ..., [xn, yn]] Returns: list(int): re-ordered corner points indexes. """ if polys.shape[0] == 4: tmp = np.zeros(4) tmp[0] = 0 tmp[1] = 1 tmp[2] = 2 tmp[3] = 3 if point_distance(polys[0], polys[3]) > 2 * point_distance(polys[0], polys[1]): tmp[0] = 1 tmp[1] = 2 tmp[2] = 3 tmp[3] = 0 return tmp angles = np.zeros(polys.shape[0]) for i in range(polys.shape[0]): <DeepExtract> theta = math.atan2(polys[i - 1][0] - polys[i][0], polys[i - 1][1] - polys[i][1]) - math.atan2(polys[(i + 1) % polys.shape[0]][0] - polys[i][0], polys[(i + 1) % polys.shape[0]][1] - polys[i][1]) if theta > math.pi: theta -= 2 * math.pi if theta < -math.pi: theta += 2 * math.pi theta = theta * 180.0 / math.pi if theta < 0: theta = -theta if dot_product(polys[i - 1], polys[i], polys[i], polys[(i + 1) % polys.shape[0]]) < 0: theta = 360 - theta angle1 = theta </DeepExtract> <DeepExtract> theta = math.atan2(polys[i - 2][0] - polys[i - 1][0], polys[i - 2][1] - polys[i - 1][1]) - math.atan2(polys[i][0] - polys[i - 1][0], polys[i][1] - polys[i - 1][1]) if theta > math.pi: theta -= 2 * math.pi if theta < -math.pi: theta += 2 * math.pi theta = theta * 180.0 / math.pi if theta < 0: theta = -theta if dot_product(polys[i - 2], polys[i - 1], polys[i - 1], polys[i]) < 0: theta = 360 - theta angle2 = theta </DeepExtract> angles[i] = abs(angle1 + angle2 - 180.0) tmp_index = 1 ret = np.zeros(4) index = np.argsort(angles) while abs(index[0] - index[tmp_index]) == 1 or abs(index[0] - index[tmp_index]) == polys.shape[0] - 1: tmp_index += 1 if tmp_index == len(index): return ret if index[0] < index[tmp_index]: ret[0] = index[0] ret[1] = (index[tmp_index] - 1 + polys.shape[0]) % polys.shape[0] ret[2] = index[tmp_index] ret[3] = (index[0] - 1 + polys.shape[0]) % polys.shape[0] else: ret[0] = index[tmp_index] ret[1] = (index[0] - 1 + polys.shape[0]) % polys.shape[0] ret[2] = index[0] ret[3] = (index[tmp_index] - 1 + polys.shape[0]) % polys.shape[0] return ret
def get_quad_index(polys): """ Estimate the corner points indexes and to make the top-left point as the index 0. For vertical instances, make the right-left point as the index 0. e.g., for quadrangle, the order is top-lef, top-right, bottom-right, bottom-left, respectively. Args: polys (list(list(float)): points of the polygon boxes. [ [x1, y1], ..., [xn, yn]] Returns: list(int): re-ordered corner points indexes. """ if polys.shape[0] == 4: tmp = np.zeros(4) tmp[0] = 0 tmp[1] = 1 tmp[2] = 2 tmp[3] = 3 if point_distance(polys[0], polys[3]) > 2 * point_distance(polys[0], polys[1]): tmp[0] = 1 tmp[1] = 2 tmp[2] = 3 tmp[3] = 0 return tmp angles = np.zeros(polys.shape[0]) for i in range(polys.shape[0]): theta = math.atan2(polys[i - 1][0] - polys[i][0], polys[i - 1][1] - polys[i][1]) - math.atan2(polys[(i + 1) % polys.shape[0]][0] - polys[i][0], polys[(i + 1) % polys.shape[0]][1] - polys[i][1]) if theta > math.pi: theta -= 2 * math.pi if theta < -math.pi: theta += 2 * math.pi theta = theta * 180.0 / math.pi if theta < 0: theta = -theta if dot_product(polys[i - 1], polys[i], polys[i], polys[(i + 1) % polys.shape[0]]) < 0: theta = 360 - theta angle1 = theta theta = math.atan2(polys[i - 2][0] - polys[i - 1][0], polys[i - 2][1] - polys[i - 1][1]) - math.atan2(polys[i][0] - polys[i - 1][0], polys[i][1] - polys[i - 1][1]) if theta > math.pi: theta -= 2 * math.pi if theta < -math.pi: theta += 2 * math.pi theta = theta * 180.0 / math.pi if theta < 0: theta = -theta if dot_product(polys[i - 2], polys[i - 1], polys[i - 1], polys[i]) < 0: theta = 360 - theta angle2 = theta angles[i] = abs(angle1 + angle2 - 180.0) tmp_index = 1 ret = np.zeros(4) index = np.argsort(angles) while abs(index[0] - index[tmp_index]) == 1 or abs(index[0] - index[tmp_index]) == polys.shape[0] - 1: tmp_index += 1 if tmp_index == len(index): return ret if index[0] < index[tmp_index]: ret[0] = index[0] ret[1] = (index[tmp_index] - 1 + polys.shape[0]) % polys.shape[0] ret[2] = index[tmp_index] ret[3] = (index[0] - 1 + polys.shape[0]) % polys.shape[0] else: ret[0] = index[tmp_index] ret[1] = (index[0] - 1 + polys.shape[0]) % polys.shape[0] ret[2] = index[0] ret[3] = (index[tmp_index] - 1 + polys.shape[0]) % polys.shape[0] return ret
DAVAR-Lab-OCR
positive
def main(): <DeepExtract> print(1) </DeepExtract> <DeepExtract> print('a') </DeepExtract> <DeepExtract> if 10 < 0: return return str(10) </DeepExtract> <DeepExtract> if -10 < 0: return return str(-10) </DeepExtract> <DeepExtract> return foo2(10) </DeepExtract> <DeepExtract> return foo2(-10) </DeepExtract>
def main(): print(1) print('a') if 10 < 0: return return str(10) if -10 < 0: return return str(-10) return foo2(10) return foo2(-10)
code-snippets-python
positive
def run_eval(self, results, save_dir): <DeepExtract> json.dump(self.convert_eval_format(results), open('{}/results.json'.format(save_dir), 'w')) </DeepExtract> coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir)) coco_eval = COCOeval(self.coco, coco_dets, 'bbox') coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize()
def run_eval(self, results, save_dir): json.dump(self.convert_eval_format(results), open('{}/results.json'.format(save_dir), 'w')) coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir)) coco_eval = COCOeval(self.coco, coco_dets, 'bbox') coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize()
DANR
positive
def snli2json(): """Preprocesses SNLI data and returns to spoder files""" files = ['snli_1.0_train.jsonl', 'snli_1.0_dev.jsonl', 'snli_1.0_test.jsonl'] <DeepExtract> home = os.environ['HOME'] data_dir = join(home, '.data') snli_dir = join(data_dir, 'snli') snli_url = 'http://nlp.stanford.edu/projects/snli/snli_1.0.zip' if not os.path.exists(data_dir): os.mkdir(data_dir) if not os.path.exists(snli_dir): os.mkdir(snli_dir) if not os.path.exists(join(data_dir, 'snli_1.0.zip')): print('Downloading SNLI...') snlidownload = urllib.URLopener() snlidownload.retrieve(snli_url, join(data_dir, 'snli_1.0.zip')) print('Opening zip file...') archive = zipfile.ZipFile(join(data_dir, 'snli_1.0.zip'), 'r') (archive, snli_dir) = (archive, snli_dir) </DeepExtract> new_files = ['train.data', 'dev.data', 'test.data'] names = ['train', 'dev', 'test'] if not os.path.exists(join(snli_dir, new_files[0])): for (name, new_name) in zip(files, new_files): print('Writing {0}...'.format(new_name)) archive = zipfile.ZipFile(join(data_dir, 'snli_1.0.zip'), 'r') snli_file = archive.open(join('snli_1.0', name), 'r') with open(join(snli_dir, new_name), 'w') as datafile: for line in snli_file: data = json.loads(line) if data['gold_label'] == '-': continue premise = data['sentence1'] hypothesis = data['sentence2'] target = data['gold_label'] datafile.write(json.dumps([premise, hypothesis, target]) + '\n') return [names, [join(snli_dir, new_name) for new_name in new_files]]
def snli2json(): """Preprocesses SNLI data and returns to spoder files""" files = ['snli_1.0_train.jsonl', 'snli_1.0_dev.jsonl', 'snli_1.0_test.jsonl'] home = os.environ['HOME'] data_dir = join(home, '.data') snli_dir = join(data_dir, 'snli') snli_url = 'http://nlp.stanford.edu/projects/snli/snli_1.0.zip' if not os.path.exists(data_dir): os.mkdir(data_dir) if not os.path.exists(snli_dir): os.mkdir(snli_dir) if not os.path.exists(join(data_dir, 'snli_1.0.zip')): print('Downloading SNLI...') snlidownload = urllib.URLopener() snlidownload.retrieve(snli_url, join(data_dir, 'snli_1.0.zip')) print('Opening zip file...') archive = zipfile.ZipFile(join(data_dir, 'snli_1.0.zip'), 'r') (archive, snli_dir) = (archive, snli_dir) new_files = ['train.data', 'dev.data', 'test.data'] names = ['train', 'dev', 'test'] if not os.path.exists(join(snli_dir, new_files[0])): for (name, new_name) in zip(files, new_files): print('Writing {0}...'.format(new_name)) archive = zipfile.ZipFile(join(data_dir, 'snli_1.0.zip'), 'r') snli_file = archive.open(join('snli_1.0', name), 'r') with open(join(snli_dir, new_name), 'w') as datafile: for line in snli_file: data = json.loads(line) if data['gold_label'] == '-': continue premise = data['sentence1'] hypothesis = data['sentence2'] target = data['gold_label'] datafile.write(json.dumps([premise, hypothesis, target]) + '\n') return [names, [join(snli_dir, new_name) for new_name in new_files]]
CPL
positive
def data_loader(root, phase='train', batch_size=256): if phase == 'train': is_train = True elif phase == 'test': is_train = False else: raise KeyError <DeepExtract> normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]], std=[x / 255.0 for x in [63.0, 62.1, 66.7]]) transform = [] transform.append(transforms.Resize((224, 224))) transform.append(transforms.ToTensor()) transform.append(normalize) input_transform = transforms.Compose(transform) </DeepExtract> dataset = CustomDataset(root, input_transform, target_transform) return data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=is_train)
def data_loader(root, phase='train', batch_size=256): if phase == 'train': is_train = True elif phase == 'test': is_train = False else: raise KeyError normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]], std=[x / 255.0 for x in [63.0, 62.1, 66.7]]) transform = [] transform.append(transforms.Resize((224, 224))) transform.append(transforms.ToTensor()) transform.append(normalize) input_transform = transforms.Compose(transform) dataset = CustomDataset(root, input_transform, target_transform) return data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=is_train)
AI_Starthon2019
positive
@datasets_bp.route('/service/<uuid:dataset_id>/edit', methods=('POST',)) @service_session_login_required def edit_service(dataset_id): <DeepExtract> try: ds = db.dataset.get(dataset_id) except db.exceptions.NoDataFoundException as e: raise NotFound("Can't find this dataset.") if ds['public'] or (current_user.is_authenticated and ds['author'] == current_user.id): ds = ds else: raise NotFound("Can't find this dataset.") </DeepExtract> if ds['author'] != current_user.id: raise APIUnauthorized("You can't edit this dataset.") if request.method == 'POST': dataset_dict = request.get_json() if not dataset_dict: return (jsonify(success=False, error='Data must be submitted in JSON format.'), 400) try: db.dataset.update(str(dataset_id), dataset_dict, current_user.id) except dataset_validator.ValidationException as e: return (jsonify(success=False, error=e.error), 400) return jsonify(success=True, dataset_id=dataset_id)
@datasets_bp.route('/service/<uuid:dataset_id>/edit', methods=('POST',)) @service_session_login_required def edit_service(dataset_id): try: ds = db.dataset.get(dataset_id) except db.exceptions.NoDataFoundException as e: raise NotFound("Can't find this dataset.") if ds['public'] or (current_user.is_authenticated and ds['author'] == current_user.id): ds = ds else: raise NotFound("Can't find this dataset.") if ds['author'] != current_user.id: raise APIUnauthorized("You can't edit this dataset.") if request.method == 'POST': dataset_dict = request.get_json() if not dataset_dict: return (jsonify(success=False, error='Data must be submitted in JSON format.'), 400) try: db.dataset.update(str(dataset_id), dataset_dict, current_user.id) except dataset_validator.ValidationException as e: return (jsonify(success=False, error=e.error), 400) return jsonify(success=True, dataset_id=dataset_id)
acousticbrainz-server
positive
def loadYamlToOdict(conf_path): from utils.util import OrderedYaml import yaml (dirname, filename) = os.path.split(os.path.abspath(__file__)) conf_path = os.path.join(dirname, '../', conf_path) <DeepExtract> _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG def dict_representer(dumper, data): (Loader, Dumper) = dumper.represent_dict(data.items()) def dict_constructor(loader, node): (Loader, Dumper) = OrderedDict(loader.construct_pairs(node)) Dumper.add_representer(OrderedDict, dict_representer) Loader.add_constructor(_mapping_tag, dict_constructor) (Loader, Dumper) = (Loader, Dumper) </DeepExtract> with open(conf_path, mode='r') as f: opt = yaml.load(f, Loader=Loader) return opt
def loadYamlToOdict(conf_path): from utils.util import OrderedYaml import yaml (dirname, filename) = os.path.split(os.path.abspath(__file__)) conf_path = os.path.join(dirname, '../', conf_path) _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG def dict_representer(dumper, data): (Loader, Dumper) = dumper.represent_dict(data.items()) def dict_constructor(loader, node): (Loader, Dumper) = OrderedDict(loader.construct_pairs(node)) Dumper.add_representer(OrderedDict, dict_representer) Loader.add_constructor(_mapping_tag, dict_constructor) (Loader, Dumper) = (Loader, Dumper) with open(conf_path, mode='r') as f: opt = yaml.load(f, Loader=Loader) return opt
DeFlow
positive
def stop_detect(self, dev_loader: Iterable[Dict], lr: float) -> bool: """ Run valid epoch and schedule training progress: 1) schedule learning/sampling rate 2) save checkpoint 3) early stop detection """ <DeepExtract> self.task.eval() self.reporter.eval() with th.no_grad(): for egs in dev_loader: egs = self.prep_egs(egs) stats = self.task(egs) self.reporter.update(egs, ['#utt', '#tok']) self.reporter.update(stats) </DeepExtract> (reports, logstr) = self.reporter.report(self.cur_epoch, lr) if self.ss_scheduler: logstr += f' | ssr = {self.ssr:.3f}' update_value = reports[self.stop_on] better = self.stop_detector.step(update_value) status = {'step': self.cur_step, 'epoch': self.cur_epoch, 'num_parameters': self.num_params, 'detector_state': self.stop_detector.state_dict(), 'optimizer_state': self.optimizer.state_dict(), 'lr_scheduler_state': self.lr_scheduler.state_dict()} status.update(reports) if better: <DeepExtract> if self.rank in [0, None]: if enable_subroutine: cpt = self.model_states() cpt.update(status) else: cpt = status cpt_name = f"{'best'}.pt.tar" if not keep_optimizer and 'optimizer_state' in cpt: _ = cpt.pop('optimizer_state') th.save(cpt, self.checkpoint / cpt_name) self.reporter.log(f'Save checkpoint ==> {self.checkpoint / cpt_name}') </DeepExtract> else: no_impr = self.stop_detector.no_impr logstr += f' | no impr: {no_impr:d}, ' logstr += f'best = {self.stop_detector.best:.4f}' self.reporter.log(logstr) <DeepExtract> if 'epoch' == 'step' and self.lr_scheduler_period == 'step': self.lr_scheduler.step() if 'epoch' == 'epoch' and self.lr_scheduler_period == 'epoch': if isinstance(self.lr_scheduler, LrScheduler['reduce_lr']): self.lr_scheduler.step(update_value) else: self.lr_scheduler.step() </DeepExtract> if self.ss_scheduler: self.ssr = self.ss_scheduler.step(self.cur_epoch, reports['accu']) <DeepExtract> if self.rank in [0, None]: if enable_subroutine: cpt = self.model_states() cpt.update(status) else: cpt = status cpt_name = f"{'last'}.pt.tar" if not keep_optimizer and 'optimizer_state' in cpt: _ = cpt.pop('optimizer_state') th.save(cpt, self.checkpoint / cpt_name) self.reporter.log(f'Save checkpoint ==> {self.checkpoint / cpt_name}') </DeepExtract> if self.save_interval > 0 and self.cur_epoch % self.save_interval == 0: <DeepExtract> if self.rank in [0, None]: if enable_subroutine: cpt = self.model_states() cpt.update(status) else: cpt = status cpt_name = f"{f'epoch.{self.cur_epoch}'}.pt.tar" if not False and 'optimizer_state' in cpt: _ = cpt.pop('optimizer_state') th.save(cpt, self.checkpoint / cpt_name) self.reporter.log(f'Save checkpoint ==> {self.checkpoint / cpt_name}') </DeepExtract> if self.stop_detector.stop(): self.reporter.log('Stop training cause no improvements for ' + f'{self.no_impr} epochs') return True return False
def stop_detect(self, dev_loader: Iterable[Dict], lr: float) -> bool: """ Run valid epoch and schedule training progress: 1) schedule learning/sampling rate 2) save checkpoint 3) early stop detection """ self.task.eval() self.reporter.eval() with th.no_grad(): for egs in dev_loader: egs = self.prep_egs(egs) stats = self.task(egs) self.reporter.update(egs, ['#utt', '#tok']) self.reporter.update(stats) (reports, logstr) = self.reporter.report(self.cur_epoch, lr) if self.ss_scheduler: logstr += f' | ssr = {self.ssr:.3f}' update_value = reports[self.stop_on] better = self.stop_detector.step(update_value) status = {'step': self.cur_step, 'epoch': self.cur_epoch, 'num_parameters': self.num_params, 'detector_state': self.stop_detector.state_dict(), 'optimizer_state': self.optimizer.state_dict(), 'lr_scheduler_state': self.lr_scheduler.state_dict()} status.update(reports) if better: if self.rank in [0, None]: if enable_subroutine: cpt = self.model_states() cpt.update(status) else: cpt = status cpt_name = f"{'best'}.pt.tar" if not keep_optimizer and 'optimizer_state' in cpt: _ = cpt.pop('optimizer_state') th.save(cpt, self.checkpoint / cpt_name) self.reporter.log(f'Save checkpoint ==> {self.checkpoint / cpt_name}') else: no_impr = self.stop_detector.no_impr logstr += f' | no impr: {no_impr:d}, ' logstr += f'best = {self.stop_detector.best:.4f}' self.reporter.log(logstr) if 'epoch' == 'step' and self.lr_scheduler_period == 'step': self.lr_scheduler.step() if 'epoch' == 'epoch' and self.lr_scheduler_period == 'epoch': if isinstance(self.lr_scheduler, LrScheduler['reduce_lr']): self.lr_scheduler.step(update_value) else: self.lr_scheduler.step() if self.ss_scheduler: self.ssr = self.ss_scheduler.step(self.cur_epoch, reports['accu']) if self.rank in [0, None]: if enable_subroutine: cpt = self.model_states() cpt.update(status) else: cpt = status cpt_name = f"{'last'}.pt.tar" if not keep_optimizer and 'optimizer_state' in cpt: _ = cpt.pop('optimizer_state') th.save(cpt, self.checkpoint / cpt_name) self.reporter.log(f'Save checkpoint ==> {self.checkpoint / cpt_name}') if self.save_interval > 0 and self.cur_epoch % self.save_interval == 0: if self.rank in [0, None]: if enable_subroutine: cpt = self.model_states() cpt.update(status) else: cpt = status cpt_name = f"{f'epoch.{self.cur_epoch}'}.pt.tar" if not False and 'optimizer_state' in cpt: _ = cpt.pop('optimizer_state') th.save(cpt, self.checkpoint / cpt_name) self.reporter.log(f'Save checkpoint ==> {self.checkpoint / cpt_name}') if self.stop_detector.stop(): self.reporter.log('Stop training cause no improvements for ' + f'{self.no_impr} epochs') return True return False
aps
positive
def send(request, **kwargs): """Send a given PreparedRequest.""" kwargs.setdefault('stream', self.stream) kwargs.setdefault('verify', self.verify) kwargs.setdefault('cert', self.cert) kwargs.setdefault('proxies', self.proxies) if not isinstance(request, PreparedRequest): raise ValueError('You can only send PreparedRequests.') checked_urls = set() while request.url in self.redirect_cache: checked_urls.add(request.url) new_url = self.redirect_cache.get(request.url) if new_url in checked_urls: break request.url = new_url allow_redirects = kwargs.pop('allow_redirects', True) stream = kwargs.get('stream') hooks = request.hooks <DeepExtract> for (prefix, adapter) in self.adapters.items(): if request.url.lower().startswith(prefix): adapter = adapter raise InvalidSchema("No connection adapters were found for '%s'" % request.url) </DeepExtract> start = datetime.utcnow() r = adapter.send(request, **kwargs) r.elapsed = datetime.utcnow() - start r = dispatch_hook('response', hooks, r, **kwargs) if r.history: for resp in r.history: extract_cookies_to_jar(self.cookies, resp.request, resp.raw) extract_cookies_to_jar(self.cookies, request, r.raw) <DeepExtract> i = 0 hist = [] while r.is_redirect: prepared_request = request.copy() if i > 0: hist.append(r) new_hist = list(hist) r.history = new_hist try: r.content except (ChunkedEncodingError, ContentDecodingError, RuntimeError): r.raw.read(decode_content=False) if i >= self.max_redirects: raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects) r.close() url = r.headers['location'] method = request.method if url.startswith('//'): parsed_rurl = urlparse(r.url) url = '%s:%s' % (parsed_rurl.scheme, url) parsed = urlparse(url) url = parsed.geturl() if not parsed.netloc: url = urljoin(r.url, requote_uri(url)) else: url = requote_uri(url) prepared_request.url = to_native_string(url) if r.is_permanent_redirect and request.url != prepared_request.url: self.redirect_cache[request.url] = prepared_request.url if r.status_code == codes.see_other and method != 'HEAD': method = 'GET' if r.status_code == codes.found and method != 'HEAD': method = 'GET' if r.status_code == codes.moved and method == 'POST': method = 'GET' prepared_request.method = method if r.status_code not in (codes.temporary_redirect, codes.permanent_redirect): if 'Content-Length' in prepared_request.headers: del prepared_request.headers['Content-Length'] prepared_request.body = None headers = prepared_request.headers try: del headers['Cookie'] except KeyError: pass extract_cookies_to_jar(prepared_request._cookies, request, r.raw) prepared_request._cookies.update(self.cookies) prepared_request.prepare_cookies(prepared_request._cookies) proxies = self.rebuild_proxies(prepared_request, proxies) self.rebuild_auth(prepared_request, r) request = prepared_request r = self.send(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, allow_redirects=False, **adapter_kwargs) extract_cookies_to_jar(self.cookies, prepared_request, r.raw) i += 1 yield r </DeepExtract> history = [resp for resp in gen] if allow_redirects else [] if history: history.insert(0, r) r = history.pop() r.history = history if not stream: r.content return r
def send(request, **kwargs): """Send a given PreparedRequest.""" kwargs.setdefault('stream', self.stream) kwargs.setdefault('verify', self.verify) kwargs.setdefault('cert', self.cert) kwargs.setdefault('proxies', self.proxies) if not isinstance(request, PreparedRequest): raise ValueError('You can only send PreparedRequests.') checked_urls = set() while request.url in self.redirect_cache: checked_urls.add(request.url) new_url = self.redirect_cache.get(request.url) if new_url in checked_urls: break request.url = new_url allow_redirects = kwargs.pop('allow_redirects', True) stream = kwargs.get('stream') hooks = request.hooks for (prefix, adapter) in self.adapters.items(): if request.url.lower().startswith(prefix): adapter = adapter raise InvalidSchema("No connection adapters were found for '%s'" % request.url) start = datetime.utcnow() r = adapter.send(request, **kwargs) r.elapsed = datetime.utcnow() - start r = dispatch_hook('response', hooks, r, **kwargs) if r.history: for resp in r.history: extract_cookies_to_jar(self.cookies, resp.request, resp.raw) extract_cookies_to_jar(self.cookies, request, r.raw) i = 0 hist = [] while r.is_redirect: prepared_request = request.copy() if i > 0: hist.append(r) new_hist = list(hist) r.history = new_hist try: r.content except (ChunkedEncodingError, ContentDecodingError, RuntimeError): r.raw.read(decode_content=False) if i >= self.max_redirects: raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects) r.close() url = r.headers['location'] method = request.method if url.startswith('//'): parsed_rurl = urlparse(r.url) url = '%s:%s' % (parsed_rurl.scheme, url) parsed = urlparse(url) url = parsed.geturl() if not parsed.netloc: url = urljoin(r.url, requote_uri(url)) else: url = requote_uri(url) prepared_request.url = to_native_string(url) if r.is_permanent_redirect and request.url != prepared_request.url: self.redirect_cache[request.url] = prepared_request.url if r.status_code == codes.see_other and method != 'HEAD': method = 'GET' if r.status_code == codes.found and method != 'HEAD': method = 'GET' if r.status_code == codes.moved and method == 'POST': method = 'GET' prepared_request.method = method if r.status_code not in (codes.temporary_redirect, codes.permanent_redirect): if 'Content-Length' in prepared_request.headers: del prepared_request.headers['Content-Length'] prepared_request.body = None headers = prepared_request.headers try: del headers['Cookie'] except KeyError: pass extract_cookies_to_jar(prepared_request._cookies, request, r.raw) prepared_request._cookies.update(self.cookies) prepared_request.prepare_cookies(prepared_request._cookies) proxies = self.rebuild_proxies(prepared_request, proxies) self.rebuild_auth(prepared_request, r) request = prepared_request r = self.send(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, allow_redirects=False, **adapter_kwargs) extract_cookies_to_jar(self.cookies, prepared_request, r.raw) i += 1 yield r history = [resp for resp in gen] if allow_redirects else [] if history: history.insert(0, r) r = history.pop() r.history = history if not stream: r.content return r
cachewarmer
positive
def test_commit(self, mock_call): """Verify Mercurial can commit files.""" <DeepExtract> self.wc.commit(self.message) </DeepExtract> calls = [call(('hg', 'commit', '--message', self.message)), call(('hg', 'push'))] mock_call.assert_has_calls(calls)
def test_commit(self, mock_call): """Verify Mercurial can commit files.""" self.wc.commit(self.message) calls = [call(('hg', 'commit', '--message', self.message)), call(('hg', 'push'))] mock_call.assert_has_calls(calls)
doorstop
positive
def _test_consumer_expects_error(self, Error, method, ctxt, obj_ref, service='storage', resource_type='volume', resource_id=uuidutils.generate_uuid()): <DeepExtract> consumer_data = {'service': service, 'resource_type': resource_type, 'resource_id': resource_id} </DeepExtract> self.assertRaises(Error, method, ctxt, obj_ref, consumer_data)
def _test_consumer_expects_error(self, Error, method, ctxt, obj_ref, service='storage', resource_type='volume', resource_id=uuidutils.generate_uuid()): consumer_data = {'service': service, 'resource_type': resource_type, 'resource_id': resource_id} self.assertRaises(Error, method, ctxt, obj_ref, consumer_data)
castellan
positive
def class_associations(cn: ClassDefinitionName, must_render: bool=False) -> str: """ Emit all associations for a focus class. If none are specified, all classes are generated @param cn: Name of class to be emitted @param must_render: True means render even if this is a target (class is specifically requested) @return: YUML representation of the association """ assocs: List[str] = [] if cn not in self.associations_generated and (not self.focus_classes or cn in self.focus_classes): cls = self.schema.classes[cn] for slot in self.filtered_cls_slots(cn, False, lambda s: s.range in self.schema.classes)[::-1]: if not slot.range in self.associations_generated and cn in slot.domain_of: <DeepExtract> slot_defs: List[str] = [] if cn not in self.box_generated and (not self.focus_classes or cn in self.focus_classes): cls = self.schema.classes[cn] for slot in self.filtered_cls_slots(cn, all_slots=True, filtr=lambda s: s.range not in self.schema.classes): if True or cn in slot.domain_of: mod = self.prop_modifier(cls, slot) slot_defs.append(underscore(self.aliased_slot_name(slot)) + mod + ':' + underscore(slot.range) + self.cardinality(slot)) self.box_generated.add(cn) self.referenced.add(cn) rhs = '[' + camelcase(cn) + ('|' + ';'.join(slot_defs) if slot_defs else '') + ']' </DeepExtract> <DeepExtract> slot_defs: List[str] = [] if cast(ClassDefinitionName, slot.range) not in self.box_generated and (not self.focus_classes or cast(ClassDefinitionName, slot.range) in self.focus_classes): cls = self.schema.classes[cast(ClassDefinitionName, slot.range)] for slot in self.filtered_cls_slots(cast(ClassDefinitionName, slot.range), all_slots=True, filtr=lambda s: s.range not in self.schema.classes): if True or cast(ClassDefinitionName, slot.range) in slot.domain_of: mod = self.prop_modifier(cls, slot) slot_defs.append(underscore(self.aliased_slot_name(slot)) + mod + ':' + underscore(slot.range) + self.cardinality(slot)) self.box_generated.add(cast(ClassDefinitionName, slot.range)) self.referenced.add(cast(ClassDefinitionName, slot.range)) lhs = '[' + camelcase(cast(ClassDefinitionName, slot.range)) + ('|' + ';'.join(slot_defs) if slot_defs else '') + ']' </DeepExtract> assocs.append(lhs + '<' + self.aliased_slot_name(slot) + self.prop_modifier(cls, slot) + self.cardinality(slot, False) + (yuml_inline_rev if slot.inlined else yuml_ref) + rhs) for slotname in sorted(self.synopsis.rangerefs.get(cn, [])): slot = self.schema.slots[slotname] if cls.name not in slot.domain_of and cls.name not in self.associations_generated: for dom in [self.schema.classes[dof] for dof in slot.domain_of]: assocs.append(self.class_box(dom.name) + (yuml_inline if slot.inlined else yuml_ref) + self.aliased_slot_name(slot) + self.prop_modifier(dom, slot) + self.cardinality(slot, False) + '>' + self.class_box(cn)) for mixin in cls.mixins: assocs.append(self.class_box(cn) + yuml_uses + self.class_box(mixin)) if cls.name in self.synopsis.mixinrefs: for mixin in sorted(self.synopsis.mixinrefs[cls.name].classrefs, reverse=True): assocs.append(self.class_box(ClassDefinitionName(mixin)) + yuml_uses + self.class_box(cn)) if cn in self.synopsis.applytos.classrefs: for injector in sorted(self.synopsis.applytorefs[cn].classrefs, reverse=True): assocs.append(self.class_box(cn) + yuml_injected + self.class_box(ClassDefinitionName(injector))) self.associations_generated.add(cn) if cn in self.synopsis.isarefs: for is_a_cls in sorted(self.synopsis.isarefs[cn].classrefs, reverse=True): assocs.append(self.class_box(cn) + yuml_is_a + self.class_box(ClassDefinitionName(is_a_cls))) if cls.is_a and cls.is_a not in self.associations_generated: assocs.append(self.class_box(cls.is_a) + yuml_is_a + self.class_box(cn)) return ','.join(assocs)
def class_associations(cn: ClassDefinitionName, must_render: bool=False) -> str: """ Emit all associations for a focus class. If none are specified, all classes are generated @param cn: Name of class to be emitted @param must_render: True means render even if this is a target (class is specifically requested) @return: YUML representation of the association """ assocs: List[str] = [] if cn not in self.associations_generated and (not self.focus_classes or cn in self.focus_classes): cls = self.schema.classes[cn] for slot in self.filtered_cls_slots(cn, False, lambda s: s.range in self.schema.classes)[::-1]: if not slot.range in self.associations_generated and cn in slot.domain_of: slot_defs: List[str] = [] if cn not in self.box_generated and (not self.focus_classes or cn in self.focus_classes): cls = self.schema.classes[cn] for slot in self.filtered_cls_slots(cn, all_slots=True, filtr=lambda s: s.range not in self.schema.classes): if True or cn in slot.domain_of: mod = self.prop_modifier(cls, slot) slot_defs.append(underscore(self.aliased_slot_name(slot)) + mod + ':' + underscore(slot.range) + self.cardinality(slot)) self.box_generated.add(cn) self.referenced.add(cn) rhs = '[' + camelcase(cn) + ('|' + ';'.join(slot_defs) if slot_defs else '') + ']' slot_defs: List[str] = [] if cast(ClassDefinitionName, slot.range) not in self.box_generated and (not self.focus_classes or cast(ClassDefinitionName, slot.range) in self.focus_classes): cls = self.schema.classes[cast(ClassDefinitionName, slot.range)] for slot in self.filtered_cls_slots(cast(ClassDefinitionName, slot.range), all_slots=True, filtr=lambda s: s.range not in self.schema.classes): if True or cast(ClassDefinitionName, slot.range) in slot.domain_of: mod = self.prop_modifier(cls, slot) slot_defs.append(underscore(self.aliased_slot_name(slot)) + mod + ':' + underscore(slot.range) + self.cardinality(slot)) self.box_generated.add(cast(ClassDefinitionName, slot.range)) self.referenced.add(cast(ClassDefinitionName, slot.range)) lhs = '[' + camelcase(cast(ClassDefinitionName, slot.range)) + ('|' + ';'.join(slot_defs) if slot_defs else '') + ']' assocs.append(lhs + '<' + self.aliased_slot_name(slot) + self.prop_modifier(cls, slot) + self.cardinality(slot, False) + (yuml_inline_rev if slot.inlined else yuml_ref) + rhs) for slotname in sorted(self.synopsis.rangerefs.get(cn, [])): slot = self.schema.slots[slotname] if cls.name not in slot.domain_of and cls.name not in self.associations_generated: for dom in [self.schema.classes[dof] for dof in slot.domain_of]: assocs.append(self.class_box(dom.name) + (yuml_inline if slot.inlined else yuml_ref) + self.aliased_slot_name(slot) + self.prop_modifier(dom, slot) + self.cardinality(slot, False) + '>' + self.class_box(cn)) for mixin in cls.mixins: assocs.append(self.class_box(cn) + yuml_uses + self.class_box(mixin)) if cls.name in self.synopsis.mixinrefs: for mixin in sorted(self.synopsis.mixinrefs[cls.name].classrefs, reverse=True): assocs.append(self.class_box(ClassDefinitionName(mixin)) + yuml_uses + self.class_box(cn)) if cn in self.synopsis.applytos.classrefs: for injector in sorted(self.synopsis.applytorefs[cn].classrefs, reverse=True): assocs.append(self.class_box(cn) + yuml_injected + self.class_box(ClassDefinitionName(injector))) self.associations_generated.add(cn) if cn in self.synopsis.isarefs: for is_a_cls in sorted(self.synopsis.isarefs[cn].classrefs, reverse=True): assocs.append(self.class_box(cn) + yuml_is_a + self.class_box(ClassDefinitionName(is_a_cls))) if cls.is_a and cls.is_a not in self.associations_generated: assocs.append(self.class_box(cls.is_a) + yuml_is_a + self.class_box(cn)) return ','.join(assocs)
biolinkml
positive
def fixup_scripts(home_dir): shebang = '#!%s/bin/python' % os.path.normcase(os.path.abspath(home_dir)) new_shebang = '#!/usr/bin/env python%s' % sys.version[:3] activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this" if sys.platform == 'win32': bin_suffix = 'Scripts' else: bin_suffix = 'bin' bin_dir = os.path.join(home_dir, bin_suffix) <DeepExtract> if sys.platform == 'win32': mkdir(home_dir) if ' ' in home_dir: try: import win32api except ImportError: print('Error: the path "%s" has a space in it' % home_dir) print('To handle these kinds of paths, the win32api module must be installed:') print(' http://sourceforge.net/projects/pywin32/') sys.exit(3) home_dir = win32api.GetShortPathName(home_dir) lib_dir = join(home_dir, 'Lib') inc_dir = join(home_dir, 'Include') bin_dir = join(home_dir, 'Scripts') elif is_jython: lib_dir = join(home_dir, 'Lib') inc_dir = join(home_dir, 'Include') bin_dir = join(home_dir, 'bin') elif is_pypy: lib_dir = home_dir inc_dir = join(home_dir, 'include') bin_dir = join(home_dir, 'bin') else: lib_dir = join(home_dir, 'lib', py_version) inc_dir = join(home_dir, 'include', py_version + abiflags) bin_dir = join(home_dir, 'bin') (home_dir, lib_dir, inc_dir, bin_dir) = (home_dir, lib_dir, inc_dir, bin_dir) </DeepExtract> for filename in os.listdir(bin_dir): filename = os.path.join(bin_dir, filename) if not os.path.isfile(filename): continue f = open(filename, 'rb') try: try: lines = f.read().decode('utf-8').splitlines() except UnicodeDecodeError: continue finally: f.close() if not lines: logger.warn('Script %s is an empty file' % filename) continue if not lines[0].strip().startswith(shebang): if os.path.basename(filename) in OK_ABS_SCRIPTS: logger.debug('Cannot make script %s relative' % filename) elif lines[0].strip() == new_shebang: logger.info('Script %s has already been made relative' % filename) else: logger.warn("Script %s cannot be made relative (it's not a normal script that starts with %s)" % (filename, shebang)) continue logger.notify('Making script %s relative' % filename) lines = [new_shebang + '\n', activate + '\n'] + lines[1:] f = open(filename, 'wb') f.write('\n'.join(lines).encode('utf-8')) f.close()
def fixup_scripts(home_dir): shebang = '#!%s/bin/python' % os.path.normcase(os.path.abspath(home_dir)) new_shebang = '#!/usr/bin/env python%s' % sys.version[:3] activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this" if sys.platform == 'win32': bin_suffix = 'Scripts' else: bin_suffix = 'bin' bin_dir = os.path.join(home_dir, bin_suffix) if sys.platform == 'win32': mkdir(home_dir) if ' ' in home_dir: try: import win32api except ImportError: print('Error: the path "%s" has a space in it' % home_dir) print('To handle these kinds of paths, the win32api module must be installed:') print(' http://sourceforge.net/projects/pywin32/') sys.exit(3) home_dir = win32api.GetShortPathName(home_dir) lib_dir = join(home_dir, 'Lib') inc_dir = join(home_dir, 'Include') bin_dir = join(home_dir, 'Scripts') elif is_jython: lib_dir = join(home_dir, 'Lib') inc_dir = join(home_dir, 'Include') bin_dir = join(home_dir, 'bin') elif is_pypy: lib_dir = home_dir inc_dir = join(home_dir, 'include') bin_dir = join(home_dir, 'bin') else: lib_dir = join(home_dir, 'lib', py_version) inc_dir = join(home_dir, 'include', py_version + abiflags) bin_dir = join(home_dir, 'bin') (home_dir, lib_dir, inc_dir, bin_dir) = (home_dir, lib_dir, inc_dir, bin_dir) for filename in os.listdir(bin_dir): filename = os.path.join(bin_dir, filename) if not os.path.isfile(filename): continue f = open(filename, 'rb') try: try: lines = f.read().decode('utf-8').splitlines() except UnicodeDecodeError: continue finally: f.close() if not lines: logger.warn('Script %s is an empty file' % filename) continue if not lines[0].strip().startswith(shebang): if os.path.basename(filename) in OK_ABS_SCRIPTS: logger.debug('Cannot make script %s relative' % filename) elif lines[0].strip() == new_shebang: logger.info('Script %s has already been made relative' % filename) else: logger.warn("Script %s cannot be made relative (it's not a normal script that starts with %s)" % (filename, shebang)) continue logger.notify('Making script %s relative' % filename) lines = [new_shebang + '\n', activate + '\n'] + lines[1:] f = open(filename, 'wb') f.write('\n'.join(lines).encode('utf-8')) f.close()
conveyor
positive
def prepare_epoch(self): """ Prepare for one epoch. Returns: bool: False if to stop the training. """ self.epoch += 1 if self.epoch >= self.epoch_start_halving and (self.epoch - self.epoch_start_halving) % self._halving_freq == 0: self._lr *= 0.5 self._current_iter = 0 self._iters_from_last_valid = 0 self._train_costs = [] self.prepared_worker_pool.clear() self.batch_pool = range(self.num_train_batches) self.rand.shuffle(self.batch_pool) if self.epoch > self.end_at: <DeepExtract> logging.info('Training is done, wait all workers to stop') if self.log_file: self.log_file.write('Training is done, wait all workers to stop' + '\n') </DeepExtract> return False else: <DeepExtract> logging.info('start epoch {} with lr={}'.format(self.epoch, self._lr)) if self.log_file: self.log_file.write('start epoch {} with lr={}'.format(self.epoch, self._lr) + '\n') </DeepExtract> return True
def prepare_epoch(self): """ Prepare for one epoch. Returns: bool: False if to stop the training. """ self.epoch += 1 if self.epoch >= self.epoch_start_halving and (self.epoch - self.epoch_start_halving) % self._halving_freq == 0: self._lr *= 0.5 self._current_iter = 0 self._iters_from_last_valid = 0 self._train_costs = [] self.prepared_worker_pool.clear() self.batch_pool = range(self.num_train_batches) self.rand.shuffle(self.batch_pool) if self.epoch > self.end_at: logging.info('Training is done, wait all workers to stop') if self.log_file: self.log_file.write('Training is done, wait all workers to stop' + '\n') return False else: logging.info('start epoch {} with lr={}'.format(self.epoch, self._lr)) if self.log_file: self.log_file.write('start epoch {} with lr={}'.format(self.epoch, self._lr) + '\n') return True
deepy
positive
def choice(self, key: str, values: Union[List[int], List[float], List[str], List[bool]], merge: bool=False) -> 'SearchSpace': """Adds a hyperparameter with a list of possible values. Args: key: Name of the hyper-parameter. values: List of possible value for the hyperparameter. merge: If false (default), raises an error if the hyper-parameter already exist. If true, adds values to the parameter if it already exist. Returns: The conditional SearchSpace corresponding to the values in "values". """ if not values: raise ValueError('The list of values is empty') <DeepExtract> for field in self._fields: if field.name == key: field = field field = None </DeepExtract> if field is None: if merge: raise ValueError(f'Merge=true but the field {key} does not already exist') field = self._fields.add(name=key) if self._parent_values: field.parent_discrete_values.MergeFrom(self._parent_values) elif not merge: raise ValueError(f'The field {key} already exist') dst_values = hyperparameter_pb2.HyperParameterSpace.DiscreteCandidates() for value in values: dst_value = dst_values.possible_values.add() if isinstance(value, bool): dst_value.categorical = 'true' if value else 'false' elif isinstance(value, int): dst_value.integer = value elif isinstance(value, float): dst_value.real = value elif isinstance(value, str): dst_value.categorical = value else: raise ValueError(f'Not supported value type: {value}:{type(value)}') field.discrete_candidates.possible_values.extend(dst_values.possible_values[:]) return SearchSpace(field.children, parent_values=dst_values)
def choice(self, key: str, values: Union[List[int], List[float], List[str], List[bool]], merge: bool=False) -> 'SearchSpace': """Adds a hyperparameter with a list of possible values. Args: key: Name of the hyper-parameter. values: List of possible value for the hyperparameter. merge: If false (default), raises an error if the hyper-parameter already exist. If true, adds values to the parameter if it already exist. Returns: The conditional SearchSpace corresponding to the values in "values". """ if not values: raise ValueError('The list of values is empty') for field in self._fields: if field.name == key: field = field field = None if field is None: if merge: raise ValueError(f'Merge=true but the field {key} does not already exist') field = self._fields.add(name=key) if self._parent_values: field.parent_discrete_values.MergeFrom(self._parent_values) elif not merge: raise ValueError(f'The field {key} already exist') dst_values = hyperparameter_pb2.HyperParameterSpace.DiscreteCandidates() for value in values: dst_value = dst_values.possible_values.add() if isinstance(value, bool): dst_value.categorical = 'true' if value else 'false' elif isinstance(value, int): dst_value.integer = value elif isinstance(value, float): dst_value.real = value elif isinstance(value, str): dst_value.categorical = value else: raise ValueError(f'Not supported value type: {value}:{type(value)}') field.discrete_candidates.possible_values.extend(dst_values.possible_values[:]) return SearchSpace(field.children, parent_values=dst_values)
decision-forests
positive
def test_model_adult_structured_preprocessing_with_semantic(self): <DeepExtract> dataset_directory = os.path.join(ydf_test_data_path(), 'dataset') train_path = os.path.join(dataset_directory, 'adult_train.csv') test_path = os.path.join(dataset_directory, 'adult_test.csv') train = pd.read_csv(train_path) test = pd.read_csv(test_path) label = 'income' def clean(ds): ds[label] = np.where(ds[label] == '>50K', 1, 0) dataset = ds train = clean(train) test = clean(test) dataset = prepare_dataset(train, test, label, num_classes=2) </DeepExtract> <DeepExtract> if Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.AUTOMATIC_FEATURE_DISCOVERY: model = keras.RandomForestModel(**args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.FEATURES_WITHOUT_SEMANTIC: features = build_feature_usages(dataset, include_semantic=False) model = keras.RandomForestModel(features=features, **args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.FEATURES_WITH_SEMANTIC: features = build_feature_usages(dataset, include_semantic=True) model = keras.RandomForestModel(features=features, **args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.DENSE_PREPROCESSING: (raw_inputs, processed_inputs) = build_preprocessing(dataset) processed_inputs = layers.Concatenate()(processed_inputs) preprocessing = models.Model(inputs=raw_inputs, outputs=processed_inputs) model = keras.RandomForestModel(preprocessing=preprocessing, **args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.STRUCTURED_DICTIONARY_PREPROCESSING: (raw_inputs, processed_inputs) = build_preprocessing(dataset) processed_inputs = {value.name: value for value in processed_inputs} preprocessing = models.Model(inputs=raw_inputs, outputs=processed_inputs) model = keras.RandomForestModel(preprocessing=preprocessing, **args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.STRUCTURED_LIST_PREPROCESSING: (raw_inputs, processed_inputs) = build_preprocessing(dataset) preprocessing = models.Model(inputs=raw_inputs, outputs=processed_inputs) model = keras.RandomForestModel(preprocessing=preprocessing, **args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC: (raw_inputs, processed_inputs) = build_preprocessing(dataset) processed_inputs = {value.name: value for value in processed_inputs} preprocessing = models.Model(inputs=raw_inputs, outputs=processed_inputs) features = [] for key in processed_inputs.keys(): features.append(keras.FeatureUsage(key)) model = keras.RandomForestModel(preprocessing=preprocessing, features=features, **args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.DENSE_FEATURE_COLUMN: feature_columns = build_feature_columns(dataset, dense=True) preprocessing = layers.DenseFeatures(feature_columns) model = keras.RandomForestModel(preprocessing=preprocessing, **args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.ANY_FEATURE_COLUMN: feature_columns = build_feature_columns(dataset, dense=False) preprocessing = layers.DenseFeatures(feature_columns) model = keras.RandomForestModel(preprocessing=preprocessing, **args) else: assert False model = model </DeepExtract> <DeepExtract> self._check_adult_model(model=model, dataset=dataset, minimum_accuracy=0.859, check_serialization=check_serialization) </DeepExtract>
def test_model_adult_structured_preprocessing_with_semantic(self): dataset_directory = os.path.join(ydf_test_data_path(), 'dataset') train_path = os.path.join(dataset_directory, 'adult_train.csv') test_path = os.path.join(dataset_directory, 'adult_test.csv') train = pd.read_csv(train_path) test = pd.read_csv(test_path) label = 'income' def clean(ds): ds[label] = np.where(ds[label] == '>50K', 1, 0) dataset = ds train = clean(train) test = clean(test) dataset = prepare_dataset(train, test, label, num_classes=2) if Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.AUTOMATIC_FEATURE_DISCOVERY: model = keras.RandomForestModel(**args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.FEATURES_WITHOUT_SEMANTIC: features = build_feature_usages(dataset, include_semantic=False) model = keras.RandomForestModel(features=features, **args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.FEATURES_WITH_SEMANTIC: features = build_feature_usages(dataset, include_semantic=True) model = keras.RandomForestModel(features=features, **args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.DENSE_PREPROCESSING: (raw_inputs, processed_inputs) = build_preprocessing(dataset) processed_inputs = layers.Concatenate()(processed_inputs) preprocessing = models.Model(inputs=raw_inputs, outputs=processed_inputs) model = keras.RandomForestModel(preprocessing=preprocessing, **args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.STRUCTURED_DICTIONARY_PREPROCESSING: (raw_inputs, processed_inputs) = build_preprocessing(dataset) processed_inputs = {value.name: value for value in processed_inputs} preprocessing = models.Model(inputs=raw_inputs, outputs=processed_inputs) model = keras.RandomForestModel(preprocessing=preprocessing, **args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.STRUCTURED_LIST_PREPROCESSING: (raw_inputs, processed_inputs) = build_preprocessing(dataset) preprocessing = models.Model(inputs=raw_inputs, outputs=processed_inputs) model = keras.RandomForestModel(preprocessing=preprocessing, **args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC: (raw_inputs, processed_inputs) = build_preprocessing(dataset) processed_inputs = {value.name: value for value in processed_inputs} preprocessing = models.Model(inputs=raw_inputs, outputs=processed_inputs) features = [] for key in processed_inputs.keys(): features.append(keras.FeatureUsage(key)) model = keras.RandomForestModel(preprocessing=preprocessing, features=features, **args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.DENSE_FEATURE_COLUMN: feature_columns = build_feature_columns(dataset, dense=True) preprocessing = layers.DenseFeatures(feature_columns) model = keras.RandomForestModel(preprocessing=preprocessing, **args) elif Signature.STRUCTURED_PREPROCESSING_WITH_SEMANTIC == Signature.ANY_FEATURE_COLUMN: feature_columns = build_feature_columns(dataset, dense=False) preprocessing = layers.DenseFeatures(feature_columns) model = keras.RandomForestModel(preprocessing=preprocessing, **args) else: assert False model = model self._check_adult_model(model=model, dataset=dataset, minimum_accuracy=0.859, check_serialization=check_serialization)
decision-forests
positive
def toggle(self): if self.state is True: <DeepExtract> pass </DeepExtract> else: <DeepExtract> pass </DeepExtract>
def toggle(self): if self.state is True: pass else: pass
busy-beaver
positive
def addDirectoryRecursively(self, directory, verbose=False, removedSongsSHA256={}): if config.config['immutable_database']: print("Error: Can't add directory %s : The database is configured as immutable" % directory) return None songsIDs = [] for (dirpath, dirnames, filenames) in os.walk(directory, topdown=True): if verbose: print('New dir: %s' % dirpath) filenames.sort() dirnames.sort() for filename in filenames: if any((fnmatch.fnmatch(filename.lower(), pattern) for pattern in self.ignore_files)): continue path = os.path.join(dirpath, filename) <DeepExtract> if config.config['immutable_database']: print("Error: Can't add song %s : The database is configured as immutable" % path) id_ = None if MusicDatabase.isSongInDatabase(path): if verbose: print('Already in db: %s' % path) id_ = None print(f'Adding song {path}') song = Song(path, rootDir=directory) if not song.isValid: msg = f'Song {path} is not valid' raise Exception(msg) try: removedSongs = removedSongsSHA256[song.audioSha256sum()] except KeyError: pass else: if not MusicDatabase.songExistsInDatabase(path=song.path()): if len(removedSongs) > 1: msg = f'Choose the removed song that was moved to {path}:' options = [song.path() for song in removedSongs] selected = ask_user_to_choose_one_option(options, msg) removedSong = removedSongs[selected] else: removedSong = removedSongs[0] song.moveFrom(removedSong) else: removedPaths = '\n'.join([x.path() for x in removedSongs]) print(f"{removedPaths} was/were removed and {song.path()} is being updated but was already in database, so the removed song(s) won't be moved to it.") removedSong = None MusicDatabase.addSong(song) if False: MusicDatabase.commit() id_ = song.id </DeepExtract> if id_: songsIDs.append(id_) MusicDatabase.commit() for excludeDir in self.excludeDirectories: try: dirnames.remove(excludeDir) except ValueError: pass return songsIDs
def addDirectoryRecursively(self, directory, verbose=False, removedSongsSHA256={}): if config.config['immutable_database']: print("Error: Can't add directory %s : The database is configured as immutable" % directory) return None songsIDs = [] for (dirpath, dirnames, filenames) in os.walk(directory, topdown=True): if verbose: print('New dir: %s' % dirpath) filenames.sort() dirnames.sort() for filename in filenames: if any((fnmatch.fnmatch(filename.lower(), pattern) for pattern in self.ignore_files)): continue path = os.path.join(dirpath, filename) if config.config['immutable_database']: print("Error: Can't add song %s : The database is configured as immutable" % path) id_ = None if MusicDatabase.isSongInDatabase(path): if verbose: print('Already in db: %s' % path) id_ = None print(f'Adding song {path}') song = Song(path, rootDir=directory) if not song.isValid: msg = f'Song {path} is not valid' raise Exception(msg) try: removedSongs = removedSongsSHA256[song.audioSha256sum()] except KeyError: pass else: if not MusicDatabase.songExistsInDatabase(path=song.path()): if len(removedSongs) > 1: msg = f'Choose the removed song that was moved to {path}:' options = [song.path() for song in removedSongs] selected = ask_user_to_choose_one_option(options, msg) removedSong = removedSongs[selected] else: removedSong = removedSongs[0] song.moveFrom(removedSong) else: removedPaths = '\n'.join([x.path() for x in removedSongs]) print(f"{removedPaths} was/were removed and {song.path()} is being updated but was already in database, so the removed song(s) won't be moved to it.") removedSong = None MusicDatabase.addSong(song) if False: MusicDatabase.commit() id_ = song.id if id_: songsIDs.append(id_) MusicDatabase.commit() for excludeDir in self.excludeDirectories: try: dirnames.remove(excludeDir) except ValueError: pass return songsIDs
bard
positive
def _add_props_to_sym(node): sym = node.item <DeepExtract> if sym.direct_dep is self.n: sym.direct_dep = node.dep if node.dep is self.n: sym.direct_dep = sym.direct_dep if sym.direct_dep is self.y or node.dep is self.y: sym.direct_dep = self.y sym.direct_dep = (OR, sym.direct_dep, node.dep) </DeepExtract> sym.defaults += node.defaults sym.ranges += node.ranges sym.selects += node.selects sym.implies += node.implies for (target, cond) in node.selects: <DeepExtract> if target.rev_dep is self.n: target.rev_dep = self._make_and(sym, cond) if self._make_and(sym, cond) is self.n: target.rev_dep = target.rev_dep if target.rev_dep is self.y or self._make_and(sym, cond) is self.y: target.rev_dep = self.y target.rev_dep = (OR, target.rev_dep, self._make_and(sym, cond)) </DeepExtract> for (target, cond) in node.implies: <DeepExtract> if target.weak_rev_dep is self.n: target.weak_rev_dep = self._make_and(sym, cond) if self._make_and(sym, cond) is self.n: target.weak_rev_dep = target.weak_rev_dep if target.weak_rev_dep is self.y or self._make_and(sym, cond) is self.y: target.weak_rev_dep = self.y target.weak_rev_dep = (OR, target.weak_rev_dep, self._make_and(sym, cond)) </DeepExtract>
def _add_props_to_sym(node): sym = node.item if sym.direct_dep is self.n: sym.direct_dep = node.dep if node.dep is self.n: sym.direct_dep = sym.direct_dep if sym.direct_dep is self.y or node.dep is self.y: sym.direct_dep = self.y sym.direct_dep = (OR, sym.direct_dep, node.dep) sym.defaults += node.defaults sym.ranges += node.ranges sym.selects += node.selects sym.implies += node.implies for (target, cond) in node.selects: if target.rev_dep is self.n: target.rev_dep = self._make_and(sym, cond) if self._make_and(sym, cond) is self.n: target.rev_dep = target.rev_dep if target.rev_dep is self.y or self._make_and(sym, cond) is self.y: target.rev_dep = self.y target.rev_dep = (OR, target.rev_dep, self._make_and(sym, cond)) for (target, cond) in node.implies: if target.weak_rev_dep is self.n: target.weak_rev_dep = self._make_and(sym, cond) if self._make_and(sym, cond) is self.n: target.weak_rev_dep = target.weak_rev_dep if target.weak_rev_dep is self.y or self._make_and(sym, cond) is self.y: target.weak_rev_dep = self.y target.weak_rev_dep = (OR, target.weak_rev_dep, self._make_and(sym, cond))
cello
positive
def execute(self, args): res = RTResult() interpreter = Interpreter() <DeepExtract> new_context = Context(self.name, self.context, self.pos_start) new_context.symbol_table = SymbolTable(new_context.parent.symbol_table) exec_ctx = new_context </DeepExtract> res.register(self.check_and_populate_args(self.arg_names, args, exec_ctx)) if res.should_return(): return res value = res.register(interpreter.visit(self.body_node, exec_ctx)) if res.should_return() and res.func_return_value == None: return res ret_value = (value if self.should_auto_return else None) or res.func_return_value or Number.null return res.success(ret_value)
def execute(self, args): res = RTResult() interpreter = Interpreter() new_context = Context(self.name, self.context, self.pos_start) new_context.symbol_table = SymbolTable(new_context.parent.symbol_table) exec_ctx = new_context res.register(self.check_and_populate_args(self.arg_names, args, exec_ctx)) if res.should_return(): return res value = res.register(interpreter.visit(self.body_node, exec_ctx)) if res.should_return() and res.func_return_value == None: return res ret_value = (value if self.should_auto_return else None) or res.func_return_value or Number.null return res.success(ret_value)
Dip
positive
def test_delete_verify_change_reason_explicity(self): p = Poll.objects.create(question="what's up?", pub_date=today) poll_id = p.id p.delete() update_change_reason(p, 'wrongEntry') (delete_record, create_record) = Poll.history.all() <DeepExtract> for (key, value) in {'question': "what's up?", 'pub_date': today, 'id': poll_id, 'history_change_reason': None, 'history_type': '+'}.items(): self.assertEqual(getattr(create_record, key), value) self.assertEqual(create_record.history_object.__class__, Poll) for (key, value) in {'question': "what's up?", 'pub_date': today, 'id': poll_id, 'history_change_reason': None, 'history_type': '+'}.items(): if key not in ['history_type', 'history_change_reason']: self.assertEqual(getattr(create_record.history_object, key), value) </DeepExtract> <DeepExtract> for (key, value) in {'question': "what's up?", 'pub_date': today, 'id': poll_id, 'history_change_reason': 'wrongEntry', 'history_type': '-'}.items(): self.assertEqual(getattr(delete_record, key), value) self.assertEqual(delete_record.history_object.__class__, Poll) for (key, value) in {'question': "what's up?", 'pub_date': today, 'id': poll_id, 'history_change_reason': 'wrongEntry', 'history_type': '-'}.items(): if key not in ['history_type', 'history_change_reason']: self.assertEqual(getattr(delete_record.history_object, key), value) </DeepExtract>
def test_delete_verify_change_reason_explicity(self): p = Poll.objects.create(question="what's up?", pub_date=today) poll_id = p.id p.delete() update_change_reason(p, 'wrongEntry') (delete_record, create_record) = Poll.history.all() for (key, value) in {'question': "what's up?", 'pub_date': today, 'id': poll_id, 'history_change_reason': None, 'history_type': '+'}.items(): self.assertEqual(getattr(create_record, key), value) self.assertEqual(create_record.history_object.__class__, Poll) for (key, value) in {'question': "what's up?", 'pub_date': today, 'id': poll_id, 'history_change_reason': None, 'history_type': '+'}.items(): if key not in ['history_type', 'history_change_reason']: self.assertEqual(getattr(create_record.history_object, key), value) for (key, value) in {'question': "what's up?", 'pub_date': today, 'id': poll_id, 'history_change_reason': 'wrongEntry', 'history_type': '-'}.items(): self.assertEqual(getattr(delete_record, key), value) self.assertEqual(delete_record.history_object.__class__, Poll) for (key, value) in {'question': "what's up?", 'pub_date': today, 'id': poll_id, 'history_change_reason': 'wrongEntry', 'history_type': '-'}.items(): if key not in ['history_type', 'history_change_reason']: self.assertEqual(getattr(delete_record.history_object, key), value)
django-simple-history
positive
def getNetworkApplianceSecurityIntrusion(apiKey, networkId): url = '/networks/' + str(networkId) + '/appliance/security/intrusion' <DeepExtract> if p_retry > API_MAX_RETRIES: if FLAG_REQUEST_VERBOSE: print('ERROR: Reached max retries') (success, errors, headers, response) = (False, None, None, None) bearerString = 'Bearer ' + str(apiKey) headers = {'Authorization': bearerString} if not p_additionalHeaders is None: headers.update(p_additionalHeaders) query = '' if not p_queryItems is None: qArrayFix = {} for item in p_queryItems: if isinstance(p_queryItems[item], list): qArrayFix['%s[]' % item] = p_queryItems[item] else: qArrayFix[item] = p_queryItems[item] query = '?' + urlencode(qArrayFix, True) url = API_BASE_URL + url + query verb = 'get'.upper() session = NoRebuildAuthSession() verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}} try: if FLAG_REQUEST_VERBOSE: print(verb, url) if verb in verbs: if verbs[verb]['hasBody'] and (not p_requestBody is None): r = verbs[verb]['function'](url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: (success, errors, headers, response) = (False, None, None, None) except: (success, errors, headers, response) = (False, None, None, None) if FLAG_REQUEST_VERBOSE: print(r.status_code) success = r.status_code in range(200, 299) errors = None responseHeaders = None responseBody = None if r.status_code == API_STATUS_RATE_LIMIT: retryInterval = API_RETRY_DEFAULT_WAIT if 'Retry-After' in r.headers: retryInterval = r.headers['Retry-After'] if 'retry-after' in r.headers: retryInterval = r.headers['retry-after'] if FLAG_REQUEST_VERBOSE: print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval)) time.sleep(int(retryInterval)) (success, errors, responseHeaders, responseBody) = merakiRequest(apiKey, 'get', url, p_additionalHeaders, p_queryItems, p_requestBody, FLAG_REQUEST_VERBOSE, p_retry + 1) (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) try: rjson = r.json() except: rjson = None if not rjson is None: if 'errors' in rjson: errors = rjson['errors'] if FLAG_REQUEST_VERBOSE: print(errors) else: responseBody = rjson if 'Link' in r.headers: parsedLinks = utils.parse_header_links(r.headers['Link']) for link in parsedLinks: if link['rel'] == 'next': if FLAG_REQUEST_VERBOSE: print('Next page:', link['url']) splitLink = link['url'].split('/api/v1') (success, errors, responseHeaders, nextBody) = merakiRequest(apiKey, 'get', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=FLAG_REQUEST_VERBOSE) if success: if not responseBody is None: responseBody = responseBody + nextBody else: responseBody = None (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) </DeepExtract> return (success, errors, response)
def getNetworkApplianceSecurityIntrusion(apiKey, networkId): url = '/networks/' + str(networkId) + '/appliance/security/intrusion' if p_retry > API_MAX_RETRIES: if FLAG_REQUEST_VERBOSE: print('ERROR: Reached max retries') (success, errors, headers, response) = (False, None, None, None) bearerString = 'Bearer ' + str(apiKey) headers = {'Authorization': bearerString} if not p_additionalHeaders is None: headers.update(p_additionalHeaders) query = '' if not p_queryItems is None: qArrayFix = {} for item in p_queryItems: if isinstance(p_queryItems[item], list): qArrayFix['%s[]' % item] = p_queryItems[item] else: qArrayFix[item] = p_queryItems[item] query = '?' + urlencode(qArrayFix, True) url = API_BASE_URL + url + query verb = 'get'.upper() session = NoRebuildAuthSession() verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}} try: if FLAG_REQUEST_VERBOSE: print(verb, url) if verb in verbs: if verbs[verb]['hasBody'] and (not p_requestBody is None): r = verbs[verb]['function'](url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: (success, errors, headers, response) = (False, None, None, None) except: (success, errors, headers, response) = (False, None, None, None) if FLAG_REQUEST_VERBOSE: print(r.status_code) success = r.status_code in range(200, 299) errors = None responseHeaders = None responseBody = None if r.status_code == API_STATUS_RATE_LIMIT: retryInterval = API_RETRY_DEFAULT_WAIT if 'Retry-After' in r.headers: retryInterval = r.headers['Retry-After'] if 'retry-after' in r.headers: retryInterval = r.headers['retry-after'] if FLAG_REQUEST_VERBOSE: print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval)) time.sleep(int(retryInterval)) (success, errors, responseHeaders, responseBody) = merakiRequest(apiKey, 'get', url, p_additionalHeaders, p_queryItems, p_requestBody, FLAG_REQUEST_VERBOSE, p_retry + 1) (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) try: rjson = r.json() except: rjson = None if not rjson is None: if 'errors' in rjson: errors = rjson['errors'] if FLAG_REQUEST_VERBOSE: print(errors) else: responseBody = rjson if 'Link' in r.headers: parsedLinks = utils.parse_header_links(r.headers['Link']) for link in parsedLinks: if link['rel'] == 'next': if FLAG_REQUEST_VERBOSE: print('Next page:', link['url']) splitLink = link['url'].split('/api/v1') (success, errors, responseHeaders, nextBody) = merakiRequest(apiKey, 'get', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=FLAG_REQUEST_VERBOSE) if success: if not responseBody is None: responseBody = responseBody + nextBody else: responseBody = None (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) return (success, errors, response)
automation-scripts
positive
def resolution_heuristic(self, problem): """Resolve using custom insertion if possible.""" if isinstance(self.pattern, DnaNotationPattern): <DeepExtract> matches = self.pattern.find_matches(problem.sequence, self.location) score = -abs(len(matches) - self.occurences) if score == 0: message = 'Passed. Pattern found at positions %s' % matches elif self.occurences == 0: message = 'Failed. Pattern not found.' else: message = 'Failed. Pattern found %d times instead of %d wanted, at locations %s' % (len(matches), self.occurences, matches) evaluation = SpecEvaluation(self, problem, score, message=message, locations=[self.location], data=dict(matches=matches)) </DeepExtract> if evaluation.passes: return n_matches = len(evaluation.data['matches']) if n_matches < self.occurences: other_constraints = [c for c in problem.constraints if c is not self] new_problem = problem for i in range(self.occurences - n_matches): new_occurence_cst = self.copy_with_changes(occurences=n_matches + i + 1) new_problem = DnaOptimizationProblem(sequence=new_problem.sequence, constraints=other_constraints + [new_occurence_cst], mutation_space=problem.mutation_space) new_occurence_cst.insert_pattern_in_problem(new_problem) problem.sequence = new_problem.sequence return problem.resolve_constraints_locally()
def resolution_heuristic(self, problem): """Resolve using custom insertion if possible.""" if isinstance(self.pattern, DnaNotationPattern): matches = self.pattern.find_matches(problem.sequence, self.location) score = -abs(len(matches) - self.occurences) if score == 0: message = 'Passed. Pattern found at positions %s' % matches elif self.occurences == 0: message = 'Failed. Pattern not found.' else: message = 'Failed. Pattern found %d times instead of %d wanted, at locations %s' % (len(matches), self.occurences, matches) evaluation = SpecEvaluation(self, problem, score, message=message, locations=[self.location], data=dict(matches=matches)) if evaluation.passes: return n_matches = len(evaluation.data['matches']) if n_matches < self.occurences: other_constraints = [c for c in problem.constraints if c is not self] new_problem = problem for i in range(self.occurences - n_matches): new_occurence_cst = self.copy_with_changes(occurences=n_matches + i + 1) new_problem = DnaOptimizationProblem(sequence=new_problem.sequence, constraints=other_constraints + [new_occurence_cst], mutation_space=problem.mutation_space) new_occurence_cst.insert_pattern_in_problem(new_problem) problem.sequence = new_problem.sequence return problem.resolve_constraints_locally()
DnaChisel
positive
def traverse(x, N, stack, F, X, R, FP): stack.append(x) d = len(stack) N[x] = d F[x] = FP(x) rel = R(x) for y in rel: if N[y] == 0: <DeepExtract> stack.append(y) d = len(stack) N[y] = d F[y] = FP(y) rel = R(y) for y in rel: if N[y] == 0: traverse(y, N, stack, F, X, R, FP) N[y] = min(N[y], N[y]) for a in F.get(y, []): if a not in F[y]: F[y].append(a) if N[y] == d: N[stack[-1]] = MAXINT F[stack[-1]] = F[y] element = stack.pop() while element != y: N[stack[-1]] = MAXINT F[stack[-1]] = F[y] element = stack.pop() </DeepExtract> N[x] = min(N[x], N[y]) for a in F.get(y, []): if a not in F[x]: F[x].append(a) if N[x] == d: N[stack[-1]] = MAXINT F[stack[-1]] = F[x] element = stack.pop() while element != x: N[stack[-1]] = MAXINT F[stack[-1]] = F[x] element = stack.pop()
def traverse(x, N, stack, F, X, R, FP): stack.append(x) d = len(stack) N[x] = d F[x] = FP(x) rel = R(x) for y in rel: if N[y] == 0: stack.append(y) d = len(stack) N[y] = d F[y] = FP(y) rel = R(y) for y in rel: if N[y] == 0: traverse(y, N, stack, F, X, R, FP) N[y] = min(N[y], N[y]) for a in F.get(y, []): if a not in F[y]: F[y].append(a) if N[y] == d: N[stack[-1]] = MAXINT F[stack[-1]] = F[y] element = stack.pop() while element != y: N[stack[-1]] = MAXINT F[stack[-1]] = F[y] element = stack.pop() N[x] = min(N[x], N[y]) for a in F.get(y, []): if a not in F[x]: F[x].append(a) if N[x] == d: N[stack[-1]] = MAXINT F[stack[-1]] = F[x] element = stack.pop() while element != x: N[stack[-1]] = MAXINT F[stack[-1]] = F[x] element = stack.pop()
demo2program
positive
@botslib.log_session def outcommunicate(self): outputdir = botslib.join(self.channeldict['path']) botslib.dirshouldbethere(outputdir) filename_mask = self.channeldict['filename'] if self.channeldict['filename'] else '*' if '{overwrite}' in filename_mask: filename_mask = filename_mask.replace('{overwrite}', '') mode = 'wb' else: mode = 'ab' for row in botslib.query('SELECT idta,filename,numberofresends\n FROM ta\n WHERE idta>%(rootidta)s\n AND status=%(status)s\n AND statust=%(statust)s\n AND tochannel=%(tochannel)s ', {'tochannel': self.channeldict['idchannel'], 'rootidta': self.rootidta, 'status': FILEOUT, 'statust': OK}): try: ta_from = botslib.OldTransaction(row[str('idta')]) ta_to = ta_from.copyta(status=EXTERNOUT) <DeepExtract> class infilestr(str): """ class for the {infile} parameter """ def __format__(self, format_spec): if not format_spec: tofilename = unicode(self) (name, ext) = os.path.splitext(unicode(self)) if format_spec == 'ext': if ext.startswith('.'): ext = ext[1:] tofilename = ext if format_spec == 'name': tofilename = name raise botslib.CommunicationOutError(_('Error in format of "{filename}": unknown format: "%(format)s".'), {'format': format_spec}) ta_from.unique = unicode(botslib.unique(self.channeldict['idchannel'])) tofilename = filename_mask.replace('*', '{unique}') ta_from.synall() if '{infile' in tofilename: ta_list = botslib.trace_origin(ta=ta_from, where={'status': EXTERNIN}) if ta_list: ta_from.infilename = infilestr(os.path.basename(ta_list[-1].filename)) else: ta_from.infilename = '' if '{datetime' in tofilename: if botsglobal.ini.getboolean('acceptance', 'runacceptancetest', False): ta_from.datetime = datetime.datetime.strptime('2013-01-23 01:23:45', '%Y-%m-%d %H:%M:%S') else: ta_from.datetime = datetime.datetime.now() try: tofilename = tofilename.format(**ta_from.__dict__) except: txt = botslib.txtexc() raise botslib.CommunicationOutError(_('Error in formatting outgoing filename "%(filename)s". Error: "%(error)s".'), {'filename': tofilename, 'error': txt}) if self.userscript and hasattr(self.userscript, 'filename'): tofilename = botslib.runscript(self.userscript, self.scriptname, 'filename', channeldict=self.channeldict, filename=tofilename, ta=ta_from) else: tofilename = tofilename </DeepExtract> tofilename = botslib.join(outputdir, tofilename) tofile = open(tofilename, mode) fromfile = botslib.opendata_bin(row[str('filename')], 'rb') shutil.copyfileobj(fromfile, tofile, 1048576) fromfile.close() tofile.close() if botslib.tryrunscript(self.userscript, self.scriptname, 'main', channeldict=self.channeldict, filename=tofilename, ta=ta_from): if self.channeldict['remove']: os.remove(tofilename) except: txt = botslib.txtexc() ta_to.update(statust=ERROR, errortext=txt, numberofresends=row[str('numberofresends')] + 1) else: ta_to.update(statust=DONE, filename=tofilename, numberofresends=row[str('numberofresends')] + 1) finally: ta_from.update(statust=DONE)
@botslib.log_session def outcommunicate(self): outputdir = botslib.join(self.channeldict['path']) botslib.dirshouldbethere(outputdir) filename_mask = self.channeldict['filename'] if self.channeldict['filename'] else '*' if '{overwrite}' in filename_mask: filename_mask = filename_mask.replace('{overwrite}', '') mode = 'wb' else: mode = 'ab' for row in botslib.query('SELECT idta,filename,numberofresends\n FROM ta\n WHERE idta>%(rootidta)s\n AND status=%(status)s\n AND statust=%(statust)s\n AND tochannel=%(tochannel)s ', {'tochannel': self.channeldict['idchannel'], 'rootidta': self.rootidta, 'status': FILEOUT, 'statust': OK}): try: ta_from = botslib.OldTransaction(row[str('idta')]) ta_to = ta_from.copyta(status=EXTERNOUT) class infilestr(str): """ class for the {infile} parameter """ def __format__(self, format_spec): if not format_spec: tofilename = unicode(self) (name, ext) = os.path.splitext(unicode(self)) if format_spec == 'ext': if ext.startswith('.'): ext = ext[1:] tofilename = ext if format_spec == 'name': tofilename = name raise botslib.CommunicationOutError(_('Error in format of "{filename}": unknown format: "%(format)s".'), {'format': format_spec}) ta_from.unique = unicode(botslib.unique(self.channeldict['idchannel'])) tofilename = filename_mask.replace('*', '{unique}') ta_from.synall() if '{infile' in tofilename: ta_list = botslib.trace_origin(ta=ta_from, where={'status': EXTERNIN}) if ta_list: ta_from.infilename = infilestr(os.path.basename(ta_list[-1].filename)) else: ta_from.infilename = '' if '{datetime' in tofilename: if botsglobal.ini.getboolean('acceptance', 'runacceptancetest', False): ta_from.datetime = datetime.datetime.strptime('2013-01-23 01:23:45', '%Y-%m-%d %H:%M:%S') else: ta_from.datetime = datetime.datetime.now() try: tofilename = tofilename.format(**ta_from.__dict__) except: txt = botslib.txtexc() raise botslib.CommunicationOutError(_('Error in formatting outgoing filename "%(filename)s". Error: "%(error)s".'), {'filename': tofilename, 'error': txt}) if self.userscript and hasattr(self.userscript, 'filename'): tofilename = botslib.runscript(self.userscript, self.scriptname, 'filename', channeldict=self.channeldict, filename=tofilename, ta=ta_from) else: tofilename = tofilename tofilename = botslib.join(outputdir, tofilename) tofile = open(tofilename, mode) fromfile = botslib.opendata_bin(row[str('filename')], 'rb') shutil.copyfileobj(fromfile, tofile, 1048576) fromfile.close() tofile.close() if botslib.tryrunscript(self.userscript, self.scriptname, 'main', channeldict=self.channeldict, filename=tofilename, ta=ta_from): if self.channeldict['remove']: os.remove(tofilename) except: txt = botslib.txtexc() ta_to.update(statust=ERROR, errortext=txt, numberofresends=row[str('numberofresends')] + 1) else: ta_to.update(statust=DONE, filename=tofilename, numberofresends=row[str('numberofresends')] + 1) finally: ta_from.update(statust=DONE)
bots
positive
def test_337_centos7_postgres_user_dockerfile(self): """ WHEN using a dockerfile for systemd-enabled CentOS 7 and python2, THEN we can create an image with an PostgreSql DB service being installed and enabled. AND in this variant it runs under User=postgres right there from PID-1 started implicity in --user mode.""" if not os.path.exists(DOCKER_SOCKET): self.skipTest('docker-based test') if not os.path.exists(PSQL_TOOL): self.skipTest('postgres tools missing on host') docker = _docker curl = _curl <DeepExtract> name = self.caller_testname() if suffix: testname = name + '_' + suffix testname = name </DeepExtract> <DeepExtract> testname = testname or self.caller_testname() newdir = 'tmp/tmp.' + testname if os.path.isdir(newdir): shutil.rmtree(newdir) os.makedirs(newdir) testdir = newdir </DeepExtract> name = 'centos7-postgres' dockerfile = 'centos7-postgres-user.dockerfile' <DeepExtract> image = '' for line in open(dockerfile): m = re.match('[Ff][Rr][Oo][Mm] *"([^"]*)"', line) if m: image = m.group(1) break m = re.match('[Ff][Rr][Oo][Mm] *(\\w[^ ]*)', line) if m: image = m.group(1).strip() break logg.debug("--\n-- '%s' FROM '%s'", dockerfile, image) if image: addhosts = self.start_mirror(image, extras) addhosts = '' </DeepExtract> <DeepExtract> savename = os.path.splitext(os.path.basename(dockerfile))[0] </DeepExtract> saveto = SAVETO images = IMAGES psql = PSQL_TOOL runtime = RUNTIME <DeepExtract> if _password: password = _password out = 'Password.' out += random.choice(string.ascii_uppercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(',.-+') out += random.choice('0123456789') out += random.choice('0123456789') password = out </DeepExtract> testpass = 'Pass.' + password cmd = '{docker} build . 
-f {dockerfile} {addhosts} --build-arg PASSWORD={password} --build-arg TESTPASS={testpass} --tag {images}:{testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} rm --force {testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} run -d --name {testname} {images}:{testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> <DeepExtract> docker = _docker cmd = '{docker} inspect {name}' values = output(cmd.format(**locals())) values = json.loads(values) if not values or 'NetworkSettings' not in values[0]: logg.critical(' docker inspect %s => %s ', testname, values) container = values[0]['NetworkSettings']['IPAddress'] </DeepExtract> cmd = 'for i in 1 2 3 4 5 6 7 8 9; do echo -n "[$i] "; pg_isready -h {container} && break; sleep 2; done' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> login = 'export PGUSER=testuser_11; export PGPASSWORD=' + testpass query = 'SELECT rolname FROM pg_roles' cmd = "{login}; {psql} -h {container} -d postgres -c '{query}' > {testdir}/{testname}.txt" <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = 'grep testuser_ok {testdir}/{testname}.txt' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> uid = 'postgres' cmd = '{docker} exec {testname} id -u {uid}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) run = subprocess.Popen(cmd.format(**locals()), shell=shell, stdout=subprocess.PIPE) (out, err) = run.communicate() out = out </DeepExtract> if out: uid = decodes(out).strip() cmd = '{docker} exec {testname} ls {runtime}{uid}/run' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = "{docker} exec {testname} bash -c 'for i in 1 2 3 4 5 ; do wc -l {runtime}{uid}/run/postgresql.service.status && break; sleep 2; done'" <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': 
%s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} cp {testname}:{runtime}{uid}/run/postgresql.service.status {testdir}/postgresql.service.status' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} exec {testname} ps axu' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) run = subprocess.Popen(cmd.format(**locals()), shell=shell, stdout=subprocess.PIPE) (out, err) = run.communicate() (out, end) = (decodes(out), run.returncode) </DeepExtract> logg.info(' %s =>%s\n%s', cmd, end, out) self.assertTrue(greps(out, 'postgres.*python.*systemctl')) self.assertFalse(greps(out, 'root')) cmd = '{docker} stop {testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} rm --force {testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} rmi {saveto}/{savename}:latest' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} tag {images}:{testname} {saveto}/{savename}:latest' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) </DeepExtract> cmd = '{docker} rmi {images}:{testname}' <DeepExtract> if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) </DeepExtract> <DeepExtract> testname = testname or self.caller_testname() newdir = 'tmp/tmp.' + testname if os.path.isdir(newdir): shutil.rmtree(newdir) return newdir </DeepExtract>
def test_337_centos7_postgres_user_dockerfile(self): """ WHEN using a dockerfile for systemd-enabled CentOS 7 and python2, THEN we can create an image with an PostgreSql DB service being installed and enabled. AND in this variant it runs under User=postgres right there from PID-1 started implicity in --user mode.""" if not os.path.exists(DOCKER_SOCKET): self.skipTest('docker-based test') if not os.path.exists(PSQL_TOOL): self.skipTest('postgres tools missing on host') docker = _docker curl = _curl name = self.caller_testname() if suffix: testname = name + '_' + suffix testname = name testname = testname or self.caller_testname() newdir = 'tmp/tmp.' + testname if os.path.isdir(newdir): shutil.rmtree(newdir) os.makedirs(newdir) testdir = newdir name = 'centos7-postgres' dockerfile = 'centos7-postgres-user.dockerfile' image = '' for line in open(dockerfile): m = re.match('[Ff][Rr][Oo][Mm] *"([^"]*)"', line) if m: image = m.group(1) break m = re.match('[Ff][Rr][Oo][Mm] *(\\w[^ ]*)', line) if m: image = m.group(1).strip() break logg.debug("--\n-- '%s' FROM '%s'", dockerfile, image) if image: addhosts = self.start_mirror(image, extras) addhosts = '' savename = os.path.splitext(os.path.basename(dockerfile))[0] saveto = SAVETO images = IMAGES psql = PSQL_TOOL runtime = RUNTIME if _password: password = _password out = 'Password.' out += random.choice(string.ascii_uppercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(string.ascii_lowercase) out += random.choice(',.-+') out += random.choice('0123456789') out += random.choice('0123456789') password = out testpass = 'Pass.' + password cmd = '{docker} build . -f {dockerfile} {addhosts} --build-arg PASSWORD={password} --build-arg TESTPASS={testpass} --tag {images}:{testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} rm --force {testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) cmd = '{docker} run -d --name {testname} {images}:{testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) docker = _docker cmd = '{docker} inspect {name}' values = output(cmd.format(**locals())) values = json.loads(values) if not values or 'NetworkSettings' not in values[0]: logg.critical(' docker inspect %s => %s ', testname, values) container = values[0]['NetworkSettings']['IPAddress'] cmd = 'for i in 1 2 3 4 5 6 7 8 9; do echo -n "[$i] "; pg_isready -h {container} && break; sleep 2; done' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) login = 'export PGUSER=testuser_11; export PGPASSWORD=' + testpass query = 'SELECT rolname FROM pg_roles' cmd = "{login}; {psql} -h {container} -d postgres -c '{query}' > {testdir}/{testname}.txt" if isinstance(cmd.format(**locals()), 
basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = 'grep testuser_ok {testdir}/{testname}.txt' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) uid = 'postgres' cmd = '{docker} exec {testname} id -u {uid}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) run = subprocess.Popen(cmd.format(**locals()), shell=shell, stdout=subprocess.PIPE) (out, err) = run.communicate() out = out if out: uid = decodes(out).strip() cmd = '{docker} exec {testname} ls {runtime}{uid}/run' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = "{docker} exec {testname} bash -c 'for i in 1 2 3 4 5 ; do wc -l {runtime}{uid}/run/postgresql.service.status && break; sleep 2; done'" if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} cp {testname}:{runtime}{uid}/run/postgresql.service.status {testdir}/postgresql.service.status' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} exec {testname} ps axu' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) run = subprocess.Popen(cmd.format(**locals()), shell=shell, stdout=subprocess.PIPE) (out, err) = run.communicate() (out, end) = (decodes(out), run.returncode) logg.info(' %s =>%s\n%s', cmd, end, out) self.assertTrue(greps(out, 'postgres.*python.*systemctl')) self.assertFalse(greps(out, 'root')) cmd = '{docker} stop {testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} rm --force {testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} rmi {saveto}/{savename}:latest' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) cmd = '{docker} tag {images}:{testname} {saveto}/{savename}:latest' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in 
cmd.format(**locals())])) return subprocess.check_call(cmd.format(**locals()), shell=shell) cmd = '{docker} rmi {images}:{testname}' if isinstance(cmd.format(**locals()), basestring): logg.info(': %s', cmd.format(**locals())) else: logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())])) return subprocess.call(cmd.format(**locals()), shell=shell) testname = testname or self.caller_testname() newdir = 'tmp/tmp.' + testname if os.path.isdir(newdir): shutil.rmtree(newdir) return newdir
docker-systemctl-images
positive
@s3_request def s3_get(url: str, temp_file: IO) -> None: """Pull a file directly from S3.""" s3_resource = boto3.resource('s3') <DeepExtract> parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError('bad s3 path {}'.format(url)) bucket_name = parsed.netloc s3_path = parsed.path if s3_path.startswith('/'): s3_path = s3_path[1:] (bucket_name, s3_path) = (bucket_name, s3_path) </DeepExtract> s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
@s3_request def s3_get(url: str, temp_file: IO) -> None: """Pull a file directly from S3.""" s3_resource = boto3.resource('s3') parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError('bad s3 path {}'.format(url)) bucket_name = parsed.netloc s3_path = parsed.path if s3_path.startswith('/'): s3_path = s3_path[1:] (bucket_name, s3_path) = (bucket_name, s3_path) s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
ACE
positive
def getFoamRuntime(): global docker_container if docker_container == None: docker_container = DockerContainer() if docker_container.usedocker: return 'PosixDocker' <DeepExtract> global docker_container if docker_container == None: docker_container = DockerContainer() if docker_container.usedocker: installation_path = '' prefs = getPreferencesLocation() installation_path = FreeCAD.ParamGet(prefs).GetString('InstallationPath', '') setFoamDir(installation_path) if not installation_path: installation_path = detectFoamDir() if installation_path: installation_path = os.path.normpath(installation_path) installation_path = installation_path </DeepExtract> if installation_path is None: raise IOError('OpenFOAM installation path not set and not detected') runtime = None if platform.system() == 'Windows': if os.path.exists(os.path.join(installation_path, 'msys64', 'home', 'ofuser', '.blueCFDCore')): runtime = 'BlueCFD' elif os.path.exists(os.path.join(installation_path, '..', 'msys64', 'home', 'ofuser', '.blueCFDCore')): runtime = 'BlueCFD2' elif os.path.exists(os.path.join(installation_path, 'msys64', 'home', 'ofuser')): runtime = 'MinGW' elif os.path.exists(os.path.join(installation_path, 'Windows', 'Scripts')): runtime = 'WindowsDocker' elif os.path.exists(os.path.join(getFoamDir(), 'etc', 'bashrc')): runtime = 'BashWSL' else: if not len(getFoamDir()): runtime = 'PosixPreloaded' if os.path.exists(os.path.join(getFoamDir(), 'etc', 'bashrc')): runtime = 'Posix' if not runtime: raise IOError('The directory {} is not a recognised OpenFOAM installation'.format(installation_path)) return runtime
def getFoamRuntime(): global docker_container if docker_container == None: docker_container = DockerContainer() if docker_container.usedocker: return 'PosixDocker' global docker_container if docker_container == None: docker_container = DockerContainer() if docker_container.usedocker: installation_path = '' prefs = getPreferencesLocation() installation_path = FreeCAD.ParamGet(prefs).GetString('InstallationPath', '') setFoamDir(installation_path) if not installation_path: installation_path = detectFoamDir() if installation_path: installation_path = os.path.normpath(installation_path) installation_path = installation_path if installation_path is None: raise IOError('OpenFOAM installation path not set and not detected') runtime = None if platform.system() == 'Windows': if os.path.exists(os.path.join(installation_path, 'msys64', 'home', 'ofuser', '.blueCFDCore')): runtime = 'BlueCFD' elif os.path.exists(os.path.join(installation_path, '..', 'msys64', 'home', 'ofuser', '.blueCFDCore')): runtime = 'BlueCFD2' elif os.path.exists(os.path.join(installation_path, 'msys64', 'home', 'ofuser')): runtime = 'MinGW' elif os.path.exists(os.path.join(installation_path, 'Windows', 'Scripts')): runtime = 'WindowsDocker' elif os.path.exists(os.path.join(getFoamDir(), 'etc', 'bashrc')): runtime = 'BashWSL' else: if not len(getFoamDir()): runtime = 'PosixPreloaded' if os.path.exists(os.path.join(getFoamDir(), 'etc', 'bashrc')): runtime = 'Posix' if not runtime: raise IOError('The directory {} is not a recognised OpenFOAM installation'.format(installation_path)) return runtime
CfdOF
positive
def compile_embedder(self, vocab: Vocabulary) -> TextFieldEmbedder: """Creates the embedder based on the configured input features Parameters ---------- vocab The vocabulary for which to create the embedder Returns ------- embedder """ <DeepExtract> configuration = {feature.namespace: feature.config for feature in self} configuration = copy.deepcopy(configuration) </DeepExtract> try: configuration['word']['embedder']['pretrained_file'] = None except KeyError: pass text_field_embedder = TextFieldEmbedder.from_params(Params({'token_embedders': {feature_namespace: config['embedder'] for (feature_namespace, config) in configuration.items()}}), vocab=vocab) if self.word is not None: setattr(getattr(text_field_embedder, f'token_embedder_{self.word.namespace}'), '_pretrained_file', self.word.weights_file) return text_field_embedder
def compile_embedder(self, vocab: Vocabulary) -> TextFieldEmbedder: """Creates the embedder based on the configured input features Parameters ---------- vocab The vocabulary for which to create the embedder Returns ------- embedder """ configuration = {feature.namespace: feature.config for feature in self} configuration = copy.deepcopy(configuration) try: configuration['word']['embedder']['pretrained_file'] = None except KeyError: pass text_field_embedder = TextFieldEmbedder.from_params(Params({'token_embedders': {feature_namespace: config['embedder'] for (feature_namespace, config) in configuration.items()}}), vocab=vocab) if self.word is not None: setattr(getattr(text_field_embedder, f'token_embedder_{self.word.namespace}'), '_pretrained_file', self.word.weights_file) return text_field_embedder
biome-text
positive
@member_required def new(request, board_id): <DeepExtract> if user_is_member(request.user): member = request.user.member member = None </DeepExtract> <DeepExtract> try: board = get_user_boards(request.user).get(id=board_id) except Board.DoesNotExist: raise Http404 </DeepExtract> work_hours_package = WeeklyRecurrentCard(creator=member, board=board) return model_views.new(request, instance=work_hours_package, form_class=WeeklyRecurrentCardForm, extra_form_parameters={'member': member, 'board': board}, template_path='recurrent_cards/new.html', ok_url=reverse('boards:recurrent_cards:view_list', args=(board_id,)))
@member_required def new(request, board_id): if user_is_member(request.user): member = request.user.member member = None try: board = get_user_boards(request.user).get(id=board_id) except Board.DoesNotExist: raise Http404 work_hours_package = WeeklyRecurrentCard(creator=member, board=board) return model_views.new(request, instance=work_hours_package, form_class=WeeklyRecurrentCardForm, extra_form_parameters={'member': member, 'board': board}, template_path='recurrent_cards/new.html', ok_url=reverse('boards:recurrent_cards:view_list', args=(board_id,)))
djanban
positive
def _dispatch(self, action, client_id=None): if self._closed: raise RuntimeError('page is closed') <DeepExtract> self._version += 1 </DeepExtract> if not self._started: return action['version'] = self._version self._ws_server.dispatch_from_thread(action, client_id)
def _dispatch(self, action, client_id=None): if self._closed: raise RuntimeError('page is closed') self._version += 1 if not self._started: return action['version'] = self._version self._ws_server.dispatch_from_thread(action, client_id)
awe
positive
def staxx_query(self, data): <DeepExtract> headers = {'Content-Type': 'application/json'} </DeepExtract> <DeepExtract> auth_data = {'username': self.username, 'password': self.password} </DeepExtract> r = requests.post(self.auth_url, json=auth_data, headers=headers, verify=self.ssl) r.raise_for_status() token_id = r.json()['token_id'] pull_data = {'token': token_id, 'query': data, 'type': 'json'} p = requests.post(self.query_url, json=pull_data, headers=headers, verify=self.ssl) p.raise_for_status() return p.json()
def staxx_query(self, data): headers = {'Content-Type': 'application/json'} auth_data = {'username': self.username, 'password': self.password} r = requests.post(self.auth_url, json=auth_data, headers=headers, verify=self.ssl) r.raise_for_status() token_id = r.json()['token_id'] pull_data = {'token': token_id, 'query': data, 'type': 'json'} p = requests.post(self.query_url, json=pull_data, headers=headers, verify=self.ssl) p.raise_for_status() return p.json()
Cortex-Analyzers
positive
def test_alias_max_concurreny(self): ref_value = 10 config = TransferConfig(max_concurrency=ref_value) <DeepExtract> assert getattr(config, 'max_request_concurrency') == ref_value assert getattr(config, 'max_concurrency') == ref_value </DeepExtract> new_value = 15 config.max_concurrency = new_value <DeepExtract> assert getattr(config, 'max_request_concurrency') == new_value assert getattr(config, 'max_concurrency') == new_value </DeepExtract>
def test_alias_max_concurreny(self): ref_value = 10 config = TransferConfig(max_concurrency=ref_value) assert getattr(config, 'max_request_concurrency') == ref_value assert getattr(config, 'max_concurrency') == ref_value new_value = 15 config.max_concurrency = new_value assert getattr(config, 'max_request_concurrency') == new_value assert getattr(config, 'max_concurrency') == new_value
boto3
positive
def testParams(self): <DeepExtract> p = parser.Parser('? == 1 and ? == 2', params=[1, 2]) self.assertEqual(ast.Intersection(ast.Equivalence(ast.Literal(1), ast.Literal(1)), ast.Equivalence(ast.Literal(2), ast.Literal(2))), p.parse()) </DeepExtract> <DeepExtract> p = parser.Parser('{1} == 1 and {0} == 2', params=[1, 2]) self.assertEqual(ast.Intersection(ast.Equivalence(ast.Literal(2), ast.Literal(1)), ast.Equivalence(ast.Literal(1), ast.Literal(2))), p.parse()) </DeepExtract> <DeepExtract> p = parser.Parser('{bar} = 1 and {foo} = 2', params=dict(bar='foo', foo=1)) self.assertEqual(ast.Intersection(ast.Equivalence(ast.Literal('foo'), ast.Literal(1)), ast.Equivalence(ast.Literal(1), ast.Literal(2))), p.parse()) </DeepExtract>
def testParams(self): p = parser.Parser('? == 1 and ? == 2', params=[1, 2]) self.assertEqual(ast.Intersection(ast.Equivalence(ast.Literal(1), ast.Literal(1)), ast.Equivalence(ast.Literal(2), ast.Literal(2))), p.parse()) p = parser.Parser('{1} == 1 and {0} == 2', params=[1, 2]) self.assertEqual(ast.Intersection(ast.Equivalence(ast.Literal(2), ast.Literal(1)), ast.Equivalence(ast.Literal(1), ast.Literal(2))), p.parse()) p = parser.Parser('{bar} = 1 and {foo} = 2', params=dict(bar='foo', foo=1)) self.assertEqual(ast.Intersection(ast.Equivalence(ast.Literal('foo'), ast.Literal(1)), ast.Equivalence(ast.Literal(1), ast.Literal(2))), p.parse())
dotty
positive
def nim_script(module, params): """ Apply a script to customize nim client targets. arguments: module (dict): The Ansible module params (dict): The module parameters for the command. note: Exits with fail_json in case of error """ async_script = '' if params['asynchronous']: async_script = 'yes' log_async = 'asynchronous' else: async_script = 'no' log_async = 'synchronous' module.log('NIM - {0} customize operation on {1} with {2} script'.format(log_async, params['targets'], params['script'])) <DeepExtract> clients = [] for target in params['targets']: if target.upper() == 'ALL' or target == '*': clients = list(results['nim_node']['standalone']) clients.extend(list(results['nim_node']['vios'])) continue if target.lower() == 'standalone' or target.lower() == 'vios': clients.extend(list(results['nim_node'][target.lower()])) continue rmatch = re.match('(\\w+)\\[(\\d+):(\\d+)\\]', target) if rmatch: name = rmatch.group(1) start = rmatch.group(2) end = rmatch.group(3) for i in range(int(start), int(end) + 1): curr_name = name + str(i) if curr_name in results['nim_node']['standalone']: clients.append(curr_name) continue rmatch = re.match('(\\w+)\\*$', target) if rmatch: name = rmatch.group(1) for curr_name in results['nim_node']['standalone']: if re.match('^%s\\.*' % name, curr_name): clients.append(curr_name) for curr_name in results['nim_node']['vios']: if re.match('^%s\\.*' % name, curr_name): clients.append(curr_name) continue if target.upper() == 'ALL' or target == '*': clients = list(results['nim_node']['standalone']) continue if target in results['nim_node']['standalone'] or target in results['nim_node']['vios'] or target == 'master': clients.append(target) results['targets'] = list(set(clients)) </DeepExtract> if not results['targets']: results['msg'] = "No matching target found for targets '{0}'.".format(params['targets']) module.log('NIM - Error: ' + results['msg']) module.fail_json(**results) module.debug('NIM - Target list: {0}'.format(results['targets'])) cmd = ['nim', '-o', 'cust', '-a', 'script=' + params['script'], '-a', 'async=' + async_script] cmd += results['targets'] (rc, stdout, stderr) = module.run_command(cmd) results['cmd'] = ' '.join(cmd) results['rc'] = rc results['stdout'] = stdout results['stderr'] = stderr module.log('cmd: {0}'.format(results['cmd'])) module.log('rc: {0}'.format(rc)) module.log('stdout: {0}'.format(stdout)) module.log('stderr: {0}'.format(stderr)) if rc != 0: results['msg'] = 'Failed to apply script.' module.log('NIM - Error: ' + results['msg']) module.fail_json(**results) results['changed'] = True
def nim_script(module, params): """ Apply a script to customize nim client targets. arguments: module (dict): The Ansible module params (dict): The module parameters for the command. note: Exits with fail_json in case of error """ async_script = '' if params['asynchronous']: async_script = 'yes' log_async = 'asynchronous' else: async_script = 'no' log_async = 'synchronous' module.log('NIM - {0} customize operation on {1} with {2} script'.format(log_async, params['targets'], params['script'])) clients = [] for target in params['targets']: if target.upper() == 'ALL' or target == '*': clients = list(results['nim_node']['standalone']) clients.extend(list(results['nim_node']['vios'])) continue if target.lower() == 'standalone' or target.lower() == 'vios': clients.extend(list(results['nim_node'][target.lower()])) continue rmatch = re.match('(\\w+)\\[(\\d+):(\\d+)\\]', target) if rmatch: name = rmatch.group(1) start = rmatch.group(2) end = rmatch.group(3) for i in range(int(start), int(end) + 1): curr_name = name + str(i) if curr_name in results['nim_node']['standalone']: clients.append(curr_name) continue rmatch = re.match('(\\w+)\\*$', target) if rmatch: name = rmatch.group(1) for curr_name in results['nim_node']['standalone']: if re.match('^%s\\.*' % name, curr_name): clients.append(curr_name) for curr_name in results['nim_node']['vios']: if re.match('^%s\\.*' % name, curr_name): clients.append(curr_name) continue if target.upper() == 'ALL' or target == '*': clients = list(results['nim_node']['standalone']) continue if target in results['nim_node']['standalone'] or target in results['nim_node']['vios'] or target == 'master': clients.append(target) results['targets'] = list(set(clients)) if not results['targets']: results['msg'] = "No matching target found for targets '{0}'.".format(params['targets']) module.log('NIM - Error: ' + results['msg']) module.fail_json(**results) module.debug('NIM - Target list: {0}'.format(results['targets'])) cmd = ['nim', '-o', 'cust', '-a', 'script=' + params['script'], '-a', 'async=' + async_script] cmd += results['targets'] (rc, stdout, stderr) = module.run_command(cmd) results['cmd'] = ' '.join(cmd) results['rc'] = rc results['stdout'] = stdout results['stderr'] = stderr module.log('cmd: {0}'.format(results['cmd'])) module.log('rc: {0}'.format(rc)) module.log('stdout: {0}'.format(stdout)) module.log('stderr: {0}'.format(stderr)) if rc != 0: results['msg'] = 'Failed to apply script.' module.log('NIM - Error: ' + results['msg']) module.fail_json(**results) results['changed'] = True
ansible-power-aix
positive
def median_combiner(classifier_ensemble, X): """Ensemble combination using the Median rule. Parameters ---------- classifier_ensemble : list of shape = [n_classifiers] Containing the ensemble of classifiers used in the aggregation scheme. X : array of shape (n_samples, n_features) The input data. Returns ------- predicted_label : array of shape (n_samples) The label of each query sample predicted using the majority voting rule """ <DeepExtract> list_proba = [] if estimator_features is None: for (idx, clf) in enumerate(classifier_ensemble): list_proba.append(clf.predict_proba(X)) else: for (idx, clf) in enumerate(classifier_ensemble): list_proba.append(clf.predict_proba(X[:, estimator_features[idx]])) ensemble_proba = np.array(list_proba).transpose((1, 0, 2)) </DeepExtract> return median_rule(ensemble_proba)
def median_combiner(classifier_ensemble, X): """Ensemble combination using the Median rule. Parameters ---------- classifier_ensemble : list of shape = [n_classifiers] Containing the ensemble of classifiers used in the aggregation scheme. X : array of shape (n_samples, n_features) The input data. Returns ------- predicted_label : array of shape (n_samples) The label of each query sample predicted using the majority voting rule """ list_proba = [] if estimator_features is None: for (idx, clf) in enumerate(classifier_ensemble): list_proba.append(clf.predict_proba(X)) else: for (idx, clf) in enumerate(classifier_ensemble): list_proba.append(clf.predict_proba(X[:, estimator_features[idx]])) ensemble_proba = np.array(list_proba).transpose((1, 0, 2)) return median_rule(ensemble_proba)
DESlib
positive
def computeActionFromValues(self, state): """ The policy is the best action in the given state according to the values currently stored in self.values. You may break ties any way you see fit. Note that if there are no legal actions, which is the case at the terminal state, you should return None. """ '*** YOUR CODE HERE ***' self.qValues = util.Counter() for action in self.mdp.getPossibleActions(state): <DeepExtract> qValue = 0 for (nextState, prob) in self.mdp.getTransitionStatesAndProbs(state, action): reward = self.mdp.getReward(state, action, nextState) qValue += prob * (reward + self.discount * self.getValue(nextState)) qValue = qValue </DeepExtract> self.qValues[action] = qValue return self.qValues.argMax()
def computeActionFromValues(self, state): """ The policy is the best action in the given state according to the values currently stored in self.values. You may break ties any way you see fit. Note that if there are no legal actions, which is the case at the terminal state, you should return None. """ '*** YOUR CODE HERE ***' self.qValues = util.Counter() for action in self.mdp.getPossibleActions(state): qValue = 0 for (nextState, prob) in self.mdp.getTransitionStatesAndProbs(state, action): reward = self.mdp.getReward(state, action, nextState) qValue += prob * (reward + self.discount * self.getValue(nextState)) qValue = qValue self.qValues[action] = qValue return self.qValues.argMax()
comp90054-cheat
positive
def loads(self, s, salt=None, return_header=False): """Reverse of :meth:`dumps`. If requested via `return_header` it will return a tuple of payload and header. """ <DeepExtract> if serializer is None: serializer = self.serializer is_text = self.is_text_serializer else: is_text = is_text_serializer(serializer) try: if is_text: self.make_signer(salt, self.algorithm).unsign(want_bytes(s)) = self.make_signer(salt, self.algorithm).unsign(want_bytes(s)).decode('utf-8') (self.make_signer(salt, self.algorithm).unsign(want_bytes(s)), header) = serializer.loads(self.make_signer(salt, self.algorithm).unsign(want_bytes(s))) except Exception as e: raise BadPayload('Could not load the payload because an exception occurred on unserializing the data', original_error=e) </DeepExtract> if header.get('alg') != self.algorithm_name: raise BadHeader('Algorithm mismatch', header=header, payload=payload) if return_header: return (payload, header) return payload
def loads(self, s, salt=None, return_header=False): """Reverse of :meth:`dumps`. If requested via `return_header` it will return a tuple of payload and header. """ if serializer is None: serializer = self.serializer is_text = self.is_text_serializer else: is_text = is_text_serializer(serializer) try: if is_text: self.make_signer(salt, self.algorithm).unsign(want_bytes(s)) = self.make_signer(salt, self.algorithm).unsign(want_bytes(s)).decode('utf-8') (self.make_signer(salt, self.algorithm).unsign(want_bytes(s)), header) = serializer.loads(self.make_signer(salt, self.algorithm).unsign(want_bytes(s))) except Exception as e: raise BadPayload('Could not load the payload because an exception occurred on unserializing the data', original_error=e) if header.get('alg') != self.algorithm_name: raise BadHeader('Algorithm mismatch', header=header, payload=payload) if return_header: return (payload, header) return payload
appengine-toolkit
positive
def diff_files(file1, file2): global filter_nwb, single_file global f1, f2 <DeepExtract> global ci ci = {'only_in': {'A': {'group': [], 'dataset': [], 'attribute': [], 'other': []}, 'B': {'group': [], 'dataset': [], 'attribute': [], 'other': []}}, 'node_types_different': [], 'hard_links': {'A': {'group': [], 'dataset': []}, 'B': {'group': [], 'dataset': []}}, 'tmp_soft_links': {'A': {'group': {}, 'dataset': {}, 'unknown': {}}, 'B': {'group': {}, 'dataset': {}, 'unknown': {}}}, 'soft_links': {'A': {'group': [], 'dataset': [], 'unknown': []}, 'B': {'group': [], 'dataset': [], 'unknown': []}}, 'ext_links': {'A': [], 'B': []}, 'unknown_node_types': [], 'types_differ_values_same': {'dataset': [], 'attribute': []}, 'types_same_values_differ': {'dataset': [], 'attribute': []}, 'values_and_types_differ': {'dataset': [], 'attribute': []}, 'everything_matches': {'dataset': [], 'attribute': []}, 'total_paired_found': {'group': 0, 'dataset': 0, 'attribute': 0}, 'empty_paired_groups': [], 'only_compressed_in': {'A': [], 'B': []}, 'values_match_but_sizes_different': {'dataset': [], 'attribute': []}, 'locations': {'A': {}, 'B': {}}, 'warning': [], 'error': []} </DeepExtract> if not filter_nwb: if single_file: print('<%% command was: python %s %s %%>' % (sys.argv[0], file1)) else: print('<%% command was: python %s %s %s %%>' % (sys.argv[0], file1, file2)) if single_file: print('<%% Generating signature for %s %%>' % file1) else: print('<%% comparing %s (A) and %s (B) %%>' % (file1, file2)) print('') <DeepExtract> try: f = h5py.File(file1, 'r') except IOError: print("Unable to open file '%s'" % file1) display_doc() sys.exit(1) f1 = f </DeepExtract> if not single_file: <DeepExtract> try: f = h5py.File(file2, 'r') except IOError: print("Unable to open file '%s'" % file2) display_doc() sys.exit(1) f2 = f </DeepExtract> else: f2 = f1 <DeepExtract> ggp = (f1['/'], f2['/'], '/') to_check = [ggp] while to_check: ggp = to_check.pop(0) member_groups = diff_groups2(ggp) to_check.extend(member_groups) </DeepExtract> <DeepExtract> global ci, alpha_sort find_hard_links() format_soft_links() if single_file: assert da_empty(ci['only_in']['A']) assert da_empty(ci['only_in']['B']) assert len(ci['node_types_different']) == 0 else: display_sub_messages(ci['only_in']['A'], 'only in A', zero_msg='Good') display_sub_messages(ci['only_in']['B'], 'only in B', zero_msg='Good') display_messages(ci['node_types_different'], 'node types differ', zero_msg='Good') if single_file: display_sub_messages(ci['hard_links']['A'], 'hard links', combine=False) display_sub_messages(ci['soft_links']['A'], 'soft links', combine=False) display_messages(ci['ext_links']['A'], 'ext_links') display_messages(ci['unknown_node_types'], 'unknown node types', zero_msg='Good') else: display_sub_messages(ci['hard_links']['A'], 'hard links in A', combine=False) display_sub_messages(ci['hard_links']['B'], 'hard links in B', combine=False) display_sub_messages(ci['soft_links']['A'], 'soft links in A', combine=False) display_sub_messages(ci['soft_links']['B'], 'soft links in B', combine=False) check_link_equivalence() display_messages(ci['ext_links']['A'], 'ext_links in A') display_messages(ci['ext_links']['B'], 'ext_links in B') display_messages(ci['unknown_node_types'], 'unknown node types (in both A and B)', zero_msg='Good') if alpha_sort: ci['everything_matches']['attribute'].sort() ci['everything_matches']['dataset'].sort() sort_msg = 'sorted alphabetically' else: sort_by_size(ci['everything_matches']['attribute']) 
sort_by_size(ci['everything_matches']['dataset']) sort_msg = 'sorted in decreasing size' ci['empty_paired_groups'].sort() if single_file: assert da_empty(ci['types_same_values_differ']) assert da_empty(ci['types_differ_values_same']) assert da_empty(ci['values_and_types_differ']) assert da_empty(ci['values_match_but_sizes_different']) display_messages(ci['empty_paired_groups'], 'empty groups (no members or attributes)') display_sub_messages(ci['everything_matches'], '(%s)' % sort_msg) else: display_messages(ci['empty_paired_groups'], 'empty paired groups (no members or attributes)') display_sub_messages(ci['types_same_values_differ'], 'types match but values differ') display_sub_messages(ci['types_differ_values_same'], 'types differ but values match') display_sub_messages(ci['values_and_types_differ'], 'values and types differ') display_messages(ci['only_compressed_in']['A'], 'datasets type and values match, but only compressed in A') display_messages(ci['only_compressed_in']['B'], 'datasets type and values match, but only compressed in B') display_sub_messages(ci['values_match_but_sizes_different'], 'values match but sizes different') display_sub_messages(ci['everything_matches'], 'everything matches (%s)' % sort_msg) num_paired_groups = ci['total_paired_found']['group'] num_val_match_datasets = len(ci['types_differ_values_same']['dataset']) + len(ci['everything_matches']['dataset']) + len(ci['values_match_but_sizes_different']['dataset']) + len(ci['only_compressed_in']['A']) + len(ci['only_compressed_in']['B']) num_val_match_attributes = len(ci['types_differ_values_same']['attribute']) + len(ci['everything_matches']['attribute']) + len(ci['values_match_but_sizes_different']['attribute']) num_paired_datasets = ci['total_paired_found']['dataset'] num_paired_attributes = ci['total_paired_found']['attribute'] num_matching_datasets = len(ci['everything_matches']['dataset']) num_matching_attributes = len(ci['everything_matches']['attribute']) num_dont_match_attributes = num_paired_attributes - num_val_match_attributes num_dont_match_datasets = num_paired_datasets - num_val_match_datasets print('-' * 20) for msgtype in ('error', 'warning'): if ci[msgtype]: print('%i %ss:' % (len(ci[msgtype]), msgtype)) print('\n'.join(ci[msgtype])) print('** Summary') if single_file: print('%i groups, %i datasets, %i attributes' % (num_paired_groups, num_paired_datasets, num_paired_attributes)) return print('Unpaired groups: %i only in A, %i only in B' % (len(ci['only_in']['A']['group']), len(ci['only_in']['B']['group']))) print('Unpaired datasets: %i only in A, %i only in B' % (len(ci['only_in']['A']['dataset']), len(ci['only_in']['B']['dataset']))) print('Unpaired attributes: %i only in A, %i only in B' % (len(ci['only_in']['A']['attribute']), len(ci['only_in']['B']['attribute']))) print('Total paired: %i datasets, %i attributes, %i groups' % (num_paired_datasets, num_paired_attributes, num_paired_groups)) print('Total paired with values match: %i/%i datasets, %i/%i attributes.' % (num_val_match_datasets, num_paired_datasets, num_val_match_attributes, num_paired_attributes)) print("Total paired, vals don't match: %i/%i datasets, %i/%i attributes." 
% (num_dont_match_datasets, num_paired_datasets, num_dont_match_attributes, num_paired_attributes)) print('Total paired, everything match: %i/%i datasets, %i/%s attributes' % (num_matching_datasets, num_paired_datasets, num_matching_attributes, num_paired_attributes)) total_unpaired = 0 for ab in ('A', 'B'): for comp in ('group', 'dataset', 'attribute', 'other'): total_unpaired += len(ci['only_in'][ab][comp]) if total_unpaired > 0: print('Files do not match, there are %i unpaired components' % total_unpaired) elif num_matching_attributes == num_paired_attributes and num_matching_datasets == num_paired_datasets: print('** Files exactly match **') else: check_for_nwb_match(num_dont_match_datasets, num_dont_match_attributes) </DeepExtract>
def diff_files(file1, file2): global filter_nwb, single_file global f1, f2 global ci ci = {'only_in': {'A': {'group': [], 'dataset': [], 'attribute': [], 'other': []}, 'B': {'group': [], 'dataset': [], 'attribute': [], 'other': []}}, 'node_types_different': [], 'hard_links': {'A': {'group': [], 'dataset': []}, 'B': {'group': [], 'dataset': []}}, 'tmp_soft_links': {'A': {'group': {}, 'dataset': {}, 'unknown': {}}, 'B': {'group': {}, 'dataset': {}, 'unknown': {}}}, 'soft_links': {'A': {'group': [], 'dataset': [], 'unknown': []}, 'B': {'group': [], 'dataset': [], 'unknown': []}}, 'ext_links': {'A': [], 'B': []}, 'unknown_node_types': [], 'types_differ_values_same': {'dataset': [], 'attribute': []}, 'types_same_values_differ': {'dataset': [], 'attribute': []}, 'values_and_types_differ': {'dataset': [], 'attribute': []}, 'everything_matches': {'dataset': [], 'attribute': []}, 'total_paired_found': {'group': 0, 'dataset': 0, 'attribute': 0}, 'empty_paired_groups': [], 'only_compressed_in': {'A': [], 'B': []}, 'values_match_but_sizes_different': {'dataset': [], 'attribute': []}, 'locations': {'A': {}, 'B': {}}, 'warning': [], 'error': []} if not filter_nwb: if single_file: print('<%% command was: python %s %s %%>' % (sys.argv[0], file1)) else: print('<%% command was: python %s %s %s %%>' % (sys.argv[0], file1, file2)) if single_file: print('<%% Generating signature for %s %%>' % file1) else: print('<%% comparing %s (A) and %s (B) %%>' % (file1, file2)) print('') try: f = h5py.File(file1, 'r') except IOError: print("Unable to open file '%s'" % file1) display_doc() sys.exit(1) f1 = f if not single_file: try: f = h5py.File(file2, 'r') except IOError: print("Unable to open file '%s'" % file2) display_doc() sys.exit(1) f2 = f else: f2 = f1 ggp = (f1['/'], f2['/'], '/') to_check = [ggp] while to_check: ggp = to_check.pop(0) member_groups = diff_groups2(ggp) to_check.extend(member_groups) global ci, alpha_sort find_hard_links() format_soft_links() if single_file: assert da_empty(ci['only_in']['A']) assert da_empty(ci['only_in']['B']) assert len(ci['node_types_different']) == 0 else: display_sub_messages(ci['only_in']['A'], 'only in A', zero_msg='Good') display_sub_messages(ci['only_in']['B'], 'only in B', zero_msg='Good') display_messages(ci['node_types_different'], 'node types differ', zero_msg='Good') if single_file: display_sub_messages(ci['hard_links']['A'], 'hard links', combine=False) display_sub_messages(ci['soft_links']['A'], 'soft links', combine=False) display_messages(ci['ext_links']['A'], 'ext_links') display_messages(ci['unknown_node_types'], 'unknown node types', zero_msg='Good') else: display_sub_messages(ci['hard_links']['A'], 'hard links in A', combine=False) display_sub_messages(ci['hard_links']['B'], 'hard links in B', combine=False) display_sub_messages(ci['soft_links']['A'], 'soft links in A', combine=False) display_sub_messages(ci['soft_links']['B'], 'soft links in B', combine=False) check_link_equivalence() display_messages(ci['ext_links']['A'], 'ext_links in A') display_messages(ci['ext_links']['B'], 'ext_links in B') display_messages(ci['unknown_node_types'], 'unknown node types (in both A and B)', zero_msg='Good') if alpha_sort: ci['everything_matches']['attribute'].sort() ci['everything_matches']['dataset'].sort() sort_msg = 'sorted alphabetically' else: sort_by_size(ci['everything_matches']['attribute']) sort_by_size(ci['everything_matches']['dataset']) sort_msg = 'sorted in decreasing size' ci['empty_paired_groups'].sort() if single_file: assert 
da_empty(ci['types_same_values_differ']) assert da_empty(ci['types_differ_values_same']) assert da_empty(ci['values_and_types_differ']) assert da_empty(ci['values_match_but_sizes_different']) display_messages(ci['empty_paired_groups'], 'empty groups (no members or attributes)') display_sub_messages(ci['everything_matches'], '(%s)' % sort_msg) else: display_messages(ci['empty_paired_groups'], 'empty paired groups (no members or attributes)') display_sub_messages(ci['types_same_values_differ'], 'types match but values differ') display_sub_messages(ci['types_differ_values_same'], 'types differ but values match') display_sub_messages(ci['values_and_types_differ'], 'values and types differ') display_messages(ci['only_compressed_in']['A'], 'datasets type and values match, but only compressed in A') display_messages(ci['only_compressed_in']['B'], 'datasets type and values match, but only compressed in B') display_sub_messages(ci['values_match_but_sizes_different'], 'values match but sizes different') display_sub_messages(ci['everything_matches'], 'everything matches (%s)' % sort_msg) num_paired_groups = ci['total_paired_found']['group'] num_val_match_datasets = len(ci['types_differ_values_same']['dataset']) + len(ci['everything_matches']['dataset']) + len(ci['values_match_but_sizes_different']['dataset']) + len(ci['only_compressed_in']['A']) + len(ci['only_compressed_in']['B']) num_val_match_attributes = len(ci['types_differ_values_same']['attribute']) + len(ci['everything_matches']['attribute']) + len(ci['values_match_but_sizes_different']['attribute']) num_paired_datasets = ci['total_paired_found']['dataset'] num_paired_attributes = ci['total_paired_found']['attribute'] num_matching_datasets = len(ci['everything_matches']['dataset']) num_matching_attributes = len(ci['everything_matches']['attribute']) num_dont_match_attributes = num_paired_attributes - num_val_match_attributes num_dont_match_datasets = num_paired_datasets - num_val_match_datasets print('-' * 20) for msgtype in ('error', 'warning'): if ci[msgtype]: print('%i %ss:' % (len(ci[msgtype]), msgtype)) print('\n'.join(ci[msgtype])) print('** Summary') if single_file: print('%i groups, %i datasets, %i attributes' % (num_paired_groups, num_paired_datasets, num_paired_attributes)) return print('Unpaired groups: %i only in A, %i only in B' % (len(ci['only_in']['A']['group']), len(ci['only_in']['B']['group']))) print('Unpaired datasets: %i only in A, %i only in B' % (len(ci['only_in']['A']['dataset']), len(ci['only_in']['B']['dataset']))) print('Unpaired attributes: %i only in A, %i only in B' % (len(ci['only_in']['A']['attribute']), len(ci['only_in']['B']['attribute']))) print('Total paired: %i datasets, %i attributes, %i groups' % (num_paired_datasets, num_paired_attributes, num_paired_groups)) print('Total paired with values match: %i/%i datasets, %i/%i attributes.' % (num_val_match_datasets, num_paired_datasets, num_val_match_attributes, num_paired_attributes)) print("Total paired, vals don't match: %i/%i datasets, %i/%i attributes." 
% (num_dont_match_datasets, num_paired_datasets, num_dont_match_attributes, num_paired_attributes)) print('Total paired, everything match: %i/%i datasets, %i/%s attributes' % (num_matching_datasets, num_paired_datasets, num_matching_attributes, num_paired_attributes)) total_unpaired = 0 for ab in ('A', 'B'): for comp in ('group', 'dataset', 'attribute', 'other'): total_unpaired += len(ci['only_in'][ab][comp]) if total_unpaired > 0: print('Files do not match, there are %i unpaired components' % total_unpaired) elif num_matching_attributes == num_paired_attributes and num_matching_datasets == num_paired_datasets: print('** Files exactly match **') else: check_for_nwb_match(num_dont_match_datasets, num_dont_match_attributes)
api-python
positive
@swagger_auto_schema(tags=['Cases'], manual_parameters=swagger_params.organization_params) def delete(self, request, pk, format=None): <DeepExtract> self.object = self.model.objects.filter(id=pk).first() </DeepExtract> if request.profile.role == 'ADMIN' or request.profile.is_admin or request.profile == self.object.commented_by: self.object.delete() return Response({'error': False, 'message': 'Comment Deleted Successfully'}, status=status.HTTP_200_OK) return Response({'error': True, 'errors': 'You do not have permission to perform this action'}, status=status.HTTP_403_FORBIDDEN)
@swagger_auto_schema(tags=['Cases'], manual_parameters=swagger_params.organization_params) def delete(self, request, pk, format=None): self.object = self.model.objects.filter(id=pk).first() if request.profile.role == 'ADMIN' or request.profile.is_admin or request.profile == self.object.commented_by: self.object.delete() return Response({'error': False, 'message': 'Comment Deleted Successfully'}, status=status.HTTP_200_OK) return Response({'error': True, 'errors': 'You do not have permission to perform this action'}, status=status.HTTP_403_FORBIDDEN)
Django-CRM
positive
def add_fpn(model, stage_info): """Adds FPN connections based on the model described in the FPN paper.""" fpn_dim = cfg.FPN.DIM <DeepExtract> min_level = LOWEST_LVL max_level = HIGHEST_LVL if cfg.FPN.MULTILEVEL_RPN and (not cfg.FPN.MULTILEVEL_ROIS): max_level = cfg.FPN.RPN_MAX_LEVEL min_level = cfg.FPN.RPN_MIN_LEVEL if not cfg.FPN.MULTILEVEL_RPN and cfg.FPN.MULTILEVEL_ROIS: max_level = cfg.FPN.ROI_MAX_LEVEL min_level = cfg.FPN.ROI_MIN_LEVEL if cfg.FPN.MULTILEVEL_RPN and cfg.FPN.MULTILEVEL_ROIS: max_level = max(cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.ROI_MAX_LEVEL) min_level = min(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.ROI_MIN_LEVEL) (min_level, max_level) = (min_level, max_level) </DeepExtract> model.Conv(stage_info.blobs[0], 'fpn_inner_' + stage_info.blobs[0], dim_in=stage_info.dims[0], dim_out=fpn_dim, kernel=1, pad=0, stride=1, weight_init=('XavierFill', {}), bias_init=('ConstantFill', {'value': 0.0})) for i in range(len(stage_info.blobs) - 1 - (min_level - LOWEST_LVL)): fpn_top = 'fpn_inner_' + stage_info.blobs[i] fpn_lateral = stage_info.blobs[i + 1] fpn_bottom = 'fpn_inner_' + stage_info.blobs[i + 1] dim_top = fpn_dim dim_lateral = stage_info.dims[i + 1] <DeepExtract> lat = model.Conv(fpn_lateral, fpn_bottom if cfg.FPN.INPLACE_LATERAL else fpn_bottom + '_lateral', dim_in=dim_lateral, dim_out=dim_top, kernel=1, pad=0, stride=1, weight_init=('ConstantFill', {'value': 0.0}) if cfg.FPN.ZERO_INIT_LATERAL else ('XavierFill', {}), bias_init=('ConstantFill', {'value': 0.0})) td = model.net.UpsampleNearest(fpn_top, fpn_bottom + '_topdown', scale=2) model.net.Sum([lat, td], fpn_bottom) </DeepExtract> blobs_fpn = [] spatial_scales = [] for i in range(len(stage_info.blobs) - (min_level - LOWEST_LVL)): fpn_blob = model.Conv('fpn_inner_' + stage_info.blobs[i], 'fpn_' + stage_info.blobs[i], dim_in=fpn_dim, dim_out=fpn_dim, kernel=3, pad=1, stride=1, weight_init=('XavierFill', {}), bias_init=('ConstantFill', {'value': 0.0})) blobs_fpn += [fpn_blob] spatial_scales += [stage_info.spatial_scales[i]] if not cfg.FPN.EXTRA_CONV_LEVELS and max_level == HIGHEST_LVL + 1: P6_blob_in = blobs_fpn[0] P6_name = P6_blob_in + '_subsampled_2x' P6_blob = model.MaxPool(P6_blob_in, P6_name, kernel=1, pad=0, stride=2) blobs_fpn.insert(0, P6_blob) spatial_scales.insert(0, spatial_scales[0] * 0.5) if cfg.FPN.EXTRA_CONV_LEVELS and max_level > HIGHEST_LVL: fpn_blob = stage_info.blobs[0] dim_in = stage_info.dims[0] for i in range(HIGHEST_LVL + 1, max_level + 1): if i > HIGHEST_LVL + 1: fpn_blob = model.Relu(fpn_blob, fpn_blob) fpn_blob = model.Conv(fpn_blob, 'fpn_' + str(i), dim_in=dim_in, dim_out=fpn_dim, kernel=3, pad=1, stride=2, weight_init=('XavierFill', {}), bias_init=('ConstantFill', {'value': 0.0})) dim_in = fpn_dim blobs_fpn.insert(0, fpn_blob) spatial_scales.insert(0, spatial_scales[0] * 0.5) return (blobs_fpn, fpn_dim, spatial_scales)
def add_fpn(model, stage_info): """Adds FPN connections based on the model described in the FPN paper.""" fpn_dim = cfg.FPN.DIM min_level = LOWEST_LVL max_level = HIGHEST_LVL if cfg.FPN.MULTILEVEL_RPN and (not cfg.FPN.MULTILEVEL_ROIS): max_level = cfg.FPN.RPN_MAX_LEVEL min_level = cfg.FPN.RPN_MIN_LEVEL if not cfg.FPN.MULTILEVEL_RPN and cfg.FPN.MULTILEVEL_ROIS: max_level = cfg.FPN.ROI_MAX_LEVEL min_level = cfg.FPN.ROI_MIN_LEVEL if cfg.FPN.MULTILEVEL_RPN and cfg.FPN.MULTILEVEL_ROIS: max_level = max(cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.ROI_MAX_LEVEL) min_level = min(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.ROI_MIN_LEVEL) (min_level, max_level) = (min_level, max_level) model.Conv(stage_info.blobs[0], 'fpn_inner_' + stage_info.blobs[0], dim_in=stage_info.dims[0], dim_out=fpn_dim, kernel=1, pad=0, stride=1, weight_init=('XavierFill', {}), bias_init=('ConstantFill', {'value': 0.0})) for i in range(len(stage_info.blobs) - 1 - (min_level - LOWEST_LVL)): fpn_top = 'fpn_inner_' + stage_info.blobs[i] fpn_lateral = stage_info.blobs[i + 1] fpn_bottom = 'fpn_inner_' + stage_info.blobs[i + 1] dim_top = fpn_dim dim_lateral = stage_info.dims[i + 1] lat = model.Conv(fpn_lateral, fpn_bottom if cfg.FPN.INPLACE_LATERAL else fpn_bottom + '_lateral', dim_in=dim_lateral, dim_out=dim_top, kernel=1, pad=0, stride=1, weight_init=('ConstantFill', {'value': 0.0}) if cfg.FPN.ZERO_INIT_LATERAL else ('XavierFill', {}), bias_init=('ConstantFill', {'value': 0.0})) td = model.net.UpsampleNearest(fpn_top, fpn_bottom + '_topdown', scale=2) model.net.Sum([lat, td], fpn_bottom) blobs_fpn = [] spatial_scales = [] for i in range(len(stage_info.blobs) - (min_level - LOWEST_LVL)): fpn_blob = model.Conv('fpn_inner_' + stage_info.blobs[i], 'fpn_' + stage_info.blobs[i], dim_in=fpn_dim, dim_out=fpn_dim, kernel=3, pad=1, stride=1, weight_init=('XavierFill', {}), bias_init=('ConstantFill', {'value': 0.0})) blobs_fpn += [fpn_blob] spatial_scales += [stage_info.spatial_scales[i]] if not cfg.FPN.EXTRA_CONV_LEVELS and max_level == HIGHEST_LVL + 1: P6_blob_in = blobs_fpn[0] P6_name = P6_blob_in + '_subsampled_2x' P6_blob = model.MaxPool(P6_blob_in, P6_name, kernel=1, pad=0, stride=2) blobs_fpn.insert(0, P6_blob) spatial_scales.insert(0, spatial_scales[0] * 0.5) if cfg.FPN.EXTRA_CONV_LEVELS and max_level > HIGHEST_LVL: fpn_blob = stage_info.blobs[0] dim_in = stage_info.dims[0] for i in range(HIGHEST_LVL + 1, max_level + 1): if i > HIGHEST_LVL + 1: fpn_blob = model.Relu(fpn_blob, fpn_blob) fpn_blob = model.Conv(fpn_blob, 'fpn_' + str(i), dim_in=dim_in, dim_out=fpn_dim, kernel=3, pad=1, stride=2, weight_init=('XavierFill', {}), bias_init=('ConstantFill', {'value': 0.0})) dim_in = fpn_dim blobs_fpn.insert(0, fpn_blob) spatial_scales.insert(0, spatial_scales[0] * 0.5) return (blobs_fpn, fpn_dim, spatial_scales)
DetectAndTrack
positive
def previous_stage(self, msg: Bool): if self._curr_stage > 1: rospy.set_param('/last_stage_reached', False) self._curr_stage = self._curr_stage - 1 <DeepExtract> self._remove_obstacles() static_obstacles = self._stages[self._curr_stage]['static'] dynamic_obstacles = self._stages[self._curr_stage]['dynamic'] self.obstacles_manager.register_random_static_obstacles(self._stages[self._curr_stage]['static'], num_vertices_min=3, num_vertices_max=4, min_obstacle_radius=0.25, max_obstacle_radius=1.5) self.obstacles_manager.register_random_dynamic_obstacles(self._stages[self._curr_stage]['dynamic'], min_obstacle_radius=0.1, max_obstacle_radius=0.3) print(f'({self.ns}) Stage {self._curr_stage}: Spawning {static_obstacles} static and {dynamic_obstacles} dynamic obstacles!') </DeepExtract> if self.ns == 'eval_sim': rospy.set_param('/curr_stage', self._curr_stage) with self._lock_json: <DeepExtract> with open(self.json_file, 'r') as file: hyperparams = json.load(file) try: hyperparams['curr_stage'] = self._curr_stage except Exception as e: raise Warning(f" {e} \n Parameter 'curr_stage' not found in 'hyperparameters.json'!") else: with open(self.json_file, 'w', encoding='utf-8') as target: json.dump(hyperparams, target, ensure_ascii=False, indent=4) </DeepExtract> else: print(f'({self.ns}) INFO: Tried to trigger previous stage but already reached first one')
def previous_stage(self, msg: Bool): if self._curr_stage > 1: rospy.set_param('/last_stage_reached', False) self._curr_stage = self._curr_stage - 1 self._remove_obstacles() static_obstacles = self._stages[self._curr_stage]['static'] dynamic_obstacles = self._stages[self._curr_stage]['dynamic'] self.obstacles_manager.register_random_static_obstacles(self._stages[self._curr_stage]['static'], num_vertices_min=3, num_vertices_max=4, min_obstacle_radius=0.25, max_obstacle_radius=1.5) self.obstacles_manager.register_random_dynamic_obstacles(self._stages[self._curr_stage]['dynamic'], min_obstacle_radius=0.1, max_obstacle_radius=0.3) print(f'({self.ns}) Stage {self._curr_stage}: Spawning {static_obstacles} static and {dynamic_obstacles} dynamic obstacles!') if self.ns == 'eval_sim': rospy.set_param('/curr_stage', self._curr_stage) with self._lock_json: with open(self.json_file, 'r') as file: hyperparams = json.load(file) try: hyperparams['curr_stage'] = self._curr_stage except Exception as e: raise Warning(f" {e} \n Parameter 'curr_stage' not found in 'hyperparameters.json'!") else: with open(self.json_file, 'w', encoding='utf-8') as target: json.dump(hyperparams, target, ensure_ascii=False, indent=4) else: print(f'({self.ns}) INFO: Tried to trigger previous stage but already reached first one')
arena-rosnav
positive
@staticmethod def build_vocab(train, dev, test, opt): fields = train.fields for field_name in ('src', 'lay', 'lay_e', 'lay_bpe', 'tgt', 'tgt_loss', 'tgt_loss_masked'): fields[field_name].build_vocab(train, max_size=opt.src_vocab_size, min_freq=opt.src_words_min_frequency) for field_name in ('src', 'lay', 'lay_e', 'lay_bpe'): <DeepExtract> merged = Counter() for vocab in [fields[field_name].vocab]: merged += vocab.freqs fields[field_name].vocab = torchtext.vocab.Vocab(merged, specials=list(special_token_list), max_size=vocab_size) </DeepExtract> tgt_merge_name_list = ['tgt', 'tgt_loss', 'tgt_loss_masked'] <DeepExtract> merged = Counter() for vocab in [fields[field_name].vocab for field_name in tgt_merge_name_list]: merged += vocab.freqs tgt_merge = torchtext.vocab.Vocab(merged, specials=list(special_token_list), max_size=vocab_size) </DeepExtract> for field_name in tgt_merge_name_list: fields[field_name].vocab = tgt_merge
@staticmethod def build_vocab(train, dev, test, opt): fields = train.fields for field_name in ('src', 'lay', 'lay_e', 'lay_bpe', 'tgt', 'tgt_loss', 'tgt_loss_masked'): fields[field_name].build_vocab(train, max_size=opt.src_vocab_size, min_freq=opt.src_words_min_frequency) for field_name in ('src', 'lay', 'lay_e', 'lay_bpe'): merged = Counter() for vocab in [fields[field_name].vocab]: merged += vocab.freqs fields[field_name].vocab = torchtext.vocab.Vocab(merged, specials=list(special_token_list), max_size=vocab_size) tgt_merge_name_list = ['tgt', 'tgt_loss', 'tgt_loss_masked'] merged = Counter() for vocab in [fields[field_name].vocab for field_name in tgt_merge_name_list]: merged += vocab.freqs tgt_merge = torchtext.vocab.Vocab(merged, specials=list(special_token_list), max_size=vocab_size) for field_name in tgt_merge_name_list: fields[field_name].vocab = tgt_merge
coarse2fine
positive
def predict(self, fold, val_indexes): prefix = 'fold' + str(fold) + '_' if self.test else '' val_dataset = SequentialDataset(self.ds, val_indexes, stage='test', config=self.config, transforms=self.val_transforms) val_dl = PytorchDataLoader(val_dataset, batch_size=self.config.predict_batch_size, num_workers=self.num_workers, drop_last=False) <DeepExtract> model = nn.DataParallel(torch.load(os.path.join('..', 'weights', self.folder, 'fold{}_best.pth'.format(fold)))) model.eval() model = model </DeepExtract> pbar = tqdm.tqdm(val_dl, total=len(val_dl)) for data in pbar: samples = data['image'] <DeepExtract> ret = [] for cls in TTA: ret.append(cls(self.config.sigmoid)(model, samples)) scale_tta = False if scale_tta: for scale in [0.8, 1.25]: data = np.moveaxis(np.squeeze(samples.numpy()[0]), 0, -1) (srows, scols) = data.shape[:2] data = cv2.resize(data, (0, 0), fx=scale, fy=scale) (rows, cols) = data.shape[:2] data = cv2.copyMakeBorder(data, 0, 32 - rows % 32, 0, 32 - cols % 32, cv2.BORDER_REFLECT) data = np.expand_dims(np.moveaxis(data, -1, 0), 0) data = torch.from_numpy(data) for cls in TTA: r = cls(self.config.sigmoid)(model, data) r = np.moveaxis(np.squeeze(r), 0, -1) r = r[:rows, :cols, ...] r = cv2.resize(r, (scols, srows)) r = np.expand_dims(np.moveaxis(r, -1, 0), 0) ret.append(r) predicted = np.moveaxis(np.mean(ret, axis=0), 1, -1) </DeepExtract> <DeepExtract> raise NotImplementedError </DeepExtract> <DeepExtract> pass </DeepExtract>
def predict(self, fold, val_indexes): prefix = 'fold' + str(fold) + '_' if self.test else '' val_dataset = SequentialDataset(self.ds, val_indexes, stage='test', config=self.config, transforms=self.val_transforms) val_dl = PytorchDataLoader(val_dataset, batch_size=self.config.predict_batch_size, num_workers=self.num_workers, drop_last=False) model = nn.DataParallel(torch.load(os.path.join('..', 'weights', self.folder, 'fold{}_best.pth'.format(fold)))) model.eval() model = model pbar = tqdm.tqdm(val_dl, total=len(val_dl)) for data in pbar: samples = data['image'] ret = [] for cls in TTA: ret.append(cls(self.config.sigmoid)(model, samples)) scale_tta = False if scale_tta: for scale in [0.8, 1.25]: data = np.moveaxis(np.squeeze(samples.numpy()[0]), 0, -1) (srows, scols) = data.shape[:2] data = cv2.resize(data, (0, 0), fx=scale, fy=scale) (rows, cols) = data.shape[:2] data = cv2.copyMakeBorder(data, 0, 32 - rows % 32, 0, 32 - cols % 32, cv2.BORDER_REFLECT) data = np.expand_dims(np.moveaxis(data, -1, 0), 0) data = torch.from_numpy(data) for cls in TTA: r = cls(self.config.sigmoid)(model, data) r = np.moveaxis(np.squeeze(r), 0, -1) r = r[:rows, :cols, ...] r = cv2.resize(r, (scols, srows)) r = np.expand_dims(np.moveaxis(r, -1, 0), 0) ret.append(r) predicted = np.moveaxis(np.mean(ret, axis=0), 1, -1) raise NotImplementedError pass
dsb2018_topcoders
positive
def test_level_3_missing_l1_l2(self): """Check level 3 node created with missing level 1 and 2""" t3 = self.tag_model.objects.create(name='One/Two/Three') t1 = self.tag_model.objects.get(name='One') t2 = self.tag_model.objects.get(name='One/Two') <DeepExtract> if 'One' is not None: self.assertEqual(t1.name, 'One') if 'One' is not None: self.assertEqual(t1.label, 'One') if 'one' is not None: self.assertEqual(t1.slug, 'one') if 'one' is not None: self.assertEqual(t1.path, 'one') if parent is not None: self.assertEqual(t1.parent, parent) if count is not None: self.assertEqual(t1.count, count) if protected is not None: self.assertEqual(t1.protected, protected) if 1 is not None: self.assertEqual(t1.level, 1) </DeepExtract> <DeepExtract> if 'One/Two' is not None: self.assertEqual(t2.name, 'One/Two') if 'Two' is not None: self.assertEqual(t2.label, 'Two') if 'two' is not None: self.assertEqual(t2.slug, 'two') if 'one/two' is not None: self.assertEqual(t2.path, 'one/two') if t1 is not None: self.assertEqual(t2.parent, t1) if count is not None: self.assertEqual(t2.count, count) if protected is not None: self.assertEqual(t2.protected, protected) if 2 is not None: self.assertEqual(t2.level, 2) </DeepExtract> <DeepExtract> if 'One/Two/Three' is not None: self.assertEqual(t3.name, 'One/Two/Three') if 'Three' is not None: self.assertEqual(t3.label, 'Three') if 'three' is not None: self.assertEqual(t3.slug, 'three') if 'one/two/three' is not None: self.assertEqual(t3.path, 'one/two/three') if t2 is not None: self.assertEqual(t3.parent, t2) if count is not None: self.assertEqual(t3.count, count) if protected is not None: self.assertEqual(t3.protected, protected) if 3 is not None: self.assertEqual(t3.level, 3) </DeepExtract>
def test_level_3_missing_l1_l2(self): """Check level 3 node created with missing level 1 and 2""" t3 = self.tag_model.objects.create(name='One/Two/Three') t1 = self.tag_model.objects.get(name='One') t2 = self.tag_model.objects.get(name='One/Two') if 'One' is not None: self.assertEqual(t1.name, 'One') if 'One' is not None: self.assertEqual(t1.label, 'One') if 'one' is not None: self.assertEqual(t1.slug, 'one') if 'one' is not None: self.assertEqual(t1.path, 'one') if parent is not None: self.assertEqual(t1.parent, parent) if count is not None: self.assertEqual(t1.count, count) if protected is not None: self.assertEqual(t1.protected, protected) if 1 is not None: self.assertEqual(t1.level, 1) if 'One/Two' is not None: self.assertEqual(t2.name, 'One/Two') if 'Two' is not None: self.assertEqual(t2.label, 'Two') if 'two' is not None: self.assertEqual(t2.slug, 'two') if 'one/two' is not None: self.assertEqual(t2.path, 'one/two') if t1 is not None: self.assertEqual(t2.parent, t1) if count is not None: self.assertEqual(t2.count, count) if protected is not None: self.assertEqual(t2.protected, protected) if 2 is not None: self.assertEqual(t2.level, 2) if 'One/Two/Three' is not None: self.assertEqual(t3.name, 'One/Two/Three') if 'Three' is not None: self.assertEqual(t3.label, 'Three') if 'three' is not None: self.assertEqual(t3.slug, 'three') if 'one/two/three' is not None: self.assertEqual(t3.path, 'one/two/three') if t2 is not None: self.assertEqual(t3.parent, t2) if count is not None: self.assertEqual(t3.count, count) if protected is not None: self.assertEqual(t3.protected, protected) if 3 is not None: self.assertEqual(t3.level, 3)
django-tagulous
positive
def main(args): os.makedirs(os.path.join(args.outpath, 'img'), exist_ok=True) exp_paths = list(glob.glob(args.in_glob)) exp_data = [read_exp_metrics(p) for p in exp_paths] exp_ids = [get_exp_id(p, namelen=args.namelen) for p in exp_paths] full_exp_metrics = pd.concat(exp_data, keys=exp_ids).applymap(convert_float) full_exp_metrics.dropna(axis=1, how='all', inplace=True) full_exp_metrics.dropna(axis=0, how='all', inplace=True) full_exp_metrics.index.set_names(('experiment', 'filename'), inplace=True) full_exp_metrics.reset_index(inplace=True) full_exp_metrics['img_type'] = [re.match(args.scenetype, f).group('scene') for f in full_exp_metrics['filename']] real_videos_index = full_exp_metrics['experiment'] == args.realname real_flow_l2 = full_exp_metrics.loc[real_videos_index, 'flow_l2'].mean(axis=0, skipna=True) full_exp_metrics['flow_l2_rel'] = full_exp_metrics['flow_l2'] / real_flow_l2 full_exp_metrics.to_csv(os.path.join(args.outpath, 'joint_metrics.tsv'), sep='\t') metrics_by_exp = full_exp_metrics.set_index('experiment') metrics_by_st = full_exp_metrics.set_index('img_type') pointwise_exp_metrics = metrics_by_exp[POINTWISE_METRICS] pointwise_exp_mean = pointwise_exp_metrics.groupby(level=0, axis=0).describe() pointwise_exp_mean = pointwise_exp_mean[[c for c in pointwise_exp_mean.columns if c[1] in ('mean', 'std')]] <DeepExtract> def _do_format(row): mean = row.xs('mean', level=1).iloc[0] pointwise_exp_mean = mean def _transform_group(gr): pointwise_exp_mean = gr.apply(_do_format, axis=1) result = pointwise_exp_mean.groupby(level=0, axis=1).apply(_transform_group) result.columns = pd.MultiIndex.from_tuples([c.split('_', 1) for c in result.columns]) result.sort_index(axis=1, inplace=True) pointwise_exp_mean = result </DeepExtract> pointwise_exp_mean_html = pointwise_exp_mean[['lpips', 'ssim']].to_html(float_format=lambda x: f'{x:.4f}') flow_exp_mean_html = pointwise_exp_mean['flow'].rename(index=EXPERIMENT_NAMES).to_html() lpips_data = pointwise_exp_mean['lpips'][list(CHART_STAGE2NAME.keys())] lpips_data.rename(columns=CHART_STAGE2NAME, inplace=True) lpips_data.rename(index=EXPERIMENT_NAMES, inplace=True) lpips_data.dropna(axis=0, inplace=True) ssim_data = pointwise_exp_mean['ssim'][list(CHART_STAGE2NAME.keys())] ssim_data.rename(columns=CHART_STAGE2NAME, inplace=True) ssim_data.rename(index=EXPERIMENT_NAMES, inplace=True) ssim_data.dropna(axis=0, inplace=True) (fig, (ax1, ax2)) = plt.subplots(1, 2) fig.set_size_inches((7, 3.3)) lpips_data.transpose().plot(ax=ax1, style=STYLES[:lpips_data.shape[0]], linewidth=LINEWIDTH) ssim_data.transpose().plot(ax=ax2, style=STYLES[:ssim_data.shape[0]], linewidth=LINEWIDTH) ax1.set_ylabel('LPIPS (less is better)') ax1.get_legend().remove() ax2.set_ylabel('SSIM (more is better)') ax2.legend(bbox_to_anchor=(1.01, 0.5), loc='center left') fig.tight_layout() fig.savefig(os.path.join(args.outpath, 'img', 'lpips_chart.png')) fig.savefig(os.path.join(args.outpath, 'img', 'lpips_chart.pdf')) plt.close(fig) pointwise_st_mean_html = '' flow_st_mean_html = '' if not args.nost: pointwise_st_metrics = metrics_by_st[POINTWISE_METRICS] pointwise_st_mean = pointwise_st_metrics.groupby(level=0, axis=0).describe() pointwise_st_mean = pointwise_st_mean[[c for c in pointwise_st_mean.columns if c[1] in ('mean', 'std')]] <DeepExtract> def _do_format(row): mean = row.xs('mean', level=1).iloc[0] pointwise_st_mean = mean def _transform_group(gr): pointwise_st_mean = gr.apply(_do_format, axis=1) result = pointwise_st_mean.groupby(level=0, axis=1).apply(_transform_group) 
result.columns = pd.MultiIndex.from_tuples([c.split('_', 1) for c in result.columns]) result.sort_index(axis=1, inplace=True) pointwise_st_mean = result </DeepExtract> pointwise_st_mean_html = pointwise_st_mean[['lpips', 'ssim']].to_html() flow_st_mean_html = pointwise_st_mean['flow'].to_html(float_format=lambda x: f'{x:.4f}') os.makedirs(os.path.join(args.outpath, 'img'), exist_ok=True) chart_by_exp_html = [] for (mname, mtitle) in SEGM_METRICS.items(): chart_by_exp_html.append(f'<h2>{mtitle}</h2>') for (prefix, ptitle) in SEGM_PREFIXES.items(): col_filter = f'{prefix}{mname}' ncols = sum((1 for c in metrics_by_exp.columns if c.startswith(col_filter))) if ncols == 0: continue cur_values = metrics_by_exp[[f'{col_filter}_{i}' for i in range(ncols)]].groupby(level=0, axis=0).mean() cur_values.columns = list(range(ncols)) cur_values = cur_values.rolling(window=ROLLING_WINDOW, axis=1, center=True).mean() (fig, ax) = plt.subplots() fig.set_size_inches(IMG_SIZE) cur_values.transpose().plot(ax=ax, style=STYLES[:cur_values.shape[0]], linewidth=LINEWIDTH) ax.set_title(f'{mtitle} {ptitle}') ax.legend(bbox_to_anchor=(1.01, 0.5), loc='center left') fig.tight_layout() curname = f'segm_by_exp_{col_filter}.png' fig.savefig(os.path.join(args.outpath, 'img', curname)) plt.close(fig) chart_by_exp_html.append(f'<img src="img/{curname}" />') chart_by_exp_html = '\n'.join(chart_by_exp_html) chart_by_st_html = [] if not args.nost: chart_by_st_html.append('<h1>Segmentation metrics by scene type</h1>') for (mname, mtitle) in SEGM_METRICS.items(): chart_by_st_html.append(f'<h2>{mtitle}</h2>') for (prefix, ptitle) in SEGM_PREFIXES.items(): col_filter = f'{prefix}{mname}' ncols = sum((1 for c in metrics_by_st.columns if c.startswith(col_filter))) if ncols == 0: continue cur_values = metrics_by_st[[f'{col_filter}_{i}' for i in range(ncols)]].groupby(level=0, axis=0).mean() cur_values.columns = list(range(ncols)) cur_values = cur_values.rolling(window=ROLLING_WINDOW, axis=1, center=True).mean() (fig, ax) = plt.subplots() fig.set_size_inches(IMG_SIZE) cur_values.transpose().plot(ax=ax, style=STYLES[:cur_values.shape[0]], linewidth=LINEWIDTH) ax.set_title(f'{mtitle} {ptitle}') ax.legend(bbox_to_anchor=(1.01, 0.5), loc='center left') fig.tight_layout() curname = f'segm_by_st_{col_filter}.png' fig.savefig(os.path.join(args.outpath, 'img', curname)) plt.close(fig) chart_by_st_html.append(f'<img src="img/{curname}" />') chart_by_st_html = '\n'.join(chart_by_st_html) fid_columns = [c for c in metrics_by_exp.columns if c.startswith('fid_')] fid_int_names = {c: int(c[4:]) for c in fid_columns} fid_data = full_exp_metrics.loc[full_exp_metrics['filename'] == 'global_metrics'] fid_by_exp = fid_data.set_index('experiment')[fid_columns].rename(columns=fid_int_names).sort_index(axis=1).sort_index(axis=0).groupby(level=0, axis=0).mean().interpolate(axis=1).rolling(window=ROLLING_WINDOW, axis=1, center=True).mean() (fig, ax) = plt.subplots() fig.set_size_inches(IMG_SIZE) fid_by_exp.rename(columns=EXPERIMENT_NAMES, inplace=True) fid_by_exp.transpose().plot(ax=ax, style=STYLES[:fid_by_exp.shape[0]], linewidth=LINEWIDTH) ax.set_title('FID with time by experiment') ax.legend(bbox_to_anchor=(1.01, 0.5), loc='center left') fig.tight_layout() curname = 'fid_by_exp.png' fig.savefig(os.path.join(args.outpath, 'img', curname)) plt.close(fig) fid_by_exp_html = f'<img src="img/{curname}" />' fid_by_st_html = '' if not args.nost: fid_by_st = 
fid_data.set_index('img_type')[fid_columns].rename(columns=fid_int_names).sort_index(axis=1).sort_index(axis=0).groupby(level=0, axis=0).mean().interpolate(axis=1).rolling(window=ROLLING_WINDOW, axis=1, center=True).mean() (fig, ax) = plt.subplots() fig.set_size_inches(IMG_SIZE) fid_by_st.transpose().plot(ax=ax, style=STYLES[:fid_by_st.shape[0]], linewidth=LINEWIDTH) ax.set_title('FID with time by scene type') ax.legend(bbox_to_anchor=(1.01, 0.5), loc='center left') fig.tight_layout() curname = 'fid_by_st.png' fig.savefig(os.path.join(args.outpath, 'img', curname)) plt.close(fig) fid_by_st_html = f'<h1>FID by scene type 🡓</h1><img src="img/{curname}" />' html = f'\n <html>\n <style>\n * {{\n text-align: center;\n }}\n table {{\n margin-left: auto;\n margin-right:auto;\n border: 0.2px solid grey;\n border-spacing: 0px;\n }}\n td {{\n padding: 5px 10px;\n }}\n </style>\n <body>\n <h1>Reconstruction quality by experiment</h1>\n {pointwise_exp_mean_html}\n <br />\n <img src="img/lpips_chart.png" />\n <br />\n {pointwise_st_mean_html}\n <hr />\n\n <h1>Animation amount by experiment</h1>\n {flow_exp_mean_html}\n <br />\n {flow_st_mean_html}\n <hr />\n\n <h1>Segmentation metrics by experiment</h1>\n {chart_by_exp_html}\n <hr />\n\n {chart_by_st_html}\n \n <h1>FID by experiment 🡓</h1>\n {fid_by_exp_html}\n <hr />\n \n {fid_by_st_html}\n </body>\n </html>\n ' with open(os.path.join(args.outpath, 'index.html'), 'w') as f: f.write(html)
def main(args): os.makedirs(os.path.join(args.outpath, 'img'), exist_ok=True) exp_paths = list(glob.glob(args.in_glob)) exp_data = [read_exp_metrics(p) for p in exp_paths] exp_ids = [get_exp_id(p, namelen=args.namelen) for p in exp_paths] full_exp_metrics = pd.concat(exp_data, keys=exp_ids).applymap(convert_float) full_exp_metrics.dropna(axis=1, how='all', inplace=True) full_exp_metrics.dropna(axis=0, how='all', inplace=True) full_exp_metrics.index.set_names(('experiment', 'filename'), inplace=True) full_exp_metrics.reset_index(inplace=True) full_exp_metrics['img_type'] = [re.match(args.scenetype, f).group('scene') for f in full_exp_metrics['filename']] real_videos_index = full_exp_metrics['experiment'] == args.realname real_flow_l2 = full_exp_metrics.loc[real_videos_index, 'flow_l2'].mean(axis=0, skipna=True) full_exp_metrics['flow_l2_rel'] = full_exp_metrics['flow_l2'] / real_flow_l2 full_exp_metrics.to_csv(os.path.join(args.outpath, 'joint_metrics.tsv'), sep='\t') metrics_by_exp = full_exp_metrics.set_index('experiment') metrics_by_st = full_exp_metrics.set_index('img_type') pointwise_exp_metrics = metrics_by_exp[POINTWISE_METRICS] pointwise_exp_mean = pointwise_exp_metrics.groupby(level=0, axis=0).describe() pointwise_exp_mean = pointwise_exp_mean[[c for c in pointwise_exp_mean.columns if c[1] in ('mean', 'std')]] def _do_format(row): mean = row.xs('mean', level=1).iloc[0] pointwise_exp_mean = mean def _transform_group(gr): pointwise_exp_mean = gr.apply(_do_format, axis=1) result = pointwise_exp_mean.groupby(level=0, axis=1).apply(_transform_group) result.columns = pd.MultiIndex.from_tuples([c.split('_', 1) for c in result.columns]) result.sort_index(axis=1, inplace=True) pointwise_exp_mean = result pointwise_exp_mean_html = pointwise_exp_mean[['lpips', 'ssim']].to_html(float_format=lambda x: f'{x:.4f}') flow_exp_mean_html = pointwise_exp_mean['flow'].rename(index=EXPERIMENT_NAMES).to_html() lpips_data = pointwise_exp_mean['lpips'][list(CHART_STAGE2NAME.keys())] lpips_data.rename(columns=CHART_STAGE2NAME, inplace=True) lpips_data.rename(index=EXPERIMENT_NAMES, inplace=True) lpips_data.dropna(axis=0, inplace=True) ssim_data = pointwise_exp_mean['ssim'][list(CHART_STAGE2NAME.keys())] ssim_data.rename(columns=CHART_STAGE2NAME, inplace=True) ssim_data.rename(index=EXPERIMENT_NAMES, inplace=True) ssim_data.dropna(axis=0, inplace=True) (fig, (ax1, ax2)) = plt.subplots(1, 2) fig.set_size_inches((7, 3.3)) lpips_data.transpose().plot(ax=ax1, style=STYLES[:lpips_data.shape[0]], linewidth=LINEWIDTH) ssim_data.transpose().plot(ax=ax2, style=STYLES[:ssim_data.shape[0]], linewidth=LINEWIDTH) ax1.set_ylabel('LPIPS (less is better)') ax1.get_legend().remove() ax2.set_ylabel('SSIM (more is better)') ax2.legend(bbox_to_anchor=(1.01, 0.5), loc='center left') fig.tight_layout() fig.savefig(os.path.join(args.outpath, 'img', 'lpips_chart.png')) fig.savefig(os.path.join(args.outpath, 'img', 'lpips_chart.pdf')) plt.close(fig) pointwise_st_mean_html = '' flow_st_mean_html = '' if not args.nost: pointwise_st_metrics = metrics_by_st[POINTWISE_METRICS] pointwise_st_mean = pointwise_st_metrics.groupby(level=0, axis=0).describe() pointwise_st_mean = pointwise_st_mean[[c for c in pointwise_st_mean.columns if c[1] in ('mean', 'std')]] def _do_format(row): mean = row.xs('mean', level=1).iloc[0] pointwise_st_mean = mean def _transform_group(gr): pointwise_st_mean = gr.apply(_do_format, axis=1) result = pointwise_st_mean.groupby(level=0, axis=1).apply(_transform_group) result.columns = 
pd.MultiIndex.from_tuples([c.split('_', 1) for c in result.columns]) result.sort_index(axis=1, inplace=True) pointwise_st_mean = result pointwise_st_mean_html = pointwise_st_mean[['lpips', 'ssim']].to_html() flow_st_mean_html = pointwise_st_mean['flow'].to_html(float_format=lambda x: f'{x:.4f}') os.makedirs(os.path.join(args.outpath, 'img'), exist_ok=True) chart_by_exp_html = [] for (mname, mtitle) in SEGM_METRICS.items(): chart_by_exp_html.append(f'<h2>{mtitle}</h2>') for (prefix, ptitle) in SEGM_PREFIXES.items(): col_filter = f'{prefix}{mname}' ncols = sum((1 for c in metrics_by_exp.columns if c.startswith(col_filter))) if ncols == 0: continue cur_values = metrics_by_exp[[f'{col_filter}_{i}' for i in range(ncols)]].groupby(level=0, axis=0).mean() cur_values.columns = list(range(ncols)) cur_values = cur_values.rolling(window=ROLLING_WINDOW, axis=1, center=True).mean() (fig, ax) = plt.subplots() fig.set_size_inches(IMG_SIZE) cur_values.transpose().plot(ax=ax, style=STYLES[:cur_values.shape[0]], linewidth=LINEWIDTH) ax.set_title(f'{mtitle} {ptitle}') ax.legend(bbox_to_anchor=(1.01, 0.5), loc='center left') fig.tight_layout() curname = f'segm_by_exp_{col_filter}.png' fig.savefig(os.path.join(args.outpath, 'img', curname)) plt.close(fig) chart_by_exp_html.append(f'<img src="img/{curname}" />') chart_by_exp_html = '\n'.join(chart_by_exp_html) chart_by_st_html = [] if not args.nost: chart_by_st_html.append('<h1>Segmentation metrics by scene type</h1>') for (mname, mtitle) in SEGM_METRICS.items(): chart_by_st_html.append(f'<h2>{mtitle}</h2>') for (prefix, ptitle) in SEGM_PREFIXES.items(): col_filter = f'{prefix}{mname}' ncols = sum((1 for c in metrics_by_st.columns if c.startswith(col_filter))) if ncols == 0: continue cur_values = metrics_by_st[[f'{col_filter}_{i}' for i in range(ncols)]].groupby(level=0, axis=0).mean() cur_values.columns = list(range(ncols)) cur_values = cur_values.rolling(window=ROLLING_WINDOW, axis=1, center=True).mean() (fig, ax) = plt.subplots() fig.set_size_inches(IMG_SIZE) cur_values.transpose().plot(ax=ax, style=STYLES[:cur_values.shape[0]], linewidth=LINEWIDTH) ax.set_title(f'{mtitle} {ptitle}') ax.legend(bbox_to_anchor=(1.01, 0.5), loc='center left') fig.tight_layout() curname = f'segm_by_st_{col_filter}.png' fig.savefig(os.path.join(args.outpath, 'img', curname)) plt.close(fig) chart_by_st_html.append(f'<img src="img/{curname}" />') chart_by_st_html = '\n'.join(chart_by_st_html) fid_columns = [c for c in metrics_by_exp.columns if c.startswith('fid_')] fid_int_names = {c: int(c[4:]) for c in fid_columns} fid_data = full_exp_metrics.loc[full_exp_metrics['filename'] == 'global_metrics'] fid_by_exp = fid_data.set_index('experiment')[fid_columns].rename(columns=fid_int_names).sort_index(axis=1).sort_index(axis=0).groupby(level=0, axis=0).mean().interpolate(axis=1).rolling(window=ROLLING_WINDOW, axis=1, center=True).mean() (fig, ax) = plt.subplots() fig.set_size_inches(IMG_SIZE) fid_by_exp.rename(columns=EXPERIMENT_NAMES, inplace=True) fid_by_exp.transpose().plot(ax=ax, style=STYLES[:fid_by_exp.shape[0]], linewidth=LINEWIDTH) ax.set_title('FID with time by experiment') ax.legend(bbox_to_anchor=(1.01, 0.5), loc='center left') fig.tight_layout() curname = 'fid_by_exp.png' fig.savefig(os.path.join(args.outpath, 'img', curname)) plt.close(fig) fid_by_exp_html = f'<img src="img/{curname}" />' fid_by_st_html = '' if not args.nost: fid_by_st = fid_data.set_index('img_type')[fid_columns].rename(columns=fid_int_names).sort_index(axis=1).sort_index(axis=0).groupby(level=0, 
axis=0).mean().interpolate(axis=1).rolling(window=ROLLING_WINDOW, axis=1, center=True).mean() (fig, ax) = plt.subplots() fig.set_size_inches(IMG_SIZE) fid_by_st.transpose().plot(ax=ax, style=STYLES[:fid_by_st.shape[0]], linewidth=LINEWIDTH) ax.set_title('FID with time by scene type') ax.legend(bbox_to_anchor=(1.01, 0.5), loc='center left') fig.tight_layout() curname = 'fid_by_st.png' fig.savefig(os.path.join(args.outpath, 'img', curname)) plt.close(fig) fid_by_st_html = f'<h1>FID by scene type 🡓</h1><img src="img/{curname}" />' html = f'\n <html>\n <style>\n * {{\n text-align: center;\n }}\n table {{\n margin-left: auto;\n margin-right:auto;\n border: 0.2px solid grey;\n border-spacing: 0px;\n }}\n td {{\n padding: 5px 10px;\n }}\n </style>\n <body>\n <h1>Reconstruction quality by experiment</h1>\n {pointwise_exp_mean_html}\n <br />\n <img src="img/lpips_chart.png" />\n <br />\n {pointwise_st_mean_html}\n <hr />\n\n <h1>Animation amount by experiment</h1>\n {flow_exp_mean_html}\n <br />\n {flow_st_mean_html}\n <hr />\n\n <h1>Segmentation metrics by experiment</h1>\n {chart_by_exp_html}\n <hr />\n\n {chart_by_st_html}\n \n <h1>FID by experiment 🡓</h1>\n {fid_by_exp_html}\n <hr />\n \n {fid_by_st_html}\n </body>\n </html>\n ' with open(os.path.join(args.outpath, 'index.html'), 'w') as f: f.write(html)
deep-landscape
positive
def test_subnet_connectivity(region, stackid, logical_resource_id, physical_resource_id, endpoints=[['www.amazon.com', '80']]): logger.debug({'test_subnet_connectivity': 'starting'}) error_msg = [] if region not in clients.get_available_regions('lambda'): msg = 'Test for %s %s skipped, %s not supprted by lambda' % (stackid, logical_resource_id, region) logger.warning(msg) return {'success': True, 'logical_resource_id': logical_resource_id, 'physical_resource_id': physical_resource_id, 'warning': 'Test skipped, region %s not supprted by lambda' % region, 'region': region, 'stackid': stackid} try: function_name = 'test_subnet_%s_%s' % (physical_resource_id, random_string(8)) iam_name = function_name.replace('_', '-') lambda_client = clients.get('lambda', region=region) ec2_client = clients.get('ec2', region=region) <DeepExtract> global iam_role_arn recreate = False iam_client = clients.get('iam') if not iam_role_arn: recreate = True iam_name = 'test_subnet_%s' % random_string(8) else: try: iam_name = iam_role_arn.split('/')[-1] iam_client.get_role(RoleName=iam_name) except Exception as e: logger.debug({'get_iam_role:get_role': str(e)}) recreate = True if recreate: iam_role_arn = iam_client.create_role(RoleName=iam_name, AssumeRolePolicyDocument=assume_role_policy)['Role']['Arn'] logger.debug({'get_iam_role:iam_role': iam_role_arn}) iam_client.put_role_policy(RoleName=iam_name, PolicyName=iam_name, PolicyDocument=iam_policy) iam_role_arn = iam_role_arn </DeepExtract> response = ec2_client.describe_subnets(SubnetIds=[physical_resource_id]) logger.debug({'test_subnet_connectivity:describe_subnets': response}) vpc_id = response['Subnets'][0]['VpcId'] logger.debug({'test_subnet_connectivity:vpc_id': vpc_id}) security_group_id = ec2_client.create_security_group(GroupName=iam_name, Description=iam_name, VpcId=vpc_id)['GroupId'] logger.debug({'test_subnet_connectivity:security_group_id': security_group_id}) now = datetime.now() zi_timestamp = (now.year, now.month, now.day, now.hour, now.minute, now.second) zinfo = ZipInfo('lambda_function.py', zi_timestamp) zinfo.external_attr = 1860 << 16 f = StringIO() z = ZipFile(f, 'w', ZIP_DEFLATED) z.writestr(zinfo, function_code) z.close() zip_bytes = f.getvalue() logger.debug({'test_subnet_connectivity:create_function_input': {'FunctionName': function_name, 'Role': iam_role_arn, 'Code': {'ZipFile': zip_bytes}, 'VpcConfig': {'SubnetIds': [physical_resource_id], 'SecurityGroupIds': [security_group_id]}}}) lambda_function = False retries = 0 max_retries = 4 while not lambda_function: try: lambda_function = lambda_client.create_function(FunctionName=function_name, Runtime='python2.7', Role=iam_role_arn, Handler='lambda_function.lambda_handler', Code={'ZipFile': zip_bytes}, Timeout=120, MemorySize=128, VpcConfig={'SubnetIds': [physical_resource_id], 'SecurityGroupIds': [security_group_id]}) except botocore.exceptions.ClientError as e: codes = ['InvalidParameterValueException', 'AccessDeniedException'] logger.debug('boto exception: ', exc_info=1) logger.debug(e.response) if "The provided subnets contain availability zone Lambda doesn't support." 
in e.response['Error']['Message']:
                    raise
                if e.response['Error']['Code'] in codes and retries < max_retries:
                    logger.debug({'test_subnet_connectivity:create_function': str(e)}, exc_info=1)
                    msg = 'role not propagated yet, sleeping a bit and then retrying'
                    logger.debug({'test_subnet_connectivity:create_function_retry': msg})
                    retries += 1
                    sleep(10 * retries ** 2)
                else:
                    raise
        for endpoint in endpoints:
            f = StringIO()
            f.write(json.dumps({'address': endpoint[0], 'port': endpoint[1]}))
            payload = f.getvalue()
            f.close()
            response = lambda_client.invoke(FunctionName=function_name, InvocationType='RequestResponse', Payload=payload)
            response['Payload'] = response['Payload'].read()
            try:
                response['Payload'] = json.loads(response['Payload'])
            except Exception:
                pass
            logger.debug({'test_subnet_connectivity:response': response})
            if response['StatusCode'] != 200 or 'FunctionError' in response.keys():
                results = {'success': False, 'logical_resource_id': logical_resource_id, 'physical_resource_id': physical_resource_id, 'region': region, 'stackid': stackid}
                error_msg.append({'endpoint': endpoint, 'response': response['Payload']})
            elif response['StatusCode'] == 200 and len(error_msg) == 0:
                results = {'success': True, 'logical_resource_id': logical_resource_id, 'physical_resource_id': physical_resource_id, 'region': region, 'stackid': stackid}
    except Exception as e:
        logger.error({'test_subnet_connectivity': str(e)}, exc_info=1)
        if "subnets contain availability zone Lambda doesn't support" in str(e):
            results = {'success': True, 'logical_resource_id': logical_resource_id, 'physical_resource_id': physical_resource_id, 'region': region, 'stackid': stackid}
            logger.warning("test skipped as lambda is not supported in the subnet's az. %s" % str(results))
        else:
            results = {'success': False, 'logical_resource_id': logical_resource_id, 'physical_resource_id': physical_resource_id, 'region': region, 'stackid': stackid}
            error_msg.append({'exception': str(e)})
    finally:
        try:
            lambda_client.delete_function(FunctionName=function_name)
        except Exception:
            logger.warning('Failed to cleanup lambda function', exc_info=1)
        try:
            logger.debug({'test_subnet_connectivity:security_group_id': security_group_id})
            enis = ec2_client.describe_network_interfaces(Filters=[{'Name': 'group-id', 'Values': [security_group_id]}])
            for eni in enis['NetworkInterfaces']:
                if 'Attachment' in eni.keys():
                    logger.debug('Detaching ENI...')
                    ec2_client.detach_network_interface(AttachmentId=eni['Attachment']['AttachmentId'])
                    while 'Attachment' in ec2_client.describe_network_interfaces(NetworkInterfaceIds=[eni['NetworkInterfaceId']])['NetworkInterfaces'][0].keys():
                        logger.debug('eni still attached, waiting 5 seconds...')
                        sleep(5)
                logger.debug('Deleting ENI %s' % eni['NetworkInterfaceId'])
                ec2_client.delete_network_interface(NetworkInterfaceId=eni['NetworkInterfaceId'])
            sg = False
            retries = 0
            max_retries = 3
            while not sg:
                try:
                    sg = ec2_client.delete_security_group(GroupId=security_group_id)
                except botocore.exceptions.ClientError as e:
                    msg = 'has a dependent object'
                    dep_violation = e.response['Error']['Code'] == 'DependencyViolation'
                    logger.debug('boto exception: ', exc_info=1)
                    if dep_violation and msg in str(e) and (retries < max_retries):
                        msg = 'eni deletion not propagated yet, sleeping a bit and then retrying'
                        logger.debug({'test_subnet_connectivity:delete_sg_retry': security_group_id})
                        retries += 1
                        sleep(5 * retries ** 2)
                    else:
                        raise
            logger.debug({'test_subnet_connectivity:security_group_id_response': response})
        except Exception:
            logger.warning('Failed to cleanup security group', exc_info=1)
    if len(error_msg) > 0:
        results['error_msg'] = error_msg
    return results
def test_subnet_connectivity(region, stackid, logical_resource_id, physical_resource_id, endpoints=[['www.amazon.com', '80']]):
    logger.debug({'test_subnet_connectivity': 'starting'})
    error_msg = []
    if region not in clients.get_available_regions('lambda'):
        msg = 'Test for %s %s skipped, %s not supprted by lambda' % (stackid, logical_resource_id, region)
        logger.warning(msg)
        return {'success': True, 'logical_resource_id': logical_resource_id, 'physical_resource_id': physical_resource_id, 'warning': 'Test skipped, region %s not supprted by lambda' % region, 'region': region, 'stackid': stackid}
    try:
        function_name = 'test_subnet_%s_%s' % (physical_resource_id, random_string(8))
        iam_name = function_name.replace('_', '-')
        lambda_client = clients.get('lambda', region=region)
        ec2_client = clients.get('ec2', region=region)
        global iam_role_arn
        recreate = False
        iam_client = clients.get('iam')
        if not iam_role_arn:
            recreate = True
            iam_name = 'test_subnet_%s' % random_string(8)
        else:
            try:
                iam_name = iam_role_arn.split('/')[-1]
                iam_client.get_role(RoleName=iam_name)
            except Exception as e:
                logger.debug({'get_iam_role:get_role': str(e)})
                recreate = True
        if recreate:
            iam_role_arn = iam_client.create_role(RoleName=iam_name, AssumeRolePolicyDocument=assume_role_policy)['Role']['Arn']
            logger.debug({'get_iam_role:iam_role': iam_role_arn})
            iam_client.put_role_policy(RoleName=iam_name, PolicyName=iam_name, PolicyDocument=iam_policy)
        iam_role_arn = iam_role_arn
        response = ec2_client.describe_subnets(SubnetIds=[physical_resource_id])
        logger.debug({'test_subnet_connectivity:describe_subnets': response})
        vpc_id = response['Subnets'][0]['VpcId']
        logger.debug({'test_subnet_connectivity:vpc_id': vpc_id})
        security_group_id = ec2_client.create_security_group(GroupName=iam_name, Description=iam_name, VpcId=vpc_id)['GroupId']
        logger.debug({'test_subnet_connectivity:security_group_id': security_group_id})
        now = datetime.now()
        zi_timestamp = (now.year, now.month, now.day, now.hour, now.minute, now.second)
        zinfo = ZipInfo('lambda_function.py', zi_timestamp)
        zinfo.external_attr = 1860 << 16
        f = StringIO()
        z = ZipFile(f, 'w', ZIP_DEFLATED)
        z.writestr(zinfo, function_code)
        z.close()
        zip_bytes = f.getvalue()
        logger.debug({'test_subnet_connectivity:create_function_input': {'FunctionName': function_name, 'Role': iam_role_arn, 'Code': {'ZipFile': zip_bytes}, 'VpcConfig': {'SubnetIds': [physical_resource_id], 'SecurityGroupIds': [security_group_id]}}})
        lambda_function = False
        retries = 0
        max_retries = 4
        while not lambda_function:
            try:
                lambda_function = lambda_client.create_function(FunctionName=function_name, Runtime='python2.7', Role=iam_role_arn, Handler='lambda_function.lambda_handler', Code={'ZipFile': zip_bytes}, Timeout=120, MemorySize=128, VpcConfig={'SubnetIds': [physical_resource_id], 'SecurityGroupIds': [security_group_id]})
            except botocore.exceptions.ClientError as e:
                codes = ['InvalidParameterValueException', 'AccessDeniedException']
                logger.debug('boto exception: ', exc_info=1)
                logger.debug(e.response)
                if "The provided subnets contain availability zone Lambda doesn't support." in e.response['Error']['Message']:
                    raise
                if e.response['Error']['Code'] in codes and retries < max_retries:
                    logger.debug({'test_subnet_connectivity:create_function': str(e)}, exc_info=1)
                    msg = 'role not propagated yet, sleeping a bit and then retrying'
                    logger.debug({'test_subnet_connectivity:create_function_retry': msg})
                    retries += 1
                    sleep(10 * retries ** 2)
                else:
                    raise
        for endpoint in endpoints:
            f = StringIO()
            f.write(json.dumps({'address': endpoint[0], 'port': endpoint[1]}))
            payload = f.getvalue()
            f.close()
            response = lambda_client.invoke(FunctionName=function_name, InvocationType='RequestResponse', Payload=payload)
            response['Payload'] = response['Payload'].read()
            try:
                response['Payload'] = json.loads(response['Payload'])
            except Exception:
                pass
            logger.debug({'test_subnet_connectivity:response': response})
            if response['StatusCode'] != 200 or 'FunctionError' in response.keys():
                results = {'success': False, 'logical_resource_id': logical_resource_id, 'physical_resource_id': physical_resource_id, 'region': region, 'stackid': stackid}
                error_msg.append({'endpoint': endpoint, 'response': response['Payload']})
            elif response['StatusCode'] == 200 and len(error_msg) == 0:
                results = {'success': True, 'logical_resource_id': logical_resource_id, 'physical_resource_id': physical_resource_id, 'region': region, 'stackid': stackid}
    except Exception as e:
        logger.error({'test_subnet_connectivity': str(e)}, exc_info=1)
        if "subnets contain availability zone Lambda doesn't support" in str(e):
            results = {'success': True, 'logical_resource_id': logical_resource_id, 'physical_resource_id': physical_resource_id, 'region': region, 'stackid': stackid}
            logger.warning("test skipped as lambda is not supported in the subnet's az. %s" % str(results))
        else:
            results = {'success': False, 'logical_resource_id': logical_resource_id, 'physical_resource_id': physical_resource_id, 'region': region, 'stackid': stackid}
            error_msg.append({'exception': str(e)})
    finally:
        try:
            lambda_client.delete_function(FunctionName=function_name)
        except Exception:
            logger.warning('Failed to cleanup lambda function', exc_info=1)
        try:
            logger.debug({'test_subnet_connectivity:security_group_id': security_group_id})
            enis = ec2_client.describe_network_interfaces(Filters=[{'Name': 'group-id', 'Values': [security_group_id]}])
            for eni in enis['NetworkInterfaces']:
                if 'Attachment' in eni.keys():
                    logger.debug('Detaching ENI...')
                    ec2_client.detach_network_interface(AttachmentId=eni['Attachment']['AttachmentId'])
                    while 'Attachment' in ec2_client.describe_network_interfaces(NetworkInterfaceIds=[eni['NetworkInterfaceId']])['NetworkInterfaces'][0].keys():
                        logger.debug('eni still attached, waiting 5 seconds...')
                        sleep(5)
                logger.debug('Deleting ENI %s' % eni['NetworkInterfaceId'])
                ec2_client.delete_network_interface(NetworkInterfaceId=eni['NetworkInterfaceId'])
            sg = False
            retries = 0
            max_retries = 3
            while not sg:
                try:
                    sg = ec2_client.delete_security_group(GroupId=security_group_id)
                except botocore.exceptions.ClientError as e:
                    msg = 'has a dependent object'
                    dep_violation = e.response['Error']['Code'] == 'DependencyViolation'
                    logger.debug('boto exception: ', exc_info=1)
                    if dep_violation and msg in str(e) and (retries < max_retries):
                        msg = 'eni deletion not propagated yet, sleeping a bit and then retrying'
                        logger.debug({'test_subnet_connectivity:delete_sg_retry': security_group_id})
                        retries += 1
                        sleep(5 * retries ** 2)
                    else:
                        raise
            logger.debug({'test_subnet_connectivity:security_group_id_response': response})
        except Exception:
            logger.warning('Failed to cleanup security group', exc_info=1)
    if len(error_msg) > 0:
        results['error_msg'] = error_msg
    return results
cloudformation-validation-pipeline
positive
def science(self, events=[{}]):
    if events == [{}]:
        events = [{}]
    if self.Science == '':
        <DeepExtract>
        try:
            response = imports.User(self.discord, self.s, self.log).info(with_analytics_token=True)
            if response.status_code == 401:
                raise
            self.userData = response.json()
        except:
            self.userData = {'analytics_token': None, 'id': '0'}
        self.Science = imports.Science(self.discord, self.s, self.log, self.userData['analytics_token'], self.userData['id'])
        </DeepExtract>
    return self.Science.science(events)
def science(self, events=[{}]):
    if events == [{}]:
        events = [{}]
    if self.Science == '':
        try:
            response = imports.User(self.discord, self.s, self.log).info(with_analytics_token=True)
            if response.status_code == 401:
                raise
            self.userData = response.json()
        except:
            self.userData = {'analytics_token': None, 'id': '0'}
        self.Science = imports.Science(self.discord, self.s, self.log, self.userData['analytics_token'], self.userData['id'])
    return self.Science.science(events)
Discord-S.C.U.M
positive
def sort_type(metadata, **kwargs):
    <DeepExtract>
    cancer = metadata['cases'][0]['project']['project_id']
    </DeepExtract>
    <DeepExtract>
    sample_type = metadata['cases'][0]['samples'][0]['sample_type']
    </DeepExtract>
    if 'Normal' in sample_type:
        return sample_type.replace(' ', '_')
    return cancer
def sort_type(metadata, **kwargs):
    cancer = metadata['cases'][0]['project']['project_id']
    sample_type = metadata['cases'][0]['samples'][0]['sample_type']
    if 'Normal' in sample_type:
        return sample_type.replace(' ', '_')
    return cancer
DeepPATH
positive
def _open(self, name, mode):
    name = str(name)
    if name not in self.index:
        gz_name = name + '.gz'
        if gz_name in self.index:
            <DeepExtract>
            gz_name = str(gz_name)
            if gz_name not in self.index:
                gz_name = gz_name + '.gz'
                if gz_name in self.index:
                    gz_in = self._open(gz_name, 'rb')
                    gz_in = gzip.open(gz_in, 'rb')
                raise IOError('File does not exist: %s' % gz_name)
            file_info = self.index[gz_name]
            gz_in = CaptarFile(self.parent.open(self.tar_path, 'rb'), int(file_info['offset']), int(file_info['size']))
            </DeepExtract>
            return gzip.open(gz_in, mode)
        raise IOError('File does not exist: %s' % name)
    file_info = self.index[name]
    return CaptarFile(self.parent.open(self.tar_path, mode), int(file_info['offset']), int(file_info['size']))
def _open(self, name, mode):
    name = str(name)
    if name not in self.index:
        gz_name = name + '.gz'
        if gz_name in self.index:
            gz_name = str(gz_name)
            if gz_name not in self.index:
                gz_name = gz_name + '.gz'
                if gz_name in self.index:
                    gz_in = self._open(gz_name, 'rb')
                    gz_in = gzip.open(gz_in, 'rb')
                raise IOError('File does not exist: %s' % gz_name)
            file_info = self.index[gz_name]
            gz_in = CaptarFile(self.parent.open(self.tar_path, 'rb'), int(file_info['offset']), int(file_info['size']))
            return gzip.open(gz_in, mode)
        raise IOError('File does not exist: %s' % name)
    file_info = self.index[name]
    return CaptarFile(self.parent.open(self.tar_path, mode), int(file_info['offset']), int(file_info['size']))
capstone
positive
def convert_nmap_output_to_encoding(value, code='ascii'):
    """
    Change encoding for scan_result object from unicode to whatever

    :param value: scan_result as dictionnary
    :param code: default = "ascii", encoding destination

    :returns: scan_result as dictionnary with new encoding
    """
    new_value = {}
    for k in value:
        if type(value[k]) in [dict, PortScannerHostDict]:
            <DeepExtract>
            new_value = {}
            for k in value[k]:
                if type(value[k][k]) in [dict, PortScannerHostDict]:
                    new_value[k] = convert_nmap_output_to_encoding(value[k][k], code)
                elif type(value[k][k]) is list:
                    new_value[k] = [convert_nmap_output_to_encoding(x, code) for x in value[k][k]]
                else:
                    new_value[k] = value[k][k].encode(code)
            new_value[k] = new_value
            </DeepExtract>
        elif type(value[k]) is list:
            new_value[k] = [convert_nmap_output_to_encoding(x, code) for x in value[k]]
        else:
            new_value[k] = value[k].encode(code)
    return new_value
def convert_nmap_output_to_encoding(value, code='ascii'):
    """
    Change encoding for scan_result object from unicode to whatever

    :param value: scan_result as dictionnary
    :param code: default = "ascii", encoding destination

    :returns: scan_result as dictionnary with new encoding
    """
    new_value = {}
    for k in value:
        if type(value[k]) in [dict, PortScannerHostDict]:
            new_value = {}
            for k in value[k]:
                if type(value[k][k]) in [dict, PortScannerHostDict]:
                    new_value[k] = convert_nmap_output_to_encoding(value[k][k], code)
                elif type(value[k][k]) is list:
                    new_value[k] = [convert_nmap_output_to_encoding(x, code) for x in value[k][k]]
                else:
                    new_value[k] = value[k][k].encode(code)
            new_value[k] = new_value
        elif type(value[k]) is list:
            new_value[k] = [convert_nmap_output_to_encoding(x, code) for x in value[k]]
        else:
            new_value[k] = value[k].encode(code)
    return new_value
ARL
positive
def create_pvc_mask(tissues: list) -> str:
    """Create a pvc mask from tissue list.

    Parameters
    ----------
    tissues : list
        List of paths to tissue Nifti1Images. Must be non-empty.

    Returns
    -------
    out_mask : str
        Path to the resulting mask Nifti1Image.
    """
    from os import getcwd
    from os.path import join
    import nibabel as nib
    import numpy as np
    from clinica.pipelines.pet_volume.pet_volume_utils import _load_tissues
    <DeepExtract>
    import nibabel as nib
    import numpy as np
    from clinica.pipelines.pet_volume.pet_volume_utils import _check_non_empty_tissue_list
    _check_non_empty_tissue_list(tissues)
    img_0 = nib.load(tissues[0])
    shape = list(img_0.get_fdata(dtype='float32').shape)
    data = np.zeros(shape=shape)
    for image in tissues:
        data += nib.load(image).get_fdata(dtype='float32')
    (background, affine, header) = (data, img_0.affine, img_0.header)
    </DeepExtract>
    shape = background.shape
    shape += tuple([len(tissues) + 1])
    data = np.empty(shape=shape, dtype=np.float64)
    for (i, tissue) in enumerate(tissues):
        image = nib.load(tissue)
        data[..., i] = np.array(image.get_fdata(dtype='float32'))
    background = 1.0 - background
    data[..., len(tissues)] = np.array(background)
    out_mask = join(getcwd(), 'pvc_mask.nii')
    mask = nib.Nifti1Image(data, affine, header=header)
    nib.save(mask, out_mask)
    return out_mask
def create_pvc_mask(tissues: list) -> str:
    """Create a pvc mask from tissue list.

    Parameters
    ----------
    tissues : list
        List of paths to tissue Nifti1Images. Must be non-empty.

    Returns
    -------
    out_mask : str
        Path to the resulting mask Nifti1Image.
    """
    from os import getcwd
    from os.path import join
    import nibabel as nib
    import numpy as np
    from clinica.pipelines.pet_volume.pet_volume_utils import _load_tissues
    import nibabel as nib
    import numpy as np
    from clinica.pipelines.pet_volume.pet_volume_utils import _check_non_empty_tissue_list
    _check_non_empty_tissue_list(tissues)
    img_0 = nib.load(tissues[0])
    shape = list(img_0.get_fdata(dtype='float32').shape)
    data = np.zeros(shape=shape)
    for image in tissues:
        data += nib.load(image).get_fdata(dtype='float32')
    (background, affine, header) = (data, img_0.affine, img_0.header)
    shape = background.shape
    shape += tuple([len(tissues) + 1])
    data = np.empty(shape=shape, dtype=np.float64)
    for (i, tissue) in enumerate(tissues):
        image = nib.load(tissue)
        data[..., i] = np.array(image.get_fdata(dtype='float32'))
    background = 1.0 - background
    data[..., len(tissues)] = np.array(background)
    out_mask = join(getcwd(), 'pvc_mask.nii')
    mask = nib.Nifti1Image(data, affine, header=header)
    nib.save(mask, out_mask)
    return out_mask
clinica
positive
def fetch_issues(self, tag):
    <DeepExtract>
    '/repositories/%s/issues/' % tag = self.BASE_API2 + '/repositories/%s/issues/' % tag
    while '/repositories/%s/issues/' % tag is not None:
        response = self.get_data('/repositories/%s/issues/' % tag)
        yield from response['values']
        '/repositories/%s/issues/' % tag = response.get('next', None)
    </DeepExtract>
    return [(tag, issue) for issue in response]
def fetch_issues(self, tag):
    '/repositories/%s/issues/' % tag = self.BASE_API2 + '/repositories/%s/issues/' % tag
    while '/repositories/%s/issues/' % tag is not None:
        response = self.get_data('/repositories/%s/issues/' % tag)
        yield from response['values']
        '/repositories/%s/issues/' % tag = response.get('next', None)
    return [(tag, issue) for issue in response]
bugwarrior
positive
def get_selected_thread(self):
    """returns currently selected :class:`~alot.db.Thread`"""
    <DeepExtract>
    (threadlinewidget, _) = self.threadlist.get_focus()
    threadlinewidget = threadlinewidget
    </DeepExtract>
    thread = None
    if threadlinewidget:
        thread = threadlinewidget.get_thread()
    return thread
def get_selected_thread(self):
    """returns currently selected :class:`~alot.db.Thread`"""
    (threadlinewidget, _) = self.threadlist.get_focus()
    threadlinewidget = threadlinewidget
    thread = None
    if threadlinewidget:
        thread = threadlinewidget.get_thread()
    return thread
alot
positive
def add_level(self, previous_level):
    level = previous_level + 1
    self.mStructure[level] = {}
    for group in self.mStructure[previous_level]:
        lGroupLabel = group
        lTuple = self.mLabels2Tuples[lGroupLabel]
        for k in [previous_level]:
            if lTuple[k] != '':
                new_group = list(lTuple)
                new_group[k] = ''
                new_group = tuple(new_group)
                <DeepExtract>
                str1 = '_'.join(list(new_group))
                lNewGroupLabel = str1
                </DeepExtract>
                self.mLabels2Tuples[lNewGroupLabel] = new_group
                if lNewGroupLabel not in self.mStructure[level]:
                    self.mStructure[level][lNewGroupLabel] = set()
                self.mStructure[level][lNewGroupLabel].add(lGroupLabel)
def add_level(self, previous_level):
    level = previous_level + 1
    self.mStructure[level] = {}
    for group in self.mStructure[previous_level]:
        lGroupLabel = group
        lTuple = self.mLabels2Tuples[lGroupLabel]
        for k in [previous_level]:
            if lTuple[k] != '':
                new_group = list(lTuple)
                new_group[k] = ''
                new_group = tuple(new_group)
                str1 = '_'.join(list(new_group))
                lNewGroupLabel = str1
                self.mLabels2Tuples[lNewGroupLabel] = new_group
                if lNewGroupLabel not in self.mStructure[level]:
                    self.mStructure[level][lNewGroupLabel] = set()
                self.mStructure[level][lNewGroupLabel].add(lGroupLabel)
atspy
positive
def write_plain(text, split=True):
    if self.root_context:
        self.open_ended = True
    if not text:
        return
    if not self.whitespace:
        data = u' '
        self.column += len(data)
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
    self.whitespace = False
    self.indention = False
    spaces = False
    breaks = False
    start = end = 0
    while end <= len(text):
        ch = None
        if end < len(text):
            ch = text[end]
        if spaces:
            if ch != u' ':
                if start + 1 == end and self.column > self.best_width and split:
                    <DeepExtract>
                    indent = self.indent or 0
                    if not self.indention or self.column > indent or (self.column == indent and (not self.whitespace)):
                        self.write_line_break()
                    if self.column < indent:
                        self.whitespace = True
                        data = u' ' * (indent - self.column)
                        self.column = indent
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    </DeepExtract>
                    self.whitespace = False
                    self.indention = False
                else:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                start = end
        elif breaks:
            if ch not in u'\n\x85\u2028\u2029':
                if text[start] == u'\n':
                    <DeepExtract>
                    if data is None:
                        data = self.best_line_break
                    self.whitespace = True
                    self.indention = True
                    self.line += 1
                    self.column = 0
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    </DeepExtract>
                for br in text[start:end]:
                    if br == u'\n':
                        <DeepExtract>
                        if data is None:
                            data = self.best_line_break
                        self.whitespace = True
                        self.indention = True
                        self.line += 1
                        self.column = 0
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                        </DeepExtract>
                    else:
                        <DeepExtract>
                        if br is None:
                            br = self.best_line_break
                        self.whitespace = True
                        self.indention = True
                        self.line += 1
                        self.column = 0
                        if self.encoding:
                            br = br.encode(self.encoding)
                        self.stream.write(br)
                        </DeepExtract>
                <DeepExtract>
                indent = self.indent or 0
                if not self.indention or self.column > indent or (self.column == indent and (not self.whitespace)):
                    self.write_line_break()
                if self.column < indent:
                    self.whitespace = True
                    data = u' ' * (indent - self.column)
                    self.column = indent
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                </DeepExtract>
                self.whitespace = False
                self.indention = False
                start = end
        elif ch is None or ch in u' \n\x85\u2028\u2029':
            data = text[start:end]
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
            start = end
        if ch is not None:
            spaces = ch == u' '
            breaks = ch in u'\n\x85\u2028\u2029'
        end += 1
def write_plain(text, split=True):
    if self.root_context:
        self.open_ended = True
    if not text:
        return
    if not self.whitespace:
        data = u' '
        self.column += len(data)
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
    self.whitespace = False
    self.indention = False
    spaces = False
    breaks = False
    start = end = 0
    while end <= len(text):
        ch = None
        if end < len(text):
            ch = text[end]
        if spaces:
            if ch != u' ':
                if start + 1 == end and self.column > self.best_width and split:
                    indent = self.indent or 0
                    if not self.indention or self.column > indent or (self.column == indent and (not self.whitespace)):
                        self.write_line_break()
                    if self.column < indent:
                        self.whitespace = True
                        data = u' ' * (indent - self.column)
                        self.column = indent
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    self.whitespace = False
                    self.indention = False
                else:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                start = end
        elif breaks:
            if ch not in u'\n\x85\u2028\u2029':
                if text[start] == u'\n':
                    if data is None:
                        data = self.best_line_break
                    self.whitespace = True
                    self.indention = True
                    self.line += 1
                    self.column = 0
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                for br in text[start:end]:
                    if br == u'\n':
                        if data is None:
                            data = self.best_line_break
                        self.whitespace = True
                        self.indention = True
                        self.line += 1
                        self.column = 0
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    else:
                        if br is None:
                            br = self.best_line_break
                        self.whitespace = True
                        self.indention = True
                        self.line += 1
                        self.column = 0
                        if self.encoding:
                            br = br.encode(self.encoding)
                        self.stream.write(br)
                indent = self.indent or 0
                if not self.indention or self.column > indent or (self.column == indent and (not self.whitespace)):
                    self.write_line_break()
                if self.column < indent:
                    self.whitespace = True
                    data = u' ' * (indent - self.column)
                    self.column = indent
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                self.whitespace = False
                self.indention = False
                start = end
        elif ch is None or ch in u' \n\x85\u2028\u2029':
            data = text[start:end]
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
            start = end
        if ch is not None:
            spaces = ch == u' '
            breaks = ch in u'\n\x85\u2028\u2029'
        end += 1
aws-serverless-workshop-greater-china-region
positive
def list_scopes(self):
    url = f'{self.domain}/scopes'
    res = requests.get(url, timeout=60)
    <DeepExtract>
    allowed = [200] if allowed is None else allowed
    api_version = res.headers.get('x-binstar-api-version', '0.2.1')
    if pv(api_version) > pv(__version__):
        logger.warning('The api server is running the binstar-api version %s. you are using %s\nPlease update your client with pip install -U binstar or conda update binstar', api_version, __version__)
    if not self._token_warning_sent and 'Conda-Token-Warning' in res.headers:
        logger.warning('Token warning: %s', res.headers['Conda-Token-Warning'])
        self._token_warning_sent = True
    if 'X-Anaconda-Lockdown' in res.headers:
        logger.warning('Anaconda repository is currently in LOCKDOWN mode.')
    if 'X-Anaconda-Read-Only' in res.headers:
        logger.warning('Anaconda repository is currently in READ ONLY mode.')
    if res.status_code not in allowed:
        (short, long) = STATUS_CODES.get(res.status_code, ('?', 'Undefined error'))
        msg = '%s: %s ([%s] %s -> %s)' % (short, long, res.request.method, res.request.url, res.status_code)
        try:
            data = res.json()
        except Exception:
            data = {}
        msg = data.get('error', msg)
        ErrCls = errors.BinstarError
        if res.status_code == 401:
            ErrCls = errors.Unauthorized
        elif res.status_code == 404:
            ErrCls = errors.NotFound
        elif res.status_code == 409:
            ErrCls = errors.Conflict
        elif res.status_code >= 500:
            ErrCls = errors.ServerError
        raise ErrCls(msg, res.status_code)
    </DeepExtract>
    return res.json()
def list_scopes(self):
    url = f'{self.domain}/scopes'
    res = requests.get(url, timeout=60)
    allowed = [200] if allowed is None else allowed
    api_version = res.headers.get('x-binstar-api-version', '0.2.1')
    if pv(api_version) > pv(__version__):
        logger.warning('The api server is running the binstar-api version %s. you are using %s\nPlease update your client with pip install -U binstar or conda update binstar', api_version, __version__)
    if not self._token_warning_sent and 'Conda-Token-Warning' in res.headers:
        logger.warning('Token warning: %s', res.headers['Conda-Token-Warning'])
        self._token_warning_sent = True
    if 'X-Anaconda-Lockdown' in res.headers:
        logger.warning('Anaconda repository is currently in LOCKDOWN mode.')
    if 'X-Anaconda-Read-Only' in res.headers:
        logger.warning('Anaconda repository is currently in READ ONLY mode.')
    if res.status_code not in allowed:
        (short, long) = STATUS_CODES.get(res.status_code, ('?', 'Undefined error'))
        msg = '%s: %s ([%s] %s -> %s)' % (short, long, res.request.method, res.request.url, res.status_code)
        try:
            data = res.json()
        except Exception:
            data = {}
        msg = data.get('error', msg)
        ErrCls = errors.BinstarError
        if res.status_code == 401:
            ErrCls = errors.Unauthorized
        elif res.status_code == 404:
            ErrCls = errors.NotFound
        elif res.status_code == 409:
            ErrCls = errors.Conflict
        elif res.status_code >= 500:
            ErrCls = errors.ServerError
        raise ErrCls(msg, res.status_code)
    return res.json()
anaconda-client
positive
def run(input_dataset_dir, dataset_dir):
    """Runs the download and conversion operation.

    Args:
      input_dataset_dir: The dataset directory to use as input
      dataset_dir: The dataset directory where the dataset is stored.
    """
    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)
    if _dataset_exists(dataset_dir):
        print('Dataset files already exist. Exiting without re-creating them.')
        return
    <DeepExtract>
    directories = []
    class_names = []
    for filename in os.listdir(input_dataset_dir):
        path = os.path.join(input_dataset_dir, filename)
        if os.path.isdir(path):
            directories.append(path)
            class_names.append(filename)
    photo_filenames = []
    for directory in directories:
        for filename in os.listdir(directory):
            path = os.path.join(directory, filename)
            photo_filenames.append(path)
    (photo_filenames, class_names) = (photo_filenames, sorted(class_names))
    </DeepExtract>
    class_names_to_ids = dict(zip(class_names, range(len(class_names))))
    random.seed(_RANDOM_SEED)
    random.shuffle(photo_filenames)
    training_filenames = photo_filenames[_NUM_VALIDATION:-_NUM_TEST]
    validation_filenames = photo_filenames[:_NUM_VALIDATION]
    test_filenames = photo_filenames[-_NUM_TEST:]
    <DeepExtract>
    assert 'test' in ['train', 'validation', 'test']
    num_per_shard = int(math.ceil(len(test_filenames) / float(_NUM_SHARDS)))
    with tf.Graph().as_default():
        image_reader = ImageReader()
        with tf.Session('') as sess:
            for shard_id in range(_NUM_SHARDS):
                output_filename = _get_dataset_filename(dataset_dir, 'test', shard_id)
                with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
                    start_ndx = shard_id * num_per_shard
                    end_ndx = min((shard_id + 1) * num_per_shard, len(test_filenames))
                    for i in range(start_ndx, end_ndx):
                        sys.stdout.write('\r>> Converting image %d/%d shard %d' % (i + 1, len(test_filenames), shard_id))
                        sys.stdout.flush()
                        image_data = tf.gfile.FastGFile(test_filenames[i], 'rb').read()
                        (height, width) = image_reader.read_image_dims(sess, image_data)
                        class_name = os.path.basename(os.path.dirname(test_filenames[i]))
                        class_id = class_names_to_ids[class_name]
                        example = dataset_utils.image_to_tfexample(image_data, b'jpg', height, width, class_id)
                        tfrecord_writer.write(example.SerializeToString())
    sys.stdout.write('\n')
    sys.stdout.flush()
    </DeepExtract>
    labels_to_class_names = dict(zip(range(len(class_names)), class_names))
    dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
    print('\nFinished converting the Flowers dataset!')
def run(input_dataset_dir, dataset_dir):
    """Runs the download and conversion operation.

    Args:
      input_dataset_dir: The dataset directory to use as input
      dataset_dir: The dataset directory where the dataset is stored.
    """
    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)
    if _dataset_exists(dataset_dir):
        print('Dataset files already exist. Exiting without re-creating them.')
        return
    directories = []
    class_names = []
    for filename in os.listdir(input_dataset_dir):
        path = os.path.join(input_dataset_dir, filename)
        if os.path.isdir(path):
            directories.append(path)
            class_names.append(filename)
    photo_filenames = []
    for directory in directories:
        for filename in os.listdir(directory):
            path = os.path.join(directory, filename)
            photo_filenames.append(path)
    (photo_filenames, class_names) = (photo_filenames, sorted(class_names))
    class_names_to_ids = dict(zip(class_names, range(len(class_names))))
    random.seed(_RANDOM_SEED)
    random.shuffle(photo_filenames)
    training_filenames = photo_filenames[_NUM_VALIDATION:-_NUM_TEST]
    validation_filenames = photo_filenames[:_NUM_VALIDATION]
    test_filenames = photo_filenames[-_NUM_TEST:]
    assert 'test' in ['train', 'validation', 'test']
    num_per_shard = int(math.ceil(len(test_filenames) / float(_NUM_SHARDS)))
    with tf.Graph().as_default():
        image_reader = ImageReader()
        with tf.Session('') as sess:
            for shard_id in range(_NUM_SHARDS):
                output_filename = _get_dataset_filename(dataset_dir, 'test', shard_id)
                with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
                    start_ndx = shard_id * num_per_shard
                    end_ndx = min((shard_id + 1) * num_per_shard, len(test_filenames))
                    for i in range(start_ndx, end_ndx):
                        sys.stdout.write('\r>> Converting image %d/%d shard %d' % (i + 1, len(test_filenames), shard_id))
                        sys.stdout.flush()
                        image_data = tf.gfile.FastGFile(test_filenames[i], 'rb').read()
                        (height, width) = image_reader.read_image_dims(sess, image_data)
                        class_name = os.path.basename(os.path.dirname(test_filenames[i]))
                        class_id = class_names_to_ids[class_name]
                        example = dataset_utils.image_to_tfexample(image_data, b'jpg', height, width, class_id)
                        tfrecord_writer.write(example.SerializeToString())
    sys.stdout.write('\n')
    sys.stdout.flush()
    labels_to_class_names = dict(zip(range(len(class_names)), class_names))
    dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
    print('\nFinished converting the Flowers dataset!')
Creative-Adversarial-Networks
positive
def IsFocused(self, item):
    """Return True if the item has the focus"""
    <DeepExtract>
    index = item
    if isinstance(item, str):
        index = (self.Texts().index(item) - 1) // self.ColumnCount()
    item = index
    </DeepExtract>
    return win32defines.LVIS_FOCUSED == self.SendMessage(win32defines.LVM_GETITEMSTATE, item, win32defines.LVIS_FOCUSED)
def IsFocused(self, item):
    """Return True if the item has the focus"""
    index = item
    if isinstance(item, str):
        index = (self.Texts().index(item) - 1) // self.ColumnCount()
    item = index
    return win32defines.LVIS_FOCUSED == self.SendMessage(win32defines.LVM_GETITEMSTATE, item, win32defines.LVIS_FOCUSED)
BrowserRefresh-Sublime
positive