Columns:
before: string, lengths 0-955k
after: string, lengths 0-877k
repo: string, lengths 1-74
type: string, 1 class
def git_version(): def _minimal_ext_cmd(cmd): env = {} for k in ['SYSTEMROOT', 'PATH', 'HOME']: v = os.environ.get(k) if v is not None: env[k] = v env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) return out try: <DeepExtract> env = {} for k in ['SYSTEMROOT', 'PATH', 'HOME']: v = os.environ.get(k) if v is not None: env[k] = v env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.check_output(['git', 'rev-parse', 'HEAD'], stderr=subprocess.STDOUT, env=env) out = out </DeepExtract> GIT_REVISION = out.strip().decode('ascii') except (subprocess.SubprocessError, OSError): GIT_REVISION = 'Unknown' if not GIT_REVISION: GIT_REVISION = 'Unknown' return GIT_REVISION
def git_version(): def _minimal_ext_cmd(cmd): env = {} for k in ['SYSTEMROOT', 'PATH', 'HOME']: v = os.environ.get(k) if v is not None: env[k] = v env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) return out try: env = {} for k in ['SYSTEMROOT', 'PATH', 'HOME']: v = os.environ.get(k) if v is not None: env[k] = v env['LANGUAGE'] = 'C' env['LANG'] = 'C' env['LC_ALL'] = 'C' out = subprocess.check_output(['git', 'rev-parse', 'HEAD'], stderr=subprocess.STDOUT, env=env) out = out GIT_REVISION = out.strip().decode('ascii') except (subprocess.SubprocessError, OSError): GIT_REVISION = 'Unknown' if not GIT_REVISION: GIT_REVISION = 'Unknown' return GIT_REVISION
bayesfast
positive
def test_tribool_true(self): <DeepExtract> value = gdb.parse_and_eval('val_true') pretty_printer = gdb.default_visualizer(value) self.assertIsNotNone(pretty_printer, 'Pretty printer was not registered') string = pretty_printer.to_string() if string is not None: string = text_type(string) if hasattr(pretty_printer, 'children'): children = list(pretty_printer.children()) for (child_text, _) in children: self.assertIsInstance(child_text, string_types) else: children = None if hasattr(pretty_printer, 'display_hint'): self.assertIsInstance(pretty_printer.display_hint(), string_types) display_hint = text_type(pretty_printer.display_hint()) else: display_hint = None (string, children, display_hint) = (string, children, display_hint) </DeepExtract> self.assertTrue(string.endswith('true')) self.assertIsNone(children) self.assertIsNone(display_hint)
def test_tribool_true(self): value = gdb.parse_and_eval('val_true') pretty_printer = gdb.default_visualizer(value) self.assertIsNotNone(pretty_printer, 'Pretty printer was not registered') string = pretty_printer.to_string() if string is not None: string = text_type(string) if hasattr(pretty_printer, 'children'): children = list(pretty_printer.children()) for (child_text, _) in children: self.assertIsInstance(child_text, string_types) else: children = None if hasattr(pretty_printer, 'display_hint'): self.assertIsInstance(pretty_printer.display_hint(), string_types) display_hint = text_type(pretty_printer.display_hint()) else: display_hint = None (string, children, display_hint) = (string, children, display_hint) self.assertTrue(string.endswith('true')) self.assertIsNone(children) self.assertIsNone(display_hint)
Boost-Pretty-Printer
positive
def __mod_to_sym(self, inst_type, args): sym = None if inst_type in self.sym_map: if self.memoize_encoding: <DeepExtract> value = 0 vec_par = [] actual = [] for x in args(self.sym_map[inst_type][1]): if x is not None: if type(x) != int: value += 1 vec_par.append(('a%s' % value, x.symbol_type().width)) actual.append(x) else: vec_par.append(x) def join(l1, l2): ret = [] for i in range(len(l1)): ret.append((l1[i], l2[i])) sym = ret def set_remap(in_set, remap): ret = set([]) source = False for v in in_set: if v in remap: ret.add(remap[v]) else: if source is False: base_source = '.'.join(list(in_set)[0].symbol_name().split('.')[:-1]) base_dest = '.'.join(remap[list(in_set)[0]].symbol_name().split('.')[:-1]) source = True ret.add(Symbol(v.symbol_name().replace(base_source, base_dest), v.symbol_type())) sym = ret enc_val = (inst_type, str(vec_par)) if enc_val in self.enc_map: (enc, formal) = self.enc_map[enc_val] remap = dict(join(formal, actual)) self.subwalker.set_substitute_map(remap) ts = TS() ts.vars = set_remap(enc.vars, remap) ts.set_behavior(self.subwalker.walk(enc.init), self.subwalker.walk(enc.trans), self.subwalker.walk(enc.invar)) sym = ts ret = self.mod_map[inst_type][0](*args(self.sym_map[inst_type][1])) self.enc_map[enc_val] = (ret, actual) sym = ret </DeepExtract> else: sym = self.sym_map[inst_type][0](*args(self.sym_map[inst_type][1])) return sym
def __mod_to_sym(self, inst_type, args): sym = None if inst_type in self.sym_map: if self.memoize_encoding: value = 0 vec_par = [] actual = [] for x in args(self.sym_map[inst_type][1]): if x is not None: if type(x) != int: value += 1 vec_par.append(('a%s' % value, x.symbol_type().width)) actual.append(x) else: vec_par.append(x) def join(l1, l2): ret = [] for i in range(len(l1)): ret.append((l1[i], l2[i])) sym = ret def set_remap(in_set, remap): ret = set([]) source = False for v in in_set: if v in remap: ret.add(remap[v]) else: if source is False: base_source = '.'.join(list(in_set)[0].symbol_name().split('.')[:-1]) base_dest = '.'.join(remap[list(in_set)[0]].symbol_name().split('.')[:-1]) source = True ret.add(Symbol(v.symbol_name().replace(base_source, base_dest), v.symbol_type())) sym = ret enc_val = (inst_type, str(vec_par)) if enc_val in self.enc_map: (enc, formal) = self.enc_map[enc_val] remap = dict(join(formal, actual)) self.subwalker.set_substitute_map(remap) ts = TS() ts.vars = set_remap(enc.vars, remap) ts.set_behavior(self.subwalker.walk(enc.init), self.subwalker.walk(enc.trans), self.subwalker.walk(enc.invar)) sym = ts ret = self.mod_map[inst_type][0](*args(self.sym_map[inst_type][1])) self.enc_map[enc_val] = (ret, actual) sym = ret else: sym = self.sym_map[inst_type][0](*args(self.sym_map[inst_type][1])) return sym
CoSA
positive
def decrypt(self, **kwargs): super().decrypt(**kwargs) <DeepExtract> self.get_key() self.get_iv(iv_from_file=iv_from_file) cipher = AES.new(self.KEY, mode, self.IV) </DeepExtract> data = cipher.decrypt(self.input_data[67:]) <DeepExtract> if not data.startswith(GZIP_MAGIC): raise WhatsAppCryptError('Decryption failed (not gzip).') </DeepExtract> data = gzip.decompress(self.unpad(data)) <DeepExtract> if not data.startswith(SQLITE_MAGIC): raise WhatsAppCryptError('Decryption failed (not sqlite).') </DeepExtract> <DeepExtract> if self.dst.is_file(): raise WhatsAppCryptError(f'File {self.dst} already exists!') self.dst.write_bytes(data) </DeepExtract> return self.dst
def decrypt(self, **kwargs): super().decrypt(**kwargs) self.get_key() self.get_iv(iv_from_file=iv_from_file) cipher = AES.new(self.KEY, mode, self.IV) data = cipher.decrypt(self.input_data[67:]) if not data.startswith(GZIP_MAGIC): raise WhatsAppCryptError('Decryption failed (not gzip).') data = gzip.decompress(self.unpad(data)) if not data.startswith(SQLITE_MAGIC): raise WhatsAppCryptError('Decryption failed (not sqlite).') if self.dst.is_file(): raise WhatsAppCryptError(f'File {self.dst} already exists!') self.dst.write_bytes(data) return self.dst
andriller
positive
def test_clean_up_error(self): <DeepExtract> cmd_mgr = commandmanager.CommandManager('cliff.tests') command = mock.MagicMock(spec=c_cmd.Command) command_inst = mock.MagicMock(spec=c_cmd.Command) command_inst.run.return_value = 0 command.return_value = command_inst cmd_mgr.add_command('mock', command) err_command = mock.Mock(name='err_command', spec=c_cmd.Command) err_command_inst = mock.Mock(spec=c_cmd.Command) err_command_inst.run = mock.Mock(side_effect=RuntimeError('test exception')) err_command.return_value = err_command_inst cmd_mgr.add_command('error', err_command) interrupt_command = mock.Mock(name='interrupt_command', spec=c_cmd.Command) interrupt_command_inst = mock.Mock(spec=c_cmd.Command) interrupt_command_inst.run = mock.Mock(side_effect=KeyboardInterrupt) interrupt_command.return_value = interrupt_command_inst cmd_mgr.add_command('interrupt', interrupt_command) pipeclose_command = mock.Mock(name='pipeclose_command', spec=c_cmd.Command) pipeclose_command_inst = mock.Mock(spec=c_cmd.Command) pipeclose_command_inst.run = mock.Mock(side_effect=BrokenPipeError) pipeclose_command.return_value = pipeclose_command_inst cmd_mgr.add_command('pipe-close', pipeclose_command) app = application.App('testing interactive mode', '1', cmd_mgr, stderr=mock.Mock(), **kwargs) (app, command) = (app, command) </DeepExtract> app.clean_up = mock.MagicMock(name='clean_up') ret = app.run(['error']) self.assertNotEqual(ret, 0) app.clean_up.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY) call_args = app.clean_up.call_args_list[0] self.assertEqual(mock.call(mock.ANY, 1, mock.ANY), call_args) (args, kwargs) = call_args self.assertIsInstance(args[2], RuntimeError) self.assertEqual(('test exception',), args[2].args)
def test_clean_up_error(self): cmd_mgr = commandmanager.CommandManager('cliff.tests') command = mock.MagicMock(spec=c_cmd.Command) command_inst = mock.MagicMock(spec=c_cmd.Command) command_inst.run.return_value = 0 command.return_value = command_inst cmd_mgr.add_command('mock', command) err_command = mock.Mock(name='err_command', spec=c_cmd.Command) err_command_inst = mock.Mock(spec=c_cmd.Command) err_command_inst.run = mock.Mock(side_effect=RuntimeError('test exception')) err_command.return_value = err_command_inst cmd_mgr.add_command('error', err_command) interrupt_command = mock.Mock(name='interrupt_command', spec=c_cmd.Command) interrupt_command_inst = mock.Mock(spec=c_cmd.Command) interrupt_command_inst.run = mock.Mock(side_effect=KeyboardInterrupt) interrupt_command.return_value = interrupt_command_inst cmd_mgr.add_command('interrupt', interrupt_command) pipeclose_command = mock.Mock(name='pipeclose_command', spec=c_cmd.Command) pipeclose_command_inst = mock.Mock(spec=c_cmd.Command) pipeclose_command_inst.run = mock.Mock(side_effect=BrokenPipeError) pipeclose_command.return_value = pipeclose_command_inst cmd_mgr.add_command('pipe-close', pipeclose_command) app = application.App('testing interactive mode', '1', cmd_mgr, stderr=mock.Mock(), **kwargs) (app, command) = (app, command) app.clean_up = mock.MagicMock(name='clean_up') ret = app.run(['error']) self.assertNotEqual(ret, 0) app.clean_up.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY) call_args = app.clean_up.call_args_list[0] self.assertEqual(mock.call(mock.ANY, 1, mock.ANY), call_args) (args, kwargs) = call_args self.assertIsInstance(args[2], RuntimeError) self.assertEqual(('test exception',), args[2].args)
cliff
positive
def update(self, force: bool=False, recursive: bool=None, knowledge_base: dict=None) -> bool: """ Updates test case based on currently generated results. :param force: Whether to force adding the test case even if errors are encountered :param recursive: Whether to recursive parse unidentified files using YARA matching. (Set to None to use whatever is set in the test case.) :param knowledge_base: Initial knowledge_base to provide to the report. :returns: Whether update was successful. """ results_path = self.results_path with open(results_path, 'r') as fo: old_results = json.load(fo) parser_name = old_results['parser'] <DeepExtract> malware_repo = mwcp.config.get('MALWARE_REPO') if not malware_repo: raise ValueError(f"MALWARE_REPO field not set in '{mwcp.config.user_path}'. Try running `mwcp config` to set this.") if file_path: with open(file_path, 'rb') as fo: self.md5 = hashlib.md5(fo.read()).hexdigest() if not self.md5: raise ValueError(f'Missing file_path or md5 parameter.') if len(self.md5) < 4: raise ValueError(f"Unable to determine md5 from '{self.md5}'. Must be at least 4 characters.") if len(self.md5) < 32: sub_dir = pathlib.Path(malware_repo, self.md5[:4]) if not sub_dir.exists(): raise ValueError(f"Failed to find sample starting with the md5 '{self.md5}'.") file_paths = [] for file_path in sub_dir.iterdir(): if file_path.name.startswith(self.md5): file_paths.append(file_path) if not file_paths: raise ValueError(f"Failed to find sample starting with the md5 '{self.md5}'.") if len(file_paths) > 1: md5s = '\t\n'.join((file_path.name for file_path in file_paths)) raise ValueError(f"Found multiple samples starting with the md5 '{self.md5}': \n\t{md5s}") file_path = file_paths[0] file_path = pathlib.Path(malware_repo, self.md5[:4], self.md5) </DeepExtract> if not file_path.exists(): logger.warning(f'Unable to update {self.name}. Missing {file_path}') return False if recursive is None: recursive = old_results.get('recursive', False) if recursive and (not config.get('YARA_REPO', None)): logger.warning(f"Unable to update {self.name}-{self.md5}. Test case is recursive, but YARA_REPO hasn't been setup. Please run `mwcp config` and set a path to 'YARA_REPO'. Alternatively, rerun the update with the `--no-recursive` flag to turn off recursion.") return False report = mwcp.run(parser_name, data=file_path.read_bytes(), log_level=logging.INFO, recursive=recursive, knowledge_base=knowledge_base) if report.errors and (not force): logger.warning(f'Results for {self.name} has the following errors, not updating:') logger.warning('\n'.join(report.errors)) return False new_results = report.as_json_dict() new_results['mwcp_version'] = old_results['mwcp_version'] if new_results == old_results: return True logger.info(f'Updating results for {file_path} in {results_path}') results_path.write_text(report.as_json()) return True
def update(self, force: bool=False, recursive: bool=None, knowledge_base: dict=None) -> bool: """ Updates test case based on currently generated results. :param force: Whether to force adding the test case even if errors are encountered :param recursive: Whether to recursive parse unidentified files using YARA matching. (Set to None to use whatever is set in the test case.) :param knowledge_base: Initial knowledge_base to provide to the report. :returns: Whether update was successful. """ results_path = self.results_path with open(results_path, 'r') as fo: old_results = json.load(fo) parser_name = old_results['parser'] malware_repo = mwcp.config.get('MALWARE_REPO') if not malware_repo: raise ValueError(f"MALWARE_REPO field not set in '{mwcp.config.user_path}'. Try running `mwcp config` to set this.") if file_path: with open(file_path, 'rb') as fo: self.md5 = hashlib.md5(fo.read()).hexdigest() if not self.md5: raise ValueError(f'Missing file_path or md5 parameter.') if len(self.md5) < 4: raise ValueError(f"Unable to determine md5 from '{self.md5}'. Must be at least 4 characters.") if len(self.md5) < 32: sub_dir = pathlib.Path(malware_repo, self.md5[:4]) if not sub_dir.exists(): raise ValueError(f"Failed to find sample starting with the md5 '{self.md5}'.") file_paths = [] for file_path in sub_dir.iterdir(): if file_path.name.startswith(self.md5): file_paths.append(file_path) if not file_paths: raise ValueError(f"Failed to find sample starting with the md5 '{self.md5}'.") if len(file_paths) > 1: md5s = '\t\n'.join((file_path.name for file_path in file_paths)) raise ValueError(f"Found multiple samples starting with the md5 '{self.md5}': \n\t{md5s}") file_path = file_paths[0] file_path = pathlib.Path(malware_repo, self.md5[:4], self.md5) if not file_path.exists(): logger.warning(f'Unable to update {self.name}. Missing {file_path}') return False if recursive is None: recursive = old_results.get('recursive', False) if recursive and (not config.get('YARA_REPO', None)): logger.warning(f"Unable to update {self.name}-{self.md5}. Test case is recursive, but YARA_REPO hasn't been setup. Please run `mwcp config` and set a path to 'YARA_REPO'. Alternatively, rerun the update with the `--no-recursive` flag to turn off recursion.") return False report = mwcp.run(parser_name, data=file_path.read_bytes(), log_level=logging.INFO, recursive=recursive, knowledge_base=knowledge_base) if report.errors and (not force): logger.warning(f'Results for {self.name} has the following errors, not updating:') logger.warning('\n'.join(report.errors)) return False new_results = report.as_json_dict() new_results['mwcp_version'] = old_results['mwcp_version'] if new_results == old_results: return True logger.info(f'Updating results for {file_path} in {results_path}') results_path.write_text(report.as_json()) return True
DC3-MWCP
positive
def check_permission(): """ Check for Admin permissions """ if IS_ADMIN: <DeepExtract> trm = 'INFO ' if term_support_color(): trm = '{}INFO {} '.format(COLOR_GREEN, COLOR_DEFAULT) print(trm + __indent_text_block('Running as Root/Admin')) </DeepExtract> else: <DeepExtract> trm = 'WARNING ' if term_support_color(): trm = '{}WARNING{} '.format(COLOR_YELLOW, COLOR_DEFAULT) print(trm + __indent_text_block('Running without root/admin privileges')) </DeepExtract>
def check_permission(): """ Check for Admin permissions """ if IS_ADMIN: trm = 'INFO ' if term_support_color(): trm = '{}INFO {} '.format(COLOR_GREEN, COLOR_DEFAULT) print(trm + __indent_text_block('Running as Root/Admin')) else: trm = 'WARNING ' if term_support_color(): trm = '{}WARNING{} '.format(COLOR_YELLOW, COLOR_DEFAULT) print(trm + __indent_text_block('Running without root/admin privileges'))
DeepFakeTutorial
positive
def nvmlUnitGetUnitInfo(unit): c_info = c_nvmlUnitInfo_t() <DeepExtract> global nvmlLib if 'nvmlUnitGetUnitInfo' in _nvmlGetFunctionPointer_cache: fn = _nvmlGetFunctionPointer_cache['nvmlUnitGetUnitInfo'] libLoadLock.acquire() try: if nvmlLib == None: raise NVMLError(NVML_ERROR_UNINITIALIZED) try: _nvmlGetFunctionPointer_cache['nvmlUnitGetUnitInfo'] = getattr(nvmlLib, 'nvmlUnitGetUnitInfo') fn = _nvmlGetFunctionPointer_cache['nvmlUnitGetUnitInfo'] except AttributeError: raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND) finally: libLoadLock.release() </DeepExtract> ret = fn(unit, byref(c_info)) <DeepExtract> if ret != NVML_SUCCESS: raise NVMLError(ret) return ret </DeepExtract> return c_info
def nvmlUnitGetUnitInfo(unit): c_info = c_nvmlUnitInfo_t() global nvmlLib if 'nvmlUnitGetUnitInfo' in _nvmlGetFunctionPointer_cache: fn = _nvmlGetFunctionPointer_cache['nvmlUnitGetUnitInfo'] libLoadLock.acquire() try: if nvmlLib == None: raise NVMLError(NVML_ERROR_UNINITIALIZED) try: _nvmlGetFunctionPointer_cache['nvmlUnitGetUnitInfo'] = getattr(nvmlLib, 'nvmlUnitGetUnitInfo') fn = _nvmlGetFunctionPointer_cache['nvmlUnitGetUnitInfo'] except AttributeError: raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND) finally: libLoadLock.release() ret = fn(unit, byref(c_info)) if ret != NVML_SUCCESS: raise NVMLError(ret) return ret return c_info
DeepFaceLab_Linux
positive
def train_eval_seq(self, valid_data, test_data, recommender, epoch_id=0): """Compute performance of the sequential models with validation and test datasets for each epoch during training. Args: valid_data (pandas.DataFrame): validation dataset. test_data (pandas.DataFrame): test dataset. recommender (Object): Sequential recommender. epoch_id (int): id of the epoch. k (int): size of the recommendation list Returns: None """ METRICS = {'ndcg': ndcg, 'precision': precision, 'recall': recall, 'mrr': mrr} TOPN = self.config['system']['valid_k'] GIVEN_K = self.config['model']['GIVEN_K'] LOOK_AHEAD = self.config['model']['LOOK_AHEAD'] STEP = self.config['model']['STEP'] scroll = self.config['model']['scroll'] <DeepExtract> test_sequences = valid_data.loc[valid_data['col_sequence'].map(len) > abs(GIVEN_K), 'col_sequence'].values valid_sequences = test_sequences </DeepExtract> print('{} sequences available for evaluation'.format(len(valid_sequences))) <DeepExtract> if GIVEN_K == 0: raise ValueError('given_k must be != 0') metrics = np.zeros(len(METRICS.values())) with tqdm(total=len(valid_sequences)) as pbar: for (i, test_seq) in enumerate(valid_sequences): if users is not None: user = users[i] else: user = None if scroll: metrics += self.sequence_sequential_evaluation(recommender, test_seq, METRICS.values(), user, GIVEN_K, LOOK_AHEAD, TOPN, STEP) else: metrics += self.evaluate_sequence(recommender, test_seq, METRICS.values(), user, GIVEN_K, LOOK_AHEAD, TOPN) pbar.update(1) valid_results = metrics / len(valid_sequences) </DeepExtract> print('Sequential evaluation (GIVEN_K={}, LOOK_AHEAD={}, STEP={})'.format(GIVEN_K, LOOK_AHEAD, STEP)) for (mname, mvalue) in zip(METRICS.keys(), valid_results): print('\t{}@{}: {:.4f}'.format(mname, TOPN, mvalue)) <DeepExtract> test_sequences = test_data.loc[test_data['col_sequence'].map(len) > abs(GIVEN_K), 'col_sequence'].values test_sequences = test_sequences </DeepExtract> print('{} sequences available for evaluation'.format(len(test_sequences))) <DeepExtract> if GIVEN_K == 0: raise ValueError('given_k must be != 0') metrics = np.zeros(len(METRICS.values())) with tqdm(total=len(test_sequences)) as pbar: for (i, test_seq) in enumerate(test_sequences): if users is not None: user = users[i] else: user = None if scroll: metrics += self.sequence_sequential_evaluation(recommender, test_seq, METRICS.values(), user, GIVEN_K, LOOK_AHEAD, TOPN, STEP) else: metrics += self.evaluate_sequence(recommender, test_seq, METRICS.values(), user, GIVEN_K, LOOK_AHEAD, TOPN) pbar.update(1) test_results = metrics / len(test_sequences) </DeepExtract> print('Sequential evaluation (GIVEN_K={}, LOOK_AHEAD={}, STEP={})'.format(GIVEN_K, LOOK_AHEAD, STEP)) for (mname, mvalue) in zip(METRICS.keys(), test_results): print('\t{}@{}: {:.4f}'.format(mname, TOPN, mvalue))
def train_eval_seq(self, valid_data, test_data, recommender, epoch_id=0): """Compute performance of the sequential models with validation and test datasets for each epoch during training. Args: valid_data (pandas.DataFrame): validation dataset. test_data (pandas.DataFrame): test dataset. recommender (Object): Sequential recommender. epoch_id (int): id of the epoch. k (int): size of the recommendation list Returns: None """ METRICS = {'ndcg': ndcg, 'precision': precision, 'recall': recall, 'mrr': mrr} TOPN = self.config['system']['valid_k'] GIVEN_K = self.config['model']['GIVEN_K'] LOOK_AHEAD = self.config['model']['LOOK_AHEAD'] STEP = self.config['model']['STEP'] scroll = self.config['model']['scroll'] test_sequences = valid_data.loc[valid_data['col_sequence'].map(len) > abs(GIVEN_K), 'col_sequence'].values valid_sequences = test_sequences print('{} sequences available for evaluation'.format(len(valid_sequences))) if GIVEN_K == 0: raise ValueError('given_k must be != 0') metrics = np.zeros(len(METRICS.values())) with tqdm(total=len(valid_sequences)) as pbar: for (i, test_seq) in enumerate(valid_sequences): if users is not None: user = users[i] else: user = None if scroll: metrics += self.sequence_sequential_evaluation(recommender, test_seq, METRICS.values(), user, GIVEN_K, LOOK_AHEAD, TOPN, STEP) else: metrics += self.evaluate_sequence(recommender, test_seq, METRICS.values(), user, GIVEN_K, LOOK_AHEAD, TOPN) pbar.update(1) valid_results = metrics / len(valid_sequences) print('Sequential evaluation (GIVEN_K={}, LOOK_AHEAD={}, STEP={})'.format(GIVEN_K, LOOK_AHEAD, STEP)) for (mname, mvalue) in zip(METRICS.keys(), valid_results): print('\t{}@{}: {:.4f}'.format(mname, TOPN, mvalue)) test_sequences = test_data.loc[test_data['col_sequence'].map(len) > abs(GIVEN_K), 'col_sequence'].values test_sequences = test_sequences print('{} sequences available for evaluation'.format(len(test_sequences))) if GIVEN_K == 0: raise ValueError('given_k must be != 0') metrics = np.zeros(len(METRICS.values())) with tqdm(total=len(test_sequences)) as pbar: for (i, test_seq) in enumerate(test_sequences): if users is not None: user = users[i] else: user = None if scroll: metrics += self.sequence_sequential_evaluation(recommender, test_seq, METRICS.values(), user, GIVEN_K, LOOK_AHEAD, TOPN, STEP) else: metrics += self.evaluate_sequence(recommender, test_seq, METRICS.values(), user, GIVEN_K, LOOK_AHEAD, TOPN) pbar.update(1) test_results = metrics / len(test_sequences) print('Sequential evaluation (GIVEN_K={}, LOOK_AHEAD={}, STEP={})'.format(GIVEN_K, LOOK_AHEAD, STEP)) for (mname, mvalue) in zip(METRICS.keys(), test_results): print('\t{}@{}: {:.4f}'.format(mname, TOPN, mvalue))
beta-recsys
positive
def get_random_architecture_1d(self, args): """Create a random 1d architecture. Draw random samples from the hyperparameter sets defined at the top of the class. Returns model: the random architecture as a keras model params: dict of the chosen parameter set """ <DeepExtract> params = {} cwi = np.random.randint(len(self.conv_widths)) params['conv_width'] = self.conv_widths[cwi] chi = np.random.randint(len(self.conv_heights)) params['conv_height'] = self.conv_heights[chi] ci = np.random.randint(len(self.conv_layers_sets)) params['conv_layers'] = self.conv_layers_sets[ci] mi = np.random.randint(len(self.max_pool_sets_2d)) params['max_pools'] = self.max_pool_sets_2d[mi] mi = np.random.randint(len(self.max_pool_sets_1d)) params['max_pools_1d'] = self.max_pool_sets_1d[mi] fci = np.random.randint(len(self.fc_layer_sets)) params['fc'] = self.fc_layer_sets[fci] paddingi = np.random.randint(len(self.paddings)) params['padding'] = self.paddings[paddingi] annoi = np.random.randint(len(self.annotation_units)) params['anno_units'] = self.annotation_units[annoi] fc_dropouti = np.random.randint(len(self.fc_dropouts)) params['fc_dropout'] = self.fc_dropouts[fc_dropouti] conv_dropouti = np.random.randint(len(self.conv_dropouts)) params['conv_dropout'] = self.conv_dropouts[conv_dropouti] params['spatial_dropout'] = False bni = np.random.randint(len(self.batch_normalizations)) params['batch_normalization'] = self.batch_normalizations[bni] ki = np.random.randint(len(self.kernel_initializers)) params['kernel_initializer'] = self.kernel_initializers[ki] fci = np.random.randint(len(self.fc_initializers)) params['fc_initializer'] = self.fc_initializers[fci] params = params </DeepExtract> model = models.build_reference_annotation_1d_model_from_args(args, conv_width=params['conv_width'], conv_layers=params['conv_layers'], conv_dropout=params['conv_dropout'], spatial_dropout=params['spatial_dropout'], max_pools=params['max_pools_1d'], padding=params['padding'], annotation_units=params['anno_units'], fc_layers=params['fc'], fc_dropout=params['fc_dropout'], batch_normalization=params['batch_normalization'], kernel_initializer=params['kernel_initializer'], fc_initializer=params['fc_initializer']) return (model, params)
def get_random_architecture_1d(self, args): """Create a random 1d architecture. Draw random samples from the hyperparameter sets defined at the top of the class. Returns model: the random architecture as a keras model params: dict of the chosen parameter set """ params = {} cwi = np.random.randint(len(self.conv_widths)) params['conv_width'] = self.conv_widths[cwi] chi = np.random.randint(len(self.conv_heights)) params['conv_height'] = self.conv_heights[chi] ci = np.random.randint(len(self.conv_layers_sets)) params['conv_layers'] = self.conv_layers_sets[ci] mi = np.random.randint(len(self.max_pool_sets_2d)) params['max_pools'] = self.max_pool_sets_2d[mi] mi = np.random.randint(len(self.max_pool_sets_1d)) params['max_pools_1d'] = self.max_pool_sets_1d[mi] fci = np.random.randint(len(self.fc_layer_sets)) params['fc'] = self.fc_layer_sets[fci] paddingi = np.random.randint(len(self.paddings)) params['padding'] = self.paddings[paddingi] annoi = np.random.randint(len(self.annotation_units)) params['anno_units'] = self.annotation_units[annoi] fc_dropouti = np.random.randint(len(self.fc_dropouts)) params['fc_dropout'] = self.fc_dropouts[fc_dropouti] conv_dropouti = np.random.randint(len(self.conv_dropouts)) params['conv_dropout'] = self.conv_dropouts[conv_dropouti] params['spatial_dropout'] = False bni = np.random.randint(len(self.batch_normalizations)) params['batch_normalization'] = self.batch_normalizations[bni] ki = np.random.randint(len(self.kernel_initializers)) params['kernel_initializer'] = self.kernel_initializers[ki] fci = np.random.randint(len(self.fc_initializers)) params['fc_initializer'] = self.fc_initializers[fci] params = params model = models.build_reference_annotation_1d_model_from_args(args, conv_width=params['conv_width'], conv_layers=params['conv_layers'], conv_dropout=params['conv_dropout'], spatial_dropout=params['spatial_dropout'], max_pools=params['max_pools_1d'], padding=params['padding'], annotation_units=params['anno_units'], fc_layers=params['fc'], fc_dropout=params['fc_dropout'], batch_normalization=params['batch_normalization'], kernel_initializer=params['kernel_initializer'], fc_initializer=params['fc_initializer']) return (model, params)
dsde-deep-learning
positive
def ship_group(self, country, name: str, _type: Type[unittype.ShipType], position: mapping.Point, heading=0, group_size=1) -> unitgroup.ShipGroup: """Adds a ship group to the given country. Args: country(Country): which the ship group will belong too name: of the ship group _type: which kind of ship to add position(dcs.mapping.Point): where the new group will be placed heading: initial heading of the group, only used if no additional waypoints group_size: how many ships of _type Returns: ShipGroup: the new ship group object """ sg = unitgroup.ShipGroup(self.next_group_id(), name) for i in range(1, group_size + 1): <DeepExtract> v = Ship(self.terrain, self.next_unit_id(), name + ' Unit #{nr}'.format(nr=i), _type) </DeepExtract> v.position.x = position.x v.position.y = position.y + (i - 1) * 20 v.heading = heading sg.add_unit(v) wp = sg.add_waypoint(position, 20) wp.ETA_locked = True country.add_ship_group(sg) return sg
def ship_group(self, country, name: str, _type: Type[unittype.ShipType], position: mapping.Point, heading=0, group_size=1) -> unitgroup.ShipGroup: """Adds a ship group to the given country. Args: country(Country): which the ship group will belong too name: of the ship group _type: which kind of ship to add position(dcs.mapping.Point): where the new group will be placed heading: initial heading of the group, only used if no additional waypoints group_size: how many ships of _type Returns: ShipGroup: the new ship group object """ sg = unitgroup.ShipGroup(self.next_group_id(), name) for i in range(1, group_size + 1): v = Ship(self.terrain, self.next_unit_id(), name + ' Unit #{nr}'.format(nr=i), _type) v.position.x = position.x v.position.y = position.y + (i - 1) * 20 v.heading = heading sg.add_unit(v) wp = sg.add_waypoint(position, 20) wp.ETA_locked = True country.add_ship_group(sg) return sg
dcs
positive
def register_index(self, query_index): <DeepExtract> query = DocumentQuery(query_index, backend=self) </DeepExtract> params = query._build_params(include_indexes=True) for key in params.keys(): params[key] = 1 if params: collection = query_index.document._meta.collection try: self.get_collection(collection).ensure_index(params, background=True) except TypeError: self.get_collection(collection).ensure_index(params.items(), background=True)
def register_index(self, query_index): query = DocumentQuery(query_index, backend=self) params = query._build_params(include_indexes=True) for key in params.keys(): params[key] = 1 if params: collection = query_index.document._meta.collection try: self.get_collection(collection).ensure_index(params, background=True) except TypeError: self.get_collection(collection).ensure_index(params.items(), background=True)
django-dockit
positive
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, error): """Checks rules from the 'C++ style rules' section of cppguide.html. Most of these rules are hard to test (naming, comment style), but we do what we can. In particular we check for 2-space indents, line lengths, tab usage, spaces inside code, etc. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ raw_lines = clean_lines.lines_without_raw_strings line = raw_lines[linenum] if line.find('\t') != -1: error(filename, linenum, 'whitespace/tab', 1, 'Tab found; better to use spaces') scope_or_label_pattern = '\\s*\\w+\\s*:\\s*\\\\?$' classinfo = nesting_state.InnermostClass() initial_spaces = 0 cleansed_line = clean_lines.elided[linenum] while initial_spaces < len(line) and line[initial_spaces] == ' ': initial_spaces += 1 if line and line[-1].isspace(): error(filename, linenum, 'whitespace/end_of_line', 4, 'Line ends in whitespace. Consider deleting these extra spaces.') elif (initial_spaces == 1 or initial_spaces == 3) and (not Match(scope_or_label_pattern, cleansed_line)) and (not (clean_lines.raw_lines[linenum] != line and Match('^\\s*""', line))): error(filename, linenum, 'whitespace/indent', 3, 'Weird number of spaces at line-start. Are you using a 2-space indent?') is_header_guard = False if file_extension == 'h': <DeepExtract> filename = re.sub('_flymake\\.h$', '.h', filename) filename = re.sub('/\\.flymake/([^/]*)$', '/\\1', filename) filename = filename.replace('C++', 'cpp').replace('c++', 'cpp') fileinfo = FileInfo(filename) file_path_from_root = fileinfo.RepositoryName() if _root: file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root) cppvar = re.sub('[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_' </DeepExtract> if line.startswith('#ifndef %s' % cppvar) or line.startswith('#define %s' % cppvar) or line.startswith('#endif // %s' % cppvar): is_header_guard = True if not line.startswith('#include') and (not is_header_guard) and (not Match('^\\s*//.*http(s?)://\\S*$', line)) and (not Match('^// \\$Id:.*#[0-9]+ \\$$', line)): <DeepExtract> if isinstance(line, unicode): width = 0 for uc in unicodedata.normalize('NFC', line): if unicodedata.east_asian_width(uc) in ('W', 'F'): width += 2 elif not unicodedata.combining(uc): width += 1 line_width = width else: line_width = len(line) </DeepExtract> extended_length = int(_line_length * 1.25) if line_width > extended_length: error(filename, linenum, 'whitespace/line_length', 4, 'Lines should very rarely be longer than %i characters' % extended_length) elif line_width > _line_length: error(filename, linenum, 'whitespace/line_length', 2, 'Lines should be <= %i characters long' % _line_length) if cleansed_line.count(';') > 1 and cleansed_line.find('for') == -1 and (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and (not ((cleansed_line.find('case ') != -1 or cleansed_line.find('default:') != -1) and cleansed_line.find('break;') != -1)): error(filename, linenum, 'whitespace/newline', 0, 'More than one command on the same line') <DeepExtract> line = clean_lines.elided[linenum] if Match('\\s*{\\s*$', line): prevline = 
GetPreviousNonBlankLine(clean_lines, linenum)[0] if not Search('[,;:}{(]\\s*$', prevline) and (not Match('\\s*#', prevline)): error(filename, linenum, 'whitespace/braces', 4, '{ should almost always be at the end of the previous line') if Match('\\s*else\\b\\s*(?:if\\b|\\{|$)', line): prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if Match('\\s*}\\s*$', prevline): error(filename, linenum, 'whitespace/newline', 4, 'An else should appear on the same line as the preceding }') if Search('else if\\s*\\(', line): brace_on_left = bool(Search('}\\s*else if\\s*\\(', line)) pos = line.find('else if') pos = line.find('(', pos) if pos > 0: (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) brace_on_right = endline[endpos:].find('{') != -1 if brace_on_left != brace_on_right: error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') elif Search('}\\s*else[^{]*$', line) or Match('[^}]*else\\s*{', line): error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') if Search('\\belse [^\\s{]', line) and (not Search('\\belse if\\b', line)): error(filename, linenum, 'whitespace/newline', 4, 'Else clause should never be on same line as else (use 2 lines)') if Match('\\s*do [^\\s{]', line): error(filename, linenum, 'whitespace/newline', 4, 'do/while clauses should not be on a single line') if_else_match = Search('\\b(if\\s*\\(|else\\b)', line) if if_else_match and (not Match('\\s*#', line)): if_indent = GetIndentLevel(line) (endline, endlinenum, endpos) = (line, linenum, if_else_match.end()) if_match = Search('\\bif\\s*\\(', line) if if_match: pos = if_match.end() - 1 (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos) if not Match('\\s*{', endline[endpos:]) and (not (Match('\\s*$', endline[endpos:]) and endlinenum < len(clean_lines.elided) - 1 and Match('\\s*{', clean_lines.elided[endlinenum + 1]))): while endlinenum < len(clean_lines.elided) and ';' not in clean_lines.elided[endlinenum][endpos:]: endlinenum += 1 endpos = 0 if endlinenum < len(clean_lines.elided): endline = clean_lines.elided[endlinenum] endpos = endline.find(';') if not Match(';[\\s}]*(\\\\?)$', endline[endpos:]): if not Match('^[^{};]*\\[[^\\[\\]]*\\][^{}]*\\{[^{}]*\\}\\s*\\)*[;,]\\s*$', endline): error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces') elif endlinenum < len(clean_lines.elided) - 1: next_line = clean_lines.elided[endlinenum + 1] next_indent = GetIndentLevel(next_line) if if_match and Match('\\s*else\\b', next_line) and (next_indent != if_indent): error(filename, linenum, 'readability/braces', 4, 'Else clause should be indented at the same level as if. 
Ambiguous nested if/else chains require braces.') elif next_indent > if_indent: error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces') </DeepExtract> <DeepExtract> line = clean_lines.elided[linenum] match = Match('^(.*\\)\\s*)\\{', line) if match: closing_brace_pos = match.group(1).rfind(')') opening_parenthesis = ReverseCloseExpression(clean_lines, linenum, closing_brace_pos) if opening_parenthesis[2] > -1: line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] macro = Search('\\b([A-Z_]+)\\s*$', line_prefix) func = Match('^(.*\\])\\s*$', line_prefix) if macro and macro.group(1) not in ('TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', 'LOCKS_EXCLUDED', 'INTERFACE_DEF') or (func and (not Search('\\boperator\\s*\\[\\s*\\]', func.group(1)))) or Search('\\b(?:struct|union)\\s+alignas\\s*$', line_prefix) or Search('\\s+=\\s*$', line_prefix): match = None if match and opening_parenthesis[1] > 1 and Search('\\]\\s*$', clean_lines.elided[opening_parenthesis[1] - 1]): match = None else: match = Match('^(.*(?:else|\\)\\s*const)\\s*)\\{', line) if not match: prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if prevline and Search('[;{}]\\s*$', prevline): match = Match('^(\\s*)\\{', line) if match: (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, len(match.group(1))) if endpos > -1 and Match('^\\s*;', endline[endpos:]): error(filename, endlinenum, 'readability/braces', 4, "You don't need a ; after a }") </DeepExtract> <DeepExtract> line = clean_lines.elided[linenum] matched = Match('\\s*(for|while|if)\\s*\\(', line) if matched: (end_line, end_linenum, end_pos) = CloseExpression(clean_lines, linenum, line.find('(')) if end_pos >= 0 and Match(';', end_line[end_pos:]): if matched.group(1) == 'if': error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, 'Empty conditional bodies should use {}') else: error(filename, end_linenum, 'whitespace/empty_loop_body', 5, 'Empty loop bodies should use {} or continue') </DeepExtract> <DeepExtract> line = clean_lines.elided[linenum] matched = Match('\\s*(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)', line) if not matched: return if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): if nesting_state.stack[-1].access != 'private': error(filename, linenum, 'readability/constructors', 3, '%s must be in the private: section' % matched.group(1)) else: pass </DeepExtract> <DeepExtract> raw = clean_lines.lines_without_raw_strings line = raw[linenum] if IsBlankLine(line) and (not nesting_state.InNamespaceBody()) and (not nesting_state.InExternC()): elided = clean_lines.elided prev_line = elided[linenum - 1] prevbrace = prev_line.rfind('{') if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1: exception = False if Match(' {6}\\w', prev_line): search_position = linenum - 2 while search_position >= 0 and Match(' {6}\\w', elided[search_position]): search_position -= 1 exception = search_position >= 0 and elided[search_position][:5] == ' :' else: exception = Match(' {4}\\w[^\\(]*\\)\\s*(const\\s*)?(\\{\\s*$|:)', prev_line) or Match(' {4}:', prev_line) if not exception: error(filename, linenum, 'whitespace/blank_line', 2, 'Redundant blank line at the start of a code block should be deleted.') if linenum + 1 < clean_lines.NumLines(): next_line = raw[linenum + 1] if next_line and Match('\\s*}', next_line) and (next_line.find('} else ') == -1): error(filename, linenum, 
'whitespace/blank_line', 3, 'Redundant blank line at the end of a code block should be deleted.') matched = Match('\\s*(public|protected|private):', prev_line) if matched: error(filename, linenum, 'whitespace/blank_line', 3, 'Do not leave a blank line after "%s:"' % matched.group(1)) next_line_start = 0 if linenum + 1 < clean_lines.NumLines(): next_line = raw[linenum + 1] next_line_start = len(next_line) - len(next_line.lstrip()) CheckComment(line, filename, linenum, next_line_start, error) line = clean_lines.elided[linenum] if Search('\\w\\s+\\[', line) and (not Search('(?:delete|return)\\s+\\[', line)): error(filename, linenum, 'whitespace/braces', 5, 'Extra space before [') if Search('for *\\(.*[^:]:[^: ]', line) or Search('for *\\(.*[^: ]:[^:]', line): error(filename, linenum, 'whitespace/forcolon', 2, 'Missing space around colon in range-based for loop') </DeepExtract> <DeepExtract> line = clean_lines.elided[linenum] while True: match = Match('^(.*\\boperator\\b)(\\S+)(\\s*\\(.*)$', line) if match: line = match.group(1) + '_' * len(match.group(2)) + match.group(3) else: break if (Search('[\\w.]=', line) or Search('=[\\w.]', line)) and (not Search('\\b(if|while|for) ', line)) and (not Search('(>=|<=|==|!=|&=|\\^=|\\|=|\\+=|\\*=|\\/=|\\%=)', line)) and (not Search('operator=', line)): error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =') match = Search('[^<>=!\\s](==|!=|<=|>=|\\|\\|)[^<>=!\\s,;\\)]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around %s' % match.group(1)) elif not Match('#.*include', line): match = Match('^(.*[^\\s<])<[^\\s=<,]', line) if match: (_, _, end_pos) = CloseExpression(clean_lines, linenum, len(match.group(1))) if end_pos <= -1: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <') match = Match('^(.*[^-\\s>])>[^\\s=>,]', line) if match: (_, _, start_pos) = ReverseCloseExpression(clean_lines, linenum, len(match.group(1))) if start_pos <= -1: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >') match = Search('(operator|[^\\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\\s,=<])', line) if match and (not (match.group(1).isdigit() and match.group(2).isdigit())) and (not (match.group(1) == 'operator' and match.group(2) == ';')): error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <<') match = Search('>>[a-zA-Z_]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >>') match = Search('(!\\s|~\\s|[\\s]--[\\s;]|[\\s]\\+\\+[\\s;])', line) if match: error(filename, linenum, 'whitespace/operators', 4, 'Extra space for operator %s' % match.group(1)) </DeepExtract> <DeepExtract> line = clean_lines.elided[linenum] match = Search(' (if\\(|for\\(|while\\(|switch\\()', line) if match: error(filename, linenum, 'whitespace/parens', 5, 'Missing space before ( in %s' % match.group(1)) match = Search('\\b(if|for|while|switch)\\s*\\(([ ]*)(.).*[^ ]+([ ]*)\\)\\s*{\\s*$', line) if match: if len(match.group(2)) != len(match.group(4)): if not (match.group(3) == ';' and len(match.group(2)) == 1 + len(match.group(4)) or (not match.group(2) and Search('\\bfor\\s*\\(.*; \\)', line))): error(filename, linenum, 'whitespace/parens', 5, 'Mismatching spaces inside () in %s' % match.group(1)) if len(match.group(2)) not in [0, 1]: error(filename, linenum, 'whitespace/parens', 5, 'Should have zero or one spaces inside ( and ) in %s' % match.group(1)) </DeepExtract> <DeepExtract> raw = clean_lines.lines_without_raw_strings line = 
clean_lines.elided[linenum] if Search(',[^,\\s]', ReplaceAll('\\boperator\\s*,\\s*\\(', 'F(', line)) and Search(',[^,\\s]', raw[linenum]): error(filename, linenum, 'whitespace/comma', 3, 'Missing space after ,') if Search(';[^\\s};\\\\)/]', line): error(filename, linenum, 'whitespace/semicolon', 3, 'Missing space after ;') </DeepExtract> <DeepExtract> line = clean_lines.elided[linenum] match = Match('^(.*[^ ({>]){', line) if match: (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, len(match.group(1))) trailing_text = '' if endpos > -1: trailing_text = endline[endpos:] for offset in xrange(endlinenum + 1, min(endlinenum + 3, clean_lines.NumLines() - 1)): trailing_text += clean_lines.elided[offset] if not Match('^[\\s}]*[{.;,)<>\\]:]', trailing_text): error(filename, linenum, 'whitespace/braces', 5, 'Missing space before {') if Search('}else', line): error(filename, linenum, 'whitespace/braces', 5, 'Missing space before else') if Search(':\\s*;\\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Semicolon defining empty statement. Use {} instead.') elif Search('^\\s*;\\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Line contains only semicolon. If this should be an empty statement, use {} instead.') elif Search('\\s+;\\s*$', line) and (not Search('\\bfor\\b', line)): error(filename, linenum, 'whitespace/semicolon', 5, 'Extra space before last semicolon. If this should be an empty statement, use {} instead.') </DeepExtract> <DeepExtract> line = clean_lines.elided[linenum] fncall = line for pattern in ('\\bif\\s*\\((.*)\\)\\s*{', '\\bfor\\s*\\((.*)\\)\\s*{', '\\bwhile\\s*\\((.*)\\)\\s*[{;]', '\\bswitch\\s*\\((.*)\\)\\s*{'): match = Search(pattern, line) if match: fncall = match.group(1) break if not Search('\\b(if|for|while|switch|return|new|delete|catch|sizeof)\\b', fncall) and (not Search(' \\([^)]+\\)\\([^)]*(\\)|,$)', fncall)) and (not Search(' \\([^)]+\\)\\[[^\\]]+\\]', fncall)): if Search('\\w\\s*\\(\\s(?!\\s*\\\\$)', fncall): error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call') elif Search('\\(\\s+(?!(\\s*\\\\)|\\()', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Extra space after (') if Search('\\w\\s+\\(', fncall) and (not Search('#\\s*define|typedef|using\\s+\\w+\\s*=', fncall)) and (not Search('\\w\\s+\\((\\w+::)*\\*\\w+\\)\\(', fncall)) and (not Search('\\bcase\\s+\\(', fncall)): if Search('\\boperator_*\\b', line): error(filename, linenum, 'whitespace/parens', 0, 'Extra space before ( in function call') else: error(filename, linenum, 'whitespace/parens', 4, 'Extra space before ( in function call') if Search('[^)]\\s+\\)\\s*[^{\\s]', fncall): if Search('^\\s+\\)', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Closing ) should be moved to the previous line') else: error(filename, linenum, 'whitespace/parens', 2, 'Extra space before )') </DeepExtract> <DeepExtract> line = clean_lines.elided[linenum] match = Match('^(.*\\S)&&', line) if not match: match = Match('(.*)&&\\S', line) if not match or '(&&)' in line or Search('\\boperator\\s*$', match.group(1)): return typenames = GetTemplateArgs(clean_lines, linenum) and_pos = len(match.group(1)) if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos): if not IsRValueAllowed(clean_lines, linenum, typenames): error(filename, linenum, 'build/c++11', 3, 'RValue references are an unapproved C++ feature.') else: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around &&') </DeepExtract> 
<DeepExtract> lines = clean_lines.elided (check_macro, start_pos) = FindCheckMacro(lines[linenum]) if not check_macro: return (last_line, end_line, end_pos) = CloseExpression(clean_lines, linenum, start_pos) if end_pos < 0: return if not Match('\\s*;', last_line[end_pos:]): return if linenum == end_line: expression = lines[linenum][start_pos + 1:end_pos - 1] else: expression = lines[linenum][start_pos + 1:] for i in xrange(linenum + 1, end_line): expression += lines[i] expression += last_line[0:end_pos - 1] lhs = '' rhs = '' operator = None while expression: matched = Match('^\\s*(<<|<<=|>>|>>=|->\\*|->|&&|\\|\\||==|!=|>=|>|<=|<|\\()(.*)$', expression) if matched: token = matched.group(1) if token == '(': expression = matched.group(2) (end, _) = FindEndOfExpressionInLine(expression, 0, ['(']) if end < 0: return lhs += '(' + expression[0:end] expression = expression[end:] elif token in ('&&', '||'): return elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'): lhs += token expression = matched.group(2) else: operator = token rhs = matched.group(2) break else: matched = Match('^([^-=!<>()&|]+)(.*)$', expression) if not matched: matched = Match('^(\\s*\\S)(.*)$', expression) if not matched: break lhs += matched.group(1) expression = matched.group(2) if not (lhs and operator and rhs): return if rhs.find('&&') > -1 or rhs.find('||') > -1: return lhs = lhs.strip() rhs = rhs.strip() match_constant = '^([-+]?(\\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\\\'.*\\\')$' if Match(match_constant, lhs) or Match(match_constant, rhs): error(filename, linenum, 'readability/check', 2, 'Consider using %s instead of %s(a %s b)' % (_CHECK_REPLACEMENT[check_macro][operator], check_macro, operator)) </DeepExtract> <DeepExtract> line = clean_lines.elided[linenum] if Match('^\\s*#', line): return if line.find('/*') >= 0 or line.find('*/') >= 0: return for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): error(filename, linenum, 'readability/alt_tokens', 2, 'Use operator %s instead of %s' % (_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) </DeepExtract> classinfo = nesting_state.InnermostClass() if classinfo: <DeepExtract> if classinfo.last_line - classinfo.starting_linenum <= 24 or linenum <= classinfo.starting_linenum: return matched = Match('\\s*(public|protected|private):', clean_lines.lines[linenum]) if matched: prev_line = clean_lines.lines[linenum - 1] if not IsBlankLine(prev_line) and (not Search('\\b(class|struct)\\b', prev_line)) and (not Search('\\\\$', prev_line)): end_class_head = classinfo.starting_linenum for i in range(classinfo.starting_linenum, linenum): if Search('\\{\\s*$', clean_lines.lines[i]): end_class_head = i break if end_class_head < linenum - 1: error(filename, linenum, 'whitespace/blank_line', 3, '"%s:" should be preceded by a blank line' % matched.group(1)) </DeepExtract>
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, error): """Checks rules from the 'C++ style rules' section of cppguide.html. Most of these rules are hard to test (naming, comment style), but we do what we can. In particular we check for 2-space indents, line lengths, tab usage, spaces inside code, etc. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ raw_lines = clean_lines.lines_without_raw_strings line = raw_lines[linenum] if line.find('\t') != -1: error(filename, linenum, 'whitespace/tab', 1, 'Tab found; better to use spaces') scope_or_label_pattern = '\\s*\\w+\\s*:\\s*\\\\?$' classinfo = nesting_state.InnermostClass() initial_spaces = 0 cleansed_line = clean_lines.elided[linenum] while initial_spaces < len(line) and line[initial_spaces] == ' ': initial_spaces += 1 if line and line[-1].isspace(): error(filename, linenum, 'whitespace/end_of_line', 4, 'Line ends in whitespace. Consider deleting these extra spaces.') elif (initial_spaces == 1 or initial_spaces == 3) and (not Match(scope_or_label_pattern, cleansed_line)) and (not (clean_lines.raw_lines[linenum] != line and Match('^\\s*""', line))): error(filename, linenum, 'whitespace/indent', 3, 'Weird number of spaces at line-start. Are you using a 2-space indent?') is_header_guard = False if file_extension == 'h': filename = re.sub('_flymake\\.h$', '.h', filename) filename = re.sub('/\\.flymake/([^/]*)$', '/\\1', filename) filename = filename.replace('C++', 'cpp').replace('c++', 'cpp') fileinfo = FileInfo(filename) file_path_from_root = fileinfo.RepositoryName() if _root: file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root) cppvar = re.sub('[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_' if line.startswith('#ifndef %s' % cppvar) or line.startswith('#define %s' % cppvar) or line.startswith('#endif // %s' % cppvar): is_header_guard = True if not line.startswith('#include') and (not is_header_guard) and (not Match('^\\s*//.*http(s?)://\\S*$', line)) and (not Match('^// \\$Id:.*#[0-9]+ \\$$', line)): if isinstance(line, unicode): width = 0 for uc in unicodedata.normalize('NFC', line): if unicodedata.east_asian_width(uc) in ('W', 'F'): width += 2 elif not unicodedata.combining(uc): width += 1 line_width = width else: line_width = len(line) extended_length = int(_line_length * 1.25) if line_width > extended_length: error(filename, linenum, 'whitespace/line_length', 4, 'Lines should very rarely be longer than %i characters' % extended_length) elif line_width > _line_length: error(filename, linenum, 'whitespace/line_length', 2, 'Lines should be <= %i characters long' % _line_length) if cleansed_line.count(';') > 1 and cleansed_line.find('for') == -1 and (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and (not ((cleansed_line.find('case ') != -1 or cleansed_line.find('default:') != -1) and cleansed_line.find('break;') != -1)): error(filename, linenum, 'whitespace/newline', 0, 'More than one command on the same line') line = clean_lines.elided[linenum] if Match('\\s*{\\s*$', line): prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if not 
Search('[,;:}{(]\\s*$', prevline) and (not Match('\\s*#', prevline)): error(filename, linenum, 'whitespace/braces', 4, '{ should almost always be at the end of the previous line') if Match('\\s*else\\b\\s*(?:if\\b|\\{|$)', line): prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if Match('\\s*}\\s*$', prevline): error(filename, linenum, 'whitespace/newline', 4, 'An else should appear on the same line as the preceding }') if Search('else if\\s*\\(', line): brace_on_left = bool(Search('}\\s*else if\\s*\\(', line)) pos = line.find('else if') pos = line.find('(', pos) if pos > 0: (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) brace_on_right = endline[endpos:].find('{') != -1 if brace_on_left != brace_on_right: error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') elif Search('}\\s*else[^{]*$', line) or Match('[^}]*else\\s*{', line): error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') if Search('\\belse [^\\s{]', line) and (not Search('\\belse if\\b', line)): error(filename, linenum, 'whitespace/newline', 4, 'Else clause should never be on same line as else (use 2 lines)') if Match('\\s*do [^\\s{]', line): error(filename, linenum, 'whitespace/newline', 4, 'do/while clauses should not be on a single line') if_else_match = Search('\\b(if\\s*\\(|else\\b)', line) if if_else_match and (not Match('\\s*#', line)): if_indent = GetIndentLevel(line) (endline, endlinenum, endpos) = (line, linenum, if_else_match.end()) if_match = Search('\\bif\\s*\\(', line) if if_match: pos = if_match.end() - 1 (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos) if not Match('\\s*{', endline[endpos:]) and (not (Match('\\s*$', endline[endpos:]) and endlinenum < len(clean_lines.elided) - 1 and Match('\\s*{', clean_lines.elided[endlinenum + 1]))): while endlinenum < len(clean_lines.elided) and ';' not in clean_lines.elided[endlinenum][endpos:]: endlinenum += 1 endpos = 0 if endlinenum < len(clean_lines.elided): endline = clean_lines.elided[endlinenum] endpos = endline.find(';') if not Match(';[\\s}]*(\\\\?)$', endline[endpos:]): if not Match('^[^{};]*\\[[^\\[\\]]*\\][^{}]*\\{[^{}]*\\}\\s*\\)*[;,]\\s*$', endline): error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces') elif endlinenum < len(clean_lines.elided) - 1: next_line = clean_lines.elided[endlinenum + 1] next_indent = GetIndentLevel(next_line) if if_match and Match('\\s*else\\b', next_line) and (next_indent != if_indent): error(filename, linenum, 'readability/braces', 4, 'Else clause should be indented at the same level as if. 
Ambiguous nested if/else chains require braces.') elif next_indent > if_indent: error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces') line = clean_lines.elided[linenum] match = Match('^(.*\\)\\s*)\\{', line) if match: closing_brace_pos = match.group(1).rfind(')') opening_parenthesis = ReverseCloseExpression(clean_lines, linenum, closing_brace_pos) if opening_parenthesis[2] > -1: line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] macro = Search('\\b([A-Z_]+)\\s*$', line_prefix) func = Match('^(.*\\])\\s*$', line_prefix) if macro and macro.group(1) not in ('TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', 'LOCKS_EXCLUDED', 'INTERFACE_DEF') or (func and (not Search('\\boperator\\s*\\[\\s*\\]', func.group(1)))) or Search('\\b(?:struct|union)\\s+alignas\\s*$', line_prefix) or Search('\\s+=\\s*$', line_prefix): match = None if match and opening_parenthesis[1] > 1 and Search('\\]\\s*$', clean_lines.elided[opening_parenthesis[1] - 1]): match = None else: match = Match('^(.*(?:else|\\)\\s*const)\\s*)\\{', line) if not match: prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if prevline and Search('[;{}]\\s*$', prevline): match = Match('^(\\s*)\\{', line) if match: (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, len(match.group(1))) if endpos > -1 and Match('^\\s*;', endline[endpos:]): error(filename, endlinenum, 'readability/braces', 4, "You don't need a ; after a }") line = clean_lines.elided[linenum] matched = Match('\\s*(for|while|if)\\s*\\(', line) if matched: (end_line, end_linenum, end_pos) = CloseExpression(clean_lines, linenum, line.find('(')) if end_pos >= 0 and Match(';', end_line[end_pos:]): if matched.group(1) == 'if': error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, 'Empty conditional bodies should use {}') else: error(filename, end_linenum, 'whitespace/empty_loop_body', 5, 'Empty loop bodies should use {} or continue') line = clean_lines.elided[linenum] matched = Match('\\s*(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)', line) if not matched: return if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): if nesting_state.stack[-1].access != 'private': error(filename, linenum, 'readability/constructors', 3, '%s must be in the private: section' % matched.group(1)) else: pass raw = clean_lines.lines_without_raw_strings line = raw[linenum] if IsBlankLine(line) and (not nesting_state.InNamespaceBody()) and (not nesting_state.InExternC()): elided = clean_lines.elided prev_line = elided[linenum - 1] prevbrace = prev_line.rfind('{') if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1: exception = False if Match(' {6}\\w', prev_line): search_position = linenum - 2 while search_position >= 0 and Match(' {6}\\w', elided[search_position]): search_position -= 1 exception = search_position >= 0 and elided[search_position][:5] == ' :' else: exception = Match(' {4}\\w[^\\(]*\\)\\s*(const\\s*)?(\\{\\s*$|:)', prev_line) or Match(' {4}:', prev_line) if not exception: error(filename, linenum, 'whitespace/blank_line', 2, 'Redundant blank line at the start of a code block should be deleted.') if linenum + 1 < clean_lines.NumLines(): next_line = raw[linenum + 1] if next_line and Match('\\s*}', next_line) and (next_line.find('} else ') == -1): error(filename, linenum, 'whitespace/blank_line', 3, 'Redundant blank line at the end of a code block should be deleted.') matched = 
Match('\\s*(public|protected|private):', prev_line) if matched: error(filename, linenum, 'whitespace/blank_line', 3, 'Do not leave a blank line after "%s:"' % matched.group(1)) next_line_start = 0 if linenum + 1 < clean_lines.NumLines(): next_line = raw[linenum + 1] next_line_start = len(next_line) - len(next_line.lstrip()) CheckComment(line, filename, linenum, next_line_start, error) line = clean_lines.elided[linenum] if Search('\\w\\s+\\[', line) and (not Search('(?:delete|return)\\s+\\[', line)): error(filename, linenum, 'whitespace/braces', 5, 'Extra space before [') if Search('for *\\(.*[^:]:[^: ]', line) or Search('for *\\(.*[^: ]:[^:]', line): error(filename, linenum, 'whitespace/forcolon', 2, 'Missing space around colon in range-based for loop') line = clean_lines.elided[linenum] while True: match = Match('^(.*\\boperator\\b)(\\S+)(\\s*\\(.*)$', line) if match: line = match.group(1) + '_' * len(match.group(2)) + match.group(3) else: break if (Search('[\\w.]=', line) or Search('=[\\w.]', line)) and (not Search('\\b(if|while|for) ', line)) and (not Search('(>=|<=|==|!=|&=|\\^=|\\|=|\\+=|\\*=|\\/=|\\%=)', line)) and (not Search('operator=', line)): error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =') match = Search('[^<>=!\\s](==|!=|<=|>=|\\|\\|)[^<>=!\\s,;\\)]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around %s' % match.group(1)) elif not Match('#.*include', line): match = Match('^(.*[^\\s<])<[^\\s=<,]', line) if match: (_, _, end_pos) = CloseExpression(clean_lines, linenum, len(match.group(1))) if end_pos <= -1: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <') match = Match('^(.*[^-\\s>])>[^\\s=>,]', line) if match: (_, _, start_pos) = ReverseCloseExpression(clean_lines, linenum, len(match.group(1))) if start_pos <= -1: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >') match = Search('(operator|[^\\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\\s,=<])', line) if match and (not (match.group(1).isdigit() and match.group(2).isdigit())) and (not (match.group(1) == 'operator' and match.group(2) == ';')): error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around <<') match = Search('>>[a-zA-Z_]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >>') match = Search('(!\\s|~\\s|[\\s]--[\\s;]|[\\s]\\+\\+[\\s;])', line) if match: error(filename, linenum, 'whitespace/operators', 4, 'Extra space for operator %s' % match.group(1)) line = clean_lines.elided[linenum] match = Search(' (if\\(|for\\(|while\\(|switch\\()', line) if match: error(filename, linenum, 'whitespace/parens', 5, 'Missing space before ( in %s' % match.group(1)) match = Search('\\b(if|for|while|switch)\\s*\\(([ ]*)(.).*[^ ]+([ ]*)\\)\\s*{\\s*$', line) if match: if len(match.group(2)) != len(match.group(4)): if not (match.group(3) == ';' and len(match.group(2)) == 1 + len(match.group(4)) or (not match.group(2) and Search('\\bfor\\s*\\(.*; \\)', line))): error(filename, linenum, 'whitespace/parens', 5, 'Mismatching spaces inside () in %s' % match.group(1)) if len(match.group(2)) not in [0, 1]: error(filename, linenum, 'whitespace/parens', 5, 'Should have zero or one spaces inside ( and ) in %s' % match.group(1)) raw = clean_lines.lines_without_raw_strings line = clean_lines.elided[linenum] if Search(',[^,\\s]', ReplaceAll('\\boperator\\s*,\\s*\\(', 'F(', line)) and Search(',[^,\\s]', raw[linenum]): error(filename, linenum, 'whitespace/comma', 3, 'Missing 
space after ,') if Search(';[^\\s};\\\\)/]', line): error(filename, linenum, 'whitespace/semicolon', 3, 'Missing space after ;') line = clean_lines.elided[linenum] match = Match('^(.*[^ ({>]){', line) if match: (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, len(match.group(1))) trailing_text = '' if endpos > -1: trailing_text = endline[endpos:] for offset in xrange(endlinenum + 1, min(endlinenum + 3, clean_lines.NumLines() - 1)): trailing_text += clean_lines.elided[offset] if not Match('^[\\s}]*[{.;,)<>\\]:]', trailing_text): error(filename, linenum, 'whitespace/braces', 5, 'Missing space before {') if Search('}else', line): error(filename, linenum, 'whitespace/braces', 5, 'Missing space before else') if Search(':\\s*;\\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Semicolon defining empty statement. Use {} instead.') elif Search('^\\s*;\\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Line contains only semicolon. If this should be an empty statement, use {} instead.') elif Search('\\s+;\\s*$', line) and (not Search('\\bfor\\b', line)): error(filename, linenum, 'whitespace/semicolon', 5, 'Extra space before last semicolon. If this should be an empty statement, use {} instead.') line = clean_lines.elided[linenum] fncall = line for pattern in ('\\bif\\s*\\((.*)\\)\\s*{', '\\bfor\\s*\\((.*)\\)\\s*{', '\\bwhile\\s*\\((.*)\\)\\s*[{;]', '\\bswitch\\s*\\((.*)\\)\\s*{'): match = Search(pattern, line) if match: fncall = match.group(1) break if not Search('\\b(if|for|while|switch|return|new|delete|catch|sizeof)\\b', fncall) and (not Search(' \\([^)]+\\)\\([^)]*(\\)|,$)', fncall)) and (not Search(' \\([^)]+\\)\\[[^\\]]+\\]', fncall)): if Search('\\w\\s*\\(\\s(?!\\s*\\\\$)', fncall): error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call') elif Search('\\(\\s+(?!(\\s*\\\\)|\\()', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Extra space after (') if Search('\\w\\s+\\(', fncall) and (not Search('#\\s*define|typedef|using\\s+\\w+\\s*=', fncall)) and (not Search('\\w\\s+\\((\\w+::)*\\*\\w+\\)\\(', fncall)) and (not Search('\\bcase\\s+\\(', fncall)): if Search('\\boperator_*\\b', line): error(filename, linenum, 'whitespace/parens', 0, 'Extra space before ( in function call') else: error(filename, linenum, 'whitespace/parens', 4, 'Extra space before ( in function call') if Search('[^)]\\s+\\)\\s*[^{\\s]', fncall): if Search('^\\s+\\)', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Closing ) should be moved to the previous line') else: error(filename, linenum, 'whitespace/parens', 2, 'Extra space before )') line = clean_lines.elided[linenum] match = Match('^(.*\\S)&&', line) if not match: match = Match('(.*)&&\\S', line) if not match or '(&&)' in line or Search('\\boperator\\s*$', match.group(1)): return typenames = GetTemplateArgs(clean_lines, linenum) and_pos = len(match.group(1)) if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos): if not IsRValueAllowed(clean_lines, linenum, typenames): error(filename, linenum, 'build/c++11', 3, 'RValue references are an unapproved C++ feature.') else: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around &&') lines = clean_lines.elided (check_macro, start_pos) = FindCheckMacro(lines[linenum]) if not check_macro: return (last_line, end_line, end_pos) = CloseExpression(clean_lines, linenum, start_pos) if end_pos < 0: return if not Match('\\s*;', last_line[end_pos:]): return if linenum == end_line: expression = 
lines[linenum][start_pos + 1:end_pos - 1] else: expression = lines[linenum][start_pos + 1:] for i in xrange(linenum + 1, end_line): expression += lines[i] expression += last_line[0:end_pos - 1] lhs = '' rhs = '' operator = None while expression: matched = Match('^\\s*(<<|<<=|>>|>>=|->\\*|->|&&|\\|\\||==|!=|>=|>|<=|<|\\()(.*)$', expression) if matched: token = matched.group(1) if token == '(': expression = matched.group(2) (end, _) = FindEndOfExpressionInLine(expression, 0, ['(']) if end < 0: return lhs += '(' + expression[0:end] expression = expression[end:] elif token in ('&&', '||'): return elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'): lhs += token expression = matched.group(2) else: operator = token rhs = matched.group(2) break else: matched = Match('^([^-=!<>()&|]+)(.*)$', expression) if not matched: matched = Match('^(\\s*\\S)(.*)$', expression) if not matched: break lhs += matched.group(1) expression = matched.group(2) if not (lhs and operator and rhs): return if rhs.find('&&') > -1 or rhs.find('||') > -1: return lhs = lhs.strip() rhs = rhs.strip() match_constant = '^([-+]?(\\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\\\'.*\\\')$' if Match(match_constant, lhs) or Match(match_constant, rhs): error(filename, linenum, 'readability/check', 2, 'Consider using %s instead of %s(a %s b)' % (_CHECK_REPLACEMENT[check_macro][operator], check_macro, operator)) line = clean_lines.elided[linenum] if Match('^\\s*#', line): return if line.find('/*') >= 0 or line.find('*/') >= 0: return for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): error(filename, linenum, 'readability/alt_tokens', 2, 'Use operator %s instead of %s' % (_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) classinfo = nesting_state.InnermostClass() if classinfo: if classinfo.last_line - classinfo.starting_linenum <= 24 or linenum <= classinfo.starting_linenum: return matched = Match('\\s*(public|protected|private):', clean_lines.lines[linenum]) if matched: prev_line = clean_lines.lines[linenum - 1] if not IsBlankLine(prev_line) and (not Search('\\b(class|struct)\\b', prev_line)) and (not Search('\\\\$', prev_line)): end_class_head = classinfo.starting_linenum for i in range(classinfo.starting_linenum, linenum): if Search('\\{\\s*$', clean_lines.lines[i]): end_class_head = i break if end_class_head < linenum - 1: error(filename, linenum, 'whitespace/blank_line', 3, '"%s:" should be preceded by a blank line' % matched.group(1)) </DeepExtract>
cowry
positive
def _delete_item_action(list_func, delete_func, object_type_description, name, namespace='default', propagation_policy='Foreground', timeout=DEFAULT_K8S_TIMEOUT): """ This function takes the action to delete an object (job, cronjob, pod) from kubernetes. It will wait for the object to be fully deleted before returning to processing or timing out. :param list_func: The callback function to list the specified object type :param delete_func: The callback function to delete the specified object type :param object_type_description: The types of objects to delete, in `job`, `cronjob`, or `pod` :param name: The name of the object to delete :param namespace: The namespace of the object :param propagation_policy: The Kubernetes propagation_policy to apply to the delete. Default 'Foreground' means that child objects will be deleted before the given object is marked as deleted. See: https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#controlling-how-the-garbage-collector-deletes-dependents # noqa :param timeout: The timeout to wait for the delete to complete """ try: <DeepExtract> if timeout <= 0: LOG.warn('Kubernetes timeout is invalid or unspecified, using default %ss.', DEFAULT_K8S_TIMEOUT) timeout = DEFAULT_K8S_TIMEOUT timeout = timeout </DeepExtract> LOG.debug('Watching to delete %s: %s in namespace=%s (wait timeout=%s)', object_type_description, name, namespace, timeout) body = client.V1DeleteOptions(propagation_policy=propagation_policy) w = watch.Watch() issue_delete = True found_events = False deadline = round(time.time() + timeout) while timeout > 0: for event in w.stream(list_func, namespace=namespace, field_selector='metadata.name={}'.format(name), timeout_seconds=timeout): if issue_delete: delete_func(name=name, namespace=namespace, body=body) issue_delete = False event_type = event['type'].upper() item = event['object'] item_name = item.metadata.name LOG.debug('Watch event seen: type=%s, name=%s, namespace=%s (waiting on %s: %s)', event_type, item_name, namespace, object_type_description, name) if item_name == name: found_events = True if event_type == 'DELETED': LOG.info('Successfully deleted %s: %s in namespace=%s', object_type_description, item_name, namespace) return timeout = round(deadline - time.time()) if not found_events: LOG.warn('Saw no events for %s: %s in namespace=%s', object_type_description, name, namespace) err_msg = 'Reached timeout while waiting to delete %s: name=%s, namespace=%s' % (object_type_description, name, namespace) LOG.error(err_msg) raise exceptions.KubernetesWatchTimeoutException(err_msg) except ApiException as e: LOG.exception('Exception when deleting %s: name=%s, namespace=%s', object_type_description, name, namespace) raise e
def _delete_item_action(list_func, delete_func, object_type_description, name, namespace='default', propagation_policy='Foreground', timeout=DEFAULT_K8S_TIMEOUT): """ This function takes the action to delete an object (job, cronjob, pod) from kubernetes. It will wait for the object to be fully deleted before returning to processing or timing out. :param list_func: The callback function to list the specified object type :param delete_func: The callback function to delete the specified object type :param object_type_description: The types of objects to delete, in `job`, `cronjob`, or `pod` :param name: The name of the object to delete :param namespace: The namespace of the object :param propagation_policy: The Kubernetes propagation_policy to apply to the delete. Default 'Foreground' means that child objects will be deleted before the given object is marked as deleted. See: https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#controlling-how-the-garbage-collector-deletes-dependents # noqa :param timeout: The timeout to wait for the delete to complete """ try: if timeout <= 0: LOG.warn('Kubernetes timeout is invalid or unspecified, using default %ss.', DEFAULT_K8S_TIMEOUT) timeout = DEFAULT_K8S_TIMEOUT timeout = timeout LOG.debug('Watching to delete %s: %s in namespace=%s (wait timeout=%s)', object_type_description, name, namespace, timeout) body = client.V1DeleteOptions(propagation_policy=propagation_policy) w = watch.Watch() issue_delete = True found_events = False deadline = round(time.time() + timeout) while timeout > 0: for event in w.stream(list_func, namespace=namespace, field_selector='metadata.name={}'.format(name), timeout_seconds=timeout): if issue_delete: delete_func(name=name, namespace=namespace, body=body) issue_delete = False event_type = event['type'].upper() item = event['object'] item_name = item.metadata.name LOG.debug('Watch event seen: type=%s, name=%s, namespace=%s (waiting on %s: %s)', event_type, item_name, namespace, object_type_description, name) if item_name == name: found_events = True if event_type == 'DELETED': LOG.info('Successfully deleted %s: %s in namespace=%s', object_type_description, item_name, namespace) return timeout = round(deadline - time.time()) if not found_events: LOG.warn('Saw no events for %s: %s in namespace=%s', object_type_description, name, namespace) err_msg = 'Reached timeout while waiting to delete %s: name=%s, namespace=%s' % (object_type_description, name, namespace) LOG.error(err_msg) raise exceptions.KubernetesWatchTimeoutException(err_msg) except ApiException as e: LOG.exception('Exception when deleting %s: name=%s, namespace=%s', object_type_description, name, namespace) raise e
armada
positive
def _ratio_enum(anchor, ratios):
    """Enumerate a set of anchors for each aspect ratio wrt an anchor."""
    <DeepExtract>
    w = anchor[2] - anchor[0] + 1
    h = anchor[3] - anchor[1] + 1
    x_ctr = anchor[0] + 0.5 * (w - 1)
    y_ctr = anchor[1] + 0.5 * (h - 1)
    (w, h, x_ctr, y_ctr) = (w, h, x_ctr, y_ctr)
    </DeepExtract>
    size = w * h
    size_ratios = size / ratios
    ws = np.round(np.sqrt(size_ratios))
    hs = np.round(ws * ratios)
    <DeepExtract>
    ws = ws[:, np.newaxis]
    hs = hs[:, np.newaxis]
    anchors = np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))
    anchors = anchors
    </DeepExtract>
    return anchors
def _ratio_enum(anchor, ratios):
    """Enumerate a set of anchors for each aspect ratio wrt an anchor."""
    w = anchor[2] - anchor[0] + 1
    h = anchor[3] - anchor[1] + 1
    x_ctr = anchor[0] + 0.5 * (w - 1)
    y_ctr = anchor[1] + 0.5 * (h - 1)
    (w, h, x_ctr, y_ctr) = (w, h, x_ctr, y_ctr)
    size = w * h
    size_ratios = size / ratios
    ws = np.round(np.sqrt(size_ratios))
    hs = np.round(ws * ratios)
    ws = ws[:, np.newaxis]
    hs = hs[:, np.newaxis]
    anchors = np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))
    anchors = anchors
    return anchors
CenterNet-CondInst
positive
def test_from_valid_pubkey(self): """Create P2PKHBitcoinAddress's from valid pubkeys""" def T(pubkey, expected_str_addr): addr = P2PKHBitcoinAddress.from_pubkey(pubkey) self.assertEqual(str(addr), expected_str_addr) <DeepExtract> addr = CBitcoinAddress(x('0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71')) self.assertEqual(addr.to_bytes(), '1C7zdTfnkzmr13HfA2vNm5SJYRK6nEKyq8') self.assertEqual(addr.nVersion, expected_nVersion) self.assertEqual(addr.__class__, expected_class) </DeepExtract> <DeepExtract> addr = CBitcoinAddress(x('0478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc3455')) self.assertEqual(addr.to_bytes(), '1JwSSubhmg6iPtRjtyqhUYYH7bZg3Lfy1T') self.assertEqual(addr.nVersion, expected_nVersion) self.assertEqual(addr.__class__, expected_class) </DeepExtract> <DeepExtract> addr = CBitcoinAddress(CPubKey(x('0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71'))) self.assertEqual(addr.to_bytes(), '1C7zdTfnkzmr13HfA2vNm5SJYRK6nEKyq8') self.assertEqual(addr.nVersion, expected_nVersion) self.assertEqual(addr.__class__, expected_class) </DeepExtract> <DeepExtract> addr = CBitcoinAddress(CPubKey(x('0478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc3455'))) self.assertEqual(addr.to_bytes(), '1JwSSubhmg6iPtRjtyqhUYYH7bZg3Lfy1T') self.assertEqual(addr.nVersion, expected_nVersion) self.assertEqual(addr.__class__, expected_class) </DeepExtract>
def test_from_valid_pubkey(self): """Create P2PKHBitcoinAddress's from valid pubkeys""" def T(pubkey, expected_str_addr): addr = P2PKHBitcoinAddress.from_pubkey(pubkey) self.assertEqual(str(addr), expected_str_addr) addr = CBitcoinAddress(x('0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71')) self.assertEqual(addr.to_bytes(), '1C7zdTfnkzmr13HfA2vNm5SJYRK6nEKyq8') self.assertEqual(addr.nVersion, expected_nVersion) self.assertEqual(addr.__class__, expected_class) addr = CBitcoinAddress(x('0478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc3455')) self.assertEqual(addr.to_bytes(), '1JwSSubhmg6iPtRjtyqhUYYH7bZg3Lfy1T') self.assertEqual(addr.nVersion, expected_nVersion) self.assertEqual(addr.__class__, expected_class) addr = CBitcoinAddress(CPubKey(x('0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71'))) self.assertEqual(addr.to_bytes(), '1C7zdTfnkzmr13HfA2vNm5SJYRK6nEKyq8') self.assertEqual(addr.nVersion, expected_nVersion) self.assertEqual(addr.__class__, expected_class) addr = CBitcoinAddress(CPubKey(x('0478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc3455'))) self.assertEqual(addr.to_bytes(), '1JwSSubhmg6iPtRjtyqhUYYH7bZg3Lfy1T') self.assertEqual(addr.nVersion, expected_nVersion) self.assertEqual(addr.__class__, expected_class) </DeepExtract>
checklocktimeverify-demos
positive
@numba.njit def noise_per_box_v2_(boxes, valid_mask, loc_noises, rot_noises, global_rot_noises): num_boxes = boxes.shape[0] num_tests = loc_noises.shape[1] box_corners = box_np_ops.box2d_to_corner_jit(boxes) current_corners = np.zeros((4, 2), dtype=boxes.dtype) current_box = np.zeros((1, 5), dtype=boxes.dtype) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) dst_pos = np.zeros((2,), dtype=boxes.dtype) success_mask = -np.ones((num_boxes,), dtype=np.int64) corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners_norm = corners_norm.reshape(4, 2) for i in range(num_boxes): if valid_mask[i]: for j in range(num_tests): current_box[0, :] = boxes[i] current_radius = np.sqrt(boxes[i, 0] ** 2 + boxes[i, 1] ** 2) current_grot = np.arctan2(boxes[i, 0], boxes[i, 1]) dst_grot = current_grot + global_rot_noises[i, j] dst_pos[0] = current_radius * np.sin(dst_grot) dst_pos[1] = current_radius * np.cos(dst_grot) current_box[0, :2] = dst_pos current_box[0, -1] += dst_grot - current_grot rot_sin = np.sin(current_box[0, -1]) rot_cos = np.cos(current_box[0, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos current_corners[:] = current_box[0, 2:4] * corners_norm @ rot_mat_T + current_box[0, :2] current_corners -= current_box[0, :2] <DeepExtract> rot_sin = np.sin(rot_noises[i, j]) rot_cos = np.cos(rot_noises[i, j]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos current_corners[:] = current_corners @ rot_mat_T </DeepExtract> current_corners += current_box[0, :2] + loc_noises[i, j, :2] <DeepExtract> N = current_corners.reshape(1, 4, 2).shape[0] K = box_corners.shape[0] ret = np.zeros((N, K), dtype=np.bool_) slices = np.array([1, 2, 3, 0]) lines_boxes = np.stack((current_corners.reshape(1, 4, 2), current_corners.reshape(1, 4, 2)[:, slices, :]), axis=2) lines_qboxes = np.stack((box_corners, box_corners[:, slices, :]), axis=2) boxes_standup = box_np_ops.corner_to_standup_nd_jit(current_corners.reshape(1, 4, 2)) qboxes_standup = box_np_ops.corner_to_standup_nd_jit(box_corners) for i in range(N): for j in range(K): iw = min(boxes_standup[i, 2], qboxes_standup[j, 2]) - max(boxes_standup[i, 0], qboxes_standup[j, 0]) if iw > 0: ih = min(boxes_standup[i, 3], qboxes_standup[j, 3]) - max(boxes_standup[i, 1], qboxes_standup[j, 1]) if ih > 0: for k in range(4): for l in range(4): A = lines_boxes[i, k, 0] B = lines_boxes[i, k, 1] C = lines_qboxes[j, l, 0] D = lines_qboxes[j, l, 1] acd = (D[1] - A[1]) * (C[0] - A[0]) > (C[1] - A[1]) * (D[0] - A[0]) bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0]) if acd != bcd: abc = (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0]) abd = (D[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (D[0] - A[0]) if abc != abd: ret[i, j] = True break if ret[i, j] is True: break if ret[i, j] is False: box_overlap_qbox = True for l in range(4): for k in range(4): vec = current_corners.reshape(1, 4, 2)[i, k] - current_corners.reshape(1, 4, 2)[i, (k + 1) % 4] if clockwise: vec = -vec cross = vec[1] * (current_corners.reshape(1, 4, 2)[i, k, 0] - box_corners[j, l, 0]) cross -= vec[0] * (current_corners.reshape(1, 4, 2)[i, k, 1] - box_corners[j, l, 1]) if cross >= 0: box_overlap_qbox = False break if box_overlap_qbox is False: break if box_overlap_qbox is False: qbox_overlap_box = True for l in range(4): for k in range(4): vec = box_corners[j, 
k] - box_corners[j, (k + 1) % 4] if clockwise: vec = -vec cross = vec[1] * (box_corners[j, k, 0] - current_corners.reshape(1, 4, 2)[i, l, 0]) cross -= vec[0] * (box_corners[j, k, 1] - current_corners.reshape(1, 4, 2)[i, l, 1]) if cross >= 0: qbox_overlap_box = False break if qbox_overlap_box is False: break if qbox_overlap_box: ret[i, j] = True else: ret[i, j] = True coll_mat = ret </DeepExtract> coll_mat[0, i] = False if not coll_mat.any(): success_mask[i] = j box_corners[i] = current_corners loc_noises[i, j, :2] += dst_pos - boxes[i, :2] rot_noises[i, j] += dst_grot - current_grot break return success_mask
@numba.njit def noise_per_box_v2_(boxes, valid_mask, loc_noises, rot_noises, global_rot_noises): num_boxes = boxes.shape[0] num_tests = loc_noises.shape[1] box_corners = box_np_ops.box2d_to_corner_jit(boxes) current_corners = np.zeros((4, 2), dtype=boxes.dtype) current_box = np.zeros((1, 5), dtype=boxes.dtype) rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) dst_pos = np.zeros((2,), dtype=boxes.dtype) success_mask = -np.ones((num_boxes,), dtype=np.int64) corners_norm = np.zeros((4, 2), dtype=boxes.dtype) corners_norm[1, 1] = 1.0 corners_norm[2] = 1.0 corners_norm[3, 0] = 1.0 corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) corners_norm = corners_norm.reshape(4, 2) for i in range(num_boxes): if valid_mask[i]: for j in range(num_tests): current_box[0, :] = boxes[i] current_radius = np.sqrt(boxes[i, 0] ** 2 + boxes[i, 1] ** 2) current_grot = np.arctan2(boxes[i, 0], boxes[i, 1]) dst_grot = current_grot + global_rot_noises[i, j] dst_pos[0] = current_radius * np.sin(dst_grot) dst_pos[1] = current_radius * np.cos(dst_grot) current_box[0, :2] = dst_pos current_box[0, -1] += dst_grot - current_grot rot_sin = np.sin(current_box[0, -1]) rot_cos = np.cos(current_box[0, -1]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos current_corners[:] = current_box[0, 2:4] * corners_norm @ rot_mat_T + current_box[0, :2] current_corners -= current_box[0, :2] rot_sin = np.sin(rot_noises[i, j]) rot_cos = np.cos(rot_noises[i, j]) rot_mat_T[0, 0] = rot_cos rot_mat_T[0, 1] = -rot_sin rot_mat_T[1, 0] = rot_sin rot_mat_T[1, 1] = rot_cos current_corners[:] = current_corners @ rot_mat_T current_corners += current_box[0, :2] + loc_noises[i, j, :2] N = current_corners.reshape(1, 4, 2).shape[0] K = box_corners.shape[0] ret = np.zeros((N, K), dtype=np.bool_) slices = np.array([1, 2, 3, 0]) lines_boxes = np.stack((current_corners.reshape(1, 4, 2), current_corners.reshape(1, 4, 2)[:, slices, :]), axis=2) lines_qboxes = np.stack((box_corners, box_corners[:, slices, :]), axis=2) boxes_standup = box_np_ops.corner_to_standup_nd_jit(current_corners.reshape(1, 4, 2)) qboxes_standup = box_np_ops.corner_to_standup_nd_jit(box_corners) for i in range(N): for j in range(K): iw = min(boxes_standup[i, 2], qboxes_standup[j, 2]) - max(boxes_standup[i, 0], qboxes_standup[j, 0]) if iw > 0: ih = min(boxes_standup[i, 3], qboxes_standup[j, 3]) - max(boxes_standup[i, 1], qboxes_standup[j, 1]) if ih > 0: for k in range(4): for l in range(4): A = lines_boxes[i, k, 0] B = lines_boxes[i, k, 1] C = lines_qboxes[j, l, 0] D = lines_qboxes[j, l, 1] acd = (D[1] - A[1]) * (C[0] - A[0]) > (C[1] - A[1]) * (D[0] - A[0]) bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0]) if acd != bcd: abc = (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0]) abd = (D[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (D[0] - A[0]) if abc != abd: ret[i, j] = True break if ret[i, j] is True: break if ret[i, j] is False: box_overlap_qbox = True for l in range(4): for k in range(4): vec = current_corners.reshape(1, 4, 2)[i, k] - current_corners.reshape(1, 4, 2)[i, (k + 1) % 4] if clockwise: vec = -vec cross = vec[1] * (current_corners.reshape(1, 4, 2)[i, k, 0] - box_corners[j, l, 0]) cross -= vec[0] * (current_corners.reshape(1, 4, 2)[i, k, 1] - box_corners[j, l, 1]) if cross >= 0: box_overlap_qbox = False break if box_overlap_qbox is False: break if box_overlap_qbox is False: qbox_overlap_box = True for l in range(4): for k in range(4): vec = box_corners[j, k] - box_corners[j, (k + 1) % 4] if 
clockwise: vec = -vec cross = vec[1] * (box_corners[j, k, 0] - current_corners.reshape(1, 4, 2)[i, l, 0]) cross -= vec[0] * (box_corners[j, k, 1] - current_corners.reshape(1, 4, 2)[i, l, 1]) if cross >= 0: qbox_overlap_box = False break if qbox_overlap_box is False: break if qbox_overlap_box: ret[i, j] = True else: ret[i, j] = True coll_mat = ret coll_mat[0, i] = False if not coll_mat.any(): success_mask[i] = j box_corners[i] = current_corners loc_noises[i, j, :2] += dst_pos - boxes[i, :2] rot_noises[i, j] += dst_grot - current_grot break return success_mask
3D-CVF
positive
def add_filter(self, filter_values):
    """
    Construct a filter.
    """
    if not filter_values:
        return
    <DeepExtract>
    pass
    </DeepExtract>
    for v in filter_values[1:]:
        f |= self.get_value_filter(v)
    return f
def add_filter(self, filter_values):
    """
    Construct a filter.
    """
    if not filter_values:
        return
    pass
    for v in filter_values[1:]:
        f |= self.get_value_filter(v)
    return f
elasticsearch-dsl-py
positive
def lambda_handler(event, context): try: logger.config(context.aws_request_id) logger.debug('Handler starting...') logger.debug(event) pipeline_run.consume_event(event, context, loglevel=loglevel) logger.info({'event': 'new_invoke'}) errors = [] successes = [] cfn_nag_version = os.environ['TOOLVERSION'] if cfn_nag_version.lower() == 'latest': cfn_nag_version = '' else: cfn_nag_version = ' -v %s' % cfn_nag_version for (artifact, template_name) in get_templates(pipeline_run.ci_configs): <DeepExtract> codebuild_client = clients.get('codebuild') try: build_response = codebuild_client.start_build(projectName='CFN-Lint-' + pipeline_run.pipeline_name, buildspecOverride='version: 0.1\n\nphases:\n install:\n commands:\n - apt-get -y update\n - apt-get -y install ruby-full\n - apt-get -y install jq\n - gem install cfn-nag' + cfn_nag_version + '\n pre_build:\n commands:\n - echo Nothing to do in the pre_build phase...\n build:\n commands:\n - echo Build started on `date`\n - cfn_nag_scan --input-path templates/' + template_name + ' --debug\n post_build:\n commands:\n - echo Build completed on `date`') build_id = build_response['build']['id'] build_status = build_response['build']['buildStatus'] while build_status == 'IN_PROGRESS': time.sleep(5) check_response = {'builds': [{}]} retry = 0 while not ('phases' in check_response['builds'][0] and 'buildStatus' in check_response['builds'][0]): if retry > 4: raise KeyError('Cannot get buildStatus or phases from CodeBuild response') elif retry > 0: time.sleep(10) retry += 1 check_response = codebuild_client.batch_get_builds(ids=[build_id]) build_status = check_response['builds'][0]['buildStatus'] phases = check_response['builds'][0]['phases'] print(check_response) if build_status != 'SUCCEEDED': error_message = 'linting of template ' + template_name + ' failed' for phase in phases: if 'phaseStatus' in phase and phase['phaseStatus'] != 'SUCCEEDED': for context in phase['contexts']: error_message += context['message'] + ' - ' + context['statusCode'] lint_failed = error_message except botocore.exceptions.ClientError as exception: lint_failed = exception.message </DeepExtract> if lint_failed: errors.append([artifact, template_name, lint_failed]) else: successes.append('%s/%s' % (artifact, template_name)) if len(errors) > 0: msg = '%s lint failures %s' % (len(errors), errors) pipeline_run.put_job_failure(msg) logger.error(msg) else: pipeline_run.put_job_success('Successfully linted: %s' % successes) logger.info('Successfully linted: %s' % successes) except Exception as exception: logger.error('unhandled exception!', exc_info=1) pipeline_run.put_job_failure(str(exception))
def lambda_handler(event, context): try: logger.config(context.aws_request_id) logger.debug('Handler starting...') logger.debug(event) pipeline_run.consume_event(event, context, loglevel=loglevel) logger.info({'event': 'new_invoke'}) errors = [] successes = [] cfn_nag_version = os.environ['TOOLVERSION'] if cfn_nag_version.lower() == 'latest': cfn_nag_version = '' else: cfn_nag_version = ' -v %s' % cfn_nag_version for (artifact, template_name) in get_templates(pipeline_run.ci_configs): codebuild_client = clients.get('codebuild') try: build_response = codebuild_client.start_build(projectName='CFN-Lint-' + pipeline_run.pipeline_name, buildspecOverride='version: 0.1\n\nphases:\n install:\n commands:\n - apt-get -y update\n - apt-get -y install ruby-full\n - apt-get -y install jq\n - gem install cfn-nag' + cfn_nag_version + '\n pre_build:\n commands:\n - echo Nothing to do in the pre_build phase...\n build:\n commands:\n - echo Build started on `date`\n - cfn_nag_scan --input-path templates/' + template_name + ' --debug\n post_build:\n commands:\n - echo Build completed on `date`') build_id = build_response['build']['id'] build_status = build_response['build']['buildStatus'] while build_status == 'IN_PROGRESS': time.sleep(5) check_response = {'builds': [{}]} retry = 0 while not ('phases' in check_response['builds'][0] and 'buildStatus' in check_response['builds'][0]): if retry > 4: raise KeyError('Cannot get buildStatus or phases from CodeBuild response') elif retry > 0: time.sleep(10) retry += 1 check_response = codebuild_client.batch_get_builds(ids=[build_id]) build_status = check_response['builds'][0]['buildStatus'] phases = check_response['builds'][0]['phases'] print(check_response) if build_status != 'SUCCEEDED': error_message = 'linting of template ' + template_name + ' failed' for phase in phases: if 'phaseStatus' in phase and phase['phaseStatus'] != 'SUCCEEDED': for context in phase['contexts']: error_message += context['message'] + ' - ' + context['statusCode'] lint_failed = error_message except botocore.exceptions.ClientError as exception: lint_failed = exception.message if lint_failed: errors.append([artifact, template_name, lint_failed]) else: successes.append('%s/%s' % (artifact, template_name)) if len(errors) > 0: msg = '%s lint failures %s' % (len(errors), errors) pipeline_run.put_job_failure(msg) logger.error(msg) else: pipeline_run.put_job_success('Successfully linted: %s' % successes) logger.info('Successfully linted: %s' % successes) except Exception as exception: logger.error('unhandled exception!', exc_info=1) pipeline_run.put_job_failure(str(exception))
cloudformation-validation-pipeline
positive
def test_review_modify(self):
    <DeepExtract>
    review = db_review.create(**self.review)
    </DeepExtract>
    resp = self.client.post('/review/%s' % review['id'], headers=self.header(self.another_user))
    self.assert403(resp, "Shouldn't be able to edit someone else's review.")
    data = dict()
    resp = self.client.post('/review/%s' % review['id'], headers=self.header(self.user), data=json.dumps(data))
    self.assert200(resp)
    resp = self.client.get('/review/%s/revisions' % review['id']).json
    self.assertEqual(len(resp['revisions']), 1)
    data = dict(text='Some updated text with length more than twenty five.')
    resp = self.client.post('/review/%s' % review['id'], headers=self.header(self.user), data=json.dumps(data))
    self.assert200(resp)
    resp = self.client.get('/review/%s' % review['id']).json
    self.assertEqual(resp['review']['text'], data['text'])
    self.assertEqual(resp['review']['rating'], review['rating'])
def test_review_modify(self):
    review = db_review.create(**self.review)
    resp = self.client.post('/review/%s' % review['id'], headers=self.header(self.another_user))
    self.assert403(resp, "Shouldn't be able to edit someone else's review.")
    data = dict()
    resp = self.client.post('/review/%s' % review['id'], headers=self.header(self.user), data=json.dumps(data))
    self.assert200(resp)
    resp = self.client.get('/review/%s/revisions' % review['id']).json
    self.assertEqual(len(resp['revisions']), 1)
    data = dict(text='Some updated text with length more than twenty five.')
    resp = self.client.post('/review/%s' % review['id'], headers=self.header(self.user), data=json.dumps(data))
    self.assert200(resp)
    resp = self.client.get('/review/%s' % review['id']).json
    self.assertEqual(resp['review']['text'], data['text'])
    self.assertEqual(resp['review']['rating'], review['rating'])
critiquebrainz
positive
def write_points3d_binary(points3D, path_to_model_file): """ see: src/base/reconstruction.cc void Reconstruction::ReadPoints3DBinary(const std::string& path) void Reconstruction::WritePoints3DBinary(const std::string& path) """ with open(path_to_model_file, 'wb') as fid: <DeepExtract> if isinstance(len(points3D), (list, tuple)): bytes = struct.pack(endian_character + 'Q', *len(points3D)) else: bytes = struct.pack(endian_character + 'Q', len(points3D)) fid.write(bytes) </DeepExtract> for (_, pt) in points3D.items(): <DeepExtract> if isinstance(pt.id, (list, tuple)): bytes = struct.pack(endian_character + 'Q', *pt.id) else: bytes = struct.pack(endian_character + 'Q', pt.id) fid.write(bytes) </DeepExtract> <DeepExtract> if isinstance(pt.xyz.tolist(), (list, tuple)): bytes = struct.pack(endian_character + 'ddd', *pt.xyz.tolist()) else: bytes = struct.pack(endian_character + 'ddd', pt.xyz.tolist()) fid.write(bytes) </DeepExtract> <DeepExtract> if isinstance(pt.rgb.tolist(), (list, tuple)): bytes = struct.pack(endian_character + 'BBB', *pt.rgb.tolist()) else: bytes = struct.pack(endian_character + 'BBB', pt.rgb.tolist()) fid.write(bytes) </DeepExtract> <DeepExtract> if isinstance(pt.error, (list, tuple)): bytes = struct.pack(endian_character + 'd', *pt.error) else: bytes = struct.pack(endian_character + 'd', pt.error) fid.write(bytes) </DeepExtract> track_length = pt.image_ids.shape[0] <DeepExtract> if isinstance(track_length, (list, tuple)): bytes = struct.pack(endian_character + 'Q', *track_length) else: bytes = struct.pack(endian_character + 'Q', track_length) fid.write(bytes) </DeepExtract> for (image_id, point2D_id) in zip(pt.image_ids, pt.point2D_idxs): <DeepExtract> if isinstance([image_id, point2D_id], (list, tuple)): bytes = struct.pack(endian_character + 'ii', *[image_id, point2D_id]) else: bytes = struct.pack(endian_character + 'ii', [image_id, point2D_id]) fid.write(bytes) </DeepExtract>
def write_points3d_binary(points3D, path_to_model_file): """ see: src/base/reconstruction.cc void Reconstruction::ReadPoints3DBinary(const std::string& path) void Reconstruction::WritePoints3DBinary(const std::string& path) """ with open(path_to_model_file, 'wb') as fid: if isinstance(len(points3D), (list, tuple)): bytes = struct.pack(endian_character + 'Q', *len(points3D)) else: bytes = struct.pack(endian_character + 'Q', len(points3D)) fid.write(bytes) for (_, pt) in points3D.items(): if isinstance(pt.id, (list, tuple)): bytes = struct.pack(endian_character + 'Q', *pt.id) else: bytes = struct.pack(endian_character + 'Q', pt.id) fid.write(bytes) if isinstance(pt.xyz.tolist(), (list, tuple)): bytes = struct.pack(endian_character + 'ddd', *pt.xyz.tolist()) else: bytes = struct.pack(endian_character + 'ddd', pt.xyz.tolist()) fid.write(bytes) if isinstance(pt.rgb.tolist(), (list, tuple)): bytes = struct.pack(endian_character + 'BBB', *pt.rgb.tolist()) else: bytes = struct.pack(endian_character + 'BBB', pt.rgb.tolist()) fid.write(bytes) if isinstance(pt.error, (list, tuple)): bytes = struct.pack(endian_character + 'd', *pt.error) else: bytes = struct.pack(endian_character + 'd', pt.error) fid.write(bytes) track_length = pt.image_ids.shape[0] if isinstance(track_length, (list, tuple)): bytes = struct.pack(endian_character + 'Q', *track_length) else: bytes = struct.pack(endian_character + 'Q', track_length) fid.write(bytes) for (image_id, point2D_id) in zip(pt.image_ids, pt.point2D_idxs): if isinstance([image_id, point2D_id], (list, tuple)): bytes = struct.pack(endian_character + 'ii', *[image_id, point2D_id]) else: bytes = struct.pack(endian_character + 'ii', [image_id, point2D_id]) fid.write(bytes) </DeepExtract>
EasyMocap
positive
def forward(self, x, alpha, beta, ratio):
    result = 0
    if isinstance(ratio[0], torch.Tensor):
        ratio0 = self._width_mult_list_left[ratio[0].argmax()]
        r_score0 = ratio[0][ratio[0].argmax()]
    else:
        ratio0 = ratio[0]
        r_score0 = 1.0
    if isinstance(ratio[1], torch.Tensor):
        ratio1 = self._width_mult_list[ratio[1].argmax()]
        r_score1 = ratio[1][ratio[1].argmax()]
    else:
        ratio1 = ratio[1]
        r_score1 = 1.0
    if self.slimmable:
        <DeepExtract>
        self._op.set_ratio((ratio0, ratio1))
        </DeepExtract>
    for (w, op) in zip(alpha, self._ops):
        if self.quantize == 'search':
            result = result + (beta[0] * op(x, quantize=False) + beta[1] * op(x, quantize=True)) * w * r_score0 * r_score1
        elif self.quantize:
            result = result + op(x, quantize=True) * w * r_score0 * r_score1
        else:
            result = result + op(x, quantize=False) * w * r_score0 * r_score1
    return result
def forward(self, x, alpha, beta, ratio):
    result = 0
    if isinstance(ratio[0], torch.Tensor):
        ratio0 = self._width_mult_list_left[ratio[0].argmax()]
        r_score0 = ratio[0][ratio[0].argmax()]
    else:
        ratio0 = ratio[0]
        r_score0 = 1.0
    if isinstance(ratio[1], torch.Tensor):
        ratio1 = self._width_mult_list[ratio[1].argmax()]
        r_score1 = ratio[1][ratio[1].argmax()]
    else:
        ratio1 = ratio[1]
        r_score1 = 1.0
    if self.slimmable:
        self._op.set_ratio((ratio0, ratio1))
    for (w, op) in zip(alpha, self._ops):
        if self.quantize == 'search':
            result = result + (beta[0] * op(x, quantize=False) + beta[1] * op(x, quantize=True)) * w * r_score0 * r_score1
        elif self.quantize:
            result = result + op(x, quantize=True) * w * r_score0 * r_score1
        else:
            result = result + op(x, quantize=False) * w * r_score0 * r_score1
    return result
AGD
positive
def add_embeddings(self):
    with tf.name_scope('embedding'):
        if self.is_Embedding_Needed:
            W = tf.Variable(np.array(self.embeddings), name='W', dtype='float32', trainable=self.trainable)
            W_pos = tf.Variable(tf.random_uniform([500, self.embedding_size], -1.0, 1.0), name='W', trainable=self.trainable)
        else:
            W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), name='W', trainable=self.trainable)
        self.embedding_W = W
        self.embedding_W_pos = W_pos
        <DeepExtract>
        embedded_chars_q = tf.nn.embedding_lookup(self.embedding_W, self.question)
        embedding_chars_q_pos = tf.nn.embedding_lookup(self.embedding_W_pos, self.q_position)
        (self.embedded_chars_q, self.embedded_chars_q_pos) = (embedded_chars_q, embedding_chars_q_pos)
        </DeepExtract>
        self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars_q, -1)
def add_embeddings(self):
    with tf.name_scope('embedding'):
        if self.is_Embedding_Needed:
            W = tf.Variable(np.array(self.embeddings), name='W', dtype='float32', trainable=self.trainable)
            W_pos = tf.Variable(tf.random_uniform([500, self.embedding_size], -1.0, 1.0), name='W', trainable=self.trainable)
        else:
            W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), name='W', trainable=self.trainable)
        self.embedding_W = W
        self.embedding_W_pos = W_pos
        embedded_chars_q = tf.nn.embedding_lookup(self.embedding_W, self.question)
        embedding_chars_q_pos = tf.nn.embedding_lookup(self.embedding_W_pos, self.q_position)
        (self.embedded_chars_q, self.embedded_chars_q_pos) = (embedded_chars_q, embedding_chars_q_pos)
        self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars_q, -1)
complex-order
positive
def get_impedance(self, node_name: str, freq: float, atol: float=0.0) -> complex:
    """Computes the impedance looking into the given node.

    Parameters
    ----------
    node_name : str
        the node to compute impedance for. We will inject a current into this node
        and measure the voltage on this node.
    freq : float
        the frequency to compute the impedance at, in Hertz.
    atol : float
        absolute tolerance for checking zeros in the numerator. Used to filter out
        scipy warnings.

    Returns
    -------
    impedance : complex
        the impedance value, in Ohms.
    """
    <DeepExtract>
    (num, den) = self.get_num_den(node_name, node_name, in_type='i', atol=atol)
    sys = TransferFunctionContinuous(num, den)
    </DeepExtract>
    w_test = 2 * np.pi * freq
    (_, zin_vec) = sys.freqresp(w=[w_test])
    return zin_vec[0]
def get_impedance(self, node_name: str, freq: float, atol: float=0.0) -> complex:
    """Computes the impedance looking into the given node.

    Parameters
    ----------
    node_name : str
        the node to compute impedance for. We will inject a current into this node
        and measure the voltage on this node.
    freq : float
        the frequency to compute the impedance at, in Hertz.
    atol : float
        absolute tolerance for checking zeros in the numerator. Used to filter out
        scipy warnings.

    Returns
    -------
    impedance : complex
        the impedance value, in Ohms.
    """
    (num, den) = self.get_num_den(node_name, node_name, in_type='i', atol=atol)
    sys = TransferFunctionContinuous(num, den)
    w_test = 2 * np.pi * freq
    (_, zin_vec) = sys.freqresp(w=[w_test])
    return zin_vec[0]
bag
positive
def get_final_text(pred_text, orig_text, do_lower_case): """Project the tokenized prediction back to the original text.""" def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == ' ': continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = ''.join(ns_chars) return (ns_text, ns_to_s_map) tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) tok_text = ' '.join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if FLAGS.verbose_logging: logging.info("Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 <DeepExtract> ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(orig_text): if c == ' ': continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = ''.join(ns_chars) (orig_ns_text, orig_ns_to_s_map) = (ns_text, ns_to_s_map) </DeepExtract> <DeepExtract> ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(tok_text): if c == ' ': continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = ''.join(ns_chars) (tok_ns_text, tok_ns_to_s_map) = (ns_text, ns_to_s_map) </DeepExtract> if len(orig_ns_text) != len(tok_ns_text): if FLAGS.verbose_logging: logging.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text tok_s_to_ns_map = {} for (i, tok_index) in six.iteritems(tok_ns_to_s_map): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if FLAGS.verbose_logging: logging.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if FLAGS.verbose_logging: logging.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position:orig_end_position + 1] return output_text
def get_final_text(pred_text, orig_text, do_lower_case): """Project the tokenized prediction back to the original text.""" def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == ' ': continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = ''.join(ns_chars) return (ns_text, ns_to_s_map) tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) tok_text = ' '.join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if FLAGS.verbose_logging: logging.info("Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(orig_text): if c == ' ': continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = ''.join(ns_chars) (orig_ns_text, orig_ns_to_s_map) = (ns_text, ns_to_s_map) ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(tok_text): if c == ' ': continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = ''.join(ns_chars) (tok_ns_text, tok_ns_to_s_map) = (ns_text, ns_to_s_map) if len(orig_ns_text) != len(tok_ns_text): if FLAGS.verbose_logging: logging.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text tok_s_to_ns_map = {} for (i, tok_index) in six.iteritems(tok_ns_to_s_map): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if FLAGS.verbose_logging: logging.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if FLAGS.verbose_logging: logging.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position:orig_end_position + 1] return output_text
DAPPLE
positive
def results2json(dataset, results, out_file): result_files = dict() if isinstance(results[0], list): <DeepExtract> json_results = [] for idx in range(len(dataset)): img_id = dataset.img_ids[idx] result = results[idx] for label in range(len(result)): bboxes = result[label] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = dataset.cat_ids[label] json_results.append(data) json_results = json_results </DeepExtract> result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox') result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox') mmcv.dump(json_results, result_files['bbox']) elif isinstance(results[0], tuple): <DeepExtract> bbox_json_results = [] segm_json_results = [] for idx in range(len(dataset)): img_id = dataset.img_ids[idx] (det, seg) = results[idx] for label in range(len(det)): bboxes = det[label] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = dataset.cat_ids[label] bbox_json_results.append(data) if len(seg) == 2: segms = seg[0][label] mask_score = seg[1][label] else: segms = seg[label] mask_score = [bbox[4] for bbox in bboxes] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['score'] = float(mask_score[i]) data['category_id'] = dataset.cat_ids[label] segms[i]['counts'] = segms[i]['counts'].decode() data['segmentation'] = segms[i] segm_json_results.append(data) json_results = (bbox_json_results, segm_json_results) </DeepExtract> result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox') result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox') result_files['segm'] = '{}.{}.json'.format(out_file, 'segm') mmcv.dump(json_results[0], result_files['bbox']) mmcv.dump(json_results[1], result_files['segm']) elif isinstance(results[0], np.ndarray): <DeepExtract> json_results = [] for idx in range(len(dataset)): img_id = dataset.img_ids[idx] bboxes = results[idx] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = 1 json_results.append(data) json_results = json_results </DeepExtract> result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal') mmcv.dump(json_results, result_files['proposal']) else: raise TypeError('invalid type of results') return result_files
def results2json(dataset, results, out_file): result_files = dict() if isinstance(results[0], list): json_results = [] for idx in range(len(dataset)): img_id = dataset.img_ids[idx] result = results[idx] for label in range(len(result)): bboxes = result[label] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = dataset.cat_ids[label] json_results.append(data) json_results = json_results result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox') result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox') mmcv.dump(json_results, result_files['bbox']) elif isinstance(results[0], tuple): bbox_json_results = [] segm_json_results = [] for idx in range(len(dataset)): img_id = dataset.img_ids[idx] (det, seg) = results[idx] for label in range(len(det)): bboxes = det[label] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = dataset.cat_ids[label] bbox_json_results.append(data) if len(seg) == 2: segms = seg[0][label] mask_score = seg[1][label] else: segms = seg[label] mask_score = [bbox[4] for bbox in bboxes] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['score'] = float(mask_score[i]) data['category_id'] = dataset.cat_ids[label] segms[i]['counts'] = segms[i]['counts'].decode() data['segmentation'] = segms[i] segm_json_results.append(data) json_results = (bbox_json_results, segm_json_results) result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox') result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox') result_files['segm'] = '{}.{}.json'.format(out_file, 'segm') mmcv.dump(json_results[0], result_files['bbox']) mmcv.dump(json_results[1], result_files['segm']) elif isinstance(results[0], np.ndarray): json_results = [] for idx in range(len(dataset)): img_id = dataset.img_ids[idx] bboxes = results[idx] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = 1 json_results.append(data) json_results = json_results result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal') mmcv.dump(json_results, result_files['proposal']) else: raise TypeError('invalid type of results') return result_files
C-HOI
positive
def f_retry(*args, **kwargs):
    """True decorator"""
    (m_tries, m_delay) = (tries, delay)
    <DeepExtract>
    template = 'Attempt failed with Exception: \n{0}: {1}\n'
    try:
        r_value = f(*args, **kwargs)
        r_status = True
    except Exception as exp:
        stderr.write(template.format(type(exp).__name__, exp))
        r_value = exp
        r_status = False
    (r_value, r_status) = (r_value, r_status)
    </DeepExtract>
    while m_tries > 0:
        if r_status is True:
            return r_value
        m_tries -= 1
        time.sleep(m_delay)
        m_delay *= backoff
        <DeepExtract>
        template = 'Attempt failed with Exception: \n{0}: {1}\n'
        try:
            r_value = f(*args, **kwargs)
            r_status = True
        except Exception as exp:
            stderr.write(template.format(type(exp).__name__, exp))
            r_value = exp
            r_status = False
        (r_value, r_status) = (r_value, r_status)
        </DeepExtract>
    if r_status is True:
        return r_value
    else:
        raise r_value
def f_retry(*args, **kwargs):
    """True decorator"""
    (m_tries, m_delay) = (tries, delay)
    template = 'Attempt failed with Exception: \n{0}: {1}\n'
    try:
        r_value = f(*args, **kwargs)
        r_status = True
    except Exception as exp:
        stderr.write(template.format(type(exp).__name__, exp))
        r_value = exp
        r_status = False
    (r_value, r_status) = (r_value, r_status)
    while m_tries > 0:
        if r_status is True:
            return r_value
        m_tries -= 1
        time.sleep(m_delay)
        m_delay *= backoff
        template = 'Attempt failed with Exception: \n{0}: {1}\n'
        try:
            r_value = f(*args, **kwargs)
            r_status = True
        except Exception as exp:
            stderr.write(template.format(type(exp).__name__, exp))
            r_value = exp
            r_status = False
        (r_value, r_status) = (r_value, r_status)
    if r_status is True:
        return r_value
    else:
        raise r_value
dataduct
positive
def __exit__(self, exc_type, exc_value, trace):
    <DeepExtract>
    logger.debug('Closing node %s for %s / %s', self.ip_addr, compute.name, compute.id)
    req = {'compute_id': compute.id, 'auth': compute.auth, 'node_ip_addr': self.ip_addr, 'terminate_pending': terminate_pending, 'close': close}
    try:
        yield self.send(b'CLOSE:' + serialize(req), reply=True, task=task)
    except Exception:
        logger.debug('Deleting computation %s/%s from %s failed', compute.id, compute.name, self.ip_addr)
    if not self.clusters:
        self.cpus = self.avail_cpus
    </DeepExtract>
    return True
def __exit__(self, exc_type, exc_value, trace):
    logger.debug('Closing node %s for %s / %s', self.ip_addr, compute.name, compute.id)
    req = {'compute_id': compute.id, 'auth': compute.auth, 'node_ip_addr': self.ip_addr, 'terminate_pending': terminate_pending, 'close': close}
    try:
        yield self.send(b'CLOSE:' + serialize(req), reply=True, task=task)
    except Exception:
        logger.debug('Deleting computation %s/%s from %s failed', compute.id, compute.name, self.ip_addr)
    if not self.clusters:
        self.cpus = self.avail_cpus
    return True
dispy
positive
def handle_oauth1_response(self, args): """Handles an oauth1 authorization response.""" <DeepExtract> if self.request_token_url: params = self.get_oauth1_client_params(token) client = oauthlib.oauth1.Client(client_key=self.consumer_key, client_secret=self.consumer_secret, **params) else: if token: if isinstance(token, (tuple, list)): token = {'access_token': token[0]} elif isinstance(token, string_types): token = {'access_token': token} client = oauthlib.oauth2.WebApplicationClient(self.consumer_key, token=token) client = client </DeepExtract> client.verifier = args.get('oauth_verifier') tup = session.get('%s_oauthtok' % self.name) if not tup: raise OAuthException('Token not found, maybe you disabled cookie', type='token_not_found') client.resource_owner_key = tup[0] client.resource_owner_secret = tup[1] (uri, headers, data) = client.sign(self.expand_url(self.access_token_url), _encode(self.access_token_method)) headers.update(self._access_token_headers) <DeepExtract> (uri, headers, to_bytes(data, self.encoding), self.access_token_method) = prepare_request(uri, headers, to_bytes(data, self.encoding), self.access_token_method) log.debug('Request %r with %r method' % (uri, self.access_token_method)) req = http.Request(uri, headers=headers, data=to_bytes(data, self.encoding)) req.get_method = lambda : self.access_token_method.upper() try: resp = http.urlopen(req) content = resp.read() resp.close() (resp, content) = (resp, content) except http.HTTPError as resp: content = resp.read() resp.close() (resp, content) = (resp, content) </DeepExtract> <DeepExtract> if not content_type: content_type = resp.headers.get('content-type', 'application/json') (ct, options) = parse_options_header(content_type) if ct in ('application/json', 'text/javascript'): if not content: data = {} data = json.loads(content) if ct in ('application/xml', 'text/xml'): data = get_etree().fromstring(content) if ct != 'application/x-www-form-urlencoded' and strict: data = content charset = options.get('charset', 'utf-8') data = url_decode(content, charset=charset).to_dict() </DeepExtract> if resp.code not in (200, 201): raise OAuthException('Invalid response from %s' % self.name, type='invalid_response', data=data) return data
def handle_oauth1_response(self, args): """Handles an oauth1 authorization response.""" if self.request_token_url: params = self.get_oauth1_client_params(token) client = oauthlib.oauth1.Client(client_key=self.consumer_key, client_secret=self.consumer_secret, **params) else: if token: if isinstance(token, (tuple, list)): token = {'access_token': token[0]} elif isinstance(token, string_types): token = {'access_token': token} client = oauthlib.oauth2.WebApplicationClient(self.consumer_key, token=token) client = client client.verifier = args.get('oauth_verifier') tup = session.get('%s_oauthtok' % self.name) if not tup: raise OAuthException('Token not found, maybe you disabled cookie', type='token_not_found') client.resource_owner_key = tup[0] client.resource_owner_secret = tup[1] (uri, headers, data) = client.sign(self.expand_url(self.access_token_url), _encode(self.access_token_method)) headers.update(self._access_token_headers) (uri, headers, to_bytes(data, self.encoding), self.access_token_method) = prepare_request(uri, headers, to_bytes(data, self.encoding), self.access_token_method) log.debug('Request %r with %r method' % (uri, self.access_token_method)) req = http.Request(uri, headers=headers, data=to_bytes(data, self.encoding)) req.get_method = lambda : self.access_token_method.upper() try: resp = http.urlopen(req) content = resp.read() resp.close() (resp, content) = (resp, content) except http.HTTPError as resp: content = resp.read() resp.close() (resp, content) = (resp, content) if not content_type: content_type = resp.headers.get('content-type', 'application/json') (ct, options) = parse_options_header(content_type) if ct in ('application/json', 'text/javascript'): if not content: data = {} data = json.loads(content) if ct in ('application/xml', 'text/xml'): data = get_etree().fromstring(content) if ct != 'application/x-www-form-urlencoded' and strict: data = content charset = options.get('charset', 'utf-8') data = url_decode(content, charset=charset).to_dict() if resp.code not in (200, 201): raise OAuthException('Invalid response from %s' % self.name, type='invalid_response', data=data) return data
BhagavadGita
positive
def upvote(self, by_user):
    <DeepExtract>
    if self._status == MergeRequestStatus.CLOSED:
        raise MergeRequestException("can't vote on a closed merge request")
    </DeepExtract>
    self._context['downvotes'].discard(by_user)
    self._context['upvotes'].add(by_user)
def upvote(self, by_user):
    if self._status == MergeRequestStatus.CLOSED:
        raise MergeRequestException("can't vote on a closed merge request")
    self._context['downvotes'].discard(by_user)
    self._context['upvotes'].add(by_user)
Clean-code-in-Python
positive
@csrf_protect_m
@filter_hook
def get(self, request, *args, **kwargs):
    <DeepExtract>
    self.form_obj = self.model_form(**self.get_form_datas())
    </DeepExtract>
    <DeepExtract>
    helper = self.get_form_helper()
    if helper:
        self.form_obj.helper = helper
    </DeepExtract>
    return self.get_response()
@csrf_protect_m
@filter_hook
def get(self, request, *args, **kwargs):
    self.form_obj = self.model_form(**self.get_form_datas())
    helper = self.get_form_helper()
    if helper:
        self.form_obj.helper = helper
    return self.get_response()
Django_Blog
positive
def iterate_once(self, batch_size):
    if self.enable_shuffle:
        <DeepExtract>
        if self.deterministic:
            return
        perm = np.arange(self.n)
        np.random.shuffle(perm)
        for key in self.data_map:
            self.data_map[key] = self.data_map[key][perm]
        self._next_id = 0
        </DeepExtract>
    while self._next_id <= self.n - batch_size:
        yield self.next_batch(batch_size)
    self._next_id = 0
def iterate_once(self, batch_size):
    if self.enable_shuffle:
        if self.deterministic:
            return
        perm = np.arange(self.n)
        np.random.shuffle(perm)
        for key in self.data_map:
            self.data_map[key] = self.data_map[key][perm]
        self._next_id = 0
    while self._next_id <= self.n - batch_size:
        yield self.next_batch(batch_size)
    self._next_id = 0
CHER
positive
def crop(self, box): """ Crops a rectangular region from this bounding box. The box is a 4-tuple defining the left, upper, right, and lower pixel coordinate. """ <DeepExtract> if self.mode == 'xyxy': (xmin, ymin, xmax, ymax) = self.bbox.split(1, dim=-1) (xmin, ymin, xmax, ymax) = (xmin, ymin, xmax, ymax) elif self.mode == 'xywh': TO_REMOVE = 1 (xmin, ymin, w, h) = self.bbox.split(1, dim=-1) (xmin, ymin, xmax, ymax) = (xmin, ymin, xmin + (w - TO_REMOVE).clamp(min=0), ymin + (h - TO_REMOVE).clamp(min=0)) else: raise RuntimeError('Should not be here') </DeepExtract> (w, h) = (box[2] - box[0], box[3] - box[1]) cropped_xmin = (xmin - box[0]).clamp(min=0, max=w) cropped_ymin = (ymin - box[1]).clamp(min=0, max=h) cropped_xmax = (xmax - box[0]).clamp(min=0, max=w) cropped_ymax = (ymax - box[1]).clamp(min=0, max=h) if False: is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax) cropped_box = torch.cat((cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1) bbox = BoxList(cropped_box, (w, h), mode='xyxy') for (k, v) in self.extra_fields.items(): if not isinstance(v, torch.Tensor): v = v.crop(box) bbox.add_field(k, v) return bbox.convert(self.mode)
def crop(self, box): """ Crops a rectangular region from this bounding box. The box is a 4-tuple defining the left, upper, right, and lower pixel coordinate. """ if self.mode == 'xyxy': (xmin, ymin, xmax, ymax) = self.bbox.split(1, dim=-1) (xmin, ymin, xmax, ymax) = (xmin, ymin, xmax, ymax) elif self.mode == 'xywh': TO_REMOVE = 1 (xmin, ymin, w, h) = self.bbox.split(1, dim=-1) (xmin, ymin, xmax, ymax) = (xmin, ymin, xmin + (w - TO_REMOVE).clamp(min=0), ymin + (h - TO_REMOVE).clamp(min=0)) else: raise RuntimeError('Should not be here') (w, h) = (box[2] - box[0], box[3] - box[1]) cropped_xmin = (xmin - box[0]).clamp(min=0, max=w) cropped_ymin = (ymin - box[1]).clamp(min=0, max=h) cropped_xmax = (xmax - box[0]).clamp(min=0, max=w) cropped_ymax = (ymax - box[1]).clamp(min=0, max=h) if False: is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax) cropped_box = torch.cat((cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1) bbox = BoxList(cropped_box, (w, h), mode='xyxy') for (k, v) in self.extra_fields.items(): if not isinstance(v, torch.Tensor): v = v.crop(box) bbox.add_field(k, v) return bbox.convert(self.mode)
DetNAS
positive
@override_flag('limit_data_access', active=True) @mock.patch('apps.authorization.models.datetime', StubDate) def test_thirteen_month_app_type_with_flag_limit_data_access(self): """ Test Application.data_access_type="THIRTEEN_MONTH" with limit_data_access flag True This will be the flag setting in SBX. """ assert flag_is_active('limit_data_access') (user, app, ac) = self._create_user_app_token_grant(first_name='first', last_name='last1', fhir_id='-20140000008325', app_name='test_app1', app_username='devuser1', app_user_organization='org1', app_data_access_type='THIRTEEN_MONTH') self.assertEqual(app.data_access_type, 'THIRTEEN_MONTH') dag = DataAccessGrant.objects.get(beneficiary=user, application=app) self.assertNotEqual(dag, None) self.assertNotEqual(dag.expiration_date, None) <DeepExtract> refresh_post_data = {'grant_type': 'refresh_token', 'refresh_token': ac['refresh_token'], 'redirect_uri': app.redirect_uris, 'client_id': app.client_id, 'client_secret': app.client_secret} response = self.client.post('/v1/o/token/', data=refresh_post_data) content = json.loads(response.content) self.assertEqual(response.status_code, 200) if expected_response_error_mesg is not None: self.assertEqual(content['error'], expected_response_error_mesg) if expected_response_error_description_mesg is not None: self.assertEqual(content['error_description'], expected_response_error_description_mesg) ac = content </DeepExtract> <DeepExtract> response = self.client.get('/v1/connect/userinfo', HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) response = self.client.get('/v2/connect/userinfo', HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) with HTTMock(self.fhir_request_patient_readview_v1_success_mock, self.fhir_request_coverage_readview_v1_success_mock, self.fhir_request_eob_readview_v1_success_mock, self.fhir_request_patient_readview_v2_success_mock, self.fhir_request_coverage_readview_v2_success_mock, self.fhir_request_eob_readview_v2_success_mock): for path in ['/v1/fhir/Patient/-20140000008325', '/v1/fhir/Coverage/-20140000008325', '/v1/fhir/ExplanationOfBenefit/-20140000008325', '/v2/fhir/Patient/-20140000008325', '/v2/fhir/Coverage/-20140000008325', '/v2/fhir/ExplanationOfBenefit/-20140000008325']: response = self.client.get(path, HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) with HTTMock(self.fhir_request_patient_searchview_v1_success_mock, self.fhir_request_coverage_searchview_v1_success_mock, self.fhir_request_eob_searchview_v1_success_mock, self.fhir_request_patient_searchview_v2_success_mock, self.fhir_request_coverage_searchview_v2_success_mock, self.fhir_request_eob_searchview_v2_success_mock): for path in ['/v1/fhir/Patient?patient=-20140000008325', '/v1/fhir/Coverage?patient=-20140000008325', '/v1/fhir/Patient', '/v2/fhir/Patient?patient=-20140000008325', '/v2/fhir/Coverage?patient=-20140000008325', '/v2/fhir/Patient']: response = self.client.get(path, HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) 
self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) </DeepExtract> self.assertFalse(dag.has_expired()) StubDate.now = classmethod(lambda cls: datetime.now().replace(tzinfo=pytz.UTC) + relativedelta(months=+13, hours=+1)) self.assertTrue(dag.has_expired()) self.assertGreater(dag.expiration_date, datetime.now().replace(tzinfo=pytz.UTC) + relativedelta(months=+13, hours=-1)) <DeepExtract> refresh_post_data = {'grant_type': 'refresh_token', 'refresh_token': ac['refresh_token'], 'redirect_uri': app.redirect_uris, 'client_id': app.client_id, 'client_secret': app.client_secret} response = self.client.post('/v1/o/token/', data=refresh_post_data) content = json.loads(response.content) self.assertEqual(response.status_code, 401) if 'invalid_client' is not None: self.assertEqual(content['error'], 'invalid_client') if settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG is not None: self.assertEqual(content['error_description'], settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG) return content </DeepExtract> <DeepExtract> response = self.client.get('/v1/connect/userinfo', HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 401) if settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG is not None: content = json.loads(response.content) self.assertEqual(content['detail'], settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG) response = self.client.get('/v2/connect/userinfo', HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 401) if settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG is not None: content = json.loads(response.content) self.assertEqual(content['detail'], settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG) with HTTMock(self.fhir_request_patient_readview_v1_success_mock, self.fhir_request_coverage_readview_v1_success_mock, self.fhir_request_eob_readview_v1_success_mock, self.fhir_request_patient_readview_v2_success_mock, self.fhir_request_coverage_readview_v2_success_mock, self.fhir_request_eob_readview_v2_success_mock): for path in ['/v1/fhir/Patient/-20140000008325', '/v1/fhir/Coverage/-20140000008325', '/v1/fhir/ExplanationOfBenefit/-20140000008325', '/v2/fhir/Patient/-20140000008325', '/v2/fhir/Coverage/-20140000008325', '/v2/fhir/ExplanationOfBenefit/-20140000008325']: response = self.client.get(path, HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 401) if settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG is not None: content = json.loads(response.content) self.assertEqual(content['detail'], settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG) with HTTMock(self.fhir_request_patient_searchview_v1_success_mock, self.fhir_request_coverage_searchview_v1_success_mock, self.fhir_request_eob_searchview_v1_success_mock, self.fhir_request_patient_searchview_v2_success_mock, self.fhir_request_coverage_searchview_v2_success_mock, self.fhir_request_eob_searchview_v2_success_mock): for path in ['/v1/fhir/Patient?patient=-20140000008325', '/v1/fhir/Coverage?patient=-20140000008325', '/v1/fhir/Patient', '/v2/fhir/Patient?patient=-20140000008325', '/v2/fhir/Coverage?patient=-20140000008325', '/v2/fhir/Patient']: response = self.client.get(path, HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 401) if 
settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG is not None: content = json.loads(response.content) self.assertEqual(content['detail'], settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG) </DeepExtract> (user, app, ac) = self._create_user_app_token_grant(first_name='first', last_name='last1', fhir_id='-20140000008325', app_name='test_app1', app_username='devuser1', app_user_organization='org1', app_data_access_type='THIRTEEN_MONTH') self.assertEqual(app.data_access_type, 'THIRTEEN_MONTH') dag = DataAccessGrant.objects.get(beneficiary=user, application=app) self.assertNotEqual(dag, None) self.assertNotEqual(dag.expiration_date, None) self.assertGreater(dag.expiration_date, datetime.now().replace(tzinfo=pytz.UTC) + relativedelta(months=+26, days=-2)) <DeepExtract> refresh_post_data = {'grant_type': 'refresh_token', 'refresh_token': ac['refresh_token'], 'redirect_uri': app.redirect_uris, 'client_id': app.client_id, 'client_secret': app.client_secret} response = self.client.post('/v1/o/token/', data=refresh_post_data) content = json.loads(response.content) self.assertEqual(response.status_code, 200) if expected_response_error_mesg is not None: self.assertEqual(content['error'], expected_response_error_mesg) if expected_response_error_description_mesg is not None: self.assertEqual(content['error_description'], expected_response_error_description_mesg) ac = content </DeepExtract> <DeepExtract> response = self.client.get('/v1/connect/userinfo', HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) response = self.client.get('/v2/connect/userinfo', HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) with HTTMock(self.fhir_request_patient_readview_v1_success_mock, self.fhir_request_coverage_readview_v1_success_mock, self.fhir_request_eob_readview_v1_success_mock, self.fhir_request_patient_readview_v2_success_mock, self.fhir_request_coverage_readview_v2_success_mock, self.fhir_request_eob_readview_v2_success_mock): for path in ['/v1/fhir/Patient/-20140000008325', '/v1/fhir/Coverage/-20140000008325', '/v1/fhir/ExplanationOfBenefit/-20140000008325', '/v2/fhir/Patient/-20140000008325', '/v2/fhir/Coverage/-20140000008325', '/v2/fhir/ExplanationOfBenefit/-20140000008325']: response = self.client.get(path, HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) with HTTMock(self.fhir_request_patient_searchview_v1_success_mock, self.fhir_request_coverage_searchview_v1_success_mock, self.fhir_request_eob_searchview_v1_success_mock, self.fhir_request_patient_searchview_v2_success_mock, self.fhir_request_coverage_searchview_v2_success_mock, self.fhir_request_eob_searchview_v2_success_mock): for path in ['/v1/fhir/Patient?patient=-20140000008325', '/v1/fhir/Coverage?patient=-20140000008325', '/v1/fhir/Patient', '/v2/fhir/Patient?patient=-20140000008325', '/v2/fhir/Coverage?patient=-20140000008325', '/v2/fhir/Patient']: response = self.client.get(path, HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) 
self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) </DeepExtract>
@override_flag('limit_data_access', active=True) @mock.patch('apps.authorization.models.datetime', StubDate) def test_thirteen_month_app_type_with_flag_limit_data_access(self): """ Test Application.data_access_type="THIRTEEN_MONTH" with limit_data_access flag True This will be the flag setting in SBX. """ assert flag_is_active('limit_data_access') (user, app, ac) = self._create_user_app_token_grant(first_name='first', last_name='last1', fhir_id='-20140000008325', app_name='test_app1', app_username='devuser1', app_user_organization='org1', app_data_access_type='THIRTEEN_MONTH') self.assertEqual(app.data_access_type, 'THIRTEEN_MONTH') dag = DataAccessGrant.objects.get(beneficiary=user, application=app) self.assertNotEqual(dag, None) self.assertNotEqual(dag.expiration_date, None) refresh_post_data = {'grant_type': 'refresh_token', 'refresh_token': ac['refresh_token'], 'redirect_uri': app.redirect_uris, 'client_id': app.client_id, 'client_secret': app.client_secret} response = self.client.post('/v1/o/token/', data=refresh_post_data) content = json.loads(response.content) self.assertEqual(response.status_code, 200) if expected_response_error_mesg is not None: self.assertEqual(content['error'], expected_response_error_mesg) if expected_response_error_description_mesg is not None: self.assertEqual(content['error_description'], expected_response_error_description_mesg) ac = content response = self.client.get('/v1/connect/userinfo', HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) response = self.client.get('/v2/connect/userinfo', HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) with HTTMock(self.fhir_request_patient_readview_v1_success_mock, self.fhir_request_coverage_readview_v1_success_mock, self.fhir_request_eob_readview_v1_success_mock, self.fhir_request_patient_readview_v2_success_mock, self.fhir_request_coverage_readview_v2_success_mock, self.fhir_request_eob_readview_v2_success_mock): for path in ['/v1/fhir/Patient/-20140000008325', '/v1/fhir/Coverage/-20140000008325', '/v1/fhir/ExplanationOfBenefit/-20140000008325', '/v2/fhir/Patient/-20140000008325', '/v2/fhir/Coverage/-20140000008325', '/v2/fhir/ExplanationOfBenefit/-20140000008325']: response = self.client.get(path, HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) with HTTMock(self.fhir_request_patient_searchview_v1_success_mock, self.fhir_request_coverage_searchview_v1_success_mock, self.fhir_request_eob_searchview_v1_success_mock, self.fhir_request_patient_searchview_v2_success_mock, self.fhir_request_coverage_searchview_v2_success_mock, self.fhir_request_eob_searchview_v2_success_mock): for path in ['/v1/fhir/Patient?patient=-20140000008325', '/v1/fhir/Coverage?patient=-20140000008325', '/v1/fhir/Patient', '/v2/fhir/Patient?patient=-20140000008325', '/v2/fhir/Coverage?patient=-20140000008325', '/v2/fhir/Patient']: response = self.client.get(path, HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 200) if 
expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) self.assertFalse(dag.has_expired()) StubDate.now = classmethod(lambda cls: datetime.now().replace(tzinfo=pytz.UTC) + relativedelta(months=+13, hours=+1)) self.assertTrue(dag.has_expired()) self.assertGreater(dag.expiration_date, datetime.now().replace(tzinfo=pytz.UTC) + relativedelta(months=+13, hours=-1)) refresh_post_data = {'grant_type': 'refresh_token', 'refresh_token': ac['refresh_token'], 'redirect_uri': app.redirect_uris, 'client_id': app.client_id, 'client_secret': app.client_secret} response = self.client.post('/v1/o/token/', data=refresh_post_data) content = json.loads(response.content) self.assertEqual(response.status_code, 401) if 'invalid_client' is not None: self.assertEqual(content['error'], 'invalid_client') if settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG is not None: self.assertEqual(content['error_description'], settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG) return content response = self.client.get('/v1/connect/userinfo', HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 401) if settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG is not None: content = json.loads(response.content) self.assertEqual(content['detail'], settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG) response = self.client.get('/v2/connect/userinfo', HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 401) if settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG is not None: content = json.loads(response.content) self.assertEqual(content['detail'], settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG) with HTTMock(self.fhir_request_patient_readview_v1_success_mock, self.fhir_request_coverage_readview_v1_success_mock, self.fhir_request_eob_readview_v1_success_mock, self.fhir_request_patient_readview_v2_success_mock, self.fhir_request_coverage_readview_v2_success_mock, self.fhir_request_eob_readview_v2_success_mock): for path in ['/v1/fhir/Patient/-20140000008325', '/v1/fhir/Coverage/-20140000008325', '/v1/fhir/ExplanationOfBenefit/-20140000008325', '/v2/fhir/Patient/-20140000008325', '/v2/fhir/Coverage/-20140000008325', '/v2/fhir/ExplanationOfBenefit/-20140000008325']: response = self.client.get(path, HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 401) if settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG is not None: content = json.loads(response.content) self.assertEqual(content['detail'], settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG) with HTTMock(self.fhir_request_patient_searchview_v1_success_mock, self.fhir_request_coverage_searchview_v1_success_mock, self.fhir_request_eob_searchview_v1_success_mock, self.fhir_request_patient_searchview_v2_success_mock, self.fhir_request_coverage_searchview_v2_success_mock, self.fhir_request_eob_searchview_v2_success_mock): for path in ['/v1/fhir/Patient?patient=-20140000008325', '/v1/fhir/Coverage?patient=-20140000008325', '/v1/fhir/Patient', '/v2/fhir/Patient?patient=-20140000008325', '/v2/fhir/Coverage?patient=-20140000008325', '/v2/fhir/Patient']: response = self.client.get(path, HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 401) if settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG is not None: content = json.loads(response.content) 
self.assertEqual(content['detail'], settings.APPLICATION_THIRTEEN_MONTH_DATA_ACCESS_EXPIRED_MESG) (user, app, ac) = self._create_user_app_token_grant(first_name='first', last_name='last1', fhir_id='-20140000008325', app_name='test_app1', app_username='devuser1', app_user_organization='org1', app_data_access_type='THIRTEEN_MONTH') self.assertEqual(app.data_access_type, 'THIRTEEN_MONTH') dag = DataAccessGrant.objects.get(beneficiary=user, application=app) self.assertNotEqual(dag, None) self.assertNotEqual(dag.expiration_date, None) self.assertGreater(dag.expiration_date, datetime.now().replace(tzinfo=pytz.UTC) + relativedelta(months=+26, days=-2)) refresh_post_data = {'grant_type': 'refresh_token', 'refresh_token': ac['refresh_token'], 'redirect_uri': app.redirect_uris, 'client_id': app.client_id, 'client_secret': app.client_secret} response = self.client.post('/v1/o/token/', data=refresh_post_data) content = json.loads(response.content) self.assertEqual(response.status_code, 200) if expected_response_error_mesg is not None: self.assertEqual(content['error'], expected_response_error_mesg) if expected_response_error_description_mesg is not None: self.assertEqual(content['error_description'], expected_response_error_description_mesg) ac = content response = self.client.get('/v1/connect/userinfo', HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) response = self.client.get('/v2/connect/userinfo', HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) with HTTMock(self.fhir_request_patient_readview_v1_success_mock, self.fhir_request_coverage_readview_v1_success_mock, self.fhir_request_eob_readview_v1_success_mock, self.fhir_request_patient_readview_v2_success_mock, self.fhir_request_coverage_readview_v2_success_mock, self.fhir_request_eob_readview_v2_success_mock): for path in ['/v1/fhir/Patient/-20140000008325', '/v1/fhir/Coverage/-20140000008325', '/v1/fhir/ExplanationOfBenefit/-20140000008325', '/v2/fhir/Patient/-20140000008325', '/v2/fhir/Coverage/-20140000008325', '/v2/fhir/ExplanationOfBenefit/-20140000008325']: response = self.client.get(path, HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], expected_response_detail_mesg) with HTTMock(self.fhir_request_patient_searchview_v1_success_mock, self.fhir_request_coverage_searchview_v1_success_mock, self.fhir_request_eob_searchview_v1_success_mock, self.fhir_request_patient_searchview_v2_success_mock, self.fhir_request_coverage_searchview_v2_success_mock, self.fhir_request_eob_searchview_v2_success_mock): for path in ['/v1/fhir/Patient?patient=-20140000008325', '/v1/fhir/Coverage?patient=-20140000008325', '/v1/fhir/Patient', '/v2/fhir/Patient?patient=-20140000008325', '/v2/fhir/Coverage?patient=-20140000008325', '/v2/fhir/Patient']: response = self.client.get(path, HTTP_AUTHORIZATION='Bearer ' + ac['access_token']) self.assertEqual(response.status_code, 200) if expected_response_detail_mesg is not None: content = json.loads(response.content) self.assertEqual(content['detail'], 
expected_response_detail_mesg) </DeepExtract>
bluebutton-web-server
positive
def test_good_connection(self):
    """Connect with a valid URL and token."""
    with requests_mock.mock() as m:
        <DeepExtract>
        solver1_data = structured_solver_data(solver1_name)
        solver2_data = structured_solver_data(solver2_name)
        all_solver_data = [solver1_data, solver2_data]
        valid_token_headers = {'X-Auth-Token': token}
        invalid_token_headers = {'X-Auth-Token': bad_token}
        m.get(requests_mock.ANY, status_code=404)
        m.get(requests_mock.ANY, status_code=401, request_headers=invalid_token_headers)
        m.get(solver1_url, json=solver1_data, request_headers=valid_token_headers)
        m.get(solver2_url, json=solver2_data, request_headers=valid_token_headers)
        m.get(all_solver_url, json=all_solver_data, request_headers=valid_token_headers)
        </DeepExtract>
        with Client(endpoint=url, token=token) as client:
            self.assertTrue(len(client.get_solvers()) > 0)
def test_good_connection(self):
    """Connect with a valid URL and token."""
    with requests_mock.mock() as m:
        solver1_data = structured_solver_data(solver1_name)
        solver2_data = structured_solver_data(solver2_name)
        all_solver_data = [solver1_data, solver2_data]
        valid_token_headers = {'X-Auth-Token': token}
        invalid_token_headers = {'X-Auth-Token': bad_token}
        m.get(requests_mock.ANY, status_code=404)
        m.get(requests_mock.ANY, status_code=401, request_headers=invalid_token_headers)
        m.get(solver1_url, json=solver1_data, request_headers=valid_token_headers)
        m.get(solver2_url, json=solver2_data, request_headers=valid_token_headers)
        m.get(all_solver_url, json=all_solver_data, request_headers=valid_token_headers)
        with Client(endpoint=url, token=token) as client:
            self.assertTrue(len(client.get_solvers()) > 0)
dwave-cloud-client
positive
def decode_pose(yaw, pitch, roll, face_bbox, frame): """ pitch > 0 Head down, < 0 look up yaw > 0 Turn right < 0 Turn left roll > 0 Tilt right, < 0 Tilt left """ <DeepExtract> mult = 2 if bold else 1 cv2.putText(frame, 'pitch:{:.0f}, yaw:{:.0f}, roll:{:.0f}'.format(pitch, yaw, roll), (face_bbox[0] + 10 - 15, face_bbox[1] - 15), text_type, size, bg_color, 3 * mult, line_type) cv2.putText(frame, 'pitch:{:.0f}, yaw:{:.0f}, roll:{:.0f}'.format(pitch, yaw, roll), (face_bbox[0] + 10 - 15, face_bbox[1] - 15), text_type, size, color, 1 * mult, line_type) </DeepExtract> vals = np.array([abs(pitch), abs(yaw), abs(roll)]) max_index = np.argmax(vals) if vals[max_index] < MIN_THRESHOLD: return if max_index == 0: if pitch > 0: txt = 'Look down' else: txt = 'Look up' elif max_index == 1: if yaw > 0: txt = 'Turn right' else: txt = 'Turn left' elif max_index == 2: if roll > 0: txt = 'Tilt right' else: txt = 'Tilt left' <DeepExtract> mult = 2 if True else 1 cv2.putText(frame, txt, (face_bbox[0] + 10, face_bbox[1] + 30), text_type, 1, bg_color, 3 * mult, line_type) cv2.putText(frame, txt, (face_bbox[0] + 10, face_bbox[1] + 30), text_type, 1, color, 1 * mult, line_type) </DeepExtract>
def decode_pose(yaw, pitch, roll, face_bbox, frame): """ pitch > 0 Head down, < 0 look up yaw > 0 Turn right < 0 Turn left roll > 0 Tilt right, < 0 Tilt left """ mult = 2 if bold else 1 cv2.putText(frame, 'pitch:{:.0f}, yaw:{:.0f}, roll:{:.0f}'.format(pitch, yaw, roll), (face_bbox[0] + 10 - 15, face_bbox[1] - 15), text_type, size, bg_color, 3 * mult, line_type) cv2.putText(frame, 'pitch:{:.0f}, yaw:{:.0f}, roll:{:.0f}'.format(pitch, yaw, roll), (face_bbox[0] + 10 - 15, face_bbox[1] - 15), text_type, size, color, 1 * mult, line_type) vals = np.array([abs(pitch), abs(yaw), abs(roll)]) max_index = np.argmax(vals) if vals[max_index] < MIN_THRESHOLD: return if max_index == 0: if pitch > 0: txt = 'Look down' else: txt = 'Look up' elif max_index == 1: if yaw > 0: txt = 'Turn right' else: txt = 'Turn left' elif max_index == 2: if roll > 0: txt = 'Tilt right' else: txt = 'Tilt left' mult = 2 if True else 1 cv2.putText(frame, txt, (face_bbox[0] + 10, face_bbox[1] + 30), text_type, 1, bg_color, 3 * mult, line_type) cv2.putText(frame, txt, (face_bbox[0] + 10, face_bbox[1] + 30), text_type, 1, color, 1 * mult, line_type) </DeepExtract>
depthai-experiments
positive
def get_crop_box(self):
    """
    Get coordinates of the rectangle defining the new image boundaries.
    It takes into acount any specific wishes from the model (explicitely passed in crop_box),
    the desired format and it's options (flexible_height, nocrop) and mainly it's ratio.
    After dimensions of the format were specified (see set_format), crop the image to the same ratio.
    """
    <DeepExtract>
    f = self.fmt
    if f.flexible_height and f.flexible_max_height:
        (flexw, flexh) = (self.fw, f.flexible_max_height)
        flex_ratio = float(flexw) / flexh
        if abs(flex_ratio - self.image_ratio) < abs(self.format_ratio - self.image_ratio):
            self.fh = flexh
            self.format_ratio = flex_ratio
    </DeepExtract>
    if self.fmt.nocrop:
        return
    if self.crop_box:
        return self.crop_box
    (iw, ih) = self.image.size
    if iw <= self.fw and ih <= self.fh:
        return
    if self.image_ratio < self.format_ratio:
        diff = ih - iw * self.fh / self.fw
        return (0, diff // 2, iw, ih - diff // 2)
    elif self.image_ratio > self.format_ratio:
        diff = iw - ih * self.fw / self.fh
        return (diff // 2, 0, iw - diff // 2, ih)
    else:
        return
def get_crop_box(self):
    """
    Get coordinates of the rectangle defining the new image boundaries.
    It takes into acount any specific wishes from the model (explicitely passed in crop_box),
    the desired format and it's options (flexible_height, nocrop) and mainly it's ratio.
    After dimensions of the format were specified (see set_format), crop the image to the same ratio.
    """
    f = self.fmt
    if f.flexible_height and f.flexible_max_height:
        (flexw, flexh) = (self.fw, f.flexible_max_height)
        flex_ratio = float(flexw) / flexh
        if abs(flex_ratio - self.image_ratio) < abs(self.format_ratio - self.image_ratio):
            self.fh = flexh
            self.format_ratio = flex_ratio
    if self.fmt.nocrop:
        return
    if self.crop_box:
        return self.crop_box
    (iw, ih) = self.image.size
    if iw <= self.fw and ih <= self.fh:
        return
    if self.image_ratio < self.format_ratio:
        diff = ih - iw * self.fh / self.fw
        return (0, diff // 2, iw, ih - diff // 2)
    elif self.image_ratio > self.format_ratio:
        diff = iw - ih * self.fw / self.fh
        return (diff // 2, 0, iw - diff // 2, ih)
    else:
        return
ella
positive
def get_standard_stat_dvh(self, dose_scale='absolute', volume_scale='relative'): """ :param dose_scale: either 'absolute' or 'relative' :param volume_scale: either 'absolute' or 'relative' :return: a standard set of statistical dvhs (min, q1, mean, median, q1, and max) :rtype: dict """ if dose_scale == 'relative': <DeepExtract> resampled_bin_count = load_options(return_attr='RESAMPLED_DVH_BIN_COUNT') min_rx_dose = np.min(self.rx_dose) * 100.0 new_bin_count = int(np.divide(float(self.bin_count), min_rx_dose) * resampled_bin_count) x1 = np.linspace(0, self.bin_count, self.bin_count) y2 = np.zeros([new_bin_count, self.count]) for i in range(self.count): x2 = np.multiply(np.linspace(0, new_bin_count, new_bin_count), self.rx_dose[i] * 100.0 / resampled_bin_count) y2[:, i] = np.interp(x2, x1, self.dvh[:, i]) x2 = np.divide(np.linspace(0, new_bin_count, new_bin_count), resampled_bin_count) (x_axis, dvhs) = (x2, y2) </DeepExtract> else: dvhs = self.dvh if volume_scale == 'absolute': <DeepExtract> dvhs = np.multiply(dvhs, self.volume) </DeepExtract> standard_stat_dvh = {'min': np.min(dvhs, 1), 'q1': np.percentile(dvhs, 25, 1), 'mean': np.mean(dvhs, 1), 'median': np.median(dvhs, 1), 'q3': np.percentile(dvhs, 75, 1), 'max': np.max(dvhs, 1)} return standard_stat_dvh
def get_standard_stat_dvh(self, dose_scale='absolute', volume_scale='relative'): """ :param dose_scale: either 'absolute' or 'relative' :param volume_scale: either 'absolute' or 'relative' :return: a standard set of statistical dvhs (min, q1, mean, median, q1, and max) :rtype: dict """ if dose_scale == 'relative': resampled_bin_count = load_options(return_attr='RESAMPLED_DVH_BIN_COUNT') min_rx_dose = np.min(self.rx_dose) * 100.0 new_bin_count = int(np.divide(float(self.bin_count), min_rx_dose) * resampled_bin_count) x1 = np.linspace(0, self.bin_count, self.bin_count) y2 = np.zeros([new_bin_count, self.count]) for i in range(self.count): x2 = np.multiply(np.linspace(0, new_bin_count, new_bin_count), self.rx_dose[i] * 100.0 / resampled_bin_count) y2[:, i] = np.interp(x2, x1, self.dvh[:, i]) x2 = np.divide(np.linspace(0, new_bin_count, new_bin_count), resampled_bin_count) (x_axis, dvhs) = (x2, y2) else: dvhs = self.dvh if volume_scale == 'absolute': dvhs = np.multiply(dvhs, self.volume) standard_stat_dvh = {'min': np.min(dvhs, 1), 'q1': np.percentile(dvhs, 25, 1), 'mean': np.mean(dvhs, 1), 'median': np.median(dvhs, 1), 'q3': np.percentile(dvhs, 75, 1), 'max': np.max(dvhs, 1)} return standard_stat_dvh
DVH-Analytics-Bokeh
positive
def _handle_signatures(original_bytes, original, message, params): """Shared code for handling message signatures. RFC 3156 is quite strict: * exactly two messages * the second is of type 'application/pgp-signature' * the second contains the detached signature :param original_bytes: the original top-level mail raw bytes, containing the segments against which signatures will be verified. Necessary because parsing and re-serialising a Message isn't byte-perfect, which interferes with signature validation. :type original_bytes: bytes :param original: The original top-level mail. This is required to attache special headers to :type original: :class:`email.message.Message` :param message: The multipart/signed payload to verify :type message: :class:`email.message.Message` :param params: the message parameters as returned by :func:`get_params` :type params: dict[str, str] """ try: nb_parts = len(message.get_payload()) if message.is_multipart() else 1 if nb_parts != 2: raise MessageError(f'expected exactly two messages, got {nb_parts}') signature_part = message.get_payload(1) ct = signature_part.get_content_type() if ct != _APP_PGP_SIG: raise MessageError(f'expected Content-Type: {_APP_PGP_SIG}, got: {ct}') mic_alg = params.get('micalg', 'nothing') if not mic_alg.startswith('pgp-'): raise MessageError(f'expected micalg=pgp-..., got: {mic_alg}') if b'\r\n' not in original_bytes: original_bytes = original_bytes.replace(b'\n', b'\r\n') signed_boundary = b'\r\n--' + message.get_boundary().encode() original_chunks = original_bytes.split(signed_boundary) nb_chunks = len(original_chunks) if nb_chunks != 4: raise MessageError(f'unexpected number of multipart chunks, got {nb_chunks}') signed_chunk = original_chunks[1] if len(signed_chunk) < len(b'\r\n'): raise MessageError('signed chunk has an invalid length') sigs = crypto.verify_detached(signed_chunk[len(b'\r\n'):], signature_part.get_payload(decode=True)) <DeepExtract> sig_from = '' sig_known = True uid_trusted = False assert None is None or isinstance(None, str) if not sigs: None = None or 'no signature found' elif not None: try: key = crypto.get_key(sigs[0].fpr) for uid in key.uids: if crypto.check_uid_validity(key, uid.email): sig_from = uid.uid uid_trusted = True break else: sig_from = key.uids[0].uid except GPGProblem: sig_from = sigs[0].fpr sig_known = False if None: msg = 'Invalid: {}'.format(None) elif uid_trusted: msg = 'Valid: {}'.format(sig_from) else: msg = 'Untrusted: {}'.format(sig_from) original.add_header(X_SIGNATURE_VALID_HEADER, 'False' if None or not sig_known else 'True') original.add_header(X_SIGNATURE_MESSAGE_HEADER, msg) </DeepExtract> except (GPGProblem, MessageError) as error: <DeepExtract> sig_from = '' sig_known = True uid_trusted = False assert str(error) is None or isinstance(str(error), str) if not []: str(error) = str(error) or 'no signature found' elif not str(error): try: key = crypto.get_key([][0].fpr) for uid in key.uids: if crypto.check_uid_validity(key, uid.email): sig_from = uid.uid uid_trusted = True break else: sig_from = key.uids[0].uid except GPGProblem: sig_from = [][0].fpr sig_known = False if str(error): msg = 'Invalid: {}'.format(str(error)) elif uid_trusted: msg = 'Valid: {}'.format(sig_from) else: msg = 'Untrusted: {}'.format(sig_from) original.add_header(X_SIGNATURE_VALID_HEADER, 'False' if str(error) or not sig_known else 'True') original.add_header(X_SIGNATURE_MESSAGE_HEADER, msg) </DeepExtract>
def _handle_signatures(original_bytes, original, message, params): """Shared code for handling message signatures. RFC 3156 is quite strict: * exactly two messages * the second is of type 'application/pgp-signature' * the second contains the detached signature :param original_bytes: the original top-level mail raw bytes, containing the segments against which signatures will be verified. Necessary because parsing and re-serialising a Message isn't byte-perfect, which interferes with signature validation. :type original_bytes: bytes :param original: The original top-level mail. This is required to attache special headers to :type original: :class:`email.message.Message` :param message: The multipart/signed payload to verify :type message: :class:`email.message.Message` :param params: the message parameters as returned by :func:`get_params` :type params: dict[str, str] """ try: nb_parts = len(message.get_payload()) if message.is_multipart() else 1 if nb_parts != 2: raise MessageError(f'expected exactly two messages, got {nb_parts}') signature_part = message.get_payload(1) ct = signature_part.get_content_type() if ct != _APP_PGP_SIG: raise MessageError(f'expected Content-Type: {_APP_PGP_SIG}, got: {ct}') mic_alg = params.get('micalg', 'nothing') if not mic_alg.startswith('pgp-'): raise MessageError(f'expected micalg=pgp-..., got: {mic_alg}') if b'\r\n' not in original_bytes: original_bytes = original_bytes.replace(b'\n', b'\r\n') signed_boundary = b'\r\n--' + message.get_boundary().encode() original_chunks = original_bytes.split(signed_boundary) nb_chunks = len(original_chunks) if nb_chunks != 4: raise MessageError(f'unexpected number of multipart chunks, got {nb_chunks}') signed_chunk = original_chunks[1] if len(signed_chunk) < len(b'\r\n'): raise MessageError('signed chunk has an invalid length') sigs = crypto.verify_detached(signed_chunk[len(b'\r\n'):], signature_part.get_payload(decode=True)) sig_from = '' sig_known = True uid_trusted = False assert None is None or isinstance(None, str) if not sigs: None = None or 'no signature found' elif not None: try: key = crypto.get_key(sigs[0].fpr) for uid in key.uids: if crypto.check_uid_validity(key, uid.email): sig_from = uid.uid uid_trusted = True break else: sig_from = key.uids[0].uid except GPGProblem: sig_from = sigs[0].fpr sig_known = False if None: msg = 'Invalid: {}'.format(None) elif uid_trusted: msg = 'Valid: {}'.format(sig_from) else: msg = 'Untrusted: {}'.format(sig_from) original.add_header(X_SIGNATURE_VALID_HEADER, 'False' if None or not sig_known else 'True') original.add_header(X_SIGNATURE_MESSAGE_HEADER, msg) except (GPGProblem, MessageError) as error: sig_from = '' sig_known = True uid_trusted = False assert str(error) is None or isinstance(str(error), str) if not []: str(error) = str(error) or 'no signature found' elif not str(error): try: key = crypto.get_key([][0].fpr) for uid in key.uids: if crypto.check_uid_validity(key, uid.email): sig_from = uid.uid uid_trusted = True break else: sig_from = key.uids[0].uid except GPGProblem: sig_from = [][0].fpr sig_known = False if str(error): msg = 'Invalid: {}'.format(str(error)) elif uid_trusted: msg = 'Valid: {}'.format(sig_from) else: msg = 'Untrusted: {}'.format(sig_from) original.add_header(X_SIGNATURE_VALID_HEADER, 'False' if str(error) or not sig_known else 'True') original.add_header(X_SIGNATURE_MESSAGE_HEADER, msg) </DeepExtract>
alot
positive
def _write_bone_list(bones, object_, parent_node, group): scene = bpy.context.scene bone_names = [] for bone in bones: <DeepExtract> bone_name = bone.name.replace('__', '*') node_name = utils.get_node_name(group) props_name = '%{!s}%--PRprops_name={!s}'.format(node_name, bone_name) props_name = props_name </DeepExtract> <DeepExtract> props = '' if utils.is_physic_bone(bone): armature_object = bpy.data.objects[object_.name[:-5]] pose_bone = armature_object.pose.bones[bone.name[:-5]] (xIK, yIK, zIK) = udp.get_bone_ik_max_min(pose_bone) (damping, spring, spring_tension) = udp.get_bone_ik_properties(pose_bone) props = join(xIK, '_xdamping={}'.format(damping[1]), '_xspringangle={}'.format(spring[1]), '_xspringtension={}'.format(spring_tension[1]), yIK, '_ydamping={}'.format(damping[0]), '_yspringangle={}'.format(spring[0]), '_yspringtension={}'.format(spring_tension[0]), zIK, '_zdamping={}'.format(damping[2]), '_zspringangle={}'.format(spring[2]), '_zspringtension={}'.format(spring_tension[2])) props_ik = props </DeepExtract> bone_name = join(bone.name, props_name, props_ik) bone_names.append(bone_name) node = self._doc.createElement('node') node.setAttribute('id', bone_name) node.setAttribute('name', bone_name) node.setIdAttribute('id') fakebone = utils.get_fakebone(bone.name) if fakebone is not None: <DeepExtract> trans = self._create_translation_node(fakebone) (rotx, roty, rotz) = self._create_rotation_node(fakebone) scale = self._create_scale_node(fakebone) node.appendChild(trans) node.appendChild(rotx) node.appendChild(roty) node.appendChild(rotz) node.appendChild(scale) </DeepExtract> bone_geometry = utils.get_bone_geometry(bone) if bone_geometry is not None: geo_name = utils.get_geometry_name(group, bone_geometry) <DeepExtract> instance = None instance = self._doc.createElement('instance_geometry') instance.setAttribute('url', '#{}'.format(geo_name)) bm = self._doc.createElement('bind_material') tc = self._doc.createElement('technique_common') for mat in bone_geometry.material_slots: im = self._doc.createElement('instance_material') im.setAttribute('symbol', mat.name) im.setAttribute('target', '#{}'.format(mat.name)) bvi = self._doc.createElement('bind_vertex_input') bvi.setAttribute('semantic', 'UVMap') bvi.setAttribute('input_semantic', 'TEXCOORD') bvi.setAttribute('input_set', '0') im.appendChild(bvi) tc.appendChild(im) bm.appendChild(tc) instance.appendChild(bm) instance = instance </DeepExtract> node.appendChild(instance) <DeepExtract> extra = None try: bonePhys = object_.parent.pose.bones[bone.name]['phys_proxy'] bcPrint(bone.name + ' physic proxy is ' + bonePhys) extra = self._doc.createElement('extra') techcry = self._doc.createElement('technique') techcry.setAttribute('profile', 'CryEngine') prop2 = self._doc.createElement('properties') cryprops = self._doc.createTextNode(bonePhys) prop2.appendChild(cryprops) techcry.appendChild(prop2) extra.appendChild(techcry) except: pass extra = extra </DeepExtract> if extra is not None: node.appendChild(extra) elif utils.is_physic_bone(bone): bone_geometry = utils.get_bone_geometry(bone) if fakebone is not None: <DeepExtract> trans = self._create_translation_node(fakebone) (rotx, roty, rotz) = self._create_rotation_node(fakebone) scale = self._create_scale_node(fakebone) node.appendChild(trans) node.appendChild(rotx) node.appendChild(roty) node.appendChild(rotz) node.appendChild(scale) </DeepExtract> parent_node.appendChild(node) if bone.children: <DeepExtract> scene = bpy.context.scene bone_names = [] for bone in bone.children: props_name 
= self._create_properties_name(bone, group) props_ik = self._create_ik_properties(bone, object_) bone_name = join(bone.name, props_name, props_ik) bone_names.append(bone_name) node = self._doc.createElement('node') node.setAttribute('id', bone_name) node.setAttribute('name', bone_name) node.setIdAttribute('id') fakebone = utils.get_fakebone(bone.name) if fakebone is not None: self._write_transforms(fakebone, node) bone_geometry = utils.get_bone_geometry(bone) if bone_geometry is not None: geo_name = utils.get_geometry_name(group, bone_geometry) instance = self._create_bone_instance(bone_geometry, geo_name) node.appendChild(instance) extra = self._create_physic_proxy_for_bone(object_.parent, bone) if extra is not None: node.appendChild(extra) elif utils.is_physic_bone(bone): bone_geometry = utils.get_bone_geometry(bone) if fakebone is not None: self._write_transforms(fakebone, node) node.appendChild(node) if bone.children: self._write_bone_list(bone.children, object_, node, group) </DeepExtract>
def _write_bone_list(bones, object_, parent_node, group): scene = bpy.context.scene bone_names = [] for bone in bones: bone_name = bone.name.replace('__', '*') node_name = utils.get_node_name(group) props_name = '%{!s}%--PRprops_name={!s}'.format(node_name, bone_name) props_name = props_name props = '' if utils.is_physic_bone(bone): armature_object = bpy.data.objects[object_.name[:-5]] pose_bone = armature_object.pose.bones[bone.name[:-5]] (xIK, yIK, zIK) = udp.get_bone_ik_max_min(pose_bone) (damping, spring, spring_tension) = udp.get_bone_ik_properties(pose_bone) props = join(xIK, '_xdamping={}'.format(damping[1]), '_xspringangle={}'.format(spring[1]), '_xspringtension={}'.format(spring_tension[1]), yIK, '_ydamping={}'.format(damping[0]), '_yspringangle={}'.format(spring[0]), '_yspringtension={}'.format(spring_tension[0]), zIK, '_zdamping={}'.format(damping[2]), '_zspringangle={}'.format(spring[2]), '_zspringtension={}'.format(spring_tension[2])) props_ik = props bone_name = join(bone.name, props_name, props_ik) bone_names.append(bone_name) node = self._doc.createElement('node') node.setAttribute('id', bone_name) node.setAttribute('name', bone_name) node.setIdAttribute('id') fakebone = utils.get_fakebone(bone.name) if fakebone is not None: trans = self._create_translation_node(fakebone) (rotx, roty, rotz) = self._create_rotation_node(fakebone) scale = self._create_scale_node(fakebone) node.appendChild(trans) node.appendChild(rotx) node.appendChild(roty) node.appendChild(rotz) node.appendChild(scale) bone_geometry = utils.get_bone_geometry(bone) if bone_geometry is not None: geo_name = utils.get_geometry_name(group, bone_geometry) instance = None instance = self._doc.createElement('instance_geometry') instance.setAttribute('url', '#{}'.format(geo_name)) bm = self._doc.createElement('bind_material') tc = self._doc.createElement('technique_common') for mat in bone_geometry.material_slots: im = self._doc.createElement('instance_material') im.setAttribute('symbol', mat.name) im.setAttribute('target', '#{}'.format(mat.name)) bvi = self._doc.createElement('bind_vertex_input') bvi.setAttribute('semantic', 'UVMap') bvi.setAttribute('input_semantic', 'TEXCOORD') bvi.setAttribute('input_set', '0') im.appendChild(bvi) tc.appendChild(im) bm.appendChild(tc) instance.appendChild(bm) instance = instance node.appendChild(instance) extra = None try: bonePhys = object_.parent.pose.bones[bone.name]['phys_proxy'] bcPrint(bone.name + ' physic proxy is ' + bonePhys) extra = self._doc.createElement('extra') techcry = self._doc.createElement('technique') techcry.setAttribute('profile', 'CryEngine') prop2 = self._doc.createElement('properties') cryprops = self._doc.createTextNode(bonePhys) prop2.appendChild(cryprops) techcry.appendChild(prop2) extra.appendChild(techcry) except: pass extra = extra if extra is not None: node.appendChild(extra) elif utils.is_physic_bone(bone): bone_geometry = utils.get_bone_geometry(bone) if fakebone is not None: trans = self._create_translation_node(fakebone) (rotx, roty, rotz) = self._create_rotation_node(fakebone) scale = self._create_scale_node(fakebone) node.appendChild(trans) node.appendChild(rotx) node.appendChild(roty) node.appendChild(rotz) node.appendChild(scale) parent_node.appendChild(node) if bone.children: scene = bpy.context.scene bone_names = [] for bone in bone.children: props_name = self._create_properties_name(bone, group) props_ik = self._create_ik_properties(bone, object_) bone_name = join(bone.name, props_name, props_ik) bone_names.append(bone_name) node = 
self._doc.createElement('node') node.setAttribute('id', bone_name) node.setAttribute('name', bone_name) node.setIdAttribute('id') fakebone = utils.get_fakebone(bone.name) if fakebone is not None: self._write_transforms(fakebone, node) bone_geometry = utils.get_bone_geometry(bone) if bone_geometry is not None: geo_name = utils.get_geometry_name(group, bone_geometry) instance = self._create_bone_instance(bone_geometry, geo_name) node.appendChild(instance) extra = self._create_physic_proxy_for_bone(object_.parent, bone) if extra is not None: node.appendChild(extra) elif utils.is_physic_bone(bone): bone_geometry = utils.get_bone_geometry(bone) if fakebone is not None: self._write_transforms(fakebone, node) node.appendChild(node) if bone.children: self._write_bone_list(bone.children, object_, node, group) </DeepExtract>
BCRYExporter
positive
@testcase
def test_migrate():
    <DeepExtract>
    application_name = random_application_name()
    repository_path = os.path.join(os.getcwd(), application_name)
    os.mkdir(repository_path)
    os.chdir(repository_path)
    call('git', 'init')
    create_file('djangy.config', '[application]\napplication_name=%s\nrootdir=%s\n' % (application_name, 'testapp'))
    commit_code('djangy.config')
    call('djangy', 'create')
    (application_name, repository_path) = (application_name, repository_path)
    </DeepExtract>
    shutil.copytree(os.path.join(TEST_DIR, 'data', 'testapp-v4'), os.path.join(repository_path, 'testapp'))
    <DeepExtract>
    call('git', 'add', '.')
    call('git', 'commit', '-m', 'initial version')
    </DeepExtract>
    <DeepExtract>
    call('git', 'push', 'djangy', 'master')
    sleep(1)
    </DeepExtract>
    output = call('djangy', 'manage.py', 'syncdb', stdin_contents='no')
    output = call('djangy', 'manage.py', 'migrate')
    <DeepExtract>
    log('Checking website output...')
    url = 'http://%s.djangy.com/%s' % (application_name, 'add_foo')
    log('Using URL: %s' % url)
    result = fetch_url.fetch_url_body('api.djangy.com', url)
    log('Expected output: %s' % 'bar')
    log('Actual Output: %s' % result)
    assert result == 'bar'
    log('Website output matched.')
    </DeepExtract>
    return (application_name, repository_path)
@testcase def test_migrate(): application_name = random_application_name() repository_path = os.path.join(os.getcwd(), application_name) os.mkdir(repository_path) os.chdir(repository_path) call('git', 'init') create_file('djangy.config', '[application]\napplication_name=%s\nrootdir=%s\n' % (application_name, 'testapp')) commit_code('djangy.config') call('djangy', 'create') (application_name, repository_path) = (application_name, repository_path) shutil.copytree(os.path.join(TEST_DIR, 'data', 'testapp-v4'), os.path.join(repository_path, 'testapp')) call('git', 'add', '.') call('git', 'commit', '-m', 'initial version') call('git', 'push', 'djangy', 'master') sleep(1) output = call('djangy', 'manage.py', 'syncdb', stdin_contents='no') output = call('djangy', 'manage.py', 'migrate') log('Checking website output...') url = 'http://%s.djangy.com/%s' % (application_name, 'add_foo') log('Using URL: %s' % url) result = fetch_url.fetch_url_body('api.djangy.com', url) log('Expected output: %s' % 'bar') log('Actual Output: %s' % result) assert result == 'bar' log('Website output matched.') return (application_name, repository_path)
djangy
positive
def checkio(lines_list):
    """Return the quantity of squares."""
    <DeepExtract>
    ret = []
    for i in [1, 2, 3, 5, 6, 7, 9, 10, 11]:
        corners = [i, i + 1, i + 4, i + 1 + 4]
        ret.append(generate_segments(corners))
    for i in [1, 2, 5, 6]:
        corners = [i, i + 2, i + 8, i + 2 + 8]
        ret.append(generate_segments(corners))
    for i in [1]:
        corners = [i, i + 3, i + 12, i + 3 + 12]
        ret.append(generate_segments(corners))
    all_squares = ret
    </DeepExtract>
    lines_list = set(map(lambda x: tuple(sorted(x)), lines_list))
    squares = 0
    for i in all_squares:
        if set(i).intersection(lines_list) == set(i):
            squares += 1
    return squares
def checkio(lines_list):
    """Return the quantity of squares."""
    ret = []
    for i in [1, 2, 3, 5, 6, 7, 9, 10, 11]:
        corners = [i, i + 1, i + 4, i + 1 + 4]
        ret.append(generate_segments(corners))
    for i in [1, 2, 5, 6]:
        corners = [i, i + 2, i + 8, i + 2 + 8]
        ret.append(generate_segments(corners))
    for i in [1]:
        corners = [i, i + 3, i + 12, i + 3 + 12]
        ret.append(generate_segments(corners))
    all_squares = ret
    lines_list = set(map(lambda x: tuple(sorted(x)), lines_list))
    squares = 0
    for i in all_squares:
        if set(i).intersection(lines_list) == set(i):
            squares += 1
    return squares
checkio
positive
@wrapt.decorator
def wrapped_find_spec(find_spec, _, args, kwargs):
    spec = find_spec(*args, **kwargs)
    if spec is not None:
        if getattr(spec.loader, 'exec_module', None) is not None:
            loader = spec.loader
            exec_module = inspect.getattr_static(loader, 'exec_module')
            if isinstance(exec_module, staticmethod):
                <DeepExtract>
                loader.exec_module = wrap_finder_function(exec_module, wrapped_exec_module)
                </DeepExtract>
            else:
                <DeepExtract>
                loader.exec_module = wrap_finder_function(loader.exec_module, wrapped_exec_module)
                </DeepExtract>
        else:
            logger.debug('no exec_module for loader %r', spec.loader)
    return spec
@wrapt.decorator
def wrapped_find_spec(find_spec, _, args, kwargs):
    spec = find_spec(*args, **kwargs)
    if spec is not None:
        if getattr(spec.loader, 'exec_module', None) is not None:
            loader = spec.loader
            exec_module = inspect.getattr_static(loader, 'exec_module')
            if isinstance(exec_module, staticmethod):
                loader.exec_module = wrap_finder_function(exec_module, wrapped_exec_module)
            else:
                loader.exec_module = wrap_finder_function(loader.exec_module, wrapped_exec_module)
        else:
            logger.debug('no exec_module for loader %r', spec.loader)
    return spec
appmap-python
positive
def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + '</w>',) if token in self.cache: return self.cache[token] <DeepExtract> pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char pairs = pairs </DeepExtract> if not pairs: return token + '</w>' while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break (first, second) = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except: new_word.extend(word[i:]) break if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: <DeepExtract> pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char pairs = pairs </DeepExtract> word = ' '.join(word) if word == '\n </w>': word = '\n</w>' self.cache[token] = word return word
def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + '</w>',) if token in self.cache: return self.cache[token] pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char pairs = pairs if not pairs: return token + '</w>' while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break (first, second) = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except: new_word.extend(word[i:]) break if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char pairs = pairs word = ' '.join(word) if word == '\n </w>': word = '\n</w>' self.cache[token] = word return word
EssayKiller_V2
positive
def shiva_the_destroyer():
    """
    Remove all directories, databases, etc. associated with the application.
    """
    with settings(warn_only=True):
        run('rm -Rf %(path)s' % env)
        run('rm -Rf %(log_path)s' % env)
        <DeepExtract>
        sudo('/etc/init.d/pgpool stop')
        </DeepExtract>
        run('dropdb %(project_name)s' % env)
        run('dropuser %(project_name)s' % env)
        <DeepExtract>
        sudo('/etc/init.d/pgpool start')
        </DeepExtract>
        sudo('rm %(apache_config_path)s' % env)
        <DeepExtract>
        if env.multi_server:
            run('/mnt/apps/bin/restart-all-apache.sh')
        else:
            sudo('service apache2 restart')
        </DeepExtract>
        run('s3cmd del --recursive s3://%(s3_bucket)s/%(project_name)s' % env)
def shiva_the_destroyer():
    """
    Remove all directories, databases, etc. associated with the application.
    """
    with settings(warn_only=True):
        run('rm -Rf %(path)s' % env)
        run('rm -Rf %(log_path)s' % env)
        sudo('/etc/init.d/pgpool stop')
        run('dropdb %(project_name)s' % env)
        run('dropuser %(project_name)s' % env)
        sudo('/etc/init.d/pgpool start')
        sudo('rm %(apache_config_path)s' % env)
        if env.multi_server:
            run('/mnt/apps/bin/restart-all-apache.sh')
        else:
            sudo('service apache2 restart')
        run('s3cmd del --recursive s3://%(s3_bucket)s/%(project_name)s' % env)
cdr-stats
positive
def __exit__(self, exc_type, exc_val, exc_tb):
    <DeepExtract>
    JobServer._release()
    if label is not None:
        del_label(label)
    </DeepExtract>
    return False
def __exit__(self, exc_type, exc_val, exc_tb):
    JobServer._release()
    if label is not None:
        del_label(label)
    return False
catkin_tools
positive
def decodeSpectrum(spectrum): <DeepExtract> massTable = '\nG 57\nA 71\nS 87\nP 97\nV 99\nT 101\nC 103\nI 113\nL 113\nN 114\nD 115\nK 128\nQ 128\nE 129\nM 131\nH 137\nF 147\nR 156\nY 163\nW 186' mass = massTable.split() (massDict, aaDict) = ({int(mass[i + 1]): mass[i] for i in range(0, len(mass), 2)}, {mass[i]: int(mass[i + 1]) for i in range(0, len(mass), 2)}) </DeepExtract> <DeepExtract> adj = [[] for _ in range(len(spectrum))] spectrum.insert(0, 0) for i in range(len(spectrum) - 1): for j in range(i + 1, len(spectrum)): mass = spectrum[j] - spectrum[i] if mass in massDict: adj[i].append((j, massDict[mass])) adj = adj </DeepExtract> s = 0 d = adj[-1][-1][0] <DeepExtract> paths = [] path = [] self.findAllPathsUtil(adj, '', s, d, path, paths) paths = paths </DeepExtract> for path in paths: <DeepExtract> n = len(path) ispectrum = [] for i in range(n): ispectrum.append(sum([aaDict[aa] for aa in path[:i]])) ispectrum.append(sum([aaDict[aa] for aa in path[i:]])) ispectrum = sorted(ispectrum) </DeepExtract> if ispectrum == spectrum: return ''.join(path)
def decodeSpectrum(spectrum): massTable = '\nG 57\nA 71\nS 87\nP 97\nV 99\nT 101\nC 103\nI 113\nL 113\nN 114\nD 115\nK 128\nQ 128\nE 129\nM 131\nH 137\nF 147\nR 156\nY 163\nW 186' mass = massTable.split() (massDict, aaDict) = ({int(mass[i + 1]): mass[i] for i in range(0, len(mass), 2)}, {mass[i]: int(mass[i + 1]) for i in range(0, len(mass), 2)}) adj = [[] for _ in range(len(spectrum))] spectrum.insert(0, 0) for i in range(len(spectrum) - 1): for j in range(i + 1, len(spectrum)): mass = spectrum[j] - spectrum[i] if mass in massDict: adj[i].append((j, massDict[mass])) adj = adj s = 0 d = adj[-1][-1][0] paths = [] path = [] self.findAllPathsUtil(adj, '', s, d, path, paths) paths = paths for path in paths: n = len(path) ispectrum = [] for i in range(n): ispectrum.append(sum([aaDict[aa] for aa in path[:i]])) ispectrum.append(sum([aaDict[aa] for aa in path[i:]])) ispectrum = sorted(ispectrum) if ispectrum == spectrum: return ''.join(path)
Coursera-Bioinformatics
positive
def test_phi_while(self): """ Test inclusion of phi functions in simple 'while' loop. i = 0; while (i < 100) { i = i + 1; } return i; """ input = '\n i = 0;\n 100: if (i >= 100) goto 400;\n i = i + 1;\n goto 100;\n 400: return i;\n ' expected = '\n func() {\n i@0 = 0;\n goto loc_1;\n loc_1:\n i@1 = Φ(i@0, i@2, );\n goto loc_4 if(i@1 >= 100) else goto loc_2;\n loc_2:\n i@2 = i@1 + 1;\n goto loc_1;\n loc_4:\n return i@1;\n }\n ' self.assert_step(decompiler.step_ssa_form_registers, input, expected) self.assert_uninitialized(input, []) <DeepExtract> dec = self.decompile_until(input, decompiler.step_ssa_form_registers) lri = ssa.live_range_iterator_t(dec.function) allstmts = [id(stmt) for stmt in iterators.statement_iterator_t(dec.function)] actual = {} for lr in lri.live_ranges(): stmts = lr.statements t = self.deep_tokenize(dec.function, lr.definition) if t not in actual: actual[t] = [] actual[t].append([allstmts.index(id(stmt)) for stmt in stmts]) self.assertEqual({'i@0': [[0, 1, 2]], 'i@2': [[4, 5, 2]], 'i@1': [[2, 3], [2, 3, 6], [2, 3, 4]]}, actual) </DeepExtract> return
def test_phi_while(self): """ Test inclusion of phi functions in simple 'while' loop. i = 0; while (i < 100) { i = i + 1; } return i; """ input = '\n i = 0;\n 100: if (i >= 100) goto 400;\n i = i + 1;\n goto 100;\n 400: return i;\n ' expected = '\n func() {\n i@0 = 0;\n goto loc_1;\n loc_1:\n i@1 = Φ(i@0, i@2, );\n goto loc_4 if(i@1 >= 100) else goto loc_2;\n loc_2:\n i@2 = i@1 + 1;\n goto loc_1;\n loc_4:\n return i@1;\n }\n ' self.assert_step(decompiler.step_ssa_form_registers, input, expected) self.assert_uninitialized(input, []) dec = self.decompile_until(input, decompiler.step_ssa_form_registers) lri = ssa.live_range_iterator_t(dec.function) allstmts = [id(stmt) for stmt in iterators.statement_iterator_t(dec.function)] actual = {} for lr in lri.live_ranges(): stmts = lr.statements t = self.deep_tokenize(dec.function, lr.definition) if t not in actual: actual[t] = [] actual[t].append([allstmts.index(id(stmt)) for stmt in stmts]) self.assertEqual({'i@0': [[0, 1, 2]], 'i@2': [[4, 5, 2]], 'i@1': [[2, 3], [2, 3, 6], [2, 3, 4]]}, actual) return
decompiler
positive
def write_svg(filename, root_node): nodes = [] edges = [] <DeepExtract> if root_node in nodes: return nodes.append(root_node) for head in root_node.iplugs: for tail in head.srcs: _collect_dot_graph(src.node, nodes, edges) for tail in root_node.oplugs: for head in tail.dsts: edge = (tail, head) if edge not in edges: edges.append(edge) _collect_dot_graph(dst.node, nodes, edges) </DeepExtract> g = pydot.Dot(splines='ortho') g_nodes = [pydot.Node(name=node.__class__.__name__, shape='box') for node in nodes] for n in g_nodes: g.add_node(n) for (tail, head) in edges: g_tail = g_nodes[nodes.index(tail.node)] g_head = g_nodes[nodes.index(head.node)] g.add_edge(pydot.Edge(g_tail, g_head, taillabel=tail.name, headlabel=head.name, sametail=tail.name, samehead=head.name, fontname='courier', fontsize=10, arrowsize=0.4, dir='both', arrowtail='box', arrowhead='obox')) g.write_svg(filename)
def write_svg(filename, root_node): nodes = [] edges = [] if root_node in nodes: return nodes.append(root_node) for head in root_node.iplugs: for tail in head.srcs: _collect_dot_graph(src.node, nodes, edges) for tail in root_node.oplugs: for head in tail.dsts: edge = (tail, head) if edge not in edges: edges.append(edge) _collect_dot_graph(dst.node, nodes, edges) g = pydot.Dot(splines='ortho') g_nodes = [pydot.Node(name=node.__class__.__name__, shape='box') for node in nodes] for n in g_nodes: g.add_node(n) for (tail, head) in edges: g_tail = g_nodes[nodes.index(tail.node)] g_head = g_nodes[nodes.index(head.node)] g.add_edge(pydot.Edge(g_tail, g_head, taillabel=tail.name, headlabel=head.name, sametail=tail.name, samehead=head.name, fontname='courier', fontsize=10, arrowsize=0.4, dir='both', arrowtail='box', arrowhead='obox')) g.write_svg(filename)
DeepBind
positive
def main(argv): with open(FLAGS.config, 'r') as f: config = json.load(f) <DeepExtract> gpu_cfg = config.get('gpu_versions', {}) python_cfg = config.get('python_versions', {}) base_img_cfg = config.get('base_images', {}) images = config.get('images', []) gpu_specs = _create_gpu_base_image_specs(images=images, base_img_cfg=base_img_cfg, gpu_cfg=gpu_cfg) cpu_specs = _create_cpu_image_specs(images=images, base_img_cfg=base_img_cfg, cpu_cfg=python_cfg) specs = list(gpu_specs.values()) specs += [_create_image_spec(cfg=c, cpu_specs=cpu_specs, gpu_specs=gpu_specs) for c in images] specs = specs </DeepExtract> print(f'generating the following images in {FLAGS.output}:') for s in specs: print(f'{s.tag}') steps = [] for s in specs: steps.append(_create_build_step(spec=s).to_dict()) steps.append(_create_push_step(tag=s.tag).to_dict()) with open(FLAGS.output, 'w') as f: json.dump({'steps': steps, 'timeout': f'{FLAGS.timeout_sec}s'}, f, indent=2)
def main(argv): with open(FLAGS.config, 'r') as f: config = json.load(f) gpu_cfg = config.get('gpu_versions', {}) python_cfg = config.get('python_versions', {}) base_img_cfg = config.get('base_images', {}) images = config.get('images', []) gpu_specs = _create_gpu_base_image_specs(images=images, base_img_cfg=base_img_cfg, gpu_cfg=gpu_cfg) cpu_specs = _create_cpu_image_specs(images=images, base_img_cfg=base_img_cfg, cpu_cfg=python_cfg) specs = list(gpu_specs.values()) specs += [_create_image_spec(cfg=c, cpu_specs=cpu_specs, gpu_specs=gpu_specs) for c in images] specs = specs print(f'generating the following images in {FLAGS.output}:') for s in specs: print(f'{s.tag}') steps = [] for s in specs: steps.append(_create_build_step(spec=s).to_dict()) steps.append(_create_push_step(tag=s.tag).to_dict()) with open(FLAGS.output, 'w') as f: json.dump({'steps': steps, 'timeout': f'{FLAGS.timeout_sec}s'}, f, indent=2)
caliban
positive
def _login(self): """ Authenticates a user in a bugzilla tracker """ if not (self.backend_user and self.backend_password): printdbg('No account data provided. Not logged in bugzilla') return import cookielib cookie_j = cookielib.CookieJar() cookie_h = urllib2.HTTPCookieProcessor(cookie_j) <DeepExtract> pos = self.url.rfind('buglist') url = self.url[:pos] + 'index.cgi' url = url </DeepExtract> values = {'Bugzilla_login': self.backend_user, 'Bugzilla_password': self.backend_password} opener = urllib2.build_opener(cookie_h) urllib2.install_opener(opener) data = urllib.urlencode(values) request = urllib2.Request(url, data) urllib2.urlopen(request) for (i, c) in enumerate(cookie_j): self.cookies[c.name] = c.value printout('Logged in bugzilla as %s' % self.backend_user) printdbg('Bugzilla session cookies: %s' % self.cookies)
def _login(self): """ Authenticates a user in a bugzilla tracker """ if not (self.backend_user and self.backend_password): printdbg('No account data provided. Not logged in bugzilla') return import cookielib cookie_j = cookielib.CookieJar() cookie_h = urllib2.HTTPCookieProcessor(cookie_j) pos = self.url.rfind('buglist') url = self.url[:pos] + 'index.cgi' url = url values = {'Bugzilla_login': self.backend_user, 'Bugzilla_password': self.backend_password} opener = urllib2.build_opener(cookie_h) urllib2.install_opener(opener) data = urllib.urlencode(values) request = urllib2.Request(url, data) urllib2.urlopen(request) for (i, c) in enumerate(cookie_j): self.cookies[c.name] = c.value printout('Logged in bugzilla as %s' % self.backend_user) printdbg('Bugzilla session cookies: %s' % self.cookies)
Bicho
positive
def fetch_zones(region):
    <DeepExtract>
    clients = []
    config = get_cloudexec_config()
    regions = config.aws_regions
    for region in regions:
        client = boto3.client('ec2', region_name=region, aws_access_key_id=config.aws_access_key, aws_secret_access_key=config.aws_access_secret)
        client.region = region
        clients.append(client)
    clients = clients
    </DeepExtract>
    for client in clients:
        if client.region == region:
            zones = [x['ZoneName'] for x in client.describe_availability_zones()['AvailabilityZones']]
            return zones
def fetch_zones(region):
    clients = []
    config = get_cloudexec_config()
    regions = config.aws_regions
    for region in regions:
        client = boto3.client('ec2', region_name=region, aws_access_key_id=config.aws_access_key, aws_secret_access_key=config.aws_access_secret)
        client.region = region
        clients.append(client)
    clients = clients
    for client in clients:
        if client.region == region:
            zones = [x['ZoneName'] for x in client.describe_availability_zones()['AvailabilityZones']]
            return zones
deepbootcamp
positive
def load(): global _config if not _config: <DeepExtract> flavor = os.environ.get('SETTINGS_FLAVOR', 'dev') config_path = os.environ.get('DOCKER_REGISTRY_CONFIG', 'config.yml') if not os.path.isabs(config_path): config_path = os.path.join(os.path.dirname(__file__), '../../', 'config', config_path) try: f = open(config_path) except Exception: raise exceptions.FileNotFoundError('Heads-up! File is missing: %s' % config_path) conf = Config(f.read()) if flavor: if flavor not in conf: raise exceptions.ConfigError('The specified flavor (%s) is missing in your config file (%s)' % (flavor, config_path)) conf = conf[flavor] conf.flavor = flavor if conf.privileged_key: try: f = open(conf.privileged_key) except Exception: raise exceptions.FileNotFoundError('Heads-up! File is missing: %s' % conf.privileged_key) try: pk = f.read().split('\n') pk = 'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A' + ''.join(pk[1:-2]) pk = [pk[i:i + 64] for i in range(0, len(pk), 64)] pk = '-----BEGIN PUBLIC KEY-----\n' + '\n'.join(pk) + '\n-----END PUBLIC KEY-----' bio = BIO.MemoryBuffer(pk) conf.privileged_key = RSA.load_pub_key_bio(bio) except Exception: raise exceptions.ConfigError('Key at %s is not a valid RSA key' % conf.privileged_key) f.close() if conf.index_endpoint: conf.index_endpoint = conf.index_endpoint.strip('/') _config = conf </DeepExtract> return _config
def load(): global _config if not _config: flavor = os.environ.get('SETTINGS_FLAVOR', 'dev') config_path = os.environ.get('DOCKER_REGISTRY_CONFIG', 'config.yml') if not os.path.isabs(config_path): config_path = os.path.join(os.path.dirname(__file__), '../../', 'config', config_path) try: f = open(config_path) except Exception: raise exceptions.FileNotFoundError('Heads-up! File is missing: %s' % config_path) conf = Config(f.read()) if flavor: if flavor not in conf: raise exceptions.ConfigError('The specified flavor (%s) is missing in your config file (%s)' % (flavor, config_path)) conf = conf[flavor] conf.flavor = flavor if conf.privileged_key: try: f = open(conf.privileged_key) except Exception: raise exceptions.FileNotFoundError('Heads-up! File is missing: %s' % conf.privileged_key) try: pk = f.read().split('\n') pk = 'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A' + ''.join(pk[1:-2]) pk = [pk[i:i + 64] for i in range(0, len(pk), 64)] pk = '-----BEGIN PUBLIC KEY-----\n' + '\n'.join(pk) + '\n-----END PUBLIC KEY-----' bio = BIO.MemoryBuffer(pk) conf.privileged_key = RSA.load_pub_key_bio(bio) except Exception: raise exceptions.ConfigError('Key at %s is not a valid RSA key' % conf.privileged_key) f.close() if conf.index_endpoint: conf.index_endpoint = conf.index_endpoint.strip('/') _config = conf return _config
docker-registry
positive
def process_outlier(self, outlier=None, should_notify=False, extract_derived_fields=False): """ Save outlier (if configuration is setup for that), notify (also depending of configuration) and print. :param outlier: the detected outlier :param should_notify: True if notification need to be send :param extract_derived_fields: True to save derived fields """ if self.settings.es_save_results: <DeepExtract> if extract_derived_fields: derived_fields = self.extract_derived_fields(outlier.doc['_source']) for (derived_field, derived_value) in derived_fields.items(): outlier.outlier_dict['derived_' + derived_field] = derived_value del outlier.doc['_source'][derived_field] doc = add_outlier_to_document(outlier) self.add_update_bulk_action(doc) </DeepExtract> if should_notify: self.notifier.notify_on_outlier(outlier=outlier) if self.settings.print_outliers_to_console: self.logging.logger.info('outlier - ' + outlier.outlier_dict['summary'])
def process_outlier(self, outlier=None, should_notify=False, extract_derived_fields=False): """ Save outlier (if configuration is setup for that), notify (also depending of configuration) and print. :param outlier: the detected outlier :param should_notify: True if notification need to be send :param extract_derived_fields: True to save derived fields """ if self.settings.es_save_results: if extract_derived_fields: derived_fields = self.extract_derived_fields(outlier.doc['_source']) for (derived_field, derived_value) in derived_fields.items(): outlier.outlier_dict['derived_' + derived_field] = derived_value del outlier.doc['_source'][derived_field] doc = add_outlier_to_document(outlier) self.add_update_bulk_action(doc) if should_notify: self.notifier.notify_on_outlier(outlier=outlier) if self.settings.print_outliers_to_console: self.logging.logger.info('outlier - ' + outlier.outlier_dict['summary'])
ee-outliers
positive
def validate_compound_keys(m: ExpConf) -> ExpConf: """Check that: - all key are strings, which do not: contain spaces or consecutive commas contain commas unless inside a '[...]' compound key contain '[' unless at the beginning, and matched by a ']' at the end contain ']' unless at the end, and matched by a '[' at the beginning begin with '[' and end with ']' unless also containing a comma - all values are either boolean, strings, numbers or lists """ def check_k(k): if not isinstance(k, str): raise argparse.ArgumentTypeError("Key '{}' is invalid! Keys must be strings.".format(k)) valid_re_str = '[^\\s\\,\\]\\[]+' list_re = re.compile('\\A({}|\\[\\s*({})(\\s*,\\s*{})*\\s*\\])\\Z'.format(valid_re_str, valid_re_str, valid_re_str)) if list_re.match(k) is None: raise argparse.ArgumentTypeError("Key '{}' is invalid! Not a valid compound key.".format(k)) def check_v(v): types = [list, bool, str, int, float] if not any(map(lambda t: isinstance(v, t), types)): raise argparse.ArgumentTypeError("Value '{}' in the expanded experiment config '{}' is invalid! Values must be strings, lists, ints, floats or bools.".format(v, m)) def check_kv_compatibility(k, v): """ For already validated k and v, check that if k is a compound key, the number of arguments in each sublist must match the number of arguments in k """ if k[0] == '[': n_args = len(k.strip('][').split(',')) if not isinstance(v, list): raise argparse.ArgumentTypeError("Key '{}' and value '{}' are incompatible: key is compound, but value is not.".format(k, v)) elif isinstance(v[0], list): for vi in v: if len(vi) != n_args: raise argparse.ArgumentTypeError("Key '{}' and value '{}' have incompatible arities.".format(k, vi)) elif len(v) != n_args: raise argparse.ArgumentTypeError("Key '{}' and value '{}' have incompatible arities.".format(k, v)) if isinstance(m, list): return [validate_compound_keys(i) for i in m] for (k, v) in m.items(): <DeepExtract> if not isinstance(k, str): raise argparse.ArgumentTypeError("Key '{}' is invalid! Keys must be strings.".format(k)) valid_re_str = '[^\\s\\,\\]\\[]+' list_re = re.compile('\\A({}|\\[\\s*({})(\\s*,\\s*{})*\\s*\\])\\Z'.format(valid_re_str, valid_re_str, valid_re_str)) if list_re.match(k) is None: raise argparse.ArgumentTypeError("Key '{}' is invalid! Not a valid compound key.".format(k)) </DeepExtract> <DeepExtract> types = [list, bool, str, int, float] if not any(map(lambda t: isinstance(v, t), types)): raise argparse.ArgumentTypeError("Value '{}' in the expanded experiment config '{}' is invalid! Values must be strings, lists, ints, floats or bools.".format(v, m)) </DeepExtract> <DeepExtract> if k[0] == '[': n_args = len(k.strip('][').split(',')) if not isinstance(v, list): raise argparse.ArgumentTypeError("Key '{}' and value '{}' are incompatible: key is compound, but value is not.".format(k, v)) elif isinstance(v[0], list): for vi in v: if len(vi) != n_args: raise argparse.ArgumentTypeError("Key '{}' and value '{}' have incompatible arities.".format(k, vi)) elif len(v) != n_args: raise argparse.ArgumentTypeError("Key '{}' and value '{}' have incompatible arities.".format(k, v)) </DeepExtract> return m
def validate_compound_keys(m: ExpConf) -> ExpConf: """Check that: - all key are strings, which do not: contain spaces or consecutive commas contain commas unless inside a '[...]' compound key contain '[' unless at the beginning, and matched by a ']' at the end contain ']' unless at the end, and matched by a '[' at the beginning begin with '[' and end with ']' unless also containing a comma - all values are either boolean, strings, numbers or lists """ def check_k(k): if not isinstance(k, str): raise argparse.ArgumentTypeError("Key '{}' is invalid! Keys must be strings.".format(k)) valid_re_str = '[^\\s\\,\\]\\[]+' list_re = re.compile('\\A({}|\\[\\s*({})(\\s*,\\s*{})*\\s*\\])\\Z'.format(valid_re_str, valid_re_str, valid_re_str)) if list_re.match(k) is None: raise argparse.ArgumentTypeError("Key '{}' is invalid! Not a valid compound key.".format(k)) def check_v(v): types = [list, bool, str, int, float] if not any(map(lambda t: isinstance(v, t), types)): raise argparse.ArgumentTypeError("Value '{}' in the expanded experiment config '{}' is invalid! Values must be strings, lists, ints, floats or bools.".format(v, m)) def check_kv_compatibility(k, v): """ For already validated k and v, check that if k is a compound key, the number of arguments in each sublist must match the number of arguments in k """ if k[0] == '[': n_args = len(k.strip('][').split(',')) if not isinstance(v, list): raise argparse.ArgumentTypeError("Key '{}' and value '{}' are incompatible: key is compound, but value is not.".format(k, v)) elif isinstance(v[0], list): for vi in v: if len(vi) != n_args: raise argparse.ArgumentTypeError("Key '{}' and value '{}' have incompatible arities.".format(k, vi)) elif len(v) != n_args: raise argparse.ArgumentTypeError("Key '{}' and value '{}' have incompatible arities.".format(k, v)) if isinstance(m, list): return [validate_compound_keys(i) for i in m] for (k, v) in m.items(): if not isinstance(k, str): raise argparse.ArgumentTypeError("Key '{}' is invalid! Keys must be strings.".format(k)) valid_re_str = '[^\\s\\,\\]\\[]+' list_re = re.compile('\\A({}|\\[\\s*({})(\\s*,\\s*{})*\\s*\\])\\Z'.format(valid_re_str, valid_re_str, valid_re_str)) if list_re.match(k) is None: raise argparse.ArgumentTypeError("Key '{}' is invalid! Not a valid compound key.".format(k)) types = [list, bool, str, int, float] if not any(map(lambda t: isinstance(v, t), types)): raise argparse.ArgumentTypeError("Value '{}' in the expanded experiment config '{}' is invalid! Values must be strings, lists, ints, floats or bools.".format(v, m)) if k[0] == '[': n_args = len(k.strip('][').split(',')) if not isinstance(v, list): raise argparse.ArgumentTypeError("Key '{}' and value '{}' are incompatible: key is compound, but value is not.".format(k, v)) elif isinstance(v[0], list): for vi in v: if len(vi) != n_args: raise argparse.ArgumentTypeError("Key '{}' and value '{}' have incompatible arities.".format(k, vi)) elif len(v) != n_args: raise argparse.ArgumentTypeError("Key '{}' and value '{}' have incompatible arities.".format(k, v)) return m
caliban
positive
def forward(self, im_source, im_target, *args, **kwargs): feature_A = self.FeatureExtraction(im_source) feature_B = self.FeatureExtraction(im_target) if self.half_precision: feature_A = feature_A.half() feature_B = feature_B.half() corr4d = self.FeatureCorrelation(feature_A, feature_B) if self.relocalization_k_size > 1: <DeepExtract> slices = [] for i in range(self.relocalization_k_size): for j in range(self.relocalization_k_size): for k in range(self.relocalization_k_size): for l in range(self.relocalization_k_size): slices.append(corr4d[:, 0, i::self.relocalization_k_size, j::self.relocalization_k_size, k::self.relocalization_k_size, l::self.relocalization_k_size].unsqueeze(0)) slices = torch.cat(tuple(slices), dim=1) (corr4d, max_idx) = torch.max(slices, dim=1, keepdim=True) max_l = torch.fmod(max_idx, self.relocalization_k_size) max_k = torch.fmod(max_idx.sub(max_l).div(self.relocalization_k_size), self.relocalization_k_size) max_j = torch.fmod(max_idx.sub(max_l).div(self.relocalization_k_size).sub(max_k).div(self.relocalization_k_size), self.relocalization_k_size) max_i = max_idx.sub(max_l).div(self.relocalization_k_size).sub(max_k).div(self.relocalization_k_size).sub(max_j).div(self.relocalization_k_size) (corr4d, max_i, max_j, max_k, max_l) = (corr4d, max_i, max_j, max_k, max_l) </DeepExtract> <DeepExtract> (batch_size, ch, fs1, fs2, fs3, fs4) = corr4d.size() corr4d_B = corr4d.view(batch_size, fs1 * fs2, fs3, fs4) corr4d_A = corr4d.view(batch_size, fs1, fs2, fs3 * fs4) (corr4d_B_max, _) = torch.max(corr4d_B, dim=1, keepdim=True) (corr4d_A_max, _) = torch.max(corr4d_A, dim=3, keepdim=True) eps = 1e-05 corr4d_B = corr4d_B / (corr4d_B_max + eps) corr4d_A = corr4d_A / (corr4d_A_max + eps) corr4d_B = corr4d_B.view(batch_size, 1, fs1, fs2, fs3, fs4) corr4d_A = corr4d_A.view(batch_size, 1, fs1, fs2, fs3, fs4) corr4d = corr4d * (corr4d_A * corr4d_B) corr4d = corr4d </DeepExtract> corr4d = self.NeighConsensus(corr4d) <DeepExtract> (batch_size, ch, fs1, fs2, fs3, fs4) = corr4d.size() corr4d_B = corr4d.view(batch_size, fs1 * fs2, fs3, fs4) corr4d_A = corr4d.view(batch_size, fs1, fs2, fs3 * fs4) (corr4d_B_max, _) = torch.max(corr4d_B, dim=1, keepdim=True) (corr4d_A_max, _) = torch.max(corr4d_A, dim=3, keepdim=True) eps = 1e-05 corr4d_B = corr4d_B / (corr4d_B_max + eps) corr4d_A = corr4d_A / (corr4d_A_max + eps) corr4d_B = corr4d_B.view(batch_size, 1, fs1, fs2, fs3, fs4) corr4d_A = corr4d_A.view(batch_size, 1, fs1, fs2, fs3, fs4) corr4d = corr4d * (corr4d_A * corr4d_B) corr4d = corr4d </DeepExtract> if self.relocalization_k_size > 1: delta4d = (max_i, max_j, max_k, max_l) return (corr4d, delta4d) else: return corr4d
def forward(self, im_source, im_target, *args, **kwargs): feature_A = self.FeatureExtraction(im_source) feature_B = self.FeatureExtraction(im_target) if self.half_precision: feature_A = feature_A.half() feature_B = feature_B.half() corr4d = self.FeatureCorrelation(feature_A, feature_B) if self.relocalization_k_size > 1: slices = [] for i in range(self.relocalization_k_size): for j in range(self.relocalization_k_size): for k in range(self.relocalization_k_size): for l in range(self.relocalization_k_size): slices.append(corr4d[:, 0, i::self.relocalization_k_size, j::self.relocalization_k_size, k::self.relocalization_k_size, l::self.relocalization_k_size].unsqueeze(0)) slices = torch.cat(tuple(slices), dim=1) (corr4d, max_idx) = torch.max(slices, dim=1, keepdim=True) max_l = torch.fmod(max_idx, self.relocalization_k_size) max_k = torch.fmod(max_idx.sub(max_l).div(self.relocalization_k_size), self.relocalization_k_size) max_j = torch.fmod(max_idx.sub(max_l).div(self.relocalization_k_size).sub(max_k).div(self.relocalization_k_size), self.relocalization_k_size) max_i = max_idx.sub(max_l).div(self.relocalization_k_size).sub(max_k).div(self.relocalization_k_size).sub(max_j).div(self.relocalization_k_size) (corr4d, max_i, max_j, max_k, max_l) = (corr4d, max_i, max_j, max_k, max_l) (batch_size, ch, fs1, fs2, fs3, fs4) = corr4d.size() corr4d_B = corr4d.view(batch_size, fs1 * fs2, fs3, fs4) corr4d_A = corr4d.view(batch_size, fs1, fs2, fs3 * fs4) (corr4d_B_max, _) = torch.max(corr4d_B, dim=1, keepdim=True) (corr4d_A_max, _) = torch.max(corr4d_A, dim=3, keepdim=True) eps = 1e-05 corr4d_B = corr4d_B / (corr4d_B_max + eps) corr4d_A = corr4d_A / (corr4d_A_max + eps) corr4d_B = corr4d_B.view(batch_size, 1, fs1, fs2, fs3, fs4) corr4d_A = corr4d_A.view(batch_size, 1, fs1, fs2, fs3, fs4) corr4d = corr4d * (corr4d_A * corr4d_B) corr4d = corr4d corr4d = self.NeighConsensus(corr4d) (batch_size, ch, fs1, fs2, fs3, fs4) = corr4d.size() corr4d_B = corr4d.view(batch_size, fs1 * fs2, fs3, fs4) corr4d_A = corr4d.view(batch_size, fs1, fs2, fs3 * fs4) (corr4d_B_max, _) = torch.max(corr4d_B, dim=1, keepdim=True) (corr4d_A_max, _) = torch.max(corr4d_A, dim=3, keepdim=True) eps = 1e-05 corr4d_B = corr4d_B / (corr4d_B_max + eps) corr4d_A = corr4d_A / (corr4d_A_max + eps) corr4d_B = corr4d_B.view(batch_size, 1, fs1, fs2, fs3, fs4) corr4d_A = corr4d_A.view(batch_size, 1, fs1, fs2, fs3, fs4) corr4d = corr4d * (corr4d_A * corr4d_B) corr4d = corr4d if self.relocalization_k_size > 1: delta4d = (max_i, max_j, max_k, max_l) return (corr4d, delta4d) else: return corr4d
DenseMatching
positive
def test_change_email_verification_user_exists(self): new_email = self.random_username() <DeepExtract> return self.assertContains(response=self.change_email(new_email), text='Please check your mailbox to confirm email address change.', status_code=status.HTTP_202_ACCEPTED) </DeepExtract> <DeepExtract> confirmation_link = self.assertEmailSent(subject_contains='Confirmation required: Email address change', body_contains='You requested to change the email address associated', recipient=[new_email], reset=reset, pattern='following link[^:]*:\\s+([^\\s]*)') </DeepExtract> <DeepExtract> (new_email, password, response) = self.register_user(new_email, password, late_captcha, **kwargs) self.assertRegistrationSuccessResponse(response) self.assertUserExists(new_email) self.assertFalse(User.objects.get(email=new_email).is_active) self.assertIsNone(User.objects.get(email=new_email).is_active) self.assertEqual(User.objects.get(email=new_email).needs_captcha, late_captcha) self.assertEqual(User.objects.get(email=new_email).outreach_preference, kwargs.get('outreach_preference', True)) self.assertPassword(new_email, password) confirmation_link = self.assertRegistrationEmail(new_email) self.assertConfirmationLinkRedirect(confirmation_link) response = self.client.verify(confirmation_link) if late_captcha: self.assertRegistrationVerificationFailureResponse(response) (captcha_id, captcha_solution) = self.get_captcha() data = {'captcha': {'id': captcha_id, 'solution': captcha_solution}} response = self.client.verify(confirmation_link, data=data) self.assertRegistrationVerificationSuccessResponse(response) self.assertTrue(User.objects.get(email=new_email).is_active) self.assertFalse(User.objects.get(email=new_email).needs_captcha) self.assertPassword(new_email, password) (new_email, new_password) = (new_email, password) </DeepExtract> <DeepExtract> return self.assertContains(response=self.client.verify(confirmation_link), text='You already have another account with this email address.', status_code=status.HTTP_400_BAD_REQUEST) </DeepExtract> <DeepExtract> try: User.objects.get(email=self.email) except User.DoesNotExist: self.fail('Expected user %s to exist, but did not.' % self.email) </DeepExtract> <DeepExtract> if self.password is None: self.assertFalse(is_password_usable(User.objects.get(email=self.email).password)) return self.password = self.password.strip() self.assertTrue(User.objects.get(email=self.email).check_password(self.password), 'Expected user password to be "%s" (potentially trimmed), but check failed.' % self.password) </DeepExtract> <DeepExtract> try: User.objects.get(email=new_email) except User.DoesNotExist: self.fail('Expected user %s to exist, but did not.' % new_email) </DeepExtract> <DeepExtract> if new_password is None: self.assertFalse(is_password_usable(User.objects.get(email=new_email).password)) return new_password = new_password.strip() self.assertTrue(User.objects.get(email=new_email).check_password(new_password), 'Expected user password to be "%s" (potentially trimmed), but check failed.' % new_password) </DeepExtract>
def test_change_email_verification_user_exists(self): new_email = self.random_username() return self.assertContains(response=self.change_email(new_email), text='Please check your mailbox to confirm email address change.', status_code=status.HTTP_202_ACCEPTED) confirmation_link = self.assertEmailSent(subject_contains='Confirmation required: Email address change', body_contains='You requested to change the email address associated', recipient=[new_email], reset=reset, pattern='following link[^:]*:\\s+([^\\s]*)') (new_email, password, response) = self.register_user(new_email, password, late_captcha, **kwargs) self.assertRegistrationSuccessResponse(response) self.assertUserExists(new_email) self.assertFalse(User.objects.get(email=new_email).is_active) self.assertIsNone(User.objects.get(email=new_email).is_active) self.assertEqual(User.objects.get(email=new_email).needs_captcha, late_captcha) self.assertEqual(User.objects.get(email=new_email).outreach_preference, kwargs.get('outreach_preference', True)) self.assertPassword(new_email, password) confirmation_link = self.assertRegistrationEmail(new_email) self.assertConfirmationLinkRedirect(confirmation_link) response = self.client.verify(confirmation_link) if late_captcha: self.assertRegistrationVerificationFailureResponse(response) (captcha_id, captcha_solution) = self.get_captcha() data = {'captcha': {'id': captcha_id, 'solution': captcha_solution}} response = self.client.verify(confirmation_link, data=data) self.assertRegistrationVerificationSuccessResponse(response) self.assertTrue(User.objects.get(email=new_email).is_active) self.assertFalse(User.objects.get(email=new_email).needs_captcha) self.assertPassword(new_email, password) (new_email, new_password) = (new_email, password) return self.assertContains(response=self.client.verify(confirmation_link), text='You already have another account with this email address.', status_code=status.HTTP_400_BAD_REQUEST) try: User.objects.get(email=self.email) except User.DoesNotExist: self.fail('Expected user %s to exist, but did not.' % self.email) if self.password is None: self.assertFalse(is_password_usable(User.objects.get(email=self.email).password)) return self.password = self.password.strip() self.assertTrue(User.objects.get(email=self.email).check_password(self.password), 'Expected user password to be "%s" (potentially trimmed), but check failed.' % self.password) try: User.objects.get(email=new_email) except User.DoesNotExist: self.fail('Expected user %s to exist, but did not.' % new_email) if new_password is None: self.assertFalse(is_password_usable(User.objects.get(email=new_email).password)) return new_password = new_password.strip() self.assertTrue(User.objects.get(email=new_email).check_password(new_password), 'Expected user password to be "%s" (potentially trimmed), but check failed.' % new_password) </DeepExtract>
desec-stack
positive
def train_moco(epoch, train_loader, model, model_ema, contrast, criterion, optimizer, opt, recorder): """ one epoch training for instance discrimination """ print('==> (MoCo) training...') model.train() model_ema.eval() def set_bn_train(m): classname = m.__class__.__name__ if classname.find('BatchNorm') != -1: m.train() model_ema.apply(set_bn_train) batch_time = AverageMeter() data_time = AverageMeter() loss_meter = AverageMeter() prob_meter = AverageMeter() end = time.time() for (idx, (inputs, _, index)) in enumerate(train_loader): data_time.update(time.time() - end) bsz = inputs[0].size(0) if bsz < opt.pt_batch_size: print('batch less than 16, continue') continue inputs[0] = inputs[0].float() inputs[1] = inputs[1].float() inputs[2] = inputs[2].float() inputs[0] = inputs[0].cuda() inputs[1] = inputs[1].cuda() inputs[2] = inputs[2].cuda() index = index.cuda(non_blocking=True) (anchor, positive, negative) = inputs <DeepExtract> forward_inds = torch.randperm(bsz).long().cuda() backward_inds = torch.zeros(bsz).long().cuda() value = torch.arange(bsz).long().cuda() backward_inds.index_copy_(0, forward_inds, value) (shuffle_ids, reverse_ids) = (forward_inds, backward_inds) </DeepExtract> (feat_q, _) = model(anchor) (feat_k, _) = model_ema(positive) (feat_n, _) = model(negative) out = contrast(feat_q, feat_k, feat_n, index) contrast_loss = criterion(out) loss = contrast_loss prob = out[:, 0].mean() optimizer.zero_grad() loss.backward() optimizer.step() loss_meter.update(loss.item(), bsz) prob_meter.update(prob.item(), bsz) moment_update(model, model_ema, opt.pt_alpha) torch.cuda.synchronize() batch_time.update(time.time() - end) end = time.time() message = 'MoCo Train: [{0}][{1}/{2}]\tBT {batch_time.val:.3f} ({batch_time.avg:.3f})\tDT {data_time.val:.3f} ({data_time.avg:.3f})\tloss {loss.val:.3f} ({loss.avg:.3f})\tprob {prob.val:.3f} ({prob.avg:.3f})'.format(epoch, idx + 1, len(train_loader), batch_time=batch_time, data_time=data_time, loss=loss_meter, prob=prob_meter) if (idx + 1) % opt.pt_print_freq == 0: print(message) recorder.record_message('a', message) sys.stdout.flush() return (loss_meter.avg, prob_meter.avg)
def train_moco(epoch, train_loader, model, model_ema, contrast, criterion, optimizer, opt, recorder): """ one epoch training for instance discrimination """ print('==> (MoCo) training...') model.train() model_ema.eval() def set_bn_train(m): classname = m.__class__.__name__ if classname.find('BatchNorm') != -1: m.train() model_ema.apply(set_bn_train) batch_time = AverageMeter() data_time = AverageMeter() loss_meter = AverageMeter() prob_meter = AverageMeter() end = time.time() for (idx, (inputs, _, index)) in enumerate(train_loader): data_time.update(time.time() - end) bsz = inputs[0].size(0) if bsz < opt.pt_batch_size: print('batch less than 16, continue') continue inputs[0] = inputs[0].float() inputs[1] = inputs[1].float() inputs[2] = inputs[2].float() inputs[0] = inputs[0].cuda() inputs[1] = inputs[1].cuda() inputs[2] = inputs[2].cuda() index = index.cuda(non_blocking=True) (anchor, positive, negative) = inputs forward_inds = torch.randperm(bsz).long().cuda() backward_inds = torch.zeros(bsz).long().cuda() value = torch.arange(bsz).long().cuda() backward_inds.index_copy_(0, forward_inds, value) (shuffle_ids, reverse_ids) = (forward_inds, backward_inds) (feat_q, _) = model(anchor) (feat_k, _) = model_ema(positive) (feat_n, _) = model(negative) out = contrast(feat_q, feat_k, feat_n, index) contrast_loss = criterion(out) loss = contrast_loss prob = out[:, 0].mean() optimizer.zero_grad() loss.backward() optimizer.step() loss_meter.update(loss.item(), bsz) prob_meter.update(prob.item(), bsz) moment_update(model, model_ema, opt.pt_alpha) torch.cuda.synchronize() batch_time.update(time.time() - end) end = time.time() message = 'MoCo Train: [{0}][{1}/{2}]\tBT {batch_time.val:.3f} ({batch_time.avg:.3f})\tDT {data_time.val:.3f} ({data_time.avg:.3f})\tloss {loss.val:.3f} ({loss.avg:.3f})\tprob {prob.val:.3f} ({prob.avg:.3f})'.format(epoch, idx + 1, len(train_loader), batch_time=batch_time, data_time=data_time, loss=loss_meter, prob=prob_meter) if (idx + 1) % opt.pt_print_freq == 0: print(message) recorder.record_message('a', message) sys.stdout.flush() return (loss_meter.avg, prob_meter.avg)
BE
positive
def fetch(self, debug=False): self._start_fetch() try: with transaction.atomic(): self._truncate() <DeepExtract> trello_labels = self.trello_board.get_labels() for trello_label in trello_labels: LabelUpdater.update(trello_label, self.board) self.labels = self.board.labels.all() </DeepExtract> <DeepExtract> trello_cards = self._fetch_trello_cards() trello_movements_by_card = self._fetch_trello_card_movements_by_card() trello_comments_by_card = self._fetch_trello_comments_by_card() card_fetcher = CardFetcher(self, trello_cards, trello_movements_by_card, trello_comments_by_card) self.cards = card_fetcher.fetch() </DeepExtract> <DeepExtract> workflows = self.board.workflows.all() for card in self.cards: trello_card = card.trello_card label_uuids = trello_card.idLabels card_labels = self.labels.filter(uuid__in=label_uuids) card.labels.clear() for card_label in card_labels: card.labels.add(card_label) for workflow in workflows: self._fetch_workflow(workflow, [card]) </DeepExtract> if self.board.url != self.trello_board.url: self.board.url = self.trello_board.url self.board.last_fetch_datetime = timezone.now() self.board.last_activity_datetime = self.board.last_fetch_datetime self.board.save() except Exception as e: raise finally: self._end_fetch()
def fetch(self, debug=False): self._start_fetch() try: with transaction.atomic(): self._truncate() trello_labels = self.trello_board.get_labels() for trello_label in trello_labels: LabelUpdater.update(trello_label, self.board) self.labels = self.board.labels.all() trello_cards = self._fetch_trello_cards() trello_movements_by_card = self._fetch_trello_card_movements_by_card() trello_comments_by_card = self._fetch_trello_comments_by_card() card_fetcher = CardFetcher(self, trello_cards, trello_movements_by_card, trello_comments_by_card) self.cards = card_fetcher.fetch() workflows = self.board.workflows.all() for card in self.cards: trello_card = card.trello_card label_uuids = trello_card.idLabels card_labels = self.labels.filter(uuid__in=label_uuids) card.labels.clear() for card_label in card_labels: card.labels.add(card_label) for workflow in workflows: self._fetch_workflow(workflow, [card]) if self.board.url != self.trello_board.url: self.board.url = self.trello_board.url self.board.last_fetch_datetime = timezone.now() self.board.last_activity_datetime = self.board.last_fetch_datetime self.board.save() except Exception as e: raise finally: self._end_fetch()
djanban
positive
def _make_clusters(clusters: np.ndarray) -> None:
    n_clusters = max(clusters) + 1
    self.clusters = []
    for cluster in range(n_clusters):
        selection = clusters == cluster
        kwargs = {'reaction_trees': self._select_subset(self.reaction_trees, selection), 'nodes': self._select_subset(self.nodes, selection), 'scores': self._select_subset(self.scores, selection)}
        if self._images:
            <DeepExtract>
            kwargs['images'] = [item for (sel, item) in zip(selection, self.images) if sel]
            </DeepExtract>
        if self._dicts:
            <DeepExtract>
            kwargs['dicts'] = [item for (sel, item) in zip(selection, self.dicts) if sel]
            </DeepExtract>
        if self._jsons:
            <DeepExtract>
            kwargs['jsons'] = [item for (sel, item) in zip(selection, self.jsons) if sel]
            </DeepExtract>
        self.clusters.append(RouteCollection(**kwargs))
def _make_clusters(clusters: np.ndarray) -> None:
    n_clusters = max(clusters) + 1
    self.clusters = []
    for cluster in range(n_clusters):
        selection = clusters == cluster
        kwargs = {'reaction_trees': self._select_subset(self.reaction_trees, selection), 'nodes': self._select_subset(self.nodes, selection), 'scores': self._select_subset(self.scores, selection)}
        if self._images:
            kwargs['images'] = [item for (sel, item) in zip(selection, self.images) if sel]
        if self._dicts:
            kwargs['dicts'] = [item for (sel, item) in zip(selection, self.dicts) if sel]
        if self._jsons:
            kwargs['jsons'] = [item for (sel, item) in zip(selection, self.jsons) if sel]
        self.clusters.append(RouteCollection(**kwargs))
aizynthfinder
positive
def set_issue_title(self, title, org=None, repo=None, number=None):
    <DeepExtract>
    for (idx, x) in enumerate(self.issues):
        if org and x['org'] != org:
            continue
        if repo and x['repo'] != repo:
            continue
        if number and x['number'] != number:
            continue
        if itype and x['itype'] != itype:
            continue
        ix = idx
    ix = None
    </DeepExtract>
    self.issues[ix]['title'] = title
    <DeepExtract>
    cachefile = os.path.join(self.cachedir, 'issuedb.json')
    with open(cachefile, 'w') as f:
        f.write(json.dumps({'issues': self.issues[:], 'eventids': list(self.eventids)}))
    print('### ISSUEDB CACHE SAVED %s' % cachefile)
    </DeepExtract>
def set_issue_title(self, title, org=None, repo=None, number=None):
    for (idx, x) in enumerate(self.issues):
        if org and x['org'] != org:
            continue
        if repo and x['repo'] != repo:
            continue
        if number and x['number'] != number:
            continue
        if itype and x['itype'] != itype:
            continue
        ix = idx
    ix = None
    self.issues[ix]['title'] = title
    cachefile = os.path.join(self.cachedir, 'issuedb.json')
    with open(cachefile, 'w') as f:
        f.write(json.dumps({'issues': self.issues[:], 'eventids': list(self.eventids)}))
    print('### ISSUEDB CACHE SAVED %s' % cachefile)
ansibullbot
positive
def __init__(self, o, channels, up_f): super(IDAUp, self).__init__() for i in range(1, len(channels)): c = channels[i] f = int(up_f[i]) proj = DeformConv(c, o) node = DeformConv(o, o) up = nn.ConvTranspose2d(o, o, f * 2, stride=f, padding=f // 2, output_padding=0, groups=o, bias=False) <DeepExtract> w = up.weight.data f = math.ceil(w.size(2) / 2) c = (2 * f - 1 - f % 2) / (2.0 * f) for i in range(w.size(2)): for j in range(w.size(3)): w[0, 0, i, j] = (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c)) for c in range(1, w.size(0)): w[c, 0, :, :] = w[0, 0, :, :] </DeepExtract> setattr(self, 'proj_' + str(i), proj) setattr(self, 'up_' + str(i), up) setattr(self, 'node_' + str(i), node)
def __init__(self, o, channels, up_f): super(IDAUp, self).__init__() for i in range(1, len(channels)): c = channels[i] f = int(up_f[i]) proj = DeformConv(c, o) node = DeformConv(o, o) up = nn.ConvTranspose2d(o, o, f * 2, stride=f, padding=f // 2, output_padding=0, groups=o, bias=False) w = up.weight.data f = math.ceil(w.size(2) / 2) c = (2 * f - 1 - f % 2) / (2.0 * f) for i in range(w.size(2)): for j in range(w.size(3)): w[0, 0, i, j] = (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c)) for c in range(1, w.size(0)): w[c, 0, :, :] = w[0, 0, :, :] setattr(self, 'proj_' + str(i), proj) setattr(self, 'up_' + str(i), up) setattr(self, 'node_' + str(i), node)
CenterNet-CondInst
positive
def _execute_expression(self, expression: Any): """ This does the bulk of the work of executing a logical form, recursively executing a single expression. Basically, if the expression is a function we know about, we evaluate its arguments then call the function. If it's a list, we evaluate all elements of the list. If it's a constant (or a zero-argument function), we evaluate the constant. """ if isinstance(expression, list): if isinstance(expression[0], list): <DeepExtract> if isinstance(expression[0], list): if isinstance(expression[0][0], list): function = self._execute_expression(expression[0][0]) elif expression[0][0] in self._functions: function = self._functions[expression[0][0]] elif self._allow_composition and expression[0][0] == '*': function = '*' elif isinstance(expression[0][0], str): raise ExecutionError(f'Unrecognized function: {expression[0][0]}') else: raise ExecutionError(f'Unsupported expression type: {expression[0]}') arguments = [self._execute_expression(arg) for arg in expression[0][1:]] try: if self._allow_composition and function == '*': function = self._create_composed_function(arguments[0], arguments[1]) function = function(*arguments) except (TypeError, ValueError): if self._allow_currying: curried_function = self._get_curried_function(function, arguments) if curried_function: function = curried_function traceback.print_exc() raise ExecutionError(f'Error executing expression {expression[0]} (see stderr for stack trace)') elif isinstance(expression[0], str): if expression[0] not in self._functions: raise ExecutionError(f'Unrecognized constant: {expression[0]}') if isinstance(self._function_types[expression[0]][0], FunctionType): function = self._functions[expression[0]] else: function = self._functions[expression[0]]() function = self._functions[expression[0]] else: raise ExecutionError('Not sure how you got here. 
Please open a github issue with details.') </DeepExtract> elif expression[0] in self._functions: function = self._functions[expression[0]] elif self._allow_composition and expression[0] == '*': function = '*' elif isinstance(expression[0], str): raise ExecutionError(f'Unrecognized function: {expression[0]}') else: raise ExecutionError(f'Unsupported expression type: {expression}') arguments = [self._execute_expression(arg) for arg in expression[1:]] try: if self._allow_composition and function == '*': return self._create_composed_function(arguments[0], arguments[1]) return function(*arguments) except (TypeError, ValueError): if self._allow_currying: <DeepExtract> signature = inspect.signature(function) parameters = signature.parameters if len(parameters) != len(arguments) + 1: curried_function = None missing_arg_index = 0 parameter_types = list(parameters.values()) for parameter in parameter_types: argument = arguments[missing_arg_index] if isinstance(argument, (list, set)): arg_type = infer_collection_type(argument) else: arg_type = type(argument) if parameter.annotation == arg_type: missing_arg_index += 1 if missing_arg_index == len(parameters) - 1: break else: break arg_type = parameter_types[missing_arg_index].annotation def curried_function(x: arg_type) -> signature.return_annotation: new_arguments = arguments[:missing_arg_index] + [x] + arguments[missing_arg_index:] curried_function = function(*new_arguments) curried_function = curried_function </DeepExtract> if curried_function: return curried_function traceback.print_exc() raise ExecutionError(f'Error executing expression {expression} (see stderr for stack trace)') elif isinstance(expression, str): if expression not in self._functions: raise ExecutionError(f'Unrecognized constant: {expression}') if isinstance(self._function_types[expression][0], FunctionType): return self._functions[expression] else: return self._functions[expression]() return self._functions[expression] else: raise ExecutionError('Not sure how you got here. Please open a github issue with details.')
def _execute_expression(self, expression: Any): """ This does the bulk of the work of executing a logical form, recursively executing a single expression. Basically, if the expression is a function we know about, we evaluate its arguments then call the function. If it's a list, we evaluate all elements of the list. If it's a constant (or a zero-argument function), we evaluate the constant. """ if isinstance(expression, list): if isinstance(expression[0], list): if isinstance(expression[0], list): if isinstance(expression[0][0], list): function = self._execute_expression(expression[0][0]) elif expression[0][0] in self._functions: function = self._functions[expression[0][0]] elif self._allow_composition and expression[0][0] == '*': function = '*' elif isinstance(expression[0][0], str): raise ExecutionError(f'Unrecognized function: {expression[0][0]}') else: raise ExecutionError(f'Unsupported expression type: {expression[0]}') arguments = [self._execute_expression(arg) for arg in expression[0][1:]] try: if self._allow_composition and function == '*': function = self._create_composed_function(arguments[0], arguments[1]) function = function(*arguments) except (TypeError, ValueError): if self._allow_currying: curried_function = self._get_curried_function(function, arguments) if curried_function: function = curried_function traceback.print_exc() raise ExecutionError(f'Error executing expression {expression[0]} (see stderr for stack trace)') elif isinstance(expression[0], str): if expression[0] not in self._functions: raise ExecutionError(f'Unrecognized constant: {expression[0]}') if isinstance(self._function_types[expression[0]][0], FunctionType): function = self._functions[expression[0]] else: function = self._functions[expression[0]]() function = self._functions[expression[0]] else: raise ExecutionError('Not sure how you got here. 
Please open a github issue with details.') elif expression[0] in self._functions: function = self._functions[expression[0]] elif self._allow_composition and expression[0] == '*': function = '*' elif isinstance(expression[0], str): raise ExecutionError(f'Unrecognized function: {expression[0]}') else: raise ExecutionError(f'Unsupported expression type: {expression}') arguments = [self._execute_expression(arg) for arg in expression[1:]] try: if self._allow_composition and function == '*': return self._create_composed_function(arguments[0], arguments[1]) return function(*arguments) except (TypeError, ValueError): if self._allow_currying: signature = inspect.signature(function) parameters = signature.parameters if len(parameters) != len(arguments) + 1: curried_function = None missing_arg_index = 0 parameter_types = list(parameters.values()) for parameter in parameter_types: argument = arguments[missing_arg_index] if isinstance(argument, (list, set)): arg_type = infer_collection_type(argument) else: arg_type = type(argument) if parameter.annotation == arg_type: missing_arg_index += 1 if missing_arg_index == len(parameters) - 1: break else: break arg_type = parameter_types[missing_arg_index].annotation def curried_function(x: arg_type) -> signature.return_annotation: new_arguments = arguments[:missing_arg_index] + [x] + arguments[missing_arg_index:] curried_function = function(*new_arguments) curried_function = curried_function if curried_function: return curried_function traceback.print_exc() raise ExecutionError(f'Error executing expression {expression} (see stderr for stack trace)') elif isinstance(expression, str): if expression not in self._functions: raise ExecutionError(f'Unrecognized constant: {expression}') if isinstance(self._function_types[expression][0], FunctionType): return self._functions[expression] else: return self._functions[expression]() return self._functions[expression] else: raise ExecutionError('Not sure how you got here. Please open a github issue with details.')
allennlp-semparse
positive
def cluster_ls(config: Config) -> None:
    for name in config.clusters:
        <DeepExtract>
        if name not in config.clusters:
            print('Unknown cluster', name)
            return 1
        args = config.get(['clusters', name])
        print(name)
        if name == config.default_cluster:
            print(' default')
        for (key, value) in args.items():
            print(f' {key}: {value}')
        </DeepExtract>
        print()
def cluster_ls(config: Config) -> None:
    for name in config.clusters:
        if name not in config.clusters:
            print('Unknown cluster', name)
            return 1
        args = config.get(['clusters', name])
        print(name)
        if name == config.default_cluster:
            print(' default')
        for (key, value) in args.items():
            print(f' {key}: {value}')
        print()
cowait
positive
def Show(**options):
    """Shows the plot. For options, see Config. options: keyword args used to invoke various pyplot functions """
    clf = options.pop('clf', True)
    <DeepExtract>
    names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale', 'xticks', 'yticks', 'axis', 'xlim', 'ylim']
    for name in names:
        if name in options:
            getattr(pyplot, name)(options[name])
    loc_dict = {'upper right': 1, 'upper left': 2, 'lower left': 3, 'lower right': 4, 'right': 5, 'center left': 6, 'center right': 7, 'lower center': 8, 'upper center': 9, 'center': 10}
    global LEGEND
    LEGEND = options.get('legend', LEGEND)
    if LEGEND:
        global LOC
        LOC = options.get('loc', LOC)
        pyplot.legend(loc=LOC)
    </DeepExtract>
    pyplot.show()
    if clf:
        <DeepExtract>
        global LOC
        LOC = None
        _Brewer.ClearIter()
        pyplot.clf()
        fig = pyplot.gcf()
        fig.set_size_inches(8, 6)
        </DeepExtract>
def Show(**options):
    """Shows the plot. For options, see Config. options: keyword args used to invoke various pyplot functions """
    clf = options.pop('clf', True)
    names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale', 'xticks', 'yticks', 'axis', 'xlim', 'ylim']
    for name in names:
        if name in options:
            getattr(pyplot, name)(options[name])
    loc_dict = {'upper right': 1, 'upper left': 2, 'lower left': 3, 'lower right': 4, 'right': 5, 'center left': 6, 'center right': 7, 'lower center': 8, 'upper center': 9, 'center': 10}
    global LEGEND
    LEGEND = options.get('legend', LEGEND)
    if LEGEND:
        global LOC
        LOC = options.get('loc', LOC)
        pyplot.legend(loc=LOC)
    pyplot.show()
    if clf:
        global LOC
        LOC = None
        _Brewer.ClearIter()
        pyplot.clf()
        fig = pyplot.gcf()
        fig.set_size_inches(8, 6)
        </DeepExtract>
data-science-ipython-notebooks
positive
@torch.no_grad() def feed_data(self, data): """Accept data from dataloader, and then add two-order degradations to obtain LQ images. """ if self.is_train and self.opt.get('high_order_degradation', True): self.gt = data['gt'].to(self.device) self.gt_usm = self.usm_sharpener(self.gt) self.kernel1 = data['kernel1'].to(self.device) self.kernel2 = data['kernel2'].to(self.device) self.sinc_kernel = data['sinc_kernel'].to(self.device) (ori_h, ori_w) = self.gt.size()[2:4] out = filter2D(self.gt_usm, self.kernel1) updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0] if updown_type == 'up': scale = np.random.uniform(1, self.opt['resize_range'][1]) elif updown_type == 'down': scale = np.random.uniform(self.opt['resize_range'][0], 1) else: scale = 1 mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate(out, scale_factor=scale, mode=mode) gray_noise_prob = self.opt['gray_noise_prob'] if np.random.uniform() < self.opt['gaussian_noise_prob']: out = random_add_gaussian_noise_pt(out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob) else: out = random_add_poisson_noise_pt(out, scale_range=self.opt['poisson_scale_range'], gray_prob=gray_noise_prob, clip=True, rounds=False) jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range']) out = torch.clamp(out, 0, 1) out = self.jpeger(out, quality=jpeg_p) if np.random.uniform() < self.opt['second_blur_prob']: out = filter2D(out, self.kernel2) updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0] if updown_type == 'up': scale = np.random.uniform(1, self.opt['resize_range2'][1]) elif updown_type == 'down': scale = np.random.uniform(self.opt['resize_range2'][0], 1) else: scale = 1 mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate(out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode) gray_noise_prob = self.opt['gray_noise_prob2'] if np.random.uniform() < self.opt['gaussian_noise_prob2']: out = random_add_gaussian_noise_pt(out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob) else: out = random_add_poisson_noise_pt(out, scale_range=self.opt['poisson_scale_range2'], gray_prob=gray_noise_prob, clip=True, rounds=False) if np.random.uniform() < 0.5: mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) out = filter2D(out, self.sinc_kernel) jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) out = torch.clamp(out, 0, 1) out = self.jpeger(out, quality=jpeg_p) else: jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) out = torch.clamp(out, 0, 1) out = self.jpeger(out, quality=jpeg_p) mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) out = filter2D(out, self.sinc_kernel) self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.0 gt_size = self.opt['gt_size'] ((self.gt, self.gt_usm), self.lq) = paired_random_crop([self.gt, self.gt_usm], self.lq, gt_size, self.opt['scale']) <DeepExtract> (b, c, h, w) = self.lq.size() if not hasattr(self, 'queue_lr'): assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() (_, c, h, w) = self.gt.size() self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() 
self.queue_ptr = 0 if self.queue_ptr == self.queue_size: idx = torch.randperm(self.queue_size) self.queue_lr = self.queue_lr[idx] self.queue_gt = self.queue_gt[idx] lq_dequeue = self.queue_lr[0:b, :, :, :].clone() gt_dequeue = self.queue_gt[0:b, :, :, :].clone() self.queue_lr[0:b, :, :, :] = self.lq.clone() self.queue_gt[0:b, :, :, :] = self.gt.clone() self.lq = lq_dequeue self.gt = gt_dequeue else: self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() self.queue_ptr = self.queue_ptr + b </DeepExtract> self.gt_usm = self.usm_sharpener(self.gt) self.lq = self.lq.contiguous() else: self.lq = data['lq'].to(self.device) if 'gt' in data: self.gt = data['gt'].to(self.device) self.gt_usm = self.usm_sharpener(self.gt)
@torch.no_grad() def feed_data(self, data): """Accept data from dataloader, and then add two-order degradations to obtain LQ images. """ if self.is_train and self.opt.get('high_order_degradation', True): self.gt = data['gt'].to(self.device) self.gt_usm = self.usm_sharpener(self.gt) self.kernel1 = data['kernel1'].to(self.device) self.kernel2 = data['kernel2'].to(self.device) self.sinc_kernel = data['sinc_kernel'].to(self.device) (ori_h, ori_w) = self.gt.size()[2:4] out = filter2D(self.gt_usm, self.kernel1) updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0] if updown_type == 'up': scale = np.random.uniform(1, self.opt['resize_range'][1]) elif updown_type == 'down': scale = np.random.uniform(self.opt['resize_range'][0], 1) else: scale = 1 mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate(out, scale_factor=scale, mode=mode) gray_noise_prob = self.opt['gray_noise_prob'] if np.random.uniform() < self.opt['gaussian_noise_prob']: out = random_add_gaussian_noise_pt(out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob) else: out = random_add_poisson_noise_pt(out, scale_range=self.opt['poisson_scale_range'], gray_prob=gray_noise_prob, clip=True, rounds=False) jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range']) out = torch.clamp(out, 0, 1) out = self.jpeger(out, quality=jpeg_p) if np.random.uniform() < self.opt['second_blur_prob']: out = filter2D(out, self.kernel2) updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0] if updown_type == 'up': scale = np.random.uniform(1, self.opt['resize_range2'][1]) elif updown_type == 'down': scale = np.random.uniform(self.opt['resize_range2'][0], 1) else: scale = 1 mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate(out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode) gray_noise_prob = self.opt['gray_noise_prob2'] if np.random.uniform() < self.opt['gaussian_noise_prob2']: out = random_add_gaussian_noise_pt(out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob) else: out = random_add_poisson_noise_pt(out, scale_range=self.opt['poisson_scale_range2'], gray_prob=gray_noise_prob, clip=True, rounds=False) if np.random.uniform() < 0.5: mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) out = filter2D(out, self.sinc_kernel) jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) out = torch.clamp(out, 0, 1) out = self.jpeger(out, quality=jpeg_p) else: jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) out = torch.clamp(out, 0, 1) out = self.jpeger(out, quality=jpeg_p) mode = random.choice(['area', 'bilinear', 'bicubic']) out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) out = filter2D(out, self.sinc_kernel) self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.0 gt_size = self.opt['gt_size'] ((self.gt, self.gt_usm), self.lq) = paired_random_crop([self.gt, self.gt_usm], self.lq, gt_size, self.opt['scale']) (b, c, h, w) = self.lq.size() if not hasattr(self, 'queue_lr'): assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() (_, c, h, w) = self.gt.size() self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() self.queue_ptr = 0 
if self.queue_ptr == self.queue_size: idx = torch.randperm(self.queue_size) self.queue_lr = self.queue_lr[idx] self.queue_gt = self.queue_gt[idx] lq_dequeue = self.queue_lr[0:b, :, :, :].clone() gt_dequeue = self.queue_gt[0:b, :, :, :].clone() self.queue_lr[0:b, :, :, :] = self.lq.clone() self.queue_gt[0:b, :, :, :] = self.gt.clone() self.lq = lq_dequeue self.gt = gt_dequeue else: self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() self.queue_ptr = self.queue_ptr + b self.gt_usm = self.usm_sharpener(self.gt) self.lq = self.lq.contiguous() else: self.lq = data['lq'].to(self.device) if 'gt' in data: self.gt = data['gt'].to(self.device) self.gt_usm = self.usm_sharpener(self.gt)
BasicSR
positive
def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): """The `model_fn` for TPUEstimator.""" tf.logging.info('*** Features ***') for name in sorted(features.keys()): tf.logging.info(' name = %s, shape = %s' % (name, features[name].shape)) unique_ids = features['unique_ids'] input_ids = features['input_ids'] input_mask = features['input_mask'] segment_ids = features['segment_ids'] is_training = mode == tf.estimator.ModeKeys.TRAIN <DeepExtract> model = modeling.BertModel(config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) final_hidden = model.get_sequence_output() final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3) batch_size = final_hidden_shape[0] seq_length = final_hidden_shape[1] hidden_size = final_hidden_shape[2] output_weights = tf.get_variable('cls/squad/output_weights', [2, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable('cls/squad/output_bias', [2], initializer=tf.zeros_initializer()) final_hidden_matrix = tf.reshape(final_hidden, [batch_size * seq_length, hidden_size]) logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) logits = tf.reshape(logits, [batch_size, seq_length, 2]) logits = tf.transpose(logits, [2, 0, 1]) unstacked_logits = tf.unstack(logits, axis=0) (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1]) (start_logits, end_logits) = (start_logits, end_logits) </DeepExtract> tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names) = modeling.get_assigment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info('**** Trainable Variables ****') for var in tvars: init_string = '' if var.name in initialized_variable_names: init_string = ', *INIT_FROM_CKPT*' tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: seq_length = modeling.get_shape_list(input_ids)[1] def compute_loss(logits, positions): one_hot_positions = tf.one_hot(positions, depth=seq_length, dtype=tf.float32) log_probs = tf.nn.log_softmax(logits, axis=-1) loss = -tf.reduce_mean(tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) return loss start_positions = features['start_positions'] end_positions = features['end_positions'] <DeepExtract> one_hot_positions = tf.one_hot(start_positions, depth=seq_length, dtype=tf.float32) log_probs = tf.nn.log_softmax(start_logits, axis=-1) loss = -tf.reduce_mean(tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) start_loss = loss </DeepExtract> <DeepExtract> one_hot_positions = tf.one_hot(end_positions, depth=seq_length, dtype=tf.float32) log_probs = tf.nn.log_softmax(end_logits, axis=-1) loss = -tf.reduce_mean(tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) end_loss = loss </DeepExtract> total_loss = (start_loss + end_loss) / 2.0 train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, 
num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.PREDICT: predictions = {'unique_ids': unique_ids, 'start_logits': start_logits, 'end_logits': end_logits} output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) else: raise ValueError('Only TRAIN and PREDICT modes are supported: %s' % mode) return output_spec return model_fn
def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): """The `model_fn` for TPUEstimator.""" tf.logging.info('*** Features ***') for name in sorted(features.keys()): tf.logging.info(' name = %s, shape = %s' % (name, features[name].shape)) unique_ids = features['unique_ids'] input_ids = features['input_ids'] input_mask = features['input_mask'] segment_ids = features['segment_ids'] is_training = mode == tf.estimator.ModeKeys.TRAIN model = modeling.BertModel(config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) final_hidden = model.get_sequence_output() final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3) batch_size = final_hidden_shape[0] seq_length = final_hidden_shape[1] hidden_size = final_hidden_shape[2] output_weights = tf.get_variable('cls/squad/output_weights', [2, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable('cls/squad/output_bias', [2], initializer=tf.zeros_initializer()) final_hidden_matrix = tf.reshape(final_hidden, [batch_size * seq_length, hidden_size]) logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) logits = tf.reshape(logits, [batch_size, seq_length, 2]) logits = tf.transpose(logits, [2, 0, 1]) unstacked_logits = tf.unstack(logits, axis=0) (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1]) (start_logits, end_logits) = (start_logits, end_logits) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names) = modeling.get_assigment_map_from_checkpoint(tvars, init_checkpoint) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info('**** Trainable Variables ****') for var in tvars: init_string = '' if var.name in initialized_variable_names: init_string = ', *INIT_FROM_CKPT*' tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: seq_length = modeling.get_shape_list(input_ids)[1] def compute_loss(logits, positions): one_hot_positions = tf.one_hot(positions, depth=seq_length, dtype=tf.float32) log_probs = tf.nn.log_softmax(logits, axis=-1) loss = -tf.reduce_mean(tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) return loss start_positions = features['start_positions'] end_positions = features['end_positions'] one_hot_positions = tf.one_hot(start_positions, depth=seq_length, dtype=tf.float32) log_probs = tf.nn.log_softmax(start_logits, axis=-1) loss = -tf.reduce_mean(tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) start_loss = loss one_hot_positions = tf.one_hot(end_positions, depth=seq_length, dtype=tf.float32) log_probs = tf.nn.log_softmax(end_logits, axis=-1) loss = -tf.reduce_mean(tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) end_loss = loss total_loss = (start_loss + end_loss) / 2.0 train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, 
train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.PREDICT: predictions = {'unique_ids': unique_ids, 'start_logits': start_logits, 'end_logits': end_logits} output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) else: raise ValueError('Only TRAIN and PREDICT modes are supported: %s' % mode) return output_spec return model_fn
DAPPLE
positive
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
    net = None
    <DeepExtract>
    if norm == 'batch':
        norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
    elif norm == 'instance':
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=True)
    elif norm == 'none':
        norm_layer = None
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm)
    norm_layer = norm_layer
    </DeepExtract>
    if netD == 'basic':
        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
    elif netD == 'n_layers':
        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
    elif netD == 'pixel':
        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % net)
    return init_net(net, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
    net = None
    if norm == 'batch':
        norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
    elif norm == 'instance':
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=True)
    elif norm == 'none':
        norm_layer = None
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm)
    norm_layer = norm_layer
    if netD == 'basic':
        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
    elif netD == 'n_layers':
        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
    elif netD == 'pixel':
        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % net)
    return init_net(net, init_type, init_gain, gpu_ids)
dualFace
positive
def get_word_with_pos(text, type):
    <DeepExtract>
    tagged = nltk.tag.pos_tag(text.split())
    tagged = tagged
    </DeepExtract>
    words = [w for (w, pos) in tagged if pos == type]
    return words
def get_word_with_pos(text, type):
    tagged = nltk.tag.pos_tag(text.split())
    tagged = tagged
    words = [w for (w, pos) in tagged if pos == type]
    return words
aurum-datadiscovery
positive
def execute_command(command: Union[str, List[str]], shell: bool=True, no_fail: bool=False, executable: Optional[str]=None) -> Tuple[Optional[list], Optional[list]]: """ Execute a command. Notes: If ``no_fail`` is ``True``, then a warning is logged and ``False`` is returned so that the calling function can debug the situation. Args: command (Union[str, List[str]]): An array of string commands to send. shell (bool, optional): Specifies whether the command should be executed using bash instead of Python. no_fail (bool, optional): If ``True`` then ARC will not crash if an error is encountered. executable (str, optional): Select a specific shell to run with, e.g., '/bin/bash'. Default shell of the subprocess command is '/bin/sh'. Returns: Tuple[list, list]: - A list of lines of standard output stream. - A list of lines of the standard error stream. """ error = None if not isinstance(command, list): command = [command] command = [' && '.join(command)] (i, max_times_to_try) = (1, 30) sleep_time = 60 while i < max_times_to_try: try: if executable is None: completed_process = subprocess.run(command, shell=shell, capture_output=True) else: completed_process = subprocess.run(command, shell=shell, capture_output=True, executable=executable) return (_format_stdout(completed_process.stdout), _format_stdout(completed_process.stderr)) except subprocess.CalledProcessError as e: error = e if no_fail: <DeepExtract> logger.warning('The server command is erroneous.') logger.warning(f'Tried to submit the following command:\n{command}') logger.warning('And got the following status (cmd, message, output, return code)') logger.warning(e.cmd) logger.info('\n') logger.warning(e) logger.info('\n') logger.warning(e.output) logger.info('\n') logger.warning(e.returncode) </DeepExtract> return (None, None) else: <DeepExtract> logger.error('The server command is erroneous.') logger.error(f'Tried to submit the following command:\n{command}') logger.error('And got the following status (cmd, message, output, return code)') logger.error(e.cmd) logger.info('\n') logger.error(e) logger.info('\n') logger.error(e.output) logger.info('\n') logger.error(e.returncode) </DeepExtract> logger.error(f'ARC is sleeping for {sleep_time * i} seconds before retrying.\nPlease check whether this is a server issue by executing the command manually on the server.') logger.info('ZZZZZ..... ZZZZZ.....') time.sleep(sleep_time * i) i += 1 raise SettingsError(f'The command "{command}" is erroneous, got: \n{error}\nThis maybe either a server issue or the command is wrong.\nTo check if this is a server issue, please run the command on server and restart ARC.\nTo correct the command, modify settings.py\nTips: use "which" command to locate cluster software commands on server.\nExample: type "which sbatch" on a server running Slurm to find the correct sbatch path required in the submit_command dictionary.')
def execute_command(command: Union[str, List[str]], shell: bool=True, no_fail: bool=False, executable: Optional[str]=None) -> Tuple[Optional[list], Optional[list]]: """ Execute a command. Notes: If ``no_fail`` is ``True``, then a warning is logged and ``False`` is returned so that the calling function can debug the situation. Args: command (Union[str, List[str]]): An array of string commands to send. shell (bool, optional): Specifies whether the command should be executed using bash instead of Python. no_fail (bool, optional): If ``True`` then ARC will not crash if an error is encountered. executable (str, optional): Select a specific shell to run with, e.g., '/bin/bash'. Default shell of the subprocess command is '/bin/sh'. Returns: Tuple[list, list]: - A list of lines of standard output stream. - A list of lines of the standard error stream. """ error = None if not isinstance(command, list): command = [command] command = [' && '.join(command)] (i, max_times_to_try) = (1, 30) sleep_time = 60 while i < max_times_to_try: try: if executable is None: completed_process = subprocess.run(command, shell=shell, capture_output=True) else: completed_process = subprocess.run(command, shell=shell, capture_output=True, executable=executable) return (_format_stdout(completed_process.stdout), _format_stdout(completed_process.stderr)) except subprocess.CalledProcessError as e: error = e if no_fail: logger.warning('The server command is erroneous.') logger.warning(f'Tried to submit the following command:\n{command}') logger.warning('And got the following status (cmd, message, output, return code)') logger.warning(e.cmd) logger.info('\n') logger.warning(e) logger.info('\n') logger.warning(e.output) logger.info('\n') logger.warning(e.returncode) return (None, None) else: logger.error('The server command is erroneous.') logger.error(f'Tried to submit the following command:\n{command}') logger.error('And got the following status (cmd, message, output, return code)') logger.error(e.cmd) logger.info('\n') logger.error(e) logger.info('\n') logger.error(e.output) logger.info('\n') logger.error(e.returncode) logger.error(f'ARC is sleeping for {sleep_time * i} seconds before retrying.\nPlease check whether this is a server issue by executing the command manually on the server.') logger.info('ZZZZZ..... ZZZZZ.....') time.sleep(sleep_time * i) i += 1 raise SettingsError(f'The command "{command}" is erroneous, got: \n{error}\nThis maybe either a server issue or the command is wrong.\nTo check if this is a server issue, please run the command on server and restart ARC.\nTo correct the command, modify settings.py\nTips: use "which" command to locate cluster software commands on server.\nExample: type "which sbatch" on a server running Slurm to find the correct sbatch path required in the submit_command dictionary.')
ARC
positive
def array_to_binary_tree(array, start, end):
    if start > end:
        return None
    mid = (start + end) // 2
    root = Node(array[mid])
    <DeepExtract>
    if start > mid - 1:
        root.left = None
    mid = (start + mid - 1) // 2
    root = Node(array[mid])
    root.left = array_to_binary_tree(array, start, mid - 1)
    root.right = array_to_binary_tree(array, mid + 1, mid - 1)
    root.left = root
    </DeepExtract>
    <DeepExtract>
    if mid + 1 > end:
        root.right = None
    mid = (mid + 1 + end) // 2
    root = Node(array[mid])
    root.left = array_to_binary_tree(array, mid + 1, mid - 1)
    root.right = array_to_binary_tree(array, mid + 1, end)
    root.right = root
    </DeepExtract>
    return root
def array_to_binary_tree(array, start, end):
    if start > end:
        return None
    mid = (start + end) // 2
    root = Node(array[mid])
    if start > mid - 1:
        root.left = None
    mid = (start + mid - 1) // 2
    root = Node(array[mid])
    root.left = array_to_binary_tree(array, start, mid - 1)
    root.right = array_to_binary_tree(array, mid + 1, mid - 1)
    root.left = root
    if mid + 1 > end:
        root.right = None
    mid = (mid + 1 + end) // 2
    root = Node(array[mid])
    root.left = array_to_binary_tree(array, mid + 1, mid - 1)
    root.right = array_to_binary_tree(array, mid + 1, end)
    root.right = root
    return root
CtCI-6th-Edition-Python
positive
def parse_amount(x, assumed_currency=None): """Parses a number and currency.""" if not x: return None <DeepExtract> x = x.strip() m = re.fullmatch('^(-|\\+)(.*)$', x) if m is not None: sign = -1 if m.group(1) == '-' else 1 (sign, amount_str) = (sign, m.group(2).strip()) (sign, amount_str) = parse_negative_parentheses(x) </DeepExtract> m = re.fullmatch('(?:[(][^)]+[)])?\\s*([\\$€£]|[A-Z]{3})?\\s*((?:[0-9](?:,?[0-9])*|(?=\\.))(?:\\.[0-9]+)?)(?:\\s+([\\$€£]|[A-Z]{3}))?', amount_str) if m is None: raise ValueError('Failed to parse amount from %r' % amount_str) if m.group(1): if len(m.group(1)) == 3: currency = m.group(1) else: currency = {'$': 'USD', '€': 'EUR', '£': 'GBP'}[m.group(1)] elif m.group(3): if len(m.group(3)) == 3: currency = m.group(3) else: currency = {'$': 'USD', '€': 'EUR', '£': 'GBP'}[m.group(3)] elif assumed_currency is not None: currency = assumed_currency else: raise ValueError('Failed to determine currency from %r' % amount_str) number = D(m.group(2)) return Amount(number * sign, currency)
def parse_amount(x, assumed_currency=None): """Parses a number and currency.""" if not x: return None x = x.strip() m = re.fullmatch('^(-|\\+)(.*)$', x) if m is not None: sign = -1 if m.group(1) == '-' else 1 (sign, amount_str) = (sign, m.group(2).strip()) (sign, amount_str) = parse_negative_parentheses(x) m = re.fullmatch('(?:[(][^)]+[)])?\\s*([\\$€£]|[A-Z]{3})?\\s*((?:[0-9](?:,?[0-9])*|(?=\\.))(?:\\.[0-9]+)?)(?:\\s+([\\$€£]|[A-Z]{3}))?', amount_str) if m is None: raise ValueError('Failed to parse amount from %r' % amount_str) if m.group(1): if len(m.group(1)) == 3: currency = m.group(1) else: currency = {'$': 'USD', '€': 'EUR', '£': 'GBP'}[m.group(1)] elif m.group(3): if len(m.group(3)) == 3: currency = m.group(3) else: currency = {'$': 'USD', '€': 'EUR', '£': 'GBP'}[m.group(3)] elif assumed_currency is not None: currency = assumed_currency else: raise ValueError('Failed to determine currency from %r' % amount_str) number = D(m.group(2)) return Amount(number * sign, currency)
beancount-import
positive
def fit(self, X, y=None):
    X = process_dataframe(X, False)
    self.column2dict = {column: Dictionary(X[column]) for column in X.columns}
    self.column2transformer = {}
    for column in X.columns:
        klass = import_by_package_url(self.transformer_package)
        kwargs = gather_kwargs_from_signature_and_attributes(klass, self)
        kwargs.update({'id2word': self.column2dict[column]})
        transformer = klass(**kwargs)
        <DeepExtract>
        dic = self.column2dict[column]
        bows = [dic.doc2bow(item) for item in X[column]]
        bows = bows
        </DeepExtract>
        self.column2transformer[column] = transformer.fit(bows)
    return self
def fit(self, X, y=None):
    X = process_dataframe(X, False)
    self.column2dict = {column: Dictionary(X[column]) for column in X.columns}
    self.column2transformer = {}
    for column in X.columns:
        klass = import_by_package_url(self.transformer_package)
        kwargs = gather_kwargs_from_signature_and_attributes(klass, self)
        kwargs.update({'id2word': self.column2dict[column]})
        transformer = klass(**kwargs)
        dic = self.column2dict[column]
        bows = [dic.doc2bow(item) for item in X[column]]
        bows = bows
        self.column2transformer[column] = transformer.fit(bows)
    return self
auto-flow
positive
def list(self, request, **kwargs):
    <DeepExtract>
    app = self.get_queryset()[0]
    </DeepExtract>
    perm_name = 'api.{}'.format(self.perm)
    usernames = [u.username for u in get_users_with_perms(app) if u.has_perm(perm_name, app)]
    return Response({'users': usernames})
def list(self, request, **kwargs):
    app = self.get_queryset()[0]
    perm_name = 'api.{}'.format(self.perm)
    usernames = [u.username for u in get_users_with_perms(app) if u.has_perm(perm_name, app)]
    return Response({'users': usernames})
controller
positive
def __str__(self):
    <DeepExtract>
    for k in self.__dict__:
        if isinstance(self.__dict__[k], np.float32):
            self.__dict__[k] = np.float64(self.__dict__[k])
    </DeepExtract>
    return self.formatter.format(self)
def __str__(self):
    for k in self.__dict__:
        if isinstance(self.__dict__[k], np.float32):
            self.__dict__[k] = np.float64(self.__dict__[k])
    return self.formatter.format(self)
Aegean
positive
def make_bstable(self): """ This function calculates bi-spectra from full complex visibility data. It will output uvdata.BSTable object. Args: N/A Returns: uvdata.BSTable object """ import multiprocessing as mp Ndata = len(self['u']) timetag = [] for i in np.arange(Ndata): timetag.append('%04d-%03d-%02d-%02d-%5.2f_%d' % (self.loc[i, 'year'], self.loc[i, 'doy'], self.loc[i, 'hour'], self.loc[i, 'min'], self.loc[i, 'sec'], self.loc[i, 'ch'])) timetag = np.asarray(timetag) timetagset = sorted(set(timetag)) Ntt = len(timetagset) bstable = {} for column in BSTable.bstable_columns: if column in ['uvdistave', 'uvdistmax', 'uvdistmin', 'uvdist12', 'uvdist23', 'uvdist31']: continue bstable[column] = [] for itt in np.arange(Ntt): idx = timetag == timetagset[itt] sts = self.loc[idx, 'st1'].tolist() + self.loc[idx, 'st2'].tolist() sts = sorted(set(sts)) Nsts = len(sts) if Nsts < 3: continue stsid = np.arange(Nsts) Ntrimax = (Nsts - 1) * (Nsts - 2) / 2 Nbl = Nsts * (Nsts - 1) / 2 rank = 0 Ntri = 0 matrix = None for (stid1, stid2, stid3) in itertools.combinations(stsid, 3): if Ntri >= Ntrimax: break st1 = sts[stid1] st2 = sts[stid2] st3 = sts[stid3] <DeepExtract> stmin = np.min([stid1, stid2]) stmax = np.max([stid1, stid2]) blid1 = stmin * Nsts - stmin * (stmin + 1) / 2 + stmax - stmin - 1 </DeepExtract> <DeepExtract> stmin = np.min([stid2, stid3]) stmax = np.max([stid2, stid3]) blid2 = stmin * Nsts - stmin * (stmin + 1) / 2 + stmax - stmin - 1 </DeepExtract> <DeepExtract> stmin = np.min([stid1, stid3]) stmax = np.max([stid1, stid3]) blid3 = stmin * Nsts - stmin * (stmin + 1) / 2 + stmax - stmin - 1 </DeepExtract> row = np.zeros(Nbl) row[blid1] = 1 row[blid2] = 1 row[blid3] = -1 if matrix is None: tmpmatrix = np.asarray([row]) else: tmpmatrix = np.append(matrix, row).reshape(Ntri + 1, Nbl) tmprank = np.linalg.matrix_rank(tmpmatrix) if rank == tmprank: continue isbl1 = True isbl2 = True isbl3 = True bl1idx = idx & (self['st1'] == st1) & (self['st2'] == st2) bl2idx = idx & (self['st1'] == st2) & (self['st2'] == st3) bl3idx = idx & (self['st1'] == st1) & (self['st2'] == st3) if np.where(bl1idx)[0].shape[0] == 0: isbl1 = False if np.where(bl2idx)[0].shape[0] == 0: isbl2 = False if np.where(bl3idx)[0].shape[0] == 0: isbl3 = False if False in [isbl1, isbl2, isbl3]: continue bl1data = self.loc[bl1idx, :].reset_index(drop=True).loc[0, :] bl2data = self.loc[bl2idx, :].reset_index(drop=True).loc[0, :] bl3data = self.loc[bl3idx, :].reset_index(drop=True).loc[0, :] amp = bl1data.loc['amp'] * bl2data.loc['amp'] * bl3data.loc['amp'] phase = bl1data.loc['phase'] + bl2data.loc['phase'] - bl3data.loc['phase'] ratio_12 = bl1data.loc['sigma'] / bl1data.loc['amp'] ratio_23 = bl2data.loc['sigma'] / bl2data.loc['amp'] ratio_13 = bl3data.loc['sigma'] / bl3data.loc['amp'] sigma = amp * np.sqrt(ratio_12 ** 2 + ratio_23 ** 2 + ratio_13 ** 2) bstable['jd'].append(bl1data.loc['jd']) bstable['year'].append(bl1data.loc['year']) bstable['doy'].append(bl1data.loc['doy']) bstable['hour'].append(bl1data.loc['hour']) bstable['min'].append(bl1data.loc['min']) bstable['sec'].append(bl1data.loc['sec']) bstable['freq'].append(bl1data.loc['freq']) bstable['stokesid'].append(bl1data.loc['stokesid']) bstable['bandid'].append(bl1data.loc['bandid']) bstable['ifid'].append(bl1data.loc['ifid']) bstable['ch'].append(bl1data.loc['ch']) bstable['u12'].append(bl1data.loc['u']) bstable['v12'].append(bl1data.loc['v']) bstable['w12'].append(bl1data.loc['w']) bstable['u23'].append(bl2data.loc['u']) bstable['v23'].append(bl2data.loc['v']) 
bstable['w23'].append(bl2data.loc['w']) bstable['u31'].append(-bl3data.loc['u']) bstable['v31'].append(-bl3data.loc['v']) bstable['w31'].append(-bl3data.loc['w']) bstable['st1'].append(st1) bstable['st2'].append(st2) bstable['st3'].append(st3) bstable['amp'].append(amp) bstable['phase'].append(phase) bstable['sigma'].append(sigma) rank += 1 Ntri += 1 matrix = tmpmatrix.copy() bstable = BSTable(bstable) bstable['uvdist12'] = np.sqrt(np.square(bstable['u12']) + np.square(bstable['v12'])) bstable['uvdist23'] = np.sqrt(np.square(bstable['u23']) + np.square(bstable['v23'])) bstable['uvdist31'] = np.sqrt(np.square(bstable['u31']) + np.square(bstable['v31'])) bstable['uvdistave'] = bstable['uvdist12'] bstable['uvdistmin'] = bstable['uvdist12'] bstable['uvdistmax'] = bstable['uvdist12'] for i in np.arange(len(bstable['uvdist12'])): uvdists = bstable.loc[i, ['uvdist12', 'uvdist23', 'uvdist31']] bstable.loc[i, 'uvdistave'] = np.mean(uvdists) bstable.loc[i, 'uvdistmax'] = np.max(uvdists) bstable.loc[i, 'uvdistmin'] = np.min(uvdists) bstable = bstable[BSTable.bstable_columns] for i in np.arange(len(BSTable.bstable_columns)): column = BSTable.bstable_columns[i] bstable[column] = BSTable.bstable_types[i](bstable[column]) bstable['phase'] *= np.pi / 180.0 bstable['phase'] = np.arctan2(np.sin(bstable['phase']), np.cos(bstable['phase'])) * 180.0 / np.pi return bstable
def make_bstable(self): """ This function calculates bi-spectra from full complex visibility data. It will output uvdata.BSTable object. Args: N/A Returns: uvdata.BSTable object """ import multiprocessing as mp Ndata = len(self['u']) timetag = [] for i in np.arange(Ndata): timetag.append('%04d-%03d-%02d-%02d-%5.2f_%d' % (self.loc[i, 'year'], self.loc[i, 'doy'], self.loc[i, 'hour'], self.loc[i, 'min'], self.loc[i, 'sec'], self.loc[i, 'ch'])) timetag = np.asarray(timetag) timetagset = sorted(set(timetag)) Ntt = len(timetagset) bstable = {} for column in BSTable.bstable_columns: if column in ['uvdistave', 'uvdistmax', 'uvdistmin', 'uvdist12', 'uvdist23', 'uvdist31']: continue bstable[column] = [] for itt in np.arange(Ntt): idx = timetag == timetagset[itt] sts = self.loc[idx, 'st1'].tolist() + self.loc[idx, 'st2'].tolist() sts = sorted(set(sts)) Nsts = len(sts) if Nsts < 3: continue stsid = np.arange(Nsts) Ntrimax = (Nsts - 1) * (Nsts - 2) / 2 Nbl = Nsts * (Nsts - 1) / 2 rank = 0 Ntri = 0 matrix = None for (stid1, stid2, stid3) in itertools.combinations(stsid, 3): if Ntri >= Ntrimax: break st1 = sts[stid1] st2 = sts[stid2] st3 = sts[stid3] stmin = np.min([stid1, stid2]) stmax = np.max([stid1, stid2]) blid1 = stmin * Nsts - stmin * (stmin + 1) / 2 + stmax - stmin - 1 stmin = np.min([stid2, stid3]) stmax = np.max([stid2, stid3]) blid2 = stmin * Nsts - stmin * (stmin + 1) / 2 + stmax - stmin - 1 stmin = np.min([stid1, stid3]) stmax = np.max([stid1, stid3]) blid3 = stmin * Nsts - stmin * (stmin + 1) / 2 + stmax - stmin - 1 row = np.zeros(Nbl) row[blid1] = 1 row[blid2] = 1 row[blid3] = -1 if matrix is None: tmpmatrix = np.asarray([row]) else: tmpmatrix = np.append(matrix, row).reshape(Ntri + 1, Nbl) tmprank = np.linalg.matrix_rank(tmpmatrix) if rank == tmprank: continue isbl1 = True isbl2 = True isbl3 = True bl1idx = idx & (self['st1'] == st1) & (self['st2'] == st2) bl2idx = idx & (self['st1'] == st2) & (self['st2'] == st3) bl3idx = idx & (self['st1'] == st1) & (self['st2'] == st3) if np.where(bl1idx)[0].shape[0] == 0: isbl1 = False if np.where(bl2idx)[0].shape[0] == 0: isbl2 = False if np.where(bl3idx)[0].shape[0] == 0: isbl3 = False if False in [isbl1, isbl2, isbl3]: continue bl1data = self.loc[bl1idx, :].reset_index(drop=True).loc[0, :] bl2data = self.loc[bl2idx, :].reset_index(drop=True).loc[0, :] bl3data = self.loc[bl3idx, :].reset_index(drop=True).loc[0, :] amp = bl1data.loc['amp'] * bl2data.loc['amp'] * bl3data.loc['amp'] phase = bl1data.loc['phase'] + bl2data.loc['phase'] - bl3data.loc['phase'] ratio_12 = bl1data.loc['sigma'] / bl1data.loc['amp'] ratio_23 = bl2data.loc['sigma'] / bl2data.loc['amp'] ratio_13 = bl3data.loc['sigma'] / bl3data.loc['amp'] sigma = amp * np.sqrt(ratio_12 ** 2 + ratio_23 ** 2 + ratio_13 ** 2) bstable['jd'].append(bl1data.loc['jd']) bstable['year'].append(bl1data.loc['year']) bstable['doy'].append(bl1data.loc['doy']) bstable['hour'].append(bl1data.loc['hour']) bstable['min'].append(bl1data.loc['min']) bstable['sec'].append(bl1data.loc['sec']) bstable['freq'].append(bl1data.loc['freq']) bstable['stokesid'].append(bl1data.loc['stokesid']) bstable['bandid'].append(bl1data.loc['bandid']) bstable['ifid'].append(bl1data.loc['ifid']) bstable['ch'].append(bl1data.loc['ch']) bstable['u12'].append(bl1data.loc['u']) bstable['v12'].append(bl1data.loc['v']) bstable['w12'].append(bl1data.loc['w']) bstable['u23'].append(bl2data.loc['u']) bstable['v23'].append(bl2data.loc['v']) bstable['w23'].append(bl2data.loc['w']) bstable['u31'].append(-bl3data.loc['u']) 
bstable['v31'].append(-bl3data.loc['v']) bstable['w31'].append(-bl3data.loc['w']) bstable['st1'].append(st1) bstable['st2'].append(st2) bstable['st3'].append(st3) bstable['amp'].append(amp) bstable['phase'].append(phase) bstable['sigma'].append(sigma) rank += 1 Ntri += 1 matrix = tmpmatrix.copy() bstable = BSTable(bstable) bstable['uvdist12'] = np.sqrt(np.square(bstable['u12']) + np.square(bstable['v12'])) bstable['uvdist23'] = np.sqrt(np.square(bstable['u23']) + np.square(bstable['v23'])) bstable['uvdist31'] = np.sqrt(np.square(bstable['u31']) + np.square(bstable['v31'])) bstable['uvdistave'] = bstable['uvdist12'] bstable['uvdistmin'] = bstable['uvdist12'] bstable['uvdistmax'] = bstable['uvdist12'] for i in np.arange(len(bstable['uvdist12'])): uvdists = bstable.loc[i, ['uvdist12', 'uvdist23', 'uvdist31']] bstable.loc[i, 'uvdistave'] = np.mean(uvdists) bstable.loc[i, 'uvdistmax'] = np.max(uvdists) bstable.loc[i, 'uvdistmin'] = np.min(uvdists) bstable = bstable[BSTable.bstable_columns] for i in np.arange(len(BSTable.bstable_columns)): column = BSTable.bstable_columns[i] bstable[column] = BSTable.bstable_types[i](bstable[column]) bstable['phase'] *= np.pi / 180.0 bstable['phase'] = np.arctan2(np.sin(bstable['phase']), np.cos(bstable['phase'])) * 180.0 / np.pi return bstable
eat
positive
def test_unconfirmed_and_collaborative(self):
    <DeepExtract>
    self.project = prepare_project(DOCUMENT_CLASSIFICATION, collaborative_annotation=True)
    self.example1 = mommy.make('ExportedExample', project=self.project.item, text='example1')
    self.example2 = mommy.make('ExportedExample', project=self.project.item, text='example2')
    self.category1 = mommy.make('ExportedCategory', example=self.example1, user=self.project.admin)
    self.category2 = mommy.make('ExportedCategory', example=self.example1, user=self.project.annotator)
    self.comment1 = mommy.make('ExportedComment', example=self.example1, user=self.project.admin)
    self.comment2 = mommy.make('ExportedComment', example=self.example1, user=self.project.annotator)
    mommy.make('ExampleState', example=self.example1, confirmed_by=self.project.admin)
    self.data1 = self.data_to_text(self.example1)
    self.data2 = self.data_to_text(self.example2)
    </DeepExtract>
    <DeepExtract>
    file = export_dataset(self.project.id, 'JSONL', confirmed_only)
    if self.project.item.collaborative_annotation:
        dataset = pd.read_json(file, lines=True).to_dict(orient='records')
    else:
        dataset = read_zip_content(file)
    os.remove(file)
    dataset = dataset
    </DeepExtract>
    expected_dataset = [{**self.data1, 'label': [list(self.span1.to_tuple()), list(self.span2.to_tuple())], 'Comments': sorted([self.comment1.to_string(), self.comment2.to_string()])}, {**self.data2, 'label': [], 'Comments': []}]
    self.assertEqual(dataset, expected_dataset)
def test_unconfirmed_and_collaborative(self):
    self.project = prepare_project(DOCUMENT_CLASSIFICATION, collaborative_annotation=True)
    self.example1 = mommy.make('ExportedExample', project=self.project.item, text='example1')
    self.example2 = mommy.make('ExportedExample', project=self.project.item, text='example2')
    self.category1 = mommy.make('ExportedCategory', example=self.example1, user=self.project.admin)
    self.category2 = mommy.make('ExportedCategory', example=self.example1, user=self.project.annotator)
    self.comment1 = mommy.make('ExportedComment', example=self.example1, user=self.project.admin)
    self.comment2 = mommy.make('ExportedComment', example=self.example1, user=self.project.annotator)
    mommy.make('ExampleState', example=self.example1, confirmed_by=self.project.admin)
    self.data1 = self.data_to_text(self.example1)
    self.data2 = self.data_to_text(self.example2)
    file = export_dataset(self.project.id, 'JSONL', confirmed_only)
    if self.project.item.collaborative_annotation:
        dataset = pd.read_json(file, lines=True).to_dict(orient='records')
    else:
        dataset = read_zip_content(file)
    os.remove(file)
    dataset = dataset
    expected_dataset = [{**self.data1, 'label': [list(self.span1.to_tuple()), list(self.span2.to_tuple())], 'Comments': sorted([self.comment1.to_string(), self.comment2.to_string()])}, {**self.data2, 'label': [], 'Comments': []}]
    self.assertEqual(dataset, expected_dataset)
doccano
positive
def _batch_norm_without_layers(self, input_layer, decay, use_scale, epsilon):
    """Batch normalization on `input_layer` without tf.layers."""
    shape = input_layer.shape
    num_channels = shape[3] if self.data_format == 'NHWC' else shape[1]
    <DeepExtract>
    var = tf.get_variable('beta', [num_channels], tf.float32, *args, **kwargs)
    beta = tf.cast(var, tf.float32)
    </DeepExtract>
    if use_scale:
        <DeepExtract>
        var = tf.get_variable('gamma', [num_channels], tf.float32, *args, **kwargs)
        gamma = tf.cast(var, tf.float32)
        </DeepExtract>
    else:
        gamma = tf.constant(1.0, tf.float32, [num_channels])
    moving_mean = tf.get_variable('moving_mean', [num_channels], tf.float32, initializer=tf.zeros_initializer(), trainable=False)
    moving_variance = tf.get_variable('moving_variance', [num_channels], tf.float32, initializer=tf.ones_initializer(), trainable=False)
    if self.phase_train:
        (bn, batch_mean, batch_variance) = tf.nn.fused_batch_norm(input_layer, gamma, beta, epsilon=epsilon, data_format=self.data_format, is_training=True)
        mean_update = moving_averages.assign_moving_average(moving_mean, batch_mean, decay=decay, zero_debias=False)
        variance_update = moving_averages.assign_moving_average(moving_variance, batch_variance, decay=decay, zero_debias=False)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
    else:
        (bn, _, _) = tf.nn.fused_batch_norm(input_layer, gamma, beta, mean=moving_mean, variance=moving_variance, epsilon=epsilon, data_format=self.data_format, is_training=False)
    return bn
def _batch_norm_without_layers(self, input_layer, decay, use_scale, epsilon):
    """Batch normalization on `input_layer` without tf.layers."""
    shape = input_layer.shape
    num_channels = shape[3] if self.data_format == 'NHWC' else shape[1]
    var = tf.get_variable('beta', [num_channels], tf.float32, *args, **kwargs)
    beta = tf.cast(var, tf.float32)
    if use_scale:
        var = tf.get_variable('gamma', [num_channels], tf.float32, *args, **kwargs)
        gamma = tf.cast(var, tf.float32)
    else:
        gamma = tf.constant(1.0, tf.float32, [num_channels])
    moving_mean = tf.get_variable('moving_mean', [num_channels], tf.float32, initializer=tf.zeros_initializer(), trainable=False)
    moving_variance = tf.get_variable('moving_variance', [num_channels], tf.float32, initializer=tf.ones_initializer(), trainable=False)
    if self.phase_train:
        (bn, batch_mean, batch_variance) = tf.nn.fused_batch_norm(input_layer, gamma, beta, epsilon=epsilon, data_format=self.data_format, is_training=True)
        mean_update = moving_averages.assign_moving_average(moving_mean, batch_mean, decay=decay, zero_debias=False)
        variance_update = moving_averages.assign_moving_average(moving_variance, batch_variance, decay=decay, zero_debias=False)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
    else:
        (bn, _, _) = tf.nn.fused_batch_norm(input_layer, gamma, beta, mean=moving_mean, variance=moving_variance, epsilon=epsilon, data_format=self.data_format, is_training=False)
    return bn
deeplearning-benchmark
positive
def receiveMessage(self, currentTime, msg):
    super().receiveMessage(currentTime, msg)
    if self.state == 'AWAITING_SPREAD':
        if msg.body['msg'] == 'QUERY_SPREAD':
            if self.mkt_closed:
                return
            <DeepExtract>
            r_T = self.updateEstimates()
            (bid, bid_vol, ask, ask_vol) = self.getKnownBidAsk(self.symbol)
            if bid and ask:
                mid = int((ask + bid) / 2)
                spread = abs(ask - bid)
                if np.random.rand() < self.percent_aggr:
                    adjust_int = 0
                else:
                    adjust_int = np.random.randint(0, self.depth_spread * spread)
                if r_T < mid:
                    buy = False
                    p = bid + adjust_int
                elif r_T >= mid:
                    buy = True
                    p = ask - adjust_int
            else:
                buy = np.random.randint(0, 1 + 1)
                p = r_T
            self.placeLimitOrder(self.symbol, self.size, buy, p)
            </DeepExtract>
            self.state = 'AWAITING_WAKEUP'
def receiveMessage(self, currentTime, msg):
    super().receiveMessage(currentTime, msg)
    if self.state == 'AWAITING_SPREAD':
        if msg.body['msg'] == 'QUERY_SPREAD':
            if self.mkt_closed:
                return
            r_T = self.updateEstimates()
            (bid, bid_vol, ask, ask_vol) = self.getKnownBidAsk(self.symbol)
            if bid and ask:
                mid = int((ask + bid) / 2)
                spread = abs(ask - bid)
                if np.random.rand() < self.percent_aggr:
                    adjust_int = 0
                else:
                    adjust_int = np.random.randint(0, self.depth_spread * spread)
                if r_T < mid:
                    buy = False
                    p = bid + adjust_int
                elif r_T >= mid:
                    buy = True
                    p = ask - adjust_int
            else:
                buy = np.random.randint(0, 1 + 1)
                p = r_T
            self.placeLimitOrder(self.symbol, self.size, buy, p)
            self.state = 'AWAITING_WAKEUP'
abides
positive
def __config_gpu_for_caffe():
    import os
    <DeepExtract>
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--gpu_core_id', default='-1', type=int)
    args = parser.parse_args()
    gpu_core_id = args.gpu_core_id
    gpu_core_id = gpu_core_id
    </DeepExtract>
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_core_id)
    const.GPU_CORE_ID = gpu_core_id
def __config_gpu_for_caffe():
    import os
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--gpu_core_id', default='-1', type=int)
    args = parser.parse_args()
    gpu_core_id = args.gpu_core_id
    gpu_core_id = gpu_core_id
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_core_id)
    const.GPU_CORE_ID = gpu_core_id
deep-smoke-machine
positive
def _get_ground_truth(self, locations, gt_instances): num_loc_list = [len(loc) for loc in locations] loc_to_size_range = [] for (l, loc_per_level) in enumerate(locations): loc_to_size_range_per_level = loc_per_level.new_tensor(self.sizes_of_interest[l]) loc_to_size_range.append(loc_to_size_range_per_level[None].expand(num_loc_list[l], -1)) loc_to_size_range = torch.cat(loc_to_size_range, dim=0) locations = torch.cat(locations, dim=0) <DeepExtract> labels = [] reg_targets = [] target_inds = [] (xs, ys) = (locations[:, 0], locations[:, 1]) num_targets = 0 for im_i in range(len(gt_instances)): targets_per_im = gt_instances[im_i] bboxes = targets_per_im.gt_boxes.tensor labels_per_im = targets_per_im.gt_classes if bboxes.numel() == 0: labels.append(labels_per_im.new_zeros(locations.size(0)) + self.num_classes) reg_targets.append(locations.new_zeros((locations.size(0), 4))) target_inds.append(labels_per_im.new_zeros(locations.size(0)) - 1) continue area = targets_per_im.gt_boxes.area() l = xs[:, None] - bboxes[:, 0][None] t = ys[:, None] - bboxes[:, 1][None] r = bboxes[:, 2][None] - xs[:, None] b = bboxes[:, 3][None] - ys[:, None] reg_targets_per_im = torch.stack([l, t, r, b], dim=2) if self.center_sample: if targets_per_im.has('gt_bitmasks_full'): bitmasks = targets_per_im.gt_bitmasks_full else: bitmasks = None is_in_boxes = self.get_sample_region(bboxes, self.strides, num_loc_list, xs, ys, bitmasks=bitmasks, radius=self.radius) else: is_in_boxes = reg_targets_per_im.min(dim=2)[0] > 0 max_reg_targets_per_im = reg_targets_per_im.max(dim=2)[0] is_cared_in_the_level = (max_reg_targets_per_im >= loc_to_size_range[:, [0]]) & (max_reg_targets_per_im <= loc_to_size_range[:, [1]]) locations_to_gt_area = area[None].repeat(len(locations), 1) locations_to_gt_area[is_in_boxes == 0] = INF locations_to_gt_area[is_cared_in_the_level == 0] = INF (locations_to_min_area, locations_to_gt_inds) = locations_to_gt_area.min(dim=1) reg_targets_per_im = reg_targets_per_im[range(len(locations)), locations_to_gt_inds] target_inds_per_im = locations_to_gt_inds + num_targets num_targets += len(targets_per_im) labels_per_im = labels_per_im[locations_to_gt_inds] labels_per_im[locations_to_min_area == INF] = self.num_classes labels.append(labels_per_im) reg_targets.append(reg_targets_per_im) target_inds.append(target_inds_per_im) training_targets = {'labels': labels, 'reg_targets': reg_targets, 'target_inds': target_inds} </DeepExtract> training_targets['locations'] = [locations.clone() for _ in range(len(gt_instances))] training_targets['im_inds'] = [locations.new_ones(locations.size(0), dtype=torch.long) * i for i in range(len(gt_instances))] training_targets = {k: self._transpose(v, num_loc_list) for (k, v) in training_targets.items()} training_targets['fpn_levels'] = [loc.new_ones(len(loc), dtype=torch.long) * level for (level, loc) in enumerate(training_targets['locations'])] reg_targets = training_targets['reg_targets'] for l in range(len(reg_targets)): reg_targets[l] = reg_targets[l] / float(self.strides[l]) return training_targets
def _get_ground_truth(self, locations, gt_instances): num_loc_list = [len(loc) for loc in locations] loc_to_size_range = [] for (l, loc_per_level) in enumerate(locations): loc_to_size_range_per_level = loc_per_level.new_tensor(self.sizes_of_interest[l]) loc_to_size_range.append(loc_to_size_range_per_level[None].expand(num_loc_list[l], -1)) loc_to_size_range = torch.cat(loc_to_size_range, dim=0) locations = torch.cat(locations, dim=0) labels = [] reg_targets = [] target_inds = [] (xs, ys) = (locations[:, 0], locations[:, 1]) num_targets = 0 for im_i in range(len(gt_instances)): targets_per_im = gt_instances[im_i] bboxes = targets_per_im.gt_boxes.tensor labels_per_im = targets_per_im.gt_classes if bboxes.numel() == 0: labels.append(labels_per_im.new_zeros(locations.size(0)) + self.num_classes) reg_targets.append(locations.new_zeros((locations.size(0), 4))) target_inds.append(labels_per_im.new_zeros(locations.size(0)) - 1) continue area = targets_per_im.gt_boxes.area() l = xs[:, None] - bboxes[:, 0][None] t = ys[:, None] - bboxes[:, 1][None] r = bboxes[:, 2][None] - xs[:, None] b = bboxes[:, 3][None] - ys[:, None] reg_targets_per_im = torch.stack([l, t, r, b], dim=2) if self.center_sample: if targets_per_im.has('gt_bitmasks_full'): bitmasks = targets_per_im.gt_bitmasks_full else: bitmasks = None is_in_boxes = self.get_sample_region(bboxes, self.strides, num_loc_list, xs, ys, bitmasks=bitmasks, radius=self.radius) else: is_in_boxes = reg_targets_per_im.min(dim=2)[0] > 0 max_reg_targets_per_im = reg_targets_per_im.max(dim=2)[0] is_cared_in_the_level = (max_reg_targets_per_im >= loc_to_size_range[:, [0]]) & (max_reg_targets_per_im <= loc_to_size_range[:, [1]]) locations_to_gt_area = area[None].repeat(len(locations), 1) locations_to_gt_area[is_in_boxes == 0] = INF locations_to_gt_area[is_cared_in_the_level == 0] = INF (locations_to_min_area, locations_to_gt_inds) = locations_to_gt_area.min(dim=1) reg_targets_per_im = reg_targets_per_im[range(len(locations)), locations_to_gt_inds] target_inds_per_im = locations_to_gt_inds + num_targets num_targets += len(targets_per_im) labels_per_im = labels_per_im[locations_to_gt_inds] labels_per_im[locations_to_min_area == INF] = self.num_classes labels.append(labels_per_im) reg_targets.append(reg_targets_per_im) target_inds.append(target_inds_per_im) training_targets = {'labels': labels, 'reg_targets': reg_targets, 'target_inds': target_inds} training_targets['locations'] = [locations.clone() for _ in range(len(gt_instances))] training_targets['im_inds'] = [locations.new_ones(locations.size(0), dtype=torch.long) * i for i in range(len(gt_instances))] training_targets = {k: self._transpose(v, num_loc_list) for (k, v) in training_targets.items()} training_targets['fpn_levels'] = [loc.new_ones(len(loc), dtype=torch.long) * level for (level, loc) in enumerate(training_targets['locations'])] reg_targets = training_targets['reg_targets'] for l in range(len(reg_targets)): reg_targets[l] = reg_targets[l] / float(self.strides[l]) return training_targets
AdelaiDet
positive
@pytest.mark.xfail(reason='Need to implement rank/lead/lag window functions, see https://github.com/dask-contrib/dask-sql/issues/878') def test_window_lead_lag_partition_by(): <DeepExtract> np.random.seed(0) data = {} for (k, v) in kwargs.items(): if not isinstance(v, tuple): v = (v, 0.0) (dt, null_ct) = (v[0], v[1]) if dt is int: s = np.random.randint(10, size=100) elif dt is bool: s = np.where(np.random.randint(2, size=100), True, False) elif dt is float: s = np.random.rand(100) elif dt is str: r = [f'ssssss{x}' for x in range(10)] (str, 50) = np.random.randint(10, size=100) s = np.array([r[x] for x in (str, 50)]) elif dt is pd.StringDtype: r = [f'ssssss{x}' for x in range(10)] (str, 50) = np.random.randint(10, size=100) s = np.array([r[x] for x in (str, 50)]) s = pd.array(s, dtype='string') elif dt is datetime: rt = [datetime(2020, 1, 1) + timedelta(days=x) for x in range(10)] (str, 50) = np.random.randint(10, size=100) s = np.array([rt[x] for x in (str, 50)]) else: raise NotImplementedError ps = pd.Series(s) if null_ct > 0: idx = np.random.choice(100, null_ct, replace=False).tolist() ps[idx] = None data[k] = ps float = pd.DataFrame(data) </DeepExtract> <DeepExtract> c = Context() engine = sqlite3.connect(':memory:') for (name, df) in dfs.items(): c.create_table(name, df) df.to_sql(name, engine, index=False) dask_result = c.sql('\n SELECT\n LEAD(b,1,10) OVER (PARTITION BY c ORDER BY a) AS a3,\n LEAD(b,1) OVER (PARTITION BY c ORDER BY b, a ASC NULLS LAST) AS a5,\n\n LAG(b,1) OVER (PARTITION BY c ORDER BY a) AS b3,\n LAG(b,1) OVER (PARTITION BY c ORDER BY b, a ASC NULLS LAST) AS b5\n FROM a\n ').reset_index(drop=True) sqlite_result = pd.read_sql('\n SELECT\n LEAD(b,1,10) OVER (PARTITION BY c ORDER BY a) AS a3,\n LEAD(b,1) OVER (PARTITION BY c ORDER BY b, a ASC NULLS LAST) AS a5,\n\n LAG(b,1) OVER (PARTITION BY c ORDER BY a) AS b3,\n LAG(b,1) OVER (PARTITION BY c ORDER BY b, a ASC NULLS LAST) AS b5\n FROM a\n ', engine).reset_index(drop=True) dask_result = cast_datetime_to_string(dask_result) dask_result = dask_result.fillna(np.NaN) sqlite_result = sqlite_result.fillna(np.NaN) assert_eq(dask_result, sqlite_result, check_dtype=False, check_index=check_index) </DeepExtract>
@pytest.mark.xfail(reason='Need to implement rank/lead/lag window functions, see https://github.com/dask-contrib/dask-sql/issues/878') def test_window_lead_lag_partition_by(): np.random.seed(0) data = {} for (k, v) in kwargs.items(): if not isinstance(v, tuple): v = (v, 0.0) (dt, null_ct) = (v[0], v[1]) if dt is int: s = np.random.randint(10, size=100) elif dt is bool: s = np.where(np.random.randint(2, size=100), True, False) elif dt is float: s = np.random.rand(100) elif dt is str: r = [f'ssssss{x}' for x in range(10)] (str, 50) = np.random.randint(10, size=100) s = np.array([r[x] for x in (str, 50)]) elif dt is pd.StringDtype: r = [f'ssssss{x}' for x in range(10)] (str, 50) = np.random.randint(10, size=100) s = np.array([r[x] for x in (str, 50)]) s = pd.array(s, dtype='string') elif dt is datetime: rt = [datetime(2020, 1, 1) + timedelta(days=x) for x in range(10)] (str, 50) = np.random.randint(10, size=100) s = np.array([rt[x] for x in (str, 50)]) else: raise NotImplementedError ps = pd.Series(s) if null_ct > 0: idx = np.random.choice(100, null_ct, replace=False).tolist() ps[idx] = None data[k] = ps float = pd.DataFrame(data) c = Context() engine = sqlite3.connect(':memory:') for (name, df) in dfs.items(): c.create_table(name, df) df.to_sql(name, engine, index=False) dask_result = c.sql('\n SELECT\n LEAD(b,1,10) OVER (PARTITION BY c ORDER BY a) AS a3,\n LEAD(b,1) OVER (PARTITION BY c ORDER BY b, a ASC NULLS LAST) AS a5,\n\n LAG(b,1) OVER (PARTITION BY c ORDER BY a) AS b3,\n LAG(b,1) OVER (PARTITION BY c ORDER BY b, a ASC NULLS LAST) AS b5\n FROM a\n ').reset_index(drop=True) sqlite_result = pd.read_sql('\n SELECT\n LEAD(b,1,10) OVER (PARTITION BY c ORDER BY a) AS a3,\n LEAD(b,1) OVER (PARTITION BY c ORDER BY b, a ASC NULLS LAST) AS a5,\n\n LAG(b,1) OVER (PARTITION BY c ORDER BY a) AS b3,\n LAG(b,1) OVER (PARTITION BY c ORDER BY b, a ASC NULLS LAST) AS b5\n FROM a\n ', engine).reset_index(drop=True) dask_result = cast_datetime_to_string(dask_result) dask_result = dask_result.fillna(np.NaN) sqlite_result = sqlite_result.fillna(np.NaN) assert_eq(dask_result, sqlite_result, check_dtype=False, check_index=check_index)
dask-sql
positive
@patch('backend.lambdas.jobs.handlers.table') def test_it_lists_jobs(self, table): <DeepExtract> stub = {'Id': job_id, 'Sk': job_id, 'CreatedAt': created_at, 'Type': 'Job', 'JobStatus': 'RUNNING', **kwargs} </DeepExtract> table.query.return_value = {'Items': [stub]} response = handlers.list_jobs_handler({'queryStringParameters': None}, SimpleNamespace()) resp_body = json.loads(response['body']) assert 200 == response['statusCode'] assert 1 == len(resp_body['Jobs']) assert stub == resp_body['Jobs'][0]
@patch('backend.lambdas.jobs.handlers.table') def test_it_lists_jobs(self, table): stub = {'Id': job_id, 'Sk': job_id, 'CreatedAt': created_at, 'Type': 'Job', 'JobStatus': 'RUNNING', **kwargs} table.query.return_value = {'Items': [stub]} response = handlers.list_jobs_handler({'queryStringParameters': None}, SimpleNamespace()) resp_body = json.loads(response['body']) assert 200 == response['statusCode'] assert 1 == len(resp_body['Jobs']) assert stub == resp_body['Jobs'][0]
amazon-s3-find-and-forget
positive
def viterbi_decode(acts): """Adapted from original CREPE viterbi decdoding, but in TF.""" num_steps = acts.shape[1] <DeepExtract> initial_distribution = tfp.distributions.Categorical(probs=tf.ones([360]) / 360) bins = tf.range(360, dtype=tf.float32) (xx, yy) = tf.meshgrid(bins, bins) min_transition = 1e-05 transition = tf.maximum(12 - abs(xx - yy), min_transition) transition = transition / tf.reduce_sum(transition, axis=1)[:, None] transition = tf.cast(transition, tf.float32) transition_distribution = tfp.distributions.Categorical(probs=transition) self_emission = 0.1 emission = tf.eye(360) * self_emission + tf.ones(shape=(360, 360)) * ((1 - self_emission) / 360.0) emission = tf.cast(emission, tf.float32)[None, ...] observation_distribution = tfp.distributions.Multinomial(total_count=1, probs=emission) hmm = tfp.distributions.HiddenMarkovModel(initial_distribution=initial_distribution, transition_distribution=transition_distribution, observation_distribution=observation_distribution, num_steps=num_steps) </DeepExtract> centers = hmm.posterior_mode(acts) return centers
def viterbi_decode(acts): """Adapted from original CREPE viterbi decdoding, but in TF.""" num_steps = acts.shape[1] initial_distribution = tfp.distributions.Categorical(probs=tf.ones([360]) / 360) bins = tf.range(360, dtype=tf.float32) (xx, yy) = tf.meshgrid(bins, bins) min_transition = 1e-05 transition = tf.maximum(12 - abs(xx - yy), min_transition) transition = transition / tf.reduce_sum(transition, axis=1)[:, None] transition = tf.cast(transition, tf.float32) transition_distribution = tfp.distributions.Categorical(probs=transition) self_emission = 0.1 emission = tf.eye(360) * self_emission + tf.ones(shape=(360, 360)) * ((1 - self_emission) / 360.0) emission = tf.cast(emission, tf.float32)[None, ...] observation_distribution = tfp.distributions.Multinomial(total_count=1, probs=emission) hmm = tfp.distributions.HiddenMarkovModel(initial_distribution=initial_distribution, transition_distribution=transition_distribution, observation_distribution=observation_distribution, num_steps=num_steps) centers = hmm.posterior_mode(acts) return centers
ddsp
positive
@override_settings(**middleware_override_settings) def test_middleware_saves_user(self): <DeepExtract> user = user or self.user if superuser is not None: user.is_superuser = True if superuser is None else superuser user.is_active = True user.save() self.client.force_login(user) </DeepExtract> self.client.post(reverse('admin:tests_book_add'), data={'isbn': '9780147_513731'}) book = Book.objects.get() historical_book = book.history.all()[0] self.assertEqual(historical_book.history_user, self.user, 'Middleware should make the request available to retrieve history_user.')
@override_settings(**middleware_override_settings) def test_middleware_saves_user(self): user = user or self.user if superuser is not None: user.is_superuser = True if superuser is None else superuser user.is_active = True user.save() self.client.force_login(user) self.client.post(reverse('admin:tests_book_add'), data={'isbn': '9780147_513731'}) book = Book.objects.get() historical_book = book.history.all()[0] self.assertEqual(historical_book.history_user, self.user, 'Middleware should make the request available to retrieve history_user.')
django-simple-history
positive
def xception_base(input_image, is_training=True): <DeepExtract> feature_dict = {} bn_axis = -1 if 'channels_last' == 'channels_last' else 1 inputs = tf.layers.conv2d(input_image, 32, (3, 3), use_bias=False, name='block1_conv1', strides=(2, 2), padding='valid', data_format='channels_last', activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = tf.nn.relu(inputs, name='block1_conv1_act') inputs = tf.layers.conv2d(inputs, 64, (3, 3), use_bias=False, name='block1_conv2', strides=(1, 1), padding='valid', data_format='channels_last', activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = tf.nn.relu(inputs, name='block1_conv2_act') residual = tf.layers.conv2d(inputs, 128, (1, 1), use_bias=False, name='conv2d_1', strides=(2, 2), padding='same', data_format='channels_last', activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = tf.layers.separable_conv2d(inputs, 128, (3, 3), strides=(1, 1), padding='same', data_format='channels_last', activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block2_sepconv1', reuse=None) inputs = relu_separable_bn_block(inputs, 128, 'block2_sepconv2', is_training, 'channels_last', False) inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format='channels_last', name='block2_pool') feature_dict['C2'] = inputs inputs = tf.add(inputs, residual, name='residual_add_0') residual = tf.layers.conv2d(inputs, 256, (1, 1), use_bias=False, name='conv2d_2', strides=(2, 2), padding='same', data_format='channels_last', activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = relu_separable_bn_block(inputs, 256, 'block3_sepconv1', is_training, 'channels_last', False) inputs = relu_separable_bn_block(inputs, 256, 'block3_sepconv2', is_training, 'channels_last', False) inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format='channels_last', name='block3_pool') inputs = tf.add(inputs, residual, name='residual_add_1') feature_dict['C3'] = inputs residual = tf.layers.conv2d(inputs, 728, (1, 1), use_bias=False, name='conv2d_3', strides=(2, 2), padding='same', data_format='channels_last', activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = relu_separable_bn_block(inputs, 728, 'block4_sepconv1', is_training, 'channels_last', False) inputs = relu_separable_bn_block(inputs, 728, 'block4_sepconv2', is_training, 'channels_last', False) inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format='channels_last', name='block4_pool') inputs = tf.add(inputs, residual, name='residual_add_2') feature_dict['C4'] = inputs for index in range(8): residual = inputs prefix = 'block' + str(index + 5) inputs = relu_separable_bn_block(inputs, 728, prefix + '_sepconv1', is_training, 'channels_last', False) inputs = relu_separable_bn_block(inputs, 728, prefix + '_sepconv2', is_training, 'channels_last', False) inputs = relu_separable_bn_block(inputs, 728, prefix + '_sepconv3', is_training, 'channels_last', False) inputs = tf.add(inputs, residual, name=prefix + '_residual_add') 
residual = tf.layers.conv2d(inputs, 1024, (1, 1), use_bias=False, name='conv2d_4', strides=(2, 2), padding='same', data_format='channels_last', activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = relu_separable_bn_block(inputs, 728, 'block13_sepconv1', is_training, 'channels_last', False) inputs = relu_separable_bn_block(inputs, 1024, 'block13_sepconv2', is_training, 'channels_last', False) inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format='channels_last', name='block13_pool') inputs = tf.add(inputs, residual, name='residual_add_3') feature_dict['C5'] = inputs feature_dict = feature_dict </DeepExtract> pyramid_dict = {} with tf.variable_scope('build_pyramid'): with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY), activation_fn=None, normalizer_fn=None): P5 = slim.conv2d(feature_dict['C5'], num_outputs=256, kernel_size=[1, 1], stride=1, scope='build_P5') pyramid_dict['P5'] = P5 for level in range(4, 2, -1): <DeepExtract> with tf.variable_scope('build_P%d' % level): level_name = 'build_P%d' % level.split('_')[1] (h, w) = (tf.shape(feature_dict['C%d' % level])[1], tf.shape(feature_dict['C%d' % level])[2]) upsample_p = tf.image.resize_bilinear(pyramid_dict['P%d' % (level + 1)], size=[h, w], name='up_sample_' + level_name) reduce_dim_c = slim.conv2d(feature_dict['C%d' % level], num_outputs=256, kernel_size=[1, 1], stride=1, scope='reduce_dim_' + level_name) add_f = 0.5 * upsample_p + 0.5 * reduce_dim_c pyramid_dict['P%d' % level] = add_f </DeepExtract> for level in range(5, 2, -1): pyramid_dict['P%d' % level] = slim.conv2d(pyramid_dict['P%d' % level], num_outputs=256, kernel_size=[3, 3], padding='SAME', stride=1, scope='fuse_P%d' % level) p6 = slim.conv2d(pyramid_dict['P5'] if cfgs.USE_P5 else feature_dict['C5'], num_outputs=256, kernel_size=[3, 3], padding='SAME', stride=2, scope='p6_conv') pyramid_dict['P6'] = p6 p7 = tf.nn.relu(p6, name='p6_relu') p7 = slim.conv2d(p7, num_outputs=256, kernel_size=[3, 3], padding='SAME', stride=2, scope='p7_conv') pyramid_dict['P7'] = p7 return pyramid_dict
def xception_base(input_image, is_training=True): feature_dict = {} bn_axis = -1 if 'channels_last' == 'channels_last' else 1 inputs = tf.layers.conv2d(input_image, 32, (3, 3), use_bias=False, name='block1_conv1', strides=(2, 2), padding='valid', data_format='channels_last', activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = tf.nn.relu(inputs, name='block1_conv1_act') inputs = tf.layers.conv2d(inputs, 64, (3, 3), use_bias=False, name='block1_conv2', strides=(1, 1), padding='valid', data_format='channels_last', activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = tf.nn.relu(inputs, name='block1_conv2_act') residual = tf.layers.conv2d(inputs, 128, (1, 1), use_bias=False, name='conv2d_1', strides=(2, 2), padding='same', data_format='channels_last', activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = tf.layers.separable_conv2d(inputs, 128, (3, 3), strides=(1, 1), padding='same', data_format='channels_last', activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block2_sepconv1', reuse=None) inputs = relu_separable_bn_block(inputs, 128, 'block2_sepconv2', is_training, 'channels_last', False) inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format='channels_last', name='block2_pool') feature_dict['C2'] = inputs inputs = tf.add(inputs, residual, name='residual_add_0') residual = tf.layers.conv2d(inputs, 256, (1, 1), use_bias=False, name='conv2d_2', strides=(2, 2), padding='same', data_format='channels_last', activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = relu_separable_bn_block(inputs, 256, 'block3_sepconv1', is_training, 'channels_last', False) inputs = relu_separable_bn_block(inputs, 256, 'block3_sepconv2', is_training, 'channels_last', False) inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format='channels_last', name='block3_pool') inputs = tf.add(inputs, residual, name='residual_add_1') feature_dict['C3'] = inputs residual = tf.layers.conv2d(inputs, 728, (1, 1), use_bias=False, name='conv2d_3', strides=(2, 2), padding='same', data_format='channels_last', activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = relu_separable_bn_block(inputs, 728, 'block4_sepconv1', is_training, 'channels_last', False) inputs = relu_separable_bn_block(inputs, 728, 'block4_sepconv2', is_training, 'channels_last', False) inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format='channels_last', name='block4_pool') inputs = tf.add(inputs, residual, name='residual_add_2') feature_dict['C4'] = inputs for index in range(8): residual = inputs prefix = 'block' + str(index + 5) inputs = relu_separable_bn_block(inputs, 728, prefix + '_sepconv1', is_training, 'channels_last', False) inputs = relu_separable_bn_block(inputs, 728, prefix + '_sepconv2', is_training, 'channels_last', False) inputs = relu_separable_bn_block(inputs, 728, prefix + '_sepconv3', is_training, 'channels_last', False) inputs = tf.add(inputs, residual, name=prefix + '_residual_add') residual = 
tf.layers.conv2d(inputs, 1024, (1, 1), use_bias=False, name='conv2d_4', strides=(2, 2), padding='same', data_format='channels_last', activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = relu_separable_bn_block(inputs, 728, 'block13_sepconv1', is_training, 'channels_last', False) inputs = relu_separable_bn_block(inputs, 1024, 'block13_sepconv2', is_training, 'channels_last', False) inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format='channels_last', name='block13_pool') inputs = tf.add(inputs, residual, name='residual_add_3') feature_dict['C5'] = inputs feature_dict = feature_dict pyramid_dict = {} with tf.variable_scope('build_pyramid'): with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY), activation_fn=None, normalizer_fn=None): P5 = slim.conv2d(feature_dict['C5'], num_outputs=256, kernel_size=[1, 1], stride=1, scope='build_P5') pyramid_dict['P5'] = P5 for level in range(4, 2, -1): with tf.variable_scope('build_P%d' % level): level_name = 'build_P%d' % level.split('_')[1] (h, w) = (tf.shape(feature_dict['C%d' % level])[1], tf.shape(feature_dict['C%d' % level])[2]) upsample_p = tf.image.resize_bilinear(pyramid_dict['P%d' % (level + 1)], size=[h, w], name='up_sample_' + level_name) reduce_dim_c = slim.conv2d(feature_dict['C%d' % level], num_outputs=256, kernel_size=[1, 1], stride=1, scope='reduce_dim_' + level_name) add_f = 0.5 * upsample_p + 0.5 * reduce_dim_c pyramid_dict['P%d' % level] = add_f for level in range(5, 2, -1): pyramid_dict['P%d' % level] = slim.conv2d(pyramid_dict['P%d' % level], num_outputs=256, kernel_size=[3, 3], padding='SAME', stride=1, scope='fuse_P%d' % level) p6 = slim.conv2d(pyramid_dict['P5'] if cfgs.USE_P5 else feature_dict['C5'], num_outputs=256, kernel_size=[3, 3], padding='SAME', stride=2, scope='p6_conv') pyramid_dict['P6'] = p6 p7 = tf.nn.relu(p6, name='p6_relu') p7 = slim.conv2d(p7, num_outputs=256, kernel_size=[3, 3], padding='SAME', stride=2, scope='p7_conv') pyramid_dict['P7'] = p7 return pyramid_dict
CSL_RetinaNet_Tensorflow
positive
def obtain_original_filename(def_stub, pe=None, file_data=None, use_arch=False, ext='.bin'): """ Attempt to obtain the original filename, either from the export directory or the pe.FileInfo, of the input file. If the filename cannot be recovered from either of those locations, append the applicable architecture string and file extension to the default stub and return that value. If no pefile.PE object is provided or can be created from the provided file data, return the default stub appended with ".bin". :param def_stub: Default filename stub, sans extension, to utilize if the filename cannot be recovered. :param pe: pefile.PE object :param file_data: Input file data :param use_arch: Flag indicating if the file architecture should be included in the name, False by default. :param ext: Extension to default to if it could not be determined. (defaults to ".bin") :return: The recovered filename from the pe metadata or a generated filename using def_stub. """ if file_data: <DeepExtract> if not file_data: pe = None try: pe = pefile.PE(data=file_data) except pefile.PEFormatError as e: logger.debug('A pefile.PE object on the file data could not be created: {}'.format(e)) pe = None </DeepExtract> if pe: <DeepExtract> if file_data: pe = obtain_pe(file_data) if pe: if pe.is_driver(): ext = '.sys' elif pe.is_exe(): ext = '.exe' elif pe.is_dll(): ext = '.dll' else: ext = '.bin' else: ext = None </DeepExtract> <DeepExtract> if file_data: pe = obtain_pe(file_data) if pe: is64 = is_64bit(pe=pe) if is64: if False: arch = '64-bit' else: arch = 'x64' elif is64 is False: if False: arch = '32-bit' else: arch = 'x86' else: arch = 'Undetermined' else: arch = None </DeepExtract> filename = _obtain_exportdir_fname(pe) or _obtain_exif_fname(pe) if isinstance(filename, bytes): filename = filename.decode('ascii', 'backslashreplace') if filename: filename = Path(filename) if use_arch: (base, ext) = (filename.stem, filename.suffix) filename = base + '_' + arch + ext return str(filename) else: return def_stub + '_' + arch + ext else: return def_stub + ext
def obtain_original_filename(def_stub, pe=None, file_data=None, use_arch=False, ext='.bin'): """ Attempt to obtain the original filename, either from the export directory or the pe.FileInfo, of the input file. If the filename cannot be recovered from either of those locations, append the applicable architecture string and file extension to the default stub and return that value. If no pefile.PE object is provided or can be created from the provided file data, return the default stub appended with ".bin". :param def_stub: Default filename stub, sans extension, to utilize if the filename cannot be recovered. :param pe: pefile.PE object :param file_data: Input file data :param use_arch: Flag indicating if the file architecture should be included in the name, False by default. :param ext: Extension to default to if it could not be determined. (defaults to ".bin") :return: The recovered filename from the pe metadata or a generated filename using def_stub. """ if file_data: if not file_data: pe = None try: pe = pefile.PE(data=file_data) except pefile.PEFormatError as e: logger.debug('A pefile.PE object on the file data could not be created: {}'.format(e)) pe = None if pe: if file_data: pe = obtain_pe(file_data) if pe: if pe.is_driver(): ext = '.sys' elif pe.is_exe(): ext = '.exe' elif pe.is_dll(): ext = '.dll' else: ext = '.bin' else: ext = None if file_data: pe = obtain_pe(file_data) if pe: is64 = is_64bit(pe=pe) if is64: if False: arch = '64-bit' else: arch = 'x64' elif is64 is False: if False: arch = '32-bit' else: arch = 'x86' else: arch = 'Undetermined' else: arch = None filename = _obtain_exportdir_fname(pe) or _obtain_exif_fname(pe) if isinstance(filename, bytes): filename = filename.decode('ascii', 'backslashreplace') if filename: filename = Path(filename) if use_arch: (base, ext) = (filename.stem, filename.suffix) filename = base + '_' + arch + ext return str(filename) else: return def_stub + '_' + arch + ext else: return def_stub + ext
DC3-MWCP
positive
def testElseClauseNotOnSameLineAsElse(self): <DeepExtract> self.assertEquals('Else clause should never be on same line as else (use 2 lines) [whitespace/newline] [4]', self.PerformSingleLineLint(' else DoSomethingElse();')) </DeepExtract> <DeepExtract> self.assertEquals('Else clause should never be on same line as else (use 2 lines) [whitespace/newline] [4]', self.PerformSingleLineLint(' else ifDoSomethingElse();')) </DeepExtract> <DeepExtract> self.assertEquals('', self.PerformSingleLineLint(' } else if (blah) {')) </DeepExtract> <DeepExtract> self.assertEquals('', self.PerformSingleLineLint(' variable_ends_in_else = true;')) </DeepExtract>
def testElseClauseNotOnSameLineAsElse(self): self.assertEquals('Else clause should never be on same line as else (use 2 lines) [whitespace/newline] [4]', self.PerformSingleLineLint(' else DoSomethingElse();')) self.assertEquals('Else clause should never be on same line as else (use 2 lines) [whitespace/newline] [4]', self.PerformSingleLineLint(' else ifDoSomethingElse();')) self.assertEquals('', self.PerformSingleLineLint(' } else if (blah) {')) self.assertEquals('', self.PerformSingleLineLint(' variable_ends_in_else = true;'))
cpplint
positive
def __init__(self, group): self.globals = Globals() mgeo = gtk.gdk.screen_get_default().get_monitor_geometry(group.get_monitor()) button_window = group.button.window if button_window: (wx, wy) = button_window.get_origin() else: (wx, wy) = (0, 0) if group.dockbar_r().orient in ('left', 'right') or wy < mgeo.height / 2: GroupPopup.__init__(self, group, False, type_='locked_list') self.point('down', 20) else: GroupPopup.__init__(self, group, type_='locked_list') child = group.popup.alignment.get_child() if child: group.popup.alignment.remove(child) <DeepExtract> old_child = self.alignment.get_child() if old_child: self.alignment.remove(old_child) self.alignment.add(group.window_list) if self.popup_showing: group.window_list.show_all() self.resize(10, 10) </DeepExtract> group.window_list.apply_mini_mode() <DeepExtract> group = self.group_r() for window in group: if self.globals.settings['show_only_current_desktop'] and (not window.is_on_current_desktop()) or (self.globals.settings['show_only_current_monitor'] and group.get_monitor() != window.get_monitor()): window.item.hide() else: window.item.show() gtk.VBox.show_all(self) </DeepExtract> if not group.get_windows(): <DeepExtract> if self.globals.gtkmenu_showing: return group = self.group_r() if self.window: self.set_previews(None) CairoPopup.hide(self) self.popup_showing = False if self.show_sid is not None: gobject.source_remove(self.show_sid) self.show_sid = None self.cancel_hide_request() shown_popup = self.globals.get_shown_popup() locked_popup = self.globals.get_locked_popup() if locked_popup and locked_popup.group_r().get_windows() and (shown_popup is None or shown_popup is self): locked_popup.show() if shown_popup is self: self.globals.set_shown_popup(None) if group.menu is not None: group.menu.delete_menu() group.menu = None if not group.locked_popup: self.set_child_(group.window_list) group.menu_is_shown = False </DeepExtract> else: gobject.idle_add(self.__on_realized) self.overlap_sid = self.globals.connect('locked-list-overlap-changed', self.__set_own_strut)
def __init__(self, group): self.globals = Globals() mgeo = gtk.gdk.screen_get_default().get_monitor_geometry(group.get_monitor()) button_window = group.button.window if button_window: (wx, wy) = button_window.get_origin() else: (wx, wy) = (0, 0) if group.dockbar_r().orient in ('left', 'right') or wy < mgeo.height / 2: GroupPopup.__init__(self, group, False, type_='locked_list') self.point('down', 20) else: GroupPopup.__init__(self, group, type_='locked_list') child = group.popup.alignment.get_child() if child: group.popup.alignment.remove(child) old_child = self.alignment.get_child() if old_child: self.alignment.remove(old_child) self.alignment.add(group.window_list) if self.popup_showing: group.window_list.show_all() self.resize(10, 10) group.window_list.apply_mini_mode() group = self.group_r() for window in group: if self.globals.settings['show_only_current_desktop'] and (not window.is_on_current_desktop()) or (self.globals.settings['show_only_current_monitor'] and group.get_monitor() != window.get_monitor()): window.item.hide() else: window.item.show() gtk.VBox.show_all(self) if not group.get_windows(): if self.globals.gtkmenu_showing: return group = self.group_r() if self.window: self.set_previews(None) CairoPopup.hide(self) self.popup_showing = False if self.show_sid is not None: gobject.source_remove(self.show_sid) self.show_sid = None self.cancel_hide_request() shown_popup = self.globals.get_shown_popup() locked_popup = self.globals.get_locked_popup() if locked_popup and locked_popup.group_r().get_windows() and (shown_popup is None or shown_popup is self): locked_popup.show() if shown_popup is self: self.globals.set_shown_popup(None) if group.menu is not None: group.menu.delete_menu() group.menu = None if not group.locked_popup: self.set_child_(group.window_list) group.menu_is_shown = False else: gobject.idle_add(self.__on_realized) self.overlap_sid = self.globals.connect('locked-list-overlap-changed', self.__set_own_strut)
dockbarx
positive
def notify(settings: Settings, event: FlowEvent, payload: Any=None, error: Any=None): """ Go through all the notification channels declared in the settings and call them one by one. Only call those matching the current event. As this function is blocking, make sure none of your channels take too long to run. Whenever an error happened in the notification, a debug message is logged into the chaostoolkit log for review but this should not impact the experiment itself. When no settings were provided, no notifications are sent. Equally, if the settings do not define a `notifications` entry. Here is an example of settings: ```yaml notifications: - type: plugin module: somepackage.somemodule events: - init-failed - run-failed - type: http url: http://example.com headers: Authorization: "Bearer token" - type: http url: https://private.com verify_tls: false forward_event_payload: false headers: Authorization: "Bearer token" events: - discovery-completed - run-failed ``` In this sample, the first channel will be the `notify` function of the `somepackage.somemopdule` Python module. The other two notifications will be sent over HTTP with the third one not forwarding the event payload itself (hence being a GET rather than a POST). Notice how the first and third channels take an `events` sequence. That list represents the events which those endpoints are interested in. In other words, they will only be called for those specific events. The second channel will be applied to all events. The payload event is a dictionary made of the following entries: - `"event"`: the event name - `"payload"`: the payload associated to this event (may be None) - `"phase"`: which phase this event was raised from - `"error"`: if an error was passed on to the function - `"ts"`: a UTC timestamp of when the event was raised """ if not settings: return notification_channels = settings.get('notifications') if not notification_channels: return event_payload = {'name': event.value, 'payload': payload, 'phase': 'unknown', 'ts': datetime.utcnow().replace(tzinfo=timezone.utc).timestamp()} if error: event_payload['error'] = error event_class = event.__class__ if event_class is DiscoverFlowEvent: event_payload['phase'] = 'discovery' elif event_class is InitFlowEvent: event_payload['phase'] = 'init' elif event_class is RunFlowEvent: event_payload['phase'] = 'run' elif event_class is ValidateFlowEvent: event_payload['phase'] = 'validate' for channel in notification_channels: events = channel.get('events') if events and event.value not in events: continue channel_type = channel.get('type') if channel_type == 'http': <DeepExtract> url = channel.get('url') headers = channel.get('headers') verify_tls = channel.get('verify_tls', True) forward_event_payload = channel.get('forward_event_payload', True) if url: try: if forward_event_payload: payload_encoded = json.loads(json.dumps(event_payload, cls=PayloadEncoder)) resp = requests.post(url, headers=headers, verify=verify_tls, timeout=(2, 5), json=payload_encoded) else: resp = requests.get(url, headers=headers, verify=verify_tls, timeout=(2, 5)) resp.raise_for_status() except HTTPError as ex: logger.debug(f'notification sent to {url} failed with: {ex}') except Exception as ex: logger.debug('failed calling notification endpoint', exc_info=ex) else: logger.debug('missing url in notification channel') </DeepExtract> elif channel_type == 'plugin': <DeepExtract> mod_name = channel.get('module') func_name = channel.get('func', 'notify') try: mod = importlib.import_module(mod_name) except ImportError: 
logger.debug("could not find Python plugin '{mod}' for notification".format(mod=mod_name)) else: funcs = inspect.getmembers(mod, inspect.isfunction) for (name, f) in funcs: if name == func_name: try: f(channel, event_payload) except Exception as err: logger.debug('failed calling notification plugin', exc_info=err) break else: logger.debug("could not find function '{f}' in plugin '{mod}' for notification".format(mod=mod_name, f=func_name)) </DeepExtract>
def notify(settings: Settings, event: FlowEvent, payload: Any=None, error: Any=None): """ Go through all the notification channels declared in the settings and call them one by one. Only call those matching the current event. As this function is blocking, make sure none of your channels take too long to run. Whenever an error happened in the notification, a debug message is logged into the chaostoolkit log for review but this should not impact the experiment itself. When no settings were provided, no notifications are sent. Equally, if the settings do not define a `notifications` entry. Here is an example of settings: ```yaml notifications: - type: plugin module: somepackage.somemodule events: - init-failed - run-failed - type: http url: http://example.com headers: Authorization: "Bearer token" - type: http url: https://private.com verify_tls: false forward_event_payload: false headers: Authorization: "Bearer token" events: - discovery-completed - run-failed ``` In this sample, the first channel will be the `notify` function of the `somepackage.somemopdule` Python module. The other two notifications will be sent over HTTP with the third one not forwarding the event payload itself (hence being a GET rather than a POST). Notice how the first and third channels take an `events` sequence. That list represents the events which those endpoints are interested in. In other words, they will only be called for those specific events. The second channel will be applied to all events. The payload event is a dictionary made of the following entries: - `"event"`: the event name - `"payload"`: the payload associated to this event (may be None) - `"phase"`: which phase this event was raised from - `"error"`: if an error was passed on to the function - `"ts"`: a UTC timestamp of when the event was raised """ if not settings: return notification_channels = settings.get('notifications') if not notification_channels: return event_payload = {'name': event.value, 'payload': payload, 'phase': 'unknown', 'ts': datetime.utcnow().replace(tzinfo=timezone.utc).timestamp()} if error: event_payload['error'] = error event_class = event.__class__ if event_class is DiscoverFlowEvent: event_payload['phase'] = 'discovery' elif event_class is InitFlowEvent: event_payload['phase'] = 'init' elif event_class is RunFlowEvent: event_payload['phase'] = 'run' elif event_class is ValidateFlowEvent: event_payload['phase'] = 'validate' for channel in notification_channels: events = channel.get('events') if events and event.value not in events: continue channel_type = channel.get('type') if channel_type == 'http': url = channel.get('url') headers = channel.get('headers') verify_tls = channel.get('verify_tls', True) forward_event_payload = channel.get('forward_event_payload', True) if url: try: if forward_event_payload: payload_encoded = json.loads(json.dumps(event_payload, cls=PayloadEncoder)) resp = requests.post(url, headers=headers, verify=verify_tls, timeout=(2, 5), json=payload_encoded) else: resp = requests.get(url, headers=headers, verify=verify_tls, timeout=(2, 5)) resp.raise_for_status() except HTTPError as ex: logger.debug(f'notification sent to {url} failed with: {ex}') except Exception as ex: logger.debug('failed calling notification endpoint', exc_info=ex) else: logger.debug('missing url in notification channel') elif channel_type == 'plugin': mod_name = channel.get('module') func_name = channel.get('func', 'notify') try: mod = importlib.import_module(mod_name) except ImportError: logger.debug("could not find Python plugin 
'{mod}' for notification".format(mod=mod_name)) else: funcs = inspect.getmembers(mod, inspect.isfunction) for (name, f) in funcs: if name == func_name: try: f(channel, event_payload) except Exception as err: logger.debug('failed calling notification plugin', exc_info=err) break else: logger.debug("could not find function '{f}' in plugin '{mod}' for notification".format(mod=mod_name, f=func_name))
chaostoolkit-lib
positive
def reset(self): if not self.deterministic: <DeepExtract> self.desired_goal = [random.randint(0, 1) for _ in range(self.environment_dimension)] </DeepExtract> <DeepExtract> self.state = [random.randint(0, 1) for _ in range(self.environment_dimension)] </DeepExtract> else: self.desired_goal = [0 for _ in range(self.environment_dimension)] self.state = [1 for _ in range(self.environment_dimension)] self.state.extend(self.desired_goal) self.achieved_goal = self.state[:self.environment_dimension] self.step_count = 0 return {'observation': np.array(self.state[:self.environment_dimension]), 'desired_goal': np.array(self.desired_goal), 'achieved_goal': np.array(self.achieved_goal)}
def reset(self): if not self.deterministic: self.desired_goal = [random.randint(0, 1) for _ in range(self.environment_dimension)] self.state = [random.randint(0, 1) for _ in range(self.environment_dimension)] else: self.desired_goal = [0 for _ in range(self.environment_dimension)] self.state = [1 for _ in range(self.environment_dimension)] self.state.extend(self.desired_goal) self.achieved_goal = self.state[:self.environment_dimension] self.step_count = 0 return {'observation': np.array(self.state[:self.environment_dimension]), 'desired_goal': np.array(self.desired_goal), 'achieved_goal': np.array(self.achieved_goal)}
Deep-Reinforcement-Learning-Algorithms-with-PyTorch
positive
def getAccounts(self): """ Return all accounts installed in the wallet database """ <DeepExtract> if self.keyStorage: pubkeys = self.keyStorage.getPublicKeys() else: pubkeys = list(Wallet.keys.keys()) </DeepExtract> accounts = [] for pubkey in pubkeys: if pubkey[:len(self.prefix)] == self.prefix: accounts.append(self.getAccount(pubkey)) return accounts
def getAccounts(self): """ Return all accounts installed in the wallet database """ if self.keyStorage: pubkeys = self.keyStorage.getPublicKeys() else: pubkeys = list(Wallet.keys.keys()) accounts = [] for pubkey in pubkeys: if pubkey[:len(self.prefix)] == self.prefix: accounts.append(self.getAccount(pubkey)) return accounts
CocosFactory
positive
def test_replicate_aip_offline_staging_uncompressed(self): """Ensure that a replica is created and stored correctly as a tarball.""" space_dir = tempfile.mkdtemp(dir=self.tmp_dir, prefix='space') replication_dir = tempfile.mkdtemp(dir=self.tmp_dir, prefix='replication') staging_dir = tempfile.mkdtemp(dir=self.tmp_dir, prefix='offline') replica_space = models.Space.objects.create(access_protocol=models.Space.OFFLINE_REPLICA_STAGING, path='/', staging_path=staging_dir) models.OfflineReplicaStaging.objects.create(space=replica_space) aip = models.Package.objects.get(uuid='0d4e739b-bf60-4b87-bc20-67a379b28cea') aip.current_location.space.staging_path = space_dir aip.current_location.space.save() <DeepExtract> staging_files_count_initial = sum([len(files) for (_, _, files) in os.walk(staging_dir)]) </DeepExtract> <DeepExtract> staging_dirs_count_initial = sum([len(dirs) for (_, dirs, _) in os.walk(staging_dir)]) </DeepExtract> aip.current_location.replicators.create(space=replica_space, relative_path=replication_dir, purpose=models.Location.REPLICATOR) assert aip.replicas.count() == 0 aip.create_replicas() replica = aip.replicas.first() assert aip.replicas.count() == 1 assert replica is not None expected_replica_path = os.path.join(replication_dir, 'working_bag.tar') assert os.path.exists(expected_replica_path) assert replica.current_path == expected_replica_path assert staging_files_count_initial == recursive_file_count(staging_dir) assert staging_dirs_count_initial == recursive_dir_count(staging_dir)
def test_replicate_aip_offline_staging_uncompressed(self): """Ensure that a replica is created and stored correctly as a tarball.""" space_dir = tempfile.mkdtemp(dir=self.tmp_dir, prefix='space') replication_dir = tempfile.mkdtemp(dir=self.tmp_dir, prefix='replication') staging_dir = tempfile.mkdtemp(dir=self.tmp_dir, prefix='offline') replica_space = models.Space.objects.create(access_protocol=models.Space.OFFLINE_REPLICA_STAGING, path='/', staging_path=staging_dir) models.OfflineReplicaStaging.objects.create(space=replica_space) aip = models.Package.objects.get(uuid='0d4e739b-bf60-4b87-bc20-67a379b28cea') aip.current_location.space.staging_path = space_dir aip.current_location.space.save() staging_files_count_initial = sum([len(files) for (_, _, files) in os.walk(staging_dir)]) staging_dirs_count_initial = sum([len(dirs) for (_, dirs, _) in os.walk(staging_dir)]) aip.current_location.replicators.create(space=replica_space, relative_path=replication_dir, purpose=models.Location.REPLICATOR) assert aip.replicas.count() == 0 aip.create_replicas() replica = aip.replicas.first() assert aip.replicas.count() == 1 assert replica is not None expected_replica_path = os.path.join(replication_dir, 'working_bag.tar') assert os.path.exists(expected_replica_path) assert replica.current_path == expected_replica_path assert staging_files_count_initial == recursive_file_count(staging_dir) assert staging_dirs_count_initial == recursive_dir_count(staging_dir)
archivematica-storage-service
positive
def get_with_prefix(p, base=None, prefer=None, nullable=False): """ Get file path by searching its prefix. If `base` is a directory, equals to get "base/p*". Otherwise, equals to get "base.p*". Only one result will be return. If more than one match, give the first one with suffix in `prefer`. """ if not base: base = './' if os.path.isdir(base): pattern = os.path.join(base, p) else: pattern = f"{base.rstrip('.')}.{p}" matches = glob(pattern + '*') if len(matches) == 1: return matches[0] <DeepExtract> if prefer is None: if nullable: prefer = [] else: raise TypeError('arg cannot be None') if not isinstance(prefer, (list, tuple, np.ndarray)): prefer = [prefer] prefer = prefer </DeepExtract> for suffix in prefer: if pattern + suffix in matches: return pattern + suffix if nullable: return None raise FileNotFoundError(f'{pattern}* not exists or has more than one matches')
def get_with_prefix(p, base=None, prefer=None, nullable=False): """ Get file path by searching its prefix. If `base` is a directory, equals to get "base/p*". Otherwise, equals to get "base.p*". Only one result will be return. If more than one match, give the first one with suffix in `prefer`. """ if not base: base = './' if os.path.isdir(base): pattern = os.path.join(base, p) else: pattern = f"{base.rstrip('.')}.{p}" matches = glob(pattern + '*') if len(matches) == 1: return matches[0] if prefer is None: if nullable: prefer = [] else: raise TypeError('arg cannot be None') if not isinstance(prefer, (list, tuple, np.ndarray)): prefer = [prefer] prefer = prefer for suffix in prefer: if pattern + suffix in matches: return pattern + suffix if nullable: return None raise FileNotFoundError(f'{pattern}* not exists or has more than one matches')
deepks-kit
positive
def add_feed(self) -> None: """Prompt the user for a feed and add it, if possible.""" <DeepExtract> assert self._footer_window is not None assert isinstance('Enter the URL or path of the feed: ', str) self._curs_set(1) self._stdscr.timeout(-1) self._footer_window.addstr(1, 0, ' ' * (self._footer_window.getmaxyx()[1] - 1)) self._footer_window.addstr(1, 0, 'Enter the URL or path of the feed: ') entry_pad = curses.newpad(1, 999) current_x = 0 scroll_x = 0 input_char = None while input_char not in [curses.KEY_ENTER, 10]: if input_char is not None: if input_char in [curses.KEY_BACKSPACE, 127]: if current_x > 0: entry_pad.delch(0, current_x - 1) current_x -= 1 if scroll_x > 0: scroll_x -= 1 else: if current_x + len('Enter the URL or path of the feed: ') > self._footer_window.getmaxyx()[1] - 1: scroll_x += 1 entry_pad.addch(0, current_x, input_char) current_x += 1 entry_pad.refresh(0, scroll_x, self._parent_y - 1, len('Enter the URL or path of the feed: '), self._parent_y - 1, self._footer_window.getmaxyx()[1] - 1) input_char = self._footer_window.getch() self._stdscr.timeout(self.INPUT_TIMEOUT) self._footer_window.clear() self._curs_set(0) path = entry_pad.instr(0, 0, entry_pad.getmaxyx()[1]).decode('utf-8').strip() </DeepExtract> try: if 'http' in path: feed = Feed(url=path) else: feed = Feed(file=path) if feed.validated: self.database.replace_feed(feed) self.database.replace_episodes(feed, feed.parse_episodes()) self.menus_valid = False <DeepExtract> assert isinstance("Feed '%s' successfully added" % str(feed), str) self._status = "Feed '%s' successfully added" % str(feed) self._status_timer = self.STATUS_TIMEOUT </DeepExtract> except FeedError as e: if isinstance(e, FeedLoadError): <DeepExtract> assert isinstance('FeedLoadError: %s' % str(e), str) self._status = 'FeedLoadError: %s' % str(e) self._status_timer = self.STATUS_TIMEOUT </DeepExtract> elif isinstance(e, FeedDownloadError): <DeepExtract> assert isinstance('FeedDownloadError: %s' % str(e), str) self._status = 'FeedDownloadError: %s' % str(e) self._status_timer = self.STATUS_TIMEOUT </DeepExtract> elif isinstance(e, FeedParseError): <DeepExtract> assert isinstance('FeedParseError: %s' % str(e), str) self._status = 'FeedParseError: %s' % str(e) self._status_timer = self.STATUS_TIMEOUT </DeepExtract> elif isinstance(e, FeedStructureError): <DeepExtract> assert isinstance('FeedStructureError: %s' % str(e), str) self._status = 'FeedStructureError: %s' % str(e) self._status_timer = self.STATUS_TIMEOUT </DeepExtract> else: <DeepExtract> assert isinstance('FeedError [ambiguous]: %s' % str(e), str) self._status = 'FeedError [ambiguous]: %s' % str(e) self._status_timer = self.STATUS_TIMEOUT </DeepExtract>
def add_feed(self) -> None: """Prompt the user for a feed and add it, if possible.""" assert self._footer_window is not None assert isinstance('Enter the URL or path of the feed: ', str) self._curs_set(1) self._stdscr.timeout(-1) self._footer_window.addstr(1, 0, ' ' * (self._footer_window.getmaxyx()[1] - 1)) self._footer_window.addstr(1, 0, 'Enter the URL or path of the feed: ') entry_pad = curses.newpad(1, 999) current_x = 0 scroll_x = 0 input_char = None while input_char not in [curses.KEY_ENTER, 10]: if input_char is not None: if input_char in [curses.KEY_BACKSPACE, 127]: if current_x > 0: entry_pad.delch(0, current_x - 1) current_x -= 1 if scroll_x > 0: scroll_x -= 1 else: if current_x + len('Enter the URL or path of the feed: ') > self._footer_window.getmaxyx()[1] - 1: scroll_x += 1 entry_pad.addch(0, current_x, input_char) current_x += 1 entry_pad.refresh(0, scroll_x, self._parent_y - 1, len('Enter the URL or path of the feed: '), self._parent_y - 1, self._footer_window.getmaxyx()[1] - 1) input_char = self._footer_window.getch() self._stdscr.timeout(self.INPUT_TIMEOUT) self._footer_window.clear() self._curs_set(0) path = entry_pad.instr(0, 0, entry_pad.getmaxyx()[1]).decode('utf-8').strip() try: if 'http' in path: feed = Feed(url=path) else: feed = Feed(file=path) if feed.validated: self.database.replace_feed(feed) self.database.replace_episodes(feed, feed.parse_episodes()) self.menus_valid = False assert isinstance("Feed '%s' successfully added" % str(feed), str) self._status = "Feed '%s' successfully added" % str(feed) self._status_timer = self.STATUS_TIMEOUT except FeedError as e: if isinstance(e, FeedLoadError): assert isinstance('FeedLoadError: %s' % str(e), str) self._status = 'FeedLoadError: %s' % str(e) self._status_timer = self.STATUS_TIMEOUT elif isinstance(e, FeedDownloadError): assert isinstance('FeedDownloadError: %s' % str(e), str) self._status = 'FeedDownloadError: %s' % str(e) self._status_timer = self.STATUS_TIMEOUT elif isinstance(e, FeedParseError): assert isinstance('FeedParseError: %s' % str(e), str) self._status = 'FeedParseError: %s' % str(e) self._status_timer = self.STATUS_TIMEOUT elif isinstance(e, FeedStructureError): assert isinstance('FeedStructureError: %s' % str(e), str) self._status = 'FeedStructureError: %s' % str(e) self._status_timer = self.STATUS_TIMEOUT else: assert isinstance('FeedError [ambiguous]: %s' % str(e), str) self._status = 'FeedError [ambiguous]: %s' % str(e) self._status_timer = self.STATUS_TIMEOUT
castero
positive
def check_wr_data(self): wr_data = self.wr_bitarray if wr_data.all(False): if self.parent.debug: print('[{:02}] {:20} nothing to burn'.format(self.id, self.name)) return False if len(wr_data.bytes) != len(self.bitarray.bytes): raise esptool.FatalError('Data does not fit: the block%d size is %d bytes, data is %d bytes' % (self.id, len(self.bitarray.bytes), len(wr_data.bytes))) <DeepExtract> if not self.is_readable(): error_msg = '\t{} is read-protected.'.format(self.name) 'The written value can not be read, the efuse/block looks as all 0.\n' error_msg += '\tBurn in this case may damage an already written value.' self.parent.print_error_msg(error_msg) if not self.is_writeable(): error_msg = '\t{} is write-protected. Burn is not possible.'.format(self.name) self.parent.print_error_msg(error_msg) </DeepExtract> if self.get_bitstring().all(False): print('[{:02}] {:20} is empty, will burn the new value'.format(self.id, self.name)) elif self.get_bitstring() == wr_data: print('[{:02}] {:20} is already written the same value, continue with EMPTY_BLOCK'.format(self.id, self.name)) wr_data.set(0) else: print('[{:02}] {:20} is not empty'.format(self.id, self.name)) print('\t(written ):', self.get_bitstring()) print('\t(to write):', wr_data) mask = self.get_bitstring() & wr_data if mask == wr_data: print('\tAll wr_data bits are set in the written block, continue with EMPTY_BLOCK.') wr_data.set(0) else: <DeepExtract> if self.id == 0: coding_scheme = self.parent.REGS.CODING_SCHEME_NONE else: coding_scheme = self.parent.coding_scheme </DeepExtract> if coding_scheme == self.parent.REGS.CODING_SCHEME_NONE: print('\t(coding scheme = NONE)') elif coding_scheme == self.parent.REGS.CODING_SCHEME_RS: print('\t(coding scheme = RS)') error_msg = '\tBurn into %s is forbidden (RS coding scheme does not allow this).' % self.name self.parent.print_error_msg(error_msg) elif coding_scheme == self.parent.REGS.CODING_SCHEME_34: print('\t(coding scheme = 3/4)') data_can_not_be_burn = False for i in range(0, self.get_bitstring().len, 6 * 8): rd_chunk = self.get_bitstring()[i:i + 6 * 8] wr_chunk = wr_data[i:i + 6 * 8] if rd_chunk.any(True): if wr_chunk.any(True): print('\twritten chunk [%d] and wr_chunk are not empty. ' % (i // (6 * 8)), end='') if rd_chunk == wr_chunk: print('wr_chunk == rd_chunk. Countinue with empty chunk.') wr_data[i:i + 6 * 8].set(0) else: print('wr_chunk != rd_chunk. Can not burn.') print('\twritten ', rd_chunk) print('\tto write', wr_chunk) data_can_not_be_burn = True if data_can_not_be_burn: error_msg = '\tBurn into %s is forbidden (3/4 coding scheme does not allow this).' % self.name self.parent.print_error_msg(error_msg) else: raise esptool.FatalError('The coding scheme ({}) is not supported'.format(coding_scheme))
def check_wr_data(self): wr_data = self.wr_bitarray if wr_data.all(False): if self.parent.debug: print('[{:02}] {:20} nothing to burn'.format(self.id, self.name)) return False if len(wr_data.bytes) != len(self.bitarray.bytes): raise esptool.FatalError('Data does not fit: the block%d size is %d bytes, data is %d bytes' % (self.id, len(self.bitarray.bytes), len(wr_data.bytes))) if not self.is_readable(): error_msg = '\t{} is read-protected.'.format(self.name) 'The written value can not be read, the efuse/block looks as all 0.\n' error_msg += '\tBurn in this case may damage an already written value.' self.parent.print_error_msg(error_msg) if not self.is_writeable(): error_msg = '\t{} is write-protected. Burn is not possible.'.format(self.name) self.parent.print_error_msg(error_msg) if self.get_bitstring().all(False): print('[{:02}] {:20} is empty, will burn the new value'.format(self.id, self.name)) elif self.get_bitstring() == wr_data: print('[{:02}] {:20} is already written the same value, continue with EMPTY_BLOCK'.format(self.id, self.name)) wr_data.set(0) else: print('[{:02}] {:20} is not empty'.format(self.id, self.name)) print('\t(written ):', self.get_bitstring()) print('\t(to write):', wr_data) mask = self.get_bitstring() & wr_data if mask == wr_data: print('\tAll wr_data bits are set in the written block, continue with EMPTY_BLOCK.') wr_data.set(0) else: if self.id == 0: coding_scheme = self.parent.REGS.CODING_SCHEME_NONE else: coding_scheme = self.parent.coding_scheme if coding_scheme == self.parent.REGS.CODING_SCHEME_NONE: print('\t(coding scheme = NONE)') elif coding_scheme == self.parent.REGS.CODING_SCHEME_RS: print('\t(coding scheme = RS)') error_msg = '\tBurn into %s is forbidden (RS coding scheme does not allow this).' % self.name self.parent.print_error_msg(error_msg) elif coding_scheme == self.parent.REGS.CODING_SCHEME_34: print('\t(coding scheme = 3/4)') data_can_not_be_burn = False for i in range(0, self.get_bitstring().len, 6 * 8): rd_chunk = self.get_bitstring()[i:i + 6 * 8] wr_chunk = wr_data[i:i + 6 * 8] if rd_chunk.any(True): if wr_chunk.any(True): print('\twritten chunk [%d] and wr_chunk are not empty. ' % (i // (6 * 8)), end='') if rd_chunk == wr_chunk: print('wr_chunk == rd_chunk. Countinue with empty chunk.') wr_data[i:i + 6 * 8].set(0) else: print('wr_chunk != rd_chunk. Can not burn.') print('\twritten ', rd_chunk) print('\tto write', wr_chunk) data_can_not_be_burn = True if data_can_not_be_burn: error_msg = '\tBurn into %s is forbidden (3/4 coding scheme does not allow this).' % self.name self.parent.print_error_msg(error_msg) else: raise esptool.FatalError('The coding scheme ({}) is not supported'.format(coding_scheme))
esptool
positive
def kerneltodis(kernel): <DeepExtract> nkernel = copy.deepcopy(kernel) size = len(kernel) for i in range(size): nkernel[i, :] /= np.sqrt(kernel[i, i]) nkernel[:, i] /= np.sqrt(kernel[i, i]) nkernel[i, i] = 1.0 nk = nkernel.clip(max=1) </DeepExtract> size = len(kernel) dis = np.zeros((size, size), dtype=np.float64) for i in range(size): for j in range(i - 1): dis[i, j] = dis[j, i] = np.sqrt(2.0 - 2.0 * nk[i, j]) return dis.clip(min=0)
def kerneltodis(kernel): nkernel = copy.deepcopy(kernel) size = len(kernel) for i in range(size): nkernel[i, :] /= np.sqrt(kernel[i, i]) nkernel[:, i] /= np.sqrt(kernel[i, i]) nkernel[i, i] = 1.0 nk = nkernel.clip(max=1) size = len(kernel) dis = np.zeros((size, size), dtype=np.float64) for i in range(size): for j in range(i - 1): dis[i, j] = dis[j, i] = np.sqrt(2.0 - 2.0 * nk[i, j]) return dis.clip(min=0)
ASAP
positive
def must_get_account(account_address: typing.Union[diem_types.AccountAddress, str]) -> rpc.Account: """must_get_account raises AccountNotFoundError if account could not be found by given address""" <DeepExtract> address = utils.account_address_hex(account_address) account = self.execute('get_account', [address], _parse_obj(lambda : rpc.Account())) </DeepExtract> if account is None: hex = utils.account_address_hex(account_address) raise AccountNotFoundError(f'account not found by address: {hex}') return account
def must_get_account(account_address: typing.Union[diem_types.AccountAddress, str]) -> rpc.Account: """must_get_account raises AccountNotFoundError if account could not be found by given address""" address = utils.account_address_hex(account_address) account = self.execute('get_account', [address], _parse_obj(lambda : rpc.Account())) if account is None: hex = utils.account_address_hex(account_address) raise AccountNotFoundError(f'account not found by address: {hex}') return account
client-sdk-python
positive
def grade(adjective, suffix=COMPARATIVE): """ Returns the comparative or superlative form of the given adjective. """ <DeepExtract> n = 0 p = False for ch in adjective.endswith('e') and adjective[:-1] or adjective: v = ch in VOWELS n += int(v and (not p)) p = v n = n </DeepExtract> if adjective in grade_irregular: return grade_irregular[adjective][suffix != COMPARATIVE] elif adjective in grade_uninflected: return '%s %s' % (suffix == COMPARATIVE and 'more' or 'most', adjective) elif n <= 2 and adjective.endswith('e'): suffix = suffix.lstrip('e') elif n == 1 and len(adjective) >= 3 and (adjective[-1] not in VOWELS) and (adjective[-2] in VOWELS) and (adjective[-3] not in VOWELS): if not adjective.endswith('w'): suffix = adjective[-1] + suffix elif n == 1: pass elif n == 2 and adjective.endswith('y'): adjective = adjective[:-1] + 'i' elif n == 2 and adjective[-2:] in ('er', 'le', 'ow'): pass else: return '%s %s' % (suffix == COMPARATIVE and 'more' or 'most', adjective) return adjective + suffix
def grade(adjective, suffix=COMPARATIVE): """ Returns the comparative or superlative form of the given adjective. """ n = 0 p = False for ch in adjective.endswith('e') and adjective[:-1] or adjective: v = ch in VOWELS n += int(v and (not p)) p = v n = n if adjective in grade_irregular: return grade_irregular[adjective][suffix != COMPARATIVE] elif adjective in grade_uninflected: return '%s %s' % (suffix == COMPARATIVE and 'more' or 'most', adjective) elif n <= 2 and adjective.endswith('e'): suffix = suffix.lstrip('e') elif n == 1 and len(adjective) >= 3 and (adjective[-1] not in VOWELS) and (adjective[-2] in VOWELS) and (adjective[-3] not in VOWELS): if not adjective.endswith('w'): suffix = adjective[-1] + suffix elif n == 1: pass elif n == 2 and adjective.endswith('y'): adjective = adjective[:-1] + 'i' elif n == 2 and adjective[-2:] in ('er', 'le', 'ow'): pass else: return '%s %s' % (suffix == COMPARATIVE and 'more' or 'most', adjective) return adjective + suffix
corpkit
positive
def test_put_object(self): <DeepExtract> self.put_body_stream = open(self.temp_put_obj_file_path, 'r+b') file_stats = os.stat(self.temp_put_obj_file_path) self.data_len = file_stats.st_size headers = HttpHeaders([('host', self._build_endpoint_string(self.region, self.bucket_name)), ('Content-Type', 'text/plain'), ('Content-Length', str(self.data_len))]) if path is None: path = self.put_test_object_path request = HttpRequest('PUT', path, headers, self.put_body_stream) request = request </DeepExtract> <DeepExtract> s3_client = s3_client_new(False, self.region, 5 * MB) s3_request = s3_client.make_request(request=request, type=S3RequestType.PUT_OBJECT, on_headers=self._on_request_headers, on_body=self._on_request_body) finished_future = s3_request.finished_future try: finished_future.result(self.timeout) except Exception as e: self.assertEqual(e.name, exception_name) else: self._validate_successful_get_response(S3RequestType.PUT_OBJECT is S3RequestType.PUT_OBJECT) shutdown_event = s3_request.shutdown_event s3_request = None self.assertTrue(shutdown_event.wait(self.timeout)) </DeepExtract> self.put_body_stream.close()
def test_put_object(self): self.put_body_stream = open(self.temp_put_obj_file_path, 'r+b') file_stats = os.stat(self.temp_put_obj_file_path) self.data_len = file_stats.st_size headers = HttpHeaders([('host', self._build_endpoint_string(self.region, self.bucket_name)), ('Content-Type', 'text/plain'), ('Content-Length', str(self.data_len))]) if path is None: path = self.put_test_object_path request = HttpRequest('PUT', path, headers, self.put_body_stream) request = request s3_client = s3_client_new(False, self.region, 5 * MB) s3_request = s3_client.make_request(request=request, type=S3RequestType.PUT_OBJECT, on_headers=self._on_request_headers, on_body=self._on_request_body) finished_future = s3_request.finished_future try: finished_future.result(self.timeout) except Exception as e: self.assertEqual(e.name, exception_name) else: self._validate_successful_get_response(S3RequestType.PUT_OBJECT is S3RequestType.PUT_OBJECT) shutdown_event = s3_request.shutdown_event s3_request = None self.assertTrue(shutdown_event.wait(self.timeout)) self.put_body_stream.close()
aws-crt-python
positive