before: stringlengths 0 to 955k
after: stringlengths 0 to 877k
repo: stringlengths 1 to 74
type: stringclasses (1 value)
def get_config_from_root(root): """Read the project setup.cfg file to determine Versioneer config.""" setup_cfg = os.path.join(root, 'setup.cfg') parser = configparser.SafeConfigParser() with open(setup_cfg, 'r') as f: parser.readfp(f) VCS = parser.get('versioneer', 'VCS') def get(parser, name): if parser.has_option('versioneer', name): return parser.get('versioneer', name) return None cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, 'style') or '' <DeepExtract> if parser.has_option('versioneer', 'versionfile_source'): cfg.versionfile_source = parser.get('versioneer', 'versionfile_source') cfg.versionfile_source = None </DeepExtract> <DeepExtract> if parser.has_option('versioneer', 'versionfile_build'): cfg.versionfile_build = parser.get('versioneer', 'versionfile_build') cfg.versionfile_build = None </DeepExtract> <DeepExtract> if parser.has_option('versioneer', 'tag_prefix'): cfg.tag_prefix = parser.get('versioneer', 'tag_prefix') cfg.tag_prefix = None </DeepExtract> if cfg.tag_prefix in ("''", '""'): cfg.tag_prefix = '' <DeepExtract> if parser.has_option('versioneer', 'parentdir_prefix'): cfg.parentdir_prefix = parser.get('versioneer', 'parentdir_prefix') cfg.parentdir_prefix = None </DeepExtract> <DeepExtract> if parser.has_option('versioneer', 'verbose'): cfg.verbose = parser.get('versioneer', 'verbose') cfg.verbose = None </DeepExtract> return cfg
def get_config_from_root(root): """Read the project setup.cfg file to determine Versioneer config.""" setup_cfg = os.path.join(root, 'setup.cfg') parser = configparser.SafeConfigParser() with open(setup_cfg, 'r') as f: parser.readfp(f) VCS = parser.get('versioneer', 'VCS') def get(parser, name): if parser.has_option('versioneer', name): return parser.get('versioneer', name) return None cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, 'style') or '' if parser.has_option('versioneer', 'versionfile_source'): cfg.versionfile_source = parser.get('versioneer', 'versionfile_source') cfg.versionfile_source = None if parser.has_option('versioneer', 'versionfile_build'): cfg.versionfile_build = parser.get('versioneer', 'versionfile_build') cfg.versionfile_build = None if parser.has_option('versioneer', 'tag_prefix'): cfg.tag_prefix = parser.get('versioneer', 'tag_prefix') cfg.tag_prefix = None if cfg.tag_prefix in ("''", '""'): cfg.tag_prefix = '' if parser.has_option('versioneer', 'parentdir_prefix'): cfg.parentdir_prefix = parser.get('versioneer', 'parentdir_prefix') cfg.parentdir_prefix = None if parser.has_option('versioneer', 'verbose'): cfg.verbose = parser.get('versioneer', 'verbose') cfg.verbose = None return cfg
asreview
positive
def cqtCell2Sparse(c, M): bins = M.shape[1] / 2 - 1 spLen = M[bins + 1 - 1] cSparse = np.zeros((bins, spLen)) M = M[:bins + 1 - 1] step = 1 distinctHops = math.log(M[bins + 1 - 1] / M[2], 2) + 1 curNumCoef = M[bins + 1 - 1] for ii in range(distinctHops): idx = [M == curNumCoef] + [false] temp = cell2mat(c[idx].T).T idx += list(range(0, len(cSparse), step)) cSparse[idx] = temp step = step * 2 curNumCoef = curNumCoef / 2 <DeepExtract> index_res = np.where(cSparse > 0) index_list = [index for index in index_res] value_list = [cSparse[index[0]][index[1]] for index in index_res] cSparse = (index_list, value_list) </DeepExtract> return cSparse
def cqtCell2Sparse(c, M): bins = M.shape[1] / 2 - 1 spLen = M[bins + 1 - 1] cSparse = np.zeros((bins, spLen)) M = M[:bins + 1 - 1] step = 1 distinctHops = math.log(M[bins + 1 - 1] / M[2], 2) + 1 curNumCoef = M[bins + 1 - 1] for ii in range(distinctHops): idx = [M == curNumCoef] + [false] temp = cell2mat(c[idx].T).T idx += list(range(0, len(cSparse), step)) cSparse[idx] = temp step = step * 2 curNumCoef = curNumCoef / 2 index_res = np.where(cSparse > 0) index_list = [index for index in index_res] value_list = [cSparse[index[0]][index[1]] for index in index_res] cSparse = (index_list, value_list) return cSparse
2021
positive
@property def log(): <DeepExtract> if not self.exists: raise DataJointError(message or 'Schema `{db}` has not been created.'.format(db=self.database)) </DeepExtract> if self._log is None: self._log = Log(self.connection, self.database) return self._log
@property def log(): if not self.exists: raise DataJointError(message or 'Schema `{db}` has not been created.'.format(db=self.database)) if self._log is None: self._log = Log(self.connection, self.database) return self._log
datajoint-python
positive
def __init__(self, layout, step=5, start=0, end=-1, **kwds): super().__init__(layout, start, end, **kwds) <DeepExtract> main = [i for i in range(30, 255 + 1, step)] self._levels = main + [i for i in reversed(main[0:len(main) - 1])] </DeepExtract> self._level_count = len(self._levels)
def __init__(self, layout, step=5, start=0, end=-1, **kwds): super().__init__(layout, start, end, **kwds) main = [i for i in range(30, 255 + 1, step)] self._levels = main + [i for i in reversed(main[0:len(main) - 1])] self._level_count = len(self._levels)
BiblioPixelAnimations
positive
def setupUi(self, LoginDialog): LoginDialog.setObjectName('LoginDialog') LoginDialog.resize(300, 250) LoginDialog.setMinimumSize(QtCore.QSize(300, 250)) self.verticalLayout = QtWidgets.QVBoxLayout(LoginDialog) self.verticalLayout.setObjectName('verticalLayout') spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding) self.verticalLayout.addItem(spacerItem) self.logo_label = QtWidgets.QLabel(LoginDialog) self.logo_label.setText('') self.logo_label.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter) self.logo_label.setObjectName('logo_label') self.verticalLayout.addWidget(self.logo_label) self.label_2 = QtWidgets.QLabel(LoginDialog) self.label_2.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter) self.label_2.setObjectName('label_2') self.verticalLayout.addWidget(self.label_2) self.heading_label = QtWidgets.QLabel(LoginDialog) self.heading_label.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter) self.heading_label.setObjectName('heading_label') self.verticalLayout.addWidget(self.heading_label) self.email_line_edit = QtWidgets.QLineEdit(LoginDialog) self.email_line_edit.setObjectName('email_line_edit') self.verticalLayout.addWidget(self.email_line_edit) self.password_line_edit = QtWidgets.QLineEdit(LoginDialog) self.password_line_edit.setEchoMode(QtWidgets.QLineEdit.EchoMode.Password) self.password_line_edit.setObjectName('password_line_edit') self.verticalLayout.addWidget(self.password_line_edit) self.sign_in_button = QtWidgets.QPushButton(LoginDialog) self.sign_in_button.setObjectName('sign_in_button') self.verticalLayout.addWidget(self.sign_in_button) self.register_button = QtWidgets.QLabel(LoginDialog) self.register_button.setStyleSheet('color: blue;\ntext-decoration: underline;') self.register_button.setTextFormat(QtCore.Qt.TextFormat.RichText) self.register_button.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter) self.register_button.setObjectName('register_button') self.verticalLayout.addWidget(self.register_button) self.change_password_button = QtWidgets.QLabel(LoginDialog) self.change_password_button.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter) self.change_password_button.setObjectName('change_password_button') self.verticalLayout.addWidget(self.change_password_button) spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding) self.verticalLayout.addItem(spacerItem1) <DeepExtract> _translate = QtCore.QCoreApplication.translate LoginDialog.setWindowTitle(_translate('LoginDialog', 'Sign In - Blobbackup')) self.label_2.setText(_translate('LoginDialog', 'Blobbackup')) self.heading_label.setText(_translate('LoginDialog', 'Sign in to your account.')) self.email_line_edit.setPlaceholderText(_translate('LoginDialog', 'Email')) self.password_line_edit.setPlaceholderText(_translate('LoginDialog', 'Password')) self.sign_in_button.setText(_translate('LoginDialog', 'Sign In')) self.register_button.setText(_translate('LoginDialog', '<html><head/><body><p><a href="https://blobbackup.com"><span style=" color:#0068da;">Don\'t have an account yet?</span></a></p></body></html>')) self.change_password_button.setText(_translate('LoginDialog', '<html><head/><body><p><a href="https://blobbackup.com"><span style=" text-decoration: underline; color:#0068da;">Change your password</span></a></p></body></html>')) </DeepExtract> QtCore.QMetaObject.connectSlotsByName(LoginDialog)
def setupUi(self, LoginDialog): LoginDialog.setObjectName('LoginDialog') LoginDialog.resize(300, 250) LoginDialog.setMinimumSize(QtCore.QSize(300, 250)) self.verticalLayout = QtWidgets.QVBoxLayout(LoginDialog) self.verticalLayout.setObjectName('verticalLayout') spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding) self.verticalLayout.addItem(spacerItem) self.logo_label = QtWidgets.QLabel(LoginDialog) self.logo_label.setText('') self.logo_label.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter) self.logo_label.setObjectName('logo_label') self.verticalLayout.addWidget(self.logo_label) self.label_2 = QtWidgets.QLabel(LoginDialog) self.label_2.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter) self.label_2.setObjectName('label_2') self.verticalLayout.addWidget(self.label_2) self.heading_label = QtWidgets.QLabel(LoginDialog) self.heading_label.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter) self.heading_label.setObjectName('heading_label') self.verticalLayout.addWidget(self.heading_label) self.email_line_edit = QtWidgets.QLineEdit(LoginDialog) self.email_line_edit.setObjectName('email_line_edit') self.verticalLayout.addWidget(self.email_line_edit) self.password_line_edit = QtWidgets.QLineEdit(LoginDialog) self.password_line_edit.setEchoMode(QtWidgets.QLineEdit.EchoMode.Password) self.password_line_edit.setObjectName('password_line_edit') self.verticalLayout.addWidget(self.password_line_edit) self.sign_in_button = QtWidgets.QPushButton(LoginDialog) self.sign_in_button.setObjectName('sign_in_button') self.verticalLayout.addWidget(self.sign_in_button) self.register_button = QtWidgets.QLabel(LoginDialog) self.register_button.setStyleSheet('color: blue;\ntext-decoration: underline;') self.register_button.setTextFormat(QtCore.Qt.TextFormat.RichText) self.register_button.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter) self.register_button.setObjectName('register_button') self.verticalLayout.addWidget(self.register_button) self.change_password_button = QtWidgets.QLabel(LoginDialog) self.change_password_button.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter) self.change_password_button.setObjectName('change_password_button') self.verticalLayout.addWidget(self.change_password_button) spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding) self.verticalLayout.addItem(spacerItem1) _translate = QtCore.QCoreApplication.translate LoginDialog.setWindowTitle(_translate('LoginDialog', 'Sign In - Blobbackup')) self.label_2.setText(_translate('LoginDialog', 'Blobbackup')) self.heading_label.setText(_translate('LoginDialog', 'Sign in to your account.')) self.email_line_edit.setPlaceholderText(_translate('LoginDialog', 'Email')) self.password_line_edit.setPlaceholderText(_translate('LoginDialog', 'Password')) self.sign_in_button.setText(_translate('LoginDialog', 'Sign In')) self.register_button.setText(_translate('LoginDialog', '<html><head/><body><p><a href="https://blobbackup.com"><span style=" color:#0068da;">Don\'t have an account yet?</span></a></p></body></html>')) self.change_password_button.setText(_translate('LoginDialog', '<html><head/><body><p><a href="https://blobbackup.com"><span style=" text-decoration: underline; color:#0068da;">Change your password</span></a></p></body></html>')) QtCore.QMetaObject.connectSlotsByName(LoginDialog)
BlobBackup
positive
@mock.patch.object(time, 'sleep', autospec=True) def test_enabled_sleep_type_after_with_repeat(self, mock_sleep): """When sleep_type==AFTER_WITH_REPEAT, sleep should be between 2 steps().""" <DeepExtract> env = mock.create_autospec(env_interface.AndroidEnvInterface) env.action_spec.return_value = {'action_type': specs.DiscreteArray(num_values=len(action_type.ActionType), name='action_type'), 'touch_position': specs.BoundedArray(shape=(2,), dtype=np.float32, minimum=[0.0, 0.0], maximum=[1.0, 1.0], name='touch_position')} env = env </DeepExtract> wrapper = rate_limit_wrapper.RateLimitWrapper(env, rate=1 / 33.33, sleep_type=rate_limit_wrapper.RateLimitWrapper.SleepType.AFTER_WITH_REPEAT) _ = wrapper.reset() mock_sleep.assert_not_called() @_with_timestamp def _sleep_fn(sleep_time): _sleep_fn.timestamp = time.time() self.assertBetween(sleep_time, 0.0, 33.33) mock_sleep.side_effect = _sleep_fn @_with_timestamp def _step_fn(action): if len(_step_fn.timestamps) % 2 == 0: self.assertEqual(action['action_type'], np.array(action_type.ActionType.LIFT, dtype=np.uint8)) else: self.assertEqual(action['action_type'], np.array(action_type.ActionType.REPEAT, dtype=np.uint8)) _step_fn.timestamps.append(time.time()) return dm_env.transition(reward=1.0, observation=None) _step_fn.timestamps = [] env.step.side_effect = _step_fn timestep = wrapper.step({'action_type': np.array(action_type.ActionType.LIFT, dtype=np.uint8), 'touch_position': np.array([0.123, 0.456])}) self.assertEqual(env.step.call_count, 2) self.assertLen(_step_fn.timestamps, 2) self.assertGreaterEqual(_sleep_fn.timestamp, _step_fn.timestamps[0]) self.assertLessEqual(_sleep_fn.timestamp, _step_fn.timestamps[1]) self.assertEqual(timestep.reward, 2.0)
@mock.patch.object(time, 'sleep', autospec=True) def test_enabled_sleep_type_after_with_repeat(self, mock_sleep): """When sleep_type==AFTER_WITH_REPEAT, sleep should be between 2 steps().""" env = mock.create_autospec(env_interface.AndroidEnvInterface) env.action_spec.return_value = {'action_type': specs.DiscreteArray(num_values=len(action_type.ActionType), name='action_type'), 'touch_position': specs.BoundedArray(shape=(2,), dtype=np.float32, minimum=[0.0, 0.0], maximum=[1.0, 1.0], name='touch_position')} env = env wrapper = rate_limit_wrapper.RateLimitWrapper(env, rate=1 / 33.33, sleep_type=rate_limit_wrapper.RateLimitWrapper.SleepType.AFTER_WITH_REPEAT) _ = wrapper.reset() mock_sleep.assert_not_called() @_with_timestamp def _sleep_fn(sleep_time): _sleep_fn.timestamp = time.time() self.assertBetween(sleep_time, 0.0, 33.33) mock_sleep.side_effect = _sleep_fn @_with_timestamp def _step_fn(action): if len(_step_fn.timestamps) % 2 == 0: self.assertEqual(action['action_type'], np.array(action_type.ActionType.LIFT, dtype=np.uint8)) else: self.assertEqual(action['action_type'], np.array(action_type.ActionType.REPEAT, dtype=np.uint8)) _step_fn.timestamps.append(time.time()) return dm_env.transition(reward=1.0, observation=None) _step_fn.timestamps = [] env.step.side_effect = _step_fn timestep = wrapper.step({'action_type': np.array(action_type.ActionType.LIFT, dtype=np.uint8), 'touch_position': np.array([0.123, 0.456])}) self.assertEqual(env.step.call_count, 2) self.assertLen(_step_fn.timestamps, 2) self.assertGreaterEqual(_sleep_fn.timestamp, _step_fn.timestamps[0]) self.assertLessEqual(_sleep_fn.timestamp, _step_fn.timestamps[1]) self.assertEqual(timestep.reward, 2.0)
android_env
positive
def CloseExpression(clean_lines, linenum, pos): """If input points to ( or { or [ or <, finds the position that closes it. If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the linenum/pos that correspond to the closing of the expression. TODO(unknown): cpplint spends a fair bit of time matching parentheses. Ideally we would want to index all opening and closing parentheses once and have CloseExpression be just a simple lookup, but due to preprocessor tricks, this is not so easy. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *past* the closing brace, or (line, len(lines), -1) if we never find a close. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum. """ line = clean_lines.elided[linenum] if line[pos] not in '({[<' or Match('<[<=]', line[pos:]): return (line, clean_lines.NumLines(), -1) <DeepExtract> for i in xrange(pos, len(line)): char = line[i] if char in '([{': [].append(char) elif char == '<': if i > 0 and line[i - 1] == '<': if [] and [][-1] == '<': [].pop() if not []: (end_pos, []) = (-1, None) elif i > 0 and Search('\\boperator\\s*$', line[0:i]): continue else: [].append('<') elif char in ')]}': while [] and [][-1] == '<': [].pop() if not []: (end_pos, []) = (-1, None) if [][-1] == '(' and char == ')' or ([][-1] == '[' and char == ']') or ([][-1] == '{' and char == '}'): [].pop() if not []: (end_pos, []) = (i + 1, None) else: (end_pos, []) = (-1, None) elif char == '>': if i > 0 and (line[i - 1] == '-' or Search('\\boperator\\s*$', line[0:i - 1])): continue if []: if [][-1] == '<': [].pop() if not []: (end_pos, []) = (i + 1, None) elif char == ';': while [] and [][-1] == '<': [].pop() if not []: (end_pos, []) = (-1, None) (end_pos, []) = (-1, []) </DeepExtract> if end_pos > -1: return (line, linenum, end_pos) while stack and linenum < clean_lines.NumLines() - 1: linenum += 1 line = clean_lines.elided[linenum] <DeepExtract> for i in xrange(0, len(line)): char = line[i] if char in '([{': stack.append(char) elif char == '<': if i > 0 and line[i - 1] == '<': if stack and stack[-1] == '<': stack.pop() if not stack: (end_pos, stack) = (-1, None) elif i > 0 and Search('\\boperator\\s*$', line[0:i]): continue else: stack.append('<') elif char in ')]}': while stack and stack[-1] == '<': stack.pop() if not stack: (end_pos, stack) = (-1, None) if stack[-1] == '(' and char == ')' or (stack[-1] == '[' and char == ']') or (stack[-1] == '{' and char == '}'): stack.pop() if not stack: (end_pos, stack) = (i + 1, None) else: (end_pos, stack) = (-1, None) elif char == '>': if i > 0 and (line[i - 1] == '-' or Search('\\boperator\\s*$', line[0:i - 1])): continue if stack: if stack[-1] == '<': stack.pop() if not stack: (end_pos, stack) = (i + 1, None) elif char == ';': while stack and stack[-1] == '<': stack.pop() if not stack: (end_pos, stack) = (-1, None) (end_pos, stack) = (-1, stack) </DeepExtract> if end_pos > -1: return (line, linenum, end_pos) return (line, clean_lines.NumLines(), -1)
def CloseExpression(clean_lines, linenum, pos): """If input points to ( or { or [ or <, finds the position that closes it. If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the linenum/pos that correspond to the closing of the expression. TODO(unknown): cpplint spends a fair bit of time matching parentheses. Ideally we would want to index all opening and closing parentheses once and have CloseExpression be just a simple lookup, but due to preprocessor tricks, this is not so easy. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *past* the closing brace, or (line, len(lines), -1) if we never find a close. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum. """ line = clean_lines.elided[linenum] if line[pos] not in '({[<' or Match('<[<=]', line[pos:]): return (line, clean_lines.NumLines(), -1) for i in xrange(pos, len(line)): char = line[i] if char in '([{': [].append(char) elif char == '<': if i > 0 and line[i - 1] == '<': if [] and [][-1] == '<': [].pop() if not []: (end_pos, []) = (-1, None) elif i > 0 and Search('\\boperator\\s*$', line[0:i]): continue else: [].append('<') elif char in ')]}': while [] and [][-1] == '<': [].pop() if not []: (end_pos, []) = (-1, None) if [][-1] == '(' and char == ')' or ([][-1] == '[' and char == ']') or ([][-1] == '{' and char == '}'): [].pop() if not []: (end_pos, []) = (i + 1, None) else: (end_pos, []) = (-1, None) elif char == '>': if i > 0 and (line[i - 1] == '-' or Search('\\boperator\\s*$', line[0:i - 1])): continue if []: if [][-1] == '<': [].pop() if not []: (end_pos, []) = (i + 1, None) elif char == ';': while [] and [][-1] == '<': [].pop() if not []: (end_pos, []) = (-1, None) (end_pos, []) = (-1, []) if end_pos > -1: return (line, linenum, end_pos) while stack and linenum < clean_lines.NumLines() - 1: linenum += 1 line = clean_lines.elided[linenum] for i in xrange(0, len(line)): char = line[i] if char in '([{': stack.append(char) elif char == '<': if i > 0 and line[i - 1] == '<': if stack and stack[-1] == '<': stack.pop() if not stack: (end_pos, stack) = (-1, None) elif i > 0 and Search('\\boperator\\s*$', line[0:i]): continue else: stack.append('<') elif char in ')]}': while stack and stack[-1] == '<': stack.pop() if not stack: (end_pos, stack) = (-1, None) if stack[-1] == '(' and char == ')' or (stack[-1] == '[' and char == ']') or (stack[-1] == '{' and char == '}'): stack.pop() if not stack: (end_pos, stack) = (i + 1, None) else: (end_pos, stack) = (-1, None) elif char == '>': if i > 0 and (line[i - 1] == '-' or Search('\\boperator\\s*$', line[0:i - 1])): continue if stack: if stack[-1] == '<': stack.pop() if not stack: (end_pos, stack) = (i + 1, None) elif char == ';': while stack and stack[-1] == '<': stack.pop() if not stack: (end_pos, stack) = (-1, None) (end_pos, stack) = (-1, stack) if end_pos > -1: return (line, linenum, end_pos) return (line, clean_lines.NumLines(), -1)
cowry
positive
def get_tool_env(tmp_path: Path, flag_args: List[str], inputs_file: Optional[str]=None, replacement_env: Optional[Mapping[str, str]]=None, extra_env: Optional[Mapping[str, str]]=None, monkeypatch: Optional[pytest.MonkeyPatch]=None, runtime_env_accepts_null: Optional[bool]=None) -> Dict[str, str]: """Get the env vars for a tool's invocation.""" if runtime_env_accepts_null is None: <DeepExtract> global _env_accepts_null if _env_accepts_null is None: result = subprocess.run(['env', '-0'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8') _env_accepts_null = result.returncode == 0 runtime_env_accepts_null = _env_accepts_null </DeepExtract> args = flag_args.copy() if runtime_env_accepts_null: args.append(get_data('tests/env3.cwl')) else: args.append(get_data('tests/env4.cwl')) if inputs_file is not None: args.append(inputs_file) with working_directory(tmp_path): <DeepExtract> stdout = io.StringIO() stderr = io.StringIO() if replacement_env is not None: assert monkeypatch is not None monkeypatch.setattr(os, 'environ', replacement_env) if extra_env is not None: assert monkeypatch is not None for (k, v) in extra_env.items(): monkeypatch.setenv(k, v) try: rc = main(argsl=args, stdout=stdout, stderr=stderr) except SystemExit as e: if isinstance(e.code, int): rc = e.code else: rc = sys.maxsize (rc, stdout, _) = (rc, stdout.getvalue(), stderr.getvalue()) </DeepExtract> assert rc == 0 output = json.loads(stdout) with open(output['env']['path']) as _: return deserialize_env(_.read())
def get_tool_env(tmp_path: Path, flag_args: List[str], inputs_file: Optional[str]=None, replacement_env: Optional[Mapping[str, str]]=None, extra_env: Optional[Mapping[str, str]]=None, monkeypatch: Optional[pytest.MonkeyPatch]=None, runtime_env_accepts_null: Optional[bool]=None) -> Dict[str, str]: """Get the env vars for a tool's invocation.""" if runtime_env_accepts_null is None: global _env_accepts_null if _env_accepts_null is None: result = subprocess.run(['env', '-0'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8') _env_accepts_null = result.returncode == 0 runtime_env_accepts_null = _env_accepts_null args = flag_args.copy() if runtime_env_accepts_null: args.append(get_data('tests/env3.cwl')) else: args.append(get_data('tests/env4.cwl')) if inputs_file is not None: args.append(inputs_file) with working_directory(tmp_path): stdout = io.StringIO() stderr = io.StringIO() if replacement_env is not None: assert monkeypatch is not None monkeypatch.setattr(os, 'environ', replacement_env) if extra_env is not None: assert monkeypatch is not None for (k, v) in extra_env.items(): monkeypatch.setenv(k, v) try: rc = main(argsl=args, stdout=stdout, stderr=stderr) except SystemExit as e: if isinstance(e.code, int): rc = e.code else: rc = sys.maxsize (rc, stdout, _) = (rc, stdout.getvalue(), stderr.getvalue()) assert rc == 0 output = json.loads(stdout) with open(output['env']['path']) as _: return deserialize_env(_.read())
cwltool
positive
def make_globs(logical_names, **kwargs): <DeepExtract> rtn = {'libnames': DEFAULT_LIBNAMES, 'non_recursive_libnames': tuple(), 'ocaml_dirnames': tuple(), 'log': DEFAULT_LOG, 'coqc': 'coqc', 'coq_makefile': 'coq_makefile', 'coqdep': 'coqdep', 'walk_tree': True, 'coqc_args': tuple()} rtn.update(kwargs) if for_makefile: if 'make_coqc' in rtn.keys(): rtn['coqc'] = rtn['make_coqc'] if 'passing_make_coqc' in rtn.keys(): rtn['passing_coqc'] = rtn['passing_make_coqc'] kwargs = rtn </DeepExtract> existing_logical_names = [i for i in logical_names if os.path.isfile(filename_of_lib(i, ext='.v', **kwargs))] if len(existing_logical_names) == 0: return filenames_vo_v_glob = [(filename_of_lib(i, ext='.vo', **kwargs), filename_of_lib(i, ext='.v', **kwargs), filename_of_lib(i, ext='.glob', **kwargs)) for i in existing_logical_names] filenames_vo_v_glob = [(vo_name, v_name, glob_name) for (vo_name, v_name, glob_name) in filenames_vo_v_glob if not (os.path.isfile(glob_name) and os.path.getmtime(glob_name) > os.path.getmtime(v_name))] for (vo_name, v_name, glob_name) in filenames_vo_v_glob: if os.path.isfile(glob_name) and (not os.path.getmtime(glob_name) > os.path.getmtime(v_name)): if os.path.getmtime(v_name) > time.time(): kwargs['log']('WARNING: The file %s comes from the future! (%d > %d)' % (v_name, os.path.getmtime(v_name), time.time()), level=LOG_ALWAYS) <DeepExtract> abs_filename = os.path.abspath(glob_name) cwd = os.path.abspath('.') common = os.path.commonprefix([cwd, abs_filename]) if common != cwd: kwargs['log']('WARNING: Not removing %s (%s) because it resides in a parent (%s) of the current directory (%s)' % (glob_name, abs_filename, common, cwd)) return os.remove(glob_name) </DeepExtract> if os.path.exists(vo_name) and os.path.getmtime(vo_name) >= os.path.getmtime(v_name): <DeepExtract> kwargs = safe_kwargs(fill_kwargs(kwargs)) coqc_prog = get_maybe_passing_arg(kwargs, 'coqc') cmds = [coqc_prog, '-q'] for (physical_name, logical_name) in get_maybe_passing_arg(kwargs, 'libnames'): cmds += ['-R', physical_name, logical_name if logical_name not in ('', "''", '""') else '""'] for (physical_name, logical_name) in get_maybe_passing_arg(kwargs, 'non_recursive_libnames'): cmds += ['-Q', physical_name, logical_name if logical_name not in ('', "''", '""') else '""'] for dirname in get_maybe_passing_arg(kwargs, 'ocaml_dirnames'): cmds += ['-I', dirname] cmds += list(get_maybe_passing_arg(kwargs, 'coqc_args')) (v_file_root, ext) = os.path.splitext(fix_path(v_name)) o_file = os.path.join(tempfile.gettempdir(), os.path.basename(v_file_root) + '.vo') if get_coq_accepts_o(coqc_prog, **kwargs): cmds += ['-o', o_file] else: kwargs['log']("WARNING: Clobbering '%s' because coqc does not support -o" % o_file, level=LOG_ALWAYS) cmds += ['-dump-glob', v_file_root + '.glob', v_file_root + ext] kwargs['log'](' '.join(cmds)) try: p = subprocess.Popen(cmds, stdout=subprocess.PIPE) return p.communicate() finally: if os.path.exists(o_file): os.remove(o_file) </DeepExtract> filenames_vo_v_glob = [(vo_name, v_name, glob_name) for (vo_name, v_name, glob_name) in filenames_vo_v_glob if not (os.path.exists(vo_name) and os.path.getmtime(vo_name) >= os.path.getmtime(v_name))] filenames_v = [v_name for (vo_name, v_name, glob_name) in filenames_vo_v_glob] filenames_glob = [glob_name for (vo_name, v_name, glob_name) in filenames_vo_v_glob] if len(filenames_vo_v_glob) == 0: return extra_filenames_v = get_all_v_files('.', filenames_v) if kwargs['walk_tree'] else [] <DeepExtract> kwargs = safe_kwargs(fill_kwargs(kwargs, 
for_makefile=True)) f = tempfile.NamedTemporaryFile(suffix='.coq', prefix='Makefile', dir='.', delete=False) mkfile = os.path.basename(f.name) f.close() cmds = [kwargs['coq_makefile'], 'COQC', '=', get_maybe_passing_arg(kwargs, 'coqc'), 'COQDEP', '=', kwargs['coqdep'], '-o', mkfile] for (physical_name, logical_name) in get_maybe_passing_arg(kwargs, 'libnames'): cmds += ['-R', physical_name, logical_name if logical_name not in ('', "''", '""') else '""'] for (physical_name, logical_name) in get_maybe_passing_arg(kwargs, 'non_recursive_libnames'): cmds += ['-Q', physical_name, logical_name if logical_name not in ('', "''", '""') else '""'] for dirname in get_maybe_passing_arg(kwargs, 'ocaml_dirnames'): cmds += ['-I', dirname] coq_makefile_help = get_coqc_help(kwargs['coq_makefile'], **kwargs) (grouped_args, unrecognized_args) = group_coq_args_split_recognized(get_maybe_passing_arg(kwargs, 'coqc_args'), coq_makefile_help, is_coq_makefile=True) for args in grouped_args: cmds.extend(args) if unrecognized_args: if coq_makefile_supports_arg(coq_makefile_help): skip_next = False for arg in unrecognized_args: if arg in ('-top', '-topfile'): skip_next = True elif skip_next: skip_next = False elif arg not in BAD_ARGS_FOR_MAKE_GLOB: cmds += ['-arg', shlex_quote(arg)] else: kwargs['log']('WARNING: Unrecognized arguments to coq_makefile: %s' % repr(unrecognized_args)) cmds += list(map(fix_path, tuple(sorted(list(filenames_v) + list(extra_filenames_v))))) kwargs['log'](' '.join(cmds)) try: p_make_makefile = subprocess.Popen(cmds, stdout=subprocess.PIPE) (stdout, stderr) = p_make_makefile.communicate() except OSError as e: error('When attempting to run coq_makefile:') error(repr(e)) error('Failed to run coq_makefile using command line:') error(' '.join(cmds)) error('Perhaps you forgot to add COQBIN to your PATH?') error('Try running coqc on your files to get .glob files, to work around this.') sys.exit(1) keep_error_fragment = [] if get_keep_error_reversed(mkfile, **kwargs) else ['KEEP_ERROR=1'] make_cmds = ['make', '-k', '-f', mkfile] + keep_error_fragment + filenames_glob kwargs['log'](' '.join(make_cmds)) try: p_make = subprocess.Popen(make_cmds, stdin=subprocess.PIPE, stdout=sys.stderr) (stdout_make, stderr_make) = p_make.communicate() finally: for filename in (mkfile, mkfile + '.conf', mkfile + '.d', '.%s.d' % mkfile, '.coqdeps.d'): if os.path.exists(filename): os.remove(filename) </DeepExtract>
def make_globs(logical_names, **kwargs): rtn = {'libnames': DEFAULT_LIBNAMES, 'non_recursive_libnames': tuple(), 'ocaml_dirnames': tuple(), 'log': DEFAULT_LOG, 'coqc': 'coqc', 'coq_makefile': 'coq_makefile', 'coqdep': 'coqdep', 'walk_tree': True, 'coqc_args': tuple()} rtn.update(kwargs) if for_makefile: if 'make_coqc' in rtn.keys(): rtn['coqc'] = rtn['make_coqc'] if 'passing_make_coqc' in rtn.keys(): rtn['passing_coqc'] = rtn['passing_make_coqc'] kwargs = rtn existing_logical_names = [i for i in logical_names if os.path.isfile(filename_of_lib(i, ext='.v', **kwargs))] if len(existing_logical_names) == 0: return filenames_vo_v_glob = [(filename_of_lib(i, ext='.vo', **kwargs), filename_of_lib(i, ext='.v', **kwargs), filename_of_lib(i, ext='.glob', **kwargs)) for i in existing_logical_names] filenames_vo_v_glob = [(vo_name, v_name, glob_name) for (vo_name, v_name, glob_name) in filenames_vo_v_glob if not (os.path.isfile(glob_name) and os.path.getmtime(glob_name) > os.path.getmtime(v_name))] for (vo_name, v_name, glob_name) in filenames_vo_v_glob: if os.path.isfile(glob_name) and (not os.path.getmtime(glob_name) > os.path.getmtime(v_name)): if os.path.getmtime(v_name) > time.time(): kwargs['log']('WARNING: The file %s comes from the future! (%d > %d)' % (v_name, os.path.getmtime(v_name), time.time()), level=LOG_ALWAYS) abs_filename = os.path.abspath(glob_name) cwd = os.path.abspath('.') common = os.path.commonprefix([cwd, abs_filename]) if common != cwd: kwargs['log']('WARNING: Not removing %s (%s) because it resides in a parent (%s) of the current directory (%s)' % (glob_name, abs_filename, common, cwd)) return os.remove(glob_name) if os.path.exists(vo_name) and os.path.getmtime(vo_name) >= os.path.getmtime(v_name): kwargs = safe_kwargs(fill_kwargs(kwargs)) coqc_prog = get_maybe_passing_arg(kwargs, 'coqc') cmds = [coqc_prog, '-q'] for (physical_name, logical_name) in get_maybe_passing_arg(kwargs, 'libnames'): cmds += ['-R', physical_name, logical_name if logical_name not in ('', "''", '""') else '""'] for (physical_name, logical_name) in get_maybe_passing_arg(kwargs, 'non_recursive_libnames'): cmds += ['-Q', physical_name, logical_name if logical_name not in ('', "''", '""') else '""'] for dirname in get_maybe_passing_arg(kwargs, 'ocaml_dirnames'): cmds += ['-I', dirname] cmds += list(get_maybe_passing_arg(kwargs, 'coqc_args')) (v_file_root, ext) = os.path.splitext(fix_path(v_name)) o_file = os.path.join(tempfile.gettempdir(), os.path.basename(v_file_root) + '.vo') if get_coq_accepts_o(coqc_prog, **kwargs): cmds += ['-o', o_file] else: kwargs['log']("WARNING: Clobbering '%s' because coqc does not support -o" % o_file, level=LOG_ALWAYS) cmds += ['-dump-glob', v_file_root + '.glob', v_file_root + ext] kwargs['log'](' '.join(cmds)) try: p = subprocess.Popen(cmds, stdout=subprocess.PIPE) return p.communicate() finally: if os.path.exists(o_file): os.remove(o_file) filenames_vo_v_glob = [(vo_name, v_name, glob_name) for (vo_name, v_name, glob_name) in filenames_vo_v_glob if not (os.path.exists(vo_name) and os.path.getmtime(vo_name) >= os.path.getmtime(v_name))] filenames_v = [v_name for (vo_name, v_name, glob_name) in filenames_vo_v_glob] filenames_glob = [glob_name for (vo_name, v_name, glob_name) in filenames_vo_v_glob] if len(filenames_vo_v_glob) == 0: return extra_filenames_v = get_all_v_files('.', filenames_v) if kwargs['walk_tree'] else [] kwargs = safe_kwargs(fill_kwargs(kwargs, for_makefile=True)) f = tempfile.NamedTemporaryFile(suffix='.coq', prefix='Makefile', dir='.', delete=False) mkfile 
= os.path.basename(f.name) f.close() cmds = [kwargs['coq_makefile'], 'COQC', '=', get_maybe_passing_arg(kwargs, 'coqc'), 'COQDEP', '=', kwargs['coqdep'], '-o', mkfile] for (physical_name, logical_name) in get_maybe_passing_arg(kwargs, 'libnames'): cmds += ['-R', physical_name, logical_name if logical_name not in ('', "''", '""') else '""'] for (physical_name, logical_name) in get_maybe_passing_arg(kwargs, 'non_recursive_libnames'): cmds += ['-Q', physical_name, logical_name if logical_name not in ('', "''", '""') else '""'] for dirname in get_maybe_passing_arg(kwargs, 'ocaml_dirnames'): cmds += ['-I', dirname] coq_makefile_help = get_coqc_help(kwargs['coq_makefile'], **kwargs) (grouped_args, unrecognized_args) = group_coq_args_split_recognized(get_maybe_passing_arg(kwargs, 'coqc_args'), coq_makefile_help, is_coq_makefile=True) for args in grouped_args: cmds.extend(args) if unrecognized_args: if coq_makefile_supports_arg(coq_makefile_help): skip_next = False for arg in unrecognized_args: if arg in ('-top', '-topfile'): skip_next = True elif skip_next: skip_next = False elif arg not in BAD_ARGS_FOR_MAKE_GLOB: cmds += ['-arg', shlex_quote(arg)] else: kwargs['log']('WARNING: Unrecognized arguments to coq_makefile: %s' % repr(unrecognized_args)) cmds += list(map(fix_path, tuple(sorted(list(filenames_v) + list(extra_filenames_v))))) kwargs['log'](' '.join(cmds)) try: p_make_makefile = subprocess.Popen(cmds, stdout=subprocess.PIPE) (stdout, stderr) = p_make_makefile.communicate() except OSError as e: error('When attempting to run coq_makefile:') error(repr(e)) error('Failed to run coq_makefile using command line:') error(' '.join(cmds)) error('Perhaps you forgot to add COQBIN to your PATH?') error('Try running coqc on your files to get .glob files, to work around this.') sys.exit(1) keep_error_fragment = [] if get_keep_error_reversed(mkfile, **kwargs) else ['KEEP_ERROR=1'] make_cmds = ['make', '-k', '-f', mkfile] + keep_error_fragment + filenames_glob kwargs['log'](' '.join(make_cmds)) try: p_make = subprocess.Popen(make_cmds, stdin=subprocess.PIPE, stdout=sys.stderr) (stdout_make, stderr_make) = p_make.communicate() finally: for filename in (mkfile, mkfile + '.conf', mkfile + '.d', '.%s.d' % mkfile, '.coqdeps.d'): if os.path.exists(filename): os.remove(filename) </DeepExtract>
coq-tools
positive
def encode(self, chunk: Chunk, palette: AnyNDArray, max_world_version: VersionIdentifierTuple, bounds: Tuple[int, int]) -> Dict[bytes, Optional[bytes]]: chunk_data = chunk.misc.get('bedrock_chunk_data', {}) if isinstance(chunk_data, ChunkData): pass elif isinstance(chunk_data, dict): chunk_data = ChunkData({k: v for (k, v) in chunk_data.items() if isinstance(k, bytes) and isinstance(v, bytes)}) else: chunk_data = ChunkData() chunk_data: ChunkData if self.chunk_version is not None: chunk_data[b'v' if self.chunk_version <= 20 else b','] = bytes([self.chunk_version]) <DeepExtract> raise NotImplementedError </DeepExtract> min_y = bounds[0] // 16 for (cy, sub_chunk) in terrain.items(): chunk_data[b'/' + self._get_sub_chunk_storage_byte(cy, min_y)] = sub_chunk if self._features['finalised_state'] == 'int0-2': chunk_data[b'6'] = struct.pack('<i', chunk.status.as_type(StatusFormats.Bedrock)) if self._features['data_2d'] == 'height512|biome256': d2d: List[bytes] = [self._encode_height(chunk)] chunk.biomes.convert_to_2d() d2d.append(chunk.biomes.astype('uint8').T.tobytes()) chunk_data[b'-'] = b''.join(d2d) if b'+' in chunk_data: chunk_data[b'+'] = None elif self._features['data_2d'] == 'height512|biome4096': <DeepExtract> d2d: List[bytes] = [self._encode_height(chunk)] chunk.biomes.convert_to_3d() highest = next((cy for cy in range(bounds[1] >> 4, bounds[0] >> 4 - 1, -1) if cy in chunk.biomes), None) if highest is None: chunk.biomes.create_section(bounds[0] >> 4) else: for cy in range(highest - 1, bounds[0] >> 4 - 1, -1): if cy not in chunk.biomes: chunk.biomes.add_section(cy, numpy.repeat(chunk.biomes.get_section(cy + 1)[:, :1, :], 4, 1)) for cy in range(bounds[0] >> 4, bounds[0] >> 4 + 25): if cy in chunk.biomes: arr = chunk.biomes.get_section(cy) (palette, arr_uniq) = numpy.unique(arr, return_inverse=True) if len(palette) == 1: d2d.append(b'\x01') else: d2d.append(self._encode_packed_array(arr_uniq.reshape(arr.shape)[_scale_grid])) d2d.append(struct.pack('<I', len(palette))) d2d.append(palette.astype('<i4').tobytes()) else: d2d.append(b'\xff') chunk_data[b'+'] = b''.join(d2d) </DeepExtract> if b'-' in chunk_data: chunk_data[b'-'] = None if self._features['block_entities'] == '31list': <DeepExtract> entities_out = [] for entity in chunk.block_entities: nbt = self._encode_block_entity(entity, self._features['block_entity_format'], self._features['block_entity_coord_format']) if nbt is not None: entities_out.append(nbt) block_entities_out = entities_out </DeepExtract> if block_entities_out: <DeepExtract> chunk_data[b'1'] = b''.join([nbt.save_to(compressed=False, little_endian=True, string_encoder=utf8_escape_encoder) for nbt in block_entities_out if isinstance(nbt, NamedTag)]) </DeepExtract> else: chunk_data[b'1'] = None if self._features['entities'] == '32list': def save_entities(entities_out): if entities_out: <DeepExtract> chunk_data[b'2'] = b''.join([nbt.save_to(compressed=False, little_endian=True, string_encoder=utf8_escape_encoder) for nbt in entities_out if isinstance(nbt, NamedTag)]) </DeepExtract> else: chunk_data[b'2'] = None if amulet.entity_support: <DeepExtract> if self._encode_entity_list(chunk.entities): chunk_data[b'2'] = self._pack_nbt_list(self._encode_entity_list(chunk.entities)) else: chunk_data[b'2'] = None </DeepExtract> else: try: if chunk._native_version[0] == 'bedrock': <DeepExtract> if self._encode_entity_list(chunk._native_entities): chunk_data[b'2'] = self._pack_nbt_list(self._encode_entity_list(chunk._native_entities)) else: chunk_data[b'2'] = None </DeepExtract> 
except: pass elif self._features['entities'] == 'actor': chunk_data.entity_actor.clear() if amulet.entity_support: chunk_data.entity_actor.extend(self._encode_entity_list(chunk.entities)) else: try: if chunk._native_version[0] == 'bedrock': chunk_data.entity_actor.extend(self._encode_entity_list(chunk._native_entities)) except: pass return chunk_data
def encode(self, chunk: Chunk, palette: AnyNDArray, max_world_version: VersionIdentifierTuple, bounds: Tuple[int, int]) -> Dict[bytes, Optional[bytes]]: chunk_data = chunk.misc.get('bedrock_chunk_data', {}) if isinstance(chunk_data, ChunkData): pass elif isinstance(chunk_data, dict): chunk_data = ChunkData({k: v for (k, v) in chunk_data.items() if isinstance(k, bytes) and isinstance(v, bytes)}) else: chunk_data = ChunkData() chunk_data: ChunkData if self.chunk_version is not None: chunk_data[b'v' if self.chunk_version <= 20 else b','] = bytes([self.chunk_version]) raise NotImplementedError min_y = bounds[0] // 16 for (cy, sub_chunk) in terrain.items(): chunk_data[b'/' + self._get_sub_chunk_storage_byte(cy, min_y)] = sub_chunk if self._features['finalised_state'] == 'int0-2': chunk_data[b'6'] = struct.pack('<i', chunk.status.as_type(StatusFormats.Bedrock)) if self._features['data_2d'] == 'height512|biome256': d2d: List[bytes] = [self._encode_height(chunk)] chunk.biomes.convert_to_2d() d2d.append(chunk.biomes.astype('uint8').T.tobytes()) chunk_data[b'-'] = b''.join(d2d) if b'+' in chunk_data: chunk_data[b'+'] = None elif self._features['data_2d'] == 'height512|biome4096': d2d: List[bytes] = [self._encode_height(chunk)] chunk.biomes.convert_to_3d() highest = next((cy for cy in range(bounds[1] >> 4, bounds[0] >> 4 - 1, -1) if cy in chunk.biomes), None) if highest is None: chunk.biomes.create_section(bounds[0] >> 4) else: for cy in range(highest - 1, bounds[0] >> 4 - 1, -1): if cy not in chunk.biomes: chunk.biomes.add_section(cy, numpy.repeat(chunk.biomes.get_section(cy + 1)[:, :1, :], 4, 1)) for cy in range(bounds[0] >> 4, bounds[0] >> 4 + 25): if cy in chunk.biomes: arr = chunk.biomes.get_section(cy) (palette, arr_uniq) = numpy.unique(arr, return_inverse=True) if len(palette) == 1: d2d.append(b'\x01') else: d2d.append(self._encode_packed_array(arr_uniq.reshape(arr.shape)[_scale_grid])) d2d.append(struct.pack('<I', len(palette))) d2d.append(palette.astype('<i4').tobytes()) else: d2d.append(b'\xff') chunk_data[b'+'] = b''.join(d2d) if b'-' in chunk_data: chunk_data[b'-'] = None if self._features['block_entities'] == '31list': entities_out = [] for entity in chunk.block_entities: nbt = self._encode_block_entity(entity, self._features['block_entity_format'], self._features['block_entity_coord_format']) if nbt is not None: entities_out.append(nbt) block_entities_out = entities_out if block_entities_out: chunk_data[b'1'] = b''.join([nbt.save_to(compressed=False, little_endian=True, string_encoder=utf8_escape_encoder) for nbt in block_entities_out if isinstance(nbt, NamedTag)]) else: chunk_data[b'1'] = None if self._features['entities'] == '32list': def save_entities(entities_out): if entities_out: chunk_data[b'2'] = b''.join([nbt.save_to(compressed=False, little_endian=True, string_encoder=utf8_escape_encoder) for nbt in entities_out if isinstance(nbt, NamedTag)]) else: chunk_data[b'2'] = None if amulet.entity_support: if self._encode_entity_list(chunk.entities): chunk_data[b'2'] = self._pack_nbt_list(self._encode_entity_list(chunk.entities)) else: chunk_data[b'2'] = None else: try: if chunk._native_version[0] == 'bedrock': if self._encode_entity_list(chunk._native_entities): chunk_data[b'2'] = self._pack_nbt_list(self._encode_entity_list(chunk._native_entities)) else: chunk_data[b'2'] = None except: pass elif self._features['entities'] == 'actor': chunk_data.entity_actor.clear() if amulet.entity_support: chunk_data.entity_actor.extend(self._encode_entity_list(chunk.entities)) else: try: if 
chunk._native_version[0] == 'bedrock': chunk_data.entity_actor.extend(self._encode_entity_list(chunk._native_entities)) except: pass return chunk_data
Amulet-Core
positive
def patch_replication_callback(data_parallel): """ Monkey-patch an existing `DataParallel` object. Add the replication callback. Useful when you have customized `DataParallel` implementation. Examples: > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) > patch_replication_callback(sync_bn) # this is equivalent to > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) """ assert isinstance(data_parallel, DataParallel) old_replicate = data_parallel.replicate @functools.wraps(old_replicate) def new_replicate(module, device_ids): modules = old_replicate(module, device_ids) <DeepExtract> master_copy = modules[0] nr_modules = len(list(master_copy.modules())) ctxs = [CallbackContext() for _ in range(nr_modules)] for (i, module) in enumerate(modules): for (j, m) in enumerate(module.modules()): if hasattr(m, '__data_parallel_replicate__'): m.__data_parallel_replicate__(ctxs[j], i) </DeepExtract> return modules data_parallel.replicate = new_replicate
def patch_replication_callback(data_parallel): """ Monkey-patch an existing `DataParallel` object. Add the replication callback. Useful when you have customized `DataParallel` implementation. Examples: > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) > patch_replication_callback(sync_bn) # this is equivalent to > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) """ assert isinstance(data_parallel, DataParallel) old_replicate = data_parallel.replicate @functools.wraps(old_replicate) def new_replicate(module, device_ids): modules = old_replicate(module, device_ids) master_copy = modules[0] nr_modules = len(list(master_copy.modules())) ctxs = [CallbackContext() for _ in range(nr_modules)] for (i, module) in enumerate(modules): for (j, m) in enumerate(module.modules()): if hasattr(m, '__data_parallel_replicate__'): m.__data_parallel_replicate__(ctxs[j], i) return modules data_parallel.replicate = new_replicate
CAG_UDA
positive
def process_matrix(self, arr: np.ndarray) -> np.ndarray: properties = self.properties transform = properties['transform'] normalize = properties['normalize'] gaussian_sigma = properties['gaussian_sigma'] process_func = properties['process_func'] if transform != 'no': <DeepExtract> if transform == 'log10': arr = np.log10(arr) elif transform == 'log2': arr = np.log2(arr) elif transform == 'log': arr = np.log(arr) arr = arr </DeepExtract> if normalize != 'no': <DeepExtract> if cis: arr = arr.normalize_cis(normalize, method) else: arr = arr.normalize_trans(normalize, method) </DeepExtract> if gaussian_sigma != 'no': <DeepExtract> arr = gaussian_filter(arr, gaussian_sigma) arr = arr </DeepExtract> if process_func != 'no': func = process_func try: if callable(func): arr = func(arr) elif isinstance(func, str): func = eval(func) arr = func(arr) else: raise ValueError('process_func') except Exception as e: log.error(str(e)) raise ValueError('process_func should a one argument function receive a matrix return a processed matrix.') return arr
def process_matrix(self, arr: np.ndarray) -> np.ndarray: properties = self.properties transform = properties['transform'] normalize = properties['normalize'] gaussian_sigma = properties['gaussian_sigma'] process_func = properties['process_func'] if transform != 'no': if transform == 'log10': arr = np.log10(arr) elif transform == 'log2': arr = np.log2(arr) elif transform == 'log': arr = np.log(arr) arr = arr if normalize != 'no': if cis: arr = arr.normalize_cis(normalize, method) else: arr = arr.normalize_trans(normalize, method) if gaussian_sigma != 'no': arr = gaussian_filter(arr, gaussian_sigma) arr = arr if process_func != 'no': func = process_func try: if callable(func): arr = func(arr) elif isinstance(func, str): func = eval(func) arr = func(arr) else: raise ValueError('process_func') except Exception as e: log.error(str(e)) raise ValueError('process_func should a one argument function receive a matrix return a processed matrix.') return arr
CoolBox
positive
@require_admin() def main(request): if request.method == 'GET': cats = session.query(Category).all() return display_plain_template('admin/new_page', cats=cats) if 'submit' in request.form: <DeepExtract> page = Page(request.form['title'], request.form['content'], request.form['excerpt'], category_id=request.form['cat_id']) page.status = 'draft' page.request_path = request.form['request_path'] draft_tags = request.form['tags'].strip() if draft_tags: page.set_meta('draft_tags', draft_tags) page.save() page = page </DeepExtract> return redirect('/admin/edit_page/%d' % page.page_id)
@require_admin() def main(request): if request.method == 'GET': cats = session.query(Category).all() return display_plain_template('admin/new_page', cats=cats) if 'submit' in request.form: page = Page(request.form['title'], request.form['content'], request.form['excerpt'], category_id=request.form['cat_id']) page.status = 'draft' page.request_path = request.form['request_path'] draft_tags = request.form['tags'].strip() if draft_tags: page.set_meta('draft_tags', draft_tags) page.save() page = page return redirect('/admin/edit_page/%d' % page.page_id)
catonmat.net
positive
def HasCircle(self): """ check whether the dependency graph has circles Returns : True : dependency graph has circles False : dependency graph doesn't has circles """ root_node = self._root self._checked_nodes.append(root_node.module.module_cvspath) <DeepExtract> for kid in root_node.Children(): if kid.module.module_cvspath in self._checked_nodes: self._checked_nodes.append(kid.module.module_cvspath) ret = True self._checked_nodes.append(kid.module.module_cvspath) ret = self._has_circle(kid) if ret: ret = True self._checked_nodes.pop() ret = False </DeepExtract> msg = '' if ret: msg = self._checked_nodes[0] for i in range(1, len(self._checked_nodes)): msg = msg + ' -> ' + self._checked_nodes[i] return (ret, msg)
def HasCircle(self): """ check whether the dependency graph has circles Returns : True : dependency graph has circles False : dependency graph doesn't has circles """ root_node = self._root self._checked_nodes.append(root_node.module.module_cvspath) for kid in root_node.Children(): if kid.module.module_cvspath in self._checked_nodes: self._checked_nodes.append(kid.module.module_cvspath) ret = True self._checked_nodes.append(kid.module.module_cvspath) ret = self._has_circle(kid) if ret: ret = True self._checked_nodes.pop() ret = False msg = '' if ret: msg = self._checked_nodes[0] for i in range(1, len(self._checked_nodes)): msg = msg + ' -> ' + self._checked_nodes[i] return (ret, msg)
broc
positive
def forward(self, to_predict: torch.Tensor, decomposition_lengths: Tuple[List]) -> torch.Tensor: """ Callable method to aggregate the byte-pair embeddings from decomposed words. Args: to_predict (~torch.Tensor): The address to extract the embedding on. decomposition_lengths (tuple[list]) : The decomposition lengths of the address components. Return: The embedded address vector tensor. """ device = to_predict.device batch_size = to_predict.size(0) embeddings = torch.zeros(to_predict.size(1), to_predict.size(0), int(to_predict.size(3) / self.maxpool_kernel_size), device=device) to_predict = to_predict.transpose(0, 1).float() for i in range(to_predict.size(0)): lengths = [] for decomposition_length in decomposition_lengths: lengths.append(decomposition_length[i]) packed_sequence = pack_padded_sequence(to_predict[i], lengths, batch_first=True, enforce_sorted=False) (packed_output, _) = self.model(packed_sequence) (padded_output, padded_output_lengths) = pad_packed_sequence(packed_output, batch_first=True) word_context = torch.zeros(padded_output.size(0), padded_output.size(2), device=device) for j in range(batch_size): word_context[j] = padded_output[j, padded_output_lengths[j] - 1, :] projection_output = self.projection_layer(word_context) if self.maxpooling_layer is not None: <DeepExtract> pooled_output = self.maxpooling_layer(projection_output.view(1, projection_output.size(0), projection_output.size(1))) projection_output = pooled_output.view(pooled_output.size(1), pooled_output.size(2)) projection_output = projection_output </DeepExtract> embeddings[i] = projection_output return embeddings.transpose(0, 1)
def forward(self, to_predict: torch.Tensor, decomposition_lengths: Tuple[List]) -> torch.Tensor: """ Callable method to aggregate the byte-pair embeddings from decomposed words. Args: to_predict (~torch.Tensor): The address to extract the embedding on. decomposition_lengths (tuple[list]) : The decomposition lengths of the address components. Return: The embedded address vector tensor. """ device = to_predict.device batch_size = to_predict.size(0) embeddings = torch.zeros(to_predict.size(1), to_predict.size(0), int(to_predict.size(3) / self.maxpool_kernel_size), device=device) to_predict = to_predict.transpose(0, 1).float() for i in range(to_predict.size(0)): lengths = [] for decomposition_length in decomposition_lengths: lengths.append(decomposition_length[i]) packed_sequence = pack_padded_sequence(to_predict[i], lengths, batch_first=True, enforce_sorted=False) (packed_output, _) = self.model(packed_sequence) (padded_output, padded_output_lengths) = pad_packed_sequence(packed_output, batch_first=True) word_context = torch.zeros(padded_output.size(0), padded_output.size(2), device=device) for j in range(batch_size): word_context[j] = padded_output[j, padded_output_lengths[j] - 1, :] projection_output = self.projection_layer(word_context) if self.maxpooling_layer is not None: pooled_output = self.maxpooling_layer(projection_output.view(1, projection_output.size(0), projection_output.size(1))) projection_output = pooled_output.view(pooled_output.size(1), pooled_output.size(2)) projection_output = projection_output embeddings[i] = projection_output return embeddings.transpose(0, 1)
deepparse
positive
def _evict_or_delete_pods(self, node_name: str, pods: List[KubernetesPod], disable_eviction: bool) -> bool: all_done = True action_name = 'deleted' if disable_eviction else 'evicted' logger.info(f'{len(pods)} pods being {action_name} on {node_name}') for pod in pods: try: if disable_eviction: <DeepExtract> self._core_api.delete_namespaced_pod(name=pod.metadata.name, namespace=pod.metadata.namespace, propagation_policy=PROPAGATION_POLICY) </DeepExtract> else: <DeepExtract> self._core_api.create_namespaced_pod_eviction(name=pod.metadata.name, namespace=pod.metadata.namespace, body=V1beta1Eviction(metadata=V1ObjectMeta(name=pod.metadata.name, namespace=pod.metadata.namespace), delete_options=V1DeleteOptions(propagation_policy=PROPAGATION_POLICY))) </DeepExtract> logger.info(f'{pod.metadata.name} ({pod.metadata.namespace}) was {action_name} on {node_name}') except ApiException as e: logger.warning(f"{pod.metadata.name} ({pod.metadata.namespace}) couldn't be {action_name} on {node_name}:{e.status}-{e.reason}") if e.status != NOT_FOUND_STATUS: all_done = False return all_done
def _evict_or_delete_pods(self, node_name: str, pods: List[KubernetesPod], disable_eviction: bool) -> bool: all_done = True action_name = 'deleted' if disable_eviction else 'evicted' logger.info(f'{len(pods)} pods being {action_name} on {node_name}') for pod in pods: try: if disable_eviction: self._core_api.delete_namespaced_pod(name=pod.metadata.name, namespace=pod.metadata.namespace, propagation_policy=PROPAGATION_POLICY) else: self._core_api.create_namespaced_pod_eviction(name=pod.metadata.name, namespace=pod.metadata.namespace, body=V1beta1Eviction(metadata=V1ObjectMeta(name=pod.metadata.name, namespace=pod.metadata.namespace), delete_options=V1DeleteOptions(propagation_policy=PROPAGATION_POLICY))) logger.info(f'{pod.metadata.name} ({pod.metadata.namespace}) was {action_name} on {node_name}') except ApiException as e: logger.warning(f"{pod.metadata.name} ({pod.metadata.namespace}) couldn't be {action_name} on {node_name}:{e.status}-{e.reason}") if e.status != NOT_FOUND_STATUS: all_done = False return all_done
clusterman
positive
def extract_parameters(full_definitions, overwritten=None, incoming_path=''): container = {} if overwritten is None: overwritten = {} for parameter in full_definitions: param_type = parameter['type'] param_subtype = parameter['subtype'] if 'subtype' in parameter else None if 'defaultValue' in parameter: defaultValue = parameter['defaultValue'] elif 'default' in parameter: defaultValue = parameter['default'] else: defaultValue = None value = defaultValue if 'value' in parameter: value = parameter['value'] name = parameter['name'] path = (incoming_path + '.' + name).strip('.') if path in overwritten: value = overwritten[path] if param_type == 'string': container[name] = str(value) if param_type == 'number': if param_subtype == 'int': container[name] = int(value) else: container[name] = float(value) if param_type == 'boolean': container[name] = bool(value) if param_type == 'array': container[name] = value if param_type == 'group': <DeepExtract> container = {} if overwritten is None: overwritten = {} for parameter in parameter['children']: param_type = parameter['type'] param_subtype = parameter['subtype'] if 'subtype' in parameter else None if 'defaultValue' in parameter: defaultValue = parameter['defaultValue'] elif 'default' in parameter: defaultValue = parameter['default'] else: defaultValue = None value = defaultValue if 'value' in parameter: value = parameter['value'] name = parameter['name'] path = (path + '.' + name).strip('.') if path in overwritten: value = overwritten[path] if param_type == 'string': container[name] = str(value) if param_type == 'number': if param_subtype == 'int': container[name] = int(value) else: container[name] = float(value) if param_type == 'boolean': container[name] = bool(value) if param_type == 'array': container[name] = value if param_type == 'group': container[name] = extract_parameters(parameter['children'], overwritten, path) if param_type == 'choice_group': if '' == value: continue if isinstance(value, six.string_types) and value not in parameter['children']: found_names = [] for (idx, child) in enumerate(parameter['children']): found_names.append(child['name']) if value == child['name']: value = idx break if isinstance(value, six.string_types): names = ', '.join(found_names) raise Exception(str(value) + ' is not available in ' + path + '. ' + names) value_name = parameter['children'][value]['name'] container[name] = {'$value': value_name} container[name][value_name] = extract_parameters(parameter['children'][value]['children'], overwritten, path) if param_type == 'choice_string' or param_type == 'choice_number': if path in overwritten: for (idx, child) in enumerate(parameter['children']): if overwritten[path] == child['value']: value = idx break if value is None or (value < 0 or value >= len(parameter['children'])): value = 0 container[name] = parameter['children'][value]['value'] container[name] = container </DeepExtract> if param_type == 'choice_group': if '' == value: continue if isinstance(value, six.string_types) and value not in parameter['children']: found_names = [] for (idx, child) in enumerate(parameter['children']): found_names.append(child['name']) if value == child['name']: value = idx break if isinstance(value, six.string_types): names = ', '.join(found_names) raise Exception(str(value) + ' is not available in ' + path + '. 
' + names) value_name = parameter['children'][value]['name'] container[name] = {'$value': value_name} <DeepExtract> container = {} if overwritten is None: overwritten = {} for parameter in parameter['children'][value]['children']: param_type = parameter['type'] param_subtype = parameter['subtype'] if 'subtype' in parameter else None if 'defaultValue' in parameter: defaultValue = parameter['defaultValue'] elif 'default' in parameter: defaultValue = parameter['default'] else: defaultValue = None value = defaultValue if 'value' in parameter: value = parameter['value'] name = parameter['name'] path = (path + '.' + name).strip('.') if path in overwritten: value = overwritten[path] if param_type == 'string': container[name] = str(value) if param_type == 'number': if param_subtype == 'int': container[name] = int(value) else: container[name] = float(value) if param_type == 'boolean': container[name] = bool(value) if param_type == 'array': container[name] = value if param_type == 'group': container[name] = extract_parameters(parameter['children'], overwritten, path) if param_type == 'choice_group': if '' == value: continue if isinstance(value, six.string_types) and value not in parameter['children']: found_names = [] for (idx, child) in enumerate(parameter['children']): found_names.append(child['name']) if value == child['name']: value = idx break if isinstance(value, six.string_types): names = ', '.join(found_names) raise Exception(str(value) + ' is not available in ' + path + '. ' + names) value_name = parameter['children'][value]['name'] container[name] = {'$value': value_name} container[name][value_name] = extract_parameters(parameter['children'][value]['children'], overwritten, path) if param_type == 'choice_string' or param_type == 'choice_number': if path in overwritten: for (idx, child) in enumerate(parameter['children']): if overwritten[path] == child['value']: value = idx break if value is None or (value < 0 or value >= len(parameter['children'])): value = 0 container[name] = parameter['children'][value]['value'] container[name][value_name] = container </DeepExtract> if param_type == 'choice_string' or param_type == 'choice_number': if path in overwritten: for (idx, child) in enumerate(parameter['children']): if overwritten[path] == child['value']: value = idx break if value is None or (value < 0 or value >= len(parameter['children'])): value = 0 container[name] = parameter['children'][value]['value'] return container
def extract_parameters(full_definitions, overwritten=None, incoming_path=''): container = {} if overwritten is None: overwritten = {} for parameter in full_definitions: param_type = parameter['type'] param_subtype = parameter['subtype'] if 'subtype' in parameter else None if 'defaultValue' in parameter: defaultValue = parameter['defaultValue'] elif 'default' in parameter: defaultValue = parameter['default'] else: defaultValue = None value = defaultValue if 'value' in parameter: value = parameter['value'] name = parameter['name'] path = (incoming_path + '.' + name).strip('.') if path in overwritten: value = overwritten[path] if param_type == 'string': container[name] = str(value) if param_type == 'number': if param_subtype == 'int': container[name] = int(value) else: container[name] = float(value) if param_type == 'boolean': container[name] = bool(value) if param_type == 'array': container[name] = value if param_type == 'group': container = {} if overwritten is None: overwritten = {} for parameter in parameter['children']: param_type = parameter['type'] param_subtype = parameter['subtype'] if 'subtype' in parameter else None if 'defaultValue' in parameter: defaultValue = parameter['defaultValue'] elif 'default' in parameter: defaultValue = parameter['default'] else: defaultValue = None value = defaultValue if 'value' in parameter: value = parameter['value'] name = parameter['name'] path = (path + '.' + name).strip('.') if path in overwritten: value = overwritten[path] if param_type == 'string': container[name] = str(value) if param_type == 'number': if param_subtype == 'int': container[name] = int(value) else: container[name] = float(value) if param_type == 'boolean': container[name] = bool(value) if param_type == 'array': container[name] = value if param_type == 'group': container[name] = extract_parameters(parameter['children'], overwritten, path) if param_type == 'choice_group': if '' == value: continue if isinstance(value, six.string_types) and value not in parameter['children']: found_names = [] for (idx, child) in enumerate(parameter['children']): found_names.append(child['name']) if value == child['name']: value = idx break if isinstance(value, six.string_types): names = ', '.join(found_names) raise Exception(str(value) + ' is not available in ' + path + '. ' + names) value_name = parameter['children'][value]['name'] container[name] = {'$value': value_name} container[name][value_name] = extract_parameters(parameter['children'][value]['children'], overwritten, path) if param_type == 'choice_string' or param_type == 'choice_number': if path in overwritten: for (idx, child) in enumerate(parameter['children']): if overwritten[path] == child['value']: value = idx break if value is None or (value < 0 or value >= len(parameter['children'])): value = 0 container[name] = parameter['children'][value]['value'] container[name] = container if param_type == 'choice_group': if '' == value: continue if isinstance(value, six.string_types) and value not in parameter['children']: found_names = [] for (idx, child) in enumerate(parameter['children']): found_names.append(child['name']) if value == child['name']: value = idx break if isinstance(value, six.string_types): names = ', '.join(found_names) raise Exception(str(value) + ' is not available in ' + path + '. 
' + names) value_name = parameter['children'][value]['name'] container[name] = {'$value': value_name} container = {} if overwritten is None: overwritten = {} for parameter in parameter['children'][value]['children']: param_type = parameter['type'] param_subtype = parameter['subtype'] if 'subtype' in parameter else None if 'defaultValue' in parameter: defaultValue = parameter['defaultValue'] elif 'default' in parameter: defaultValue = parameter['default'] else: defaultValue = None value = defaultValue if 'value' in parameter: value = parameter['value'] name = parameter['name'] path = (path + '.' + name).strip('.') if path in overwritten: value = overwritten[path] if param_type == 'string': container[name] = str(value) if param_type == 'number': if param_subtype == 'int': container[name] = int(value) else: container[name] = float(value) if param_type == 'boolean': container[name] = bool(value) if param_type == 'array': container[name] = value if param_type == 'group': container[name] = extract_parameters(parameter['children'], overwritten, path) if param_type == 'choice_group': if '' == value: continue if isinstance(value, six.string_types) and value not in parameter['children']: found_names = [] for (idx, child) in enumerate(parameter['children']): found_names.append(child['name']) if value == child['name']: value = idx break if isinstance(value, six.string_types): names = ', '.join(found_names) raise Exception(str(value) + ' is not available in ' + path + '. ' + names) value_name = parameter['children'][value]['name'] container[name] = {'$value': value_name} container[name][value_name] = extract_parameters(parameter['children'][value]['children'], overwritten, path) if param_type == 'choice_string' or param_type == 'choice_number': if path in overwritten: for (idx, child) in enumerate(parameter['children']): if overwritten[path] == child['value']: value = idx break if value is None or (value < 0 or value >= len(parameter['children'])): value = 0 container[name] = parameter['children'][value]['value'] container[name][value_name] = container if param_type == 'choice_string' or param_type == 'choice_number': if path in overwritten: for (idx, child) in enumerate(parameter['children']): if overwritten[path] == child['value']: value = idx break if value is None or (value < 0 or value >= len(parameter['children'])): value = 0 container[name] = parameter['children'][value]['value'] return container
aetros-cli
positive
def _cache_add(values: Union[Sequence[float], np.ndarray], logposterior: LogPosterior, weight: float=1): """ Adds the given point to the cache. Dumps and resets the cache if full. """ if self._cache_last == self.cache_size - 1: <DeepExtract> if self._cache_last == -1: return self._enlarge(self._cache_last + 1) self._data.iloc[len(self._data) - (self._cache_last + 1):len(self._data)] = self._cache[:self._cache_last + 1] self._cache_reset() </DeepExtract> <DeepExtract> self._cache[self._cache_last + 1, self._icol[OutPar.weight]] = weight if weight is not None else 1 self._cache[self._cache_last + 1, self._icol[OutPar.minuslogpost]] = -apply_temperature(logposterior.logpost, self.temperature) for (name, value) in zip(self.sampled_params, values): self._cache[self._cache_last + 1, self._icol[name]] = value if logposterior.logpriors is not None: for (name, value) in zip(self.minuslogprior_names, logposterior.logpriors): self._cache[self._cache_last + 1, self._icol[name]] = -value self._cache[self._cache_last + 1, self._icol[OutPar.minuslogprior]] = -logposterior.logprior if logposterior.loglikes is not None: for (name, value) in zip(self.chi2_names, logposterior.loglikes): self._cache[self._cache_last + 1, self._icol[name]] = -2 * value self._cache[self._cache_last + 1, self._icol[OutPar.chi2]] = -2 * logposterior.loglike if len(logposterior.derived): for (name, value) in zip(self.derived_params, logposterior.derived): self._cache[self._cache_last + 1, self._icol[name]] = value </DeepExtract> self._cache_last += 1
def _cache_add(values: Union[Sequence[float], np.ndarray], logposterior: LogPosterior, weight: float=1): """ Adds the given point to the cache. Dumps and resets the cache if full. """ if self._cache_last == self.cache_size - 1: if self._cache_last == -1: return self._enlarge(self._cache_last + 1) self._data.iloc[len(self._data) - (self._cache_last + 1):len(self._data)] = self._cache[:self._cache_last + 1] self._cache_reset() self._cache[self._cache_last + 1, self._icol[OutPar.weight]] = weight if weight is not None else 1 self._cache[self._cache_last + 1, self._icol[OutPar.minuslogpost]] = -apply_temperature(logposterior.logpost, self.temperature) for (name, value) in zip(self.sampled_params, values): self._cache[self._cache_last + 1, self._icol[name]] = value if logposterior.logpriors is not None: for (name, value) in zip(self.minuslogprior_names, logposterior.logpriors): self._cache[self._cache_last + 1, self._icol[name]] = -value self._cache[self._cache_last + 1, self._icol[OutPar.minuslogprior]] = -logposterior.logprior if logposterior.loglikes is not None: for (name, value) in zip(self.chi2_names, logposterior.loglikes): self._cache[self._cache_last + 1, self._icol[name]] = -2 * value self._cache[self._cache_last + 1, self._icol[OutPar.chi2]] = -2 * logposterior.loglike if len(logposterior.derived): for (name, value) in zip(self.derived_params, logposterior.derived): self._cache[self._cache_last + 1, self._icol[name]] = value self._cache_last += 1
cobaya
positive
def __init__(self, nc, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', norm_type=None, act_type='leakyrelu', mode='CNA'): super(ResidualDenseBlock_5C, self).__init__() <DeepExtract> assert mode in ['CNA', 'NAC', 'CNAC'], 'Wong conv mode [{:s}]'.format(mode) padding = get_valid_padding(kernel_size, dilation) p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None padding = padding if pad_type == 'zero' else 0 c = nn.Conv2d(nc, gc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) a = act(act_type) if act_type else None if 'CNA' in mode: n = norm(norm_type, gc) if norm_type else None self.conv1 = sequential(p, c, n, a) elif mode == 'NAC': if norm_type is None and act_type is not None: a = act(act_type, inplace=False) n = norm(norm_type, nc) if norm_type else None self.conv1 = sequential(n, a, p, c) </DeepExtract> <DeepExtract> assert mode in ['CNA', 'NAC', 'CNAC'], 'Wong conv mode [{:s}]'.format(mode) padding = get_valid_padding(kernel_size, dilation) p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None padding = padding if pad_type == 'zero' else 0 c = nn.Conv2d(nc + gc, gc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) a = act(act_type) if act_type else None if 'CNA' in mode: n = norm(norm_type, gc) if norm_type else None self.conv2 = sequential(p, c, n, a) elif mode == 'NAC': if norm_type is None and act_type is not None: a = act(act_type, inplace=False) n = norm(norm_type, nc + gc) if norm_type else None self.conv2 = sequential(n, a, p, c) </DeepExtract> <DeepExtract> assert mode in ['CNA', 'NAC', 'CNAC'], 'Wong conv mode [{:s}]'.format(mode) padding = get_valid_padding(kernel_size, dilation) p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None padding = padding if pad_type == 'zero' else 0 c = nn.Conv2d(nc + 2 * gc, gc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) a = act(act_type) if act_type else None if 'CNA' in mode: n = norm(norm_type, gc) if norm_type else None self.conv3 = sequential(p, c, n, a) elif mode == 'NAC': if norm_type is None and act_type is not None: a = act(act_type, inplace=False) n = norm(norm_type, nc + 2 * gc) if norm_type else None self.conv3 = sequential(n, a, p, c) </DeepExtract> <DeepExtract> assert mode in ['CNA', 'NAC', 'CNAC'], 'Wong conv mode [{:s}]'.format(mode) padding = get_valid_padding(kernel_size, dilation) p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None padding = padding if pad_type == 'zero' else 0 c = nn.Conv2d(nc + 3 * gc, gc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) a = act(act_type) if act_type else None if 'CNA' in mode: n = norm(norm_type, gc) if norm_type else None self.conv4 = sequential(p, c, n, a) elif mode == 'NAC': if norm_type is None and act_type is not None: a = act(act_type, inplace=False) n = norm(norm_type, nc + 3 * gc) if norm_type else None self.conv4 = sequential(n, a, p, c) </DeepExtract> if mode == 'CNA': last_act = None else: last_act = act_type <DeepExtract> assert mode in ['CNA', 'NAC', 'CNAC'], 'Wong conv mode [{:s}]'.format(mode) padding = get_valid_padding(3, dilation) p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None padding = padding if pad_type == 'zero' else 0 c = nn.Conv2d(nc + 4 * gc, nc, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) a = 
act(last_act) if last_act else None if 'CNA' in mode: n = norm(norm_type, nc) if norm_type else None self.conv5 = sequential(p, c, n, a) elif mode == 'NAC': if norm_type is None and last_act is not None: a = act(last_act, inplace=False) n = norm(norm_type, nc + 4 * gc) if norm_type else None self.conv5 = sequential(n, a, p, c) </DeepExtract>
def __init__(self, nc, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero', norm_type=None, act_type='leakyrelu', mode='CNA'): super(ResidualDenseBlock_5C, self).__init__() assert mode in ['CNA', 'NAC', 'CNAC'], 'Wong conv mode [{:s}]'.format(mode) padding = get_valid_padding(kernel_size, dilation) p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None padding = padding if pad_type == 'zero' else 0 c = nn.Conv2d(nc, gc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) a = act(act_type) if act_type else None if 'CNA' in mode: n = norm(norm_type, gc) if norm_type else None self.conv1 = sequential(p, c, n, a) elif mode == 'NAC': if norm_type is None and act_type is not None: a = act(act_type, inplace=False) n = norm(norm_type, nc) if norm_type else None self.conv1 = sequential(n, a, p, c) assert mode in ['CNA', 'NAC', 'CNAC'], 'Wong conv mode [{:s}]'.format(mode) padding = get_valid_padding(kernel_size, dilation) p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None padding = padding if pad_type == 'zero' else 0 c = nn.Conv2d(nc + gc, gc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) a = act(act_type) if act_type else None if 'CNA' in mode: n = norm(norm_type, gc) if norm_type else None self.conv2 = sequential(p, c, n, a) elif mode == 'NAC': if norm_type is None and act_type is not None: a = act(act_type, inplace=False) n = norm(norm_type, nc + gc) if norm_type else None self.conv2 = sequential(n, a, p, c) assert mode in ['CNA', 'NAC', 'CNAC'], 'Wong conv mode [{:s}]'.format(mode) padding = get_valid_padding(kernel_size, dilation) p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None padding = padding if pad_type == 'zero' else 0 c = nn.Conv2d(nc + 2 * gc, gc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) a = act(act_type) if act_type else None if 'CNA' in mode: n = norm(norm_type, gc) if norm_type else None self.conv3 = sequential(p, c, n, a) elif mode == 'NAC': if norm_type is None and act_type is not None: a = act(act_type, inplace=False) n = norm(norm_type, nc + 2 * gc) if norm_type else None self.conv3 = sequential(n, a, p, c) assert mode in ['CNA', 'NAC', 'CNAC'], 'Wong conv mode [{:s}]'.format(mode) padding = get_valid_padding(kernel_size, dilation) p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None padding = padding if pad_type == 'zero' else 0 c = nn.Conv2d(nc + 3 * gc, gc, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) a = act(act_type) if act_type else None if 'CNA' in mode: n = norm(norm_type, gc) if norm_type else None self.conv4 = sequential(p, c, n, a) elif mode == 'NAC': if norm_type is None and act_type is not None: a = act(act_type, inplace=False) n = norm(norm_type, nc + 3 * gc) if norm_type else None self.conv4 = sequential(n, a, p, c) if mode == 'CNA': last_act = None else: last_act = act_type assert mode in ['CNA', 'NAC', 'CNAC'], 'Wong conv mode [{:s}]'.format(mode) padding = get_valid_padding(3, dilation) p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None padding = padding if pad_type == 'zero' else 0 c = nn.Conv2d(nc + 4 * gc, nc, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups) a = act(last_act) if last_act else None if 'CNA' in mode: n = norm(norm_type, nc) if norm_type else None self.conv5 = sequential(p, 
c, n, a) elif mode == 'NAC': if norm_type is None and last_act is not None: a = act(last_act, inplace=False) n = norm(norm_type, nc + 4 * gc) if norm_type else None self.conv5 = sequential(n, a, p, c)
DASR
positive
def get_episodes(html, url): if re.match('https://www\\.instagram\\.com/graphql/query/', url): body = json.loads(html) <DeepExtract> user = body['data']['user'] timeline = user['edge_owner_to_timeline_media'] eps = [] for item in timeline['edges']: eps.append(Episode(str(item['node']['shortcode']), 'https://www.instagram.com/p/{}/'.format(item['node']['shortcode']))) end_cursor = None if timeline['page_info']['has_next_page']: end_cursor = timeline['page_info']['end_cursor'] (eps, cursor) = (reversed(eps), end_cursor) </DeepExtract> if cursor: variables = parse_qs(urlparse(url).query)['variables'][0] variables = json.loads(variables) <DeepExtract> cache_next_page[url] = 'https://www.instagram.com/graphql/query/?{}'.format(urlencode({'query_hash': '2c5d4d8b70cad329c4a6ebe3abb6eedd', 'variables': json.dumps({'id': variables['id'], 'first': 12, 'after': cursor})})) </DeepExtract> return eps if re.match('https://www\\.instagram\\.com/[^/]+/', url): <DeepExtract> shared_data = re.search('window\\._sharedData = ([\\s\\S]+?);</script', html).group(1) shared_data = json.loads(shared_data) data = shared_data['entry_data']['ProfilePage'][0]['graphql'] </DeepExtract> <DeepExtract> user = data['user'] timeline = user['edge_owner_to_timeline_media'] eps = [] for item in timeline['edges']: eps.append(Episode(str(item['node']['shortcode']), 'https://www.instagram.com/p/{}/'.format(item['node']['shortcode']))) end_cursor = None if timeline['page_info']['has_next_page']: end_cursor = timeline['page_info']['end_cursor'] (eps, cursor) = (reversed(eps), end_cursor) </DeepExtract> if cursor: <DeepExtract> cache_next_page[url] = 'https://www.instagram.com/graphql/query/?{}'.format(urlencode({'query_hash': '2c5d4d8b70cad329c4a6ebe3abb6eedd', 'variables': json.dumps({'id': data['user']['id'], 'first': 12, 'after': cursor})})) </DeepExtract> return eps raise Exception('unknown URL: {}'.format(url))
def get_episodes(html, url): if re.match('https://www\\.instagram\\.com/graphql/query/', url): body = json.loads(html) user = body['data']['user'] timeline = user['edge_owner_to_timeline_media'] eps = [] for item in timeline['edges']: eps.append(Episode(str(item['node']['shortcode']), 'https://www.instagram.com/p/{}/'.format(item['node']['shortcode']))) end_cursor = None if timeline['page_info']['has_next_page']: end_cursor = timeline['page_info']['end_cursor'] (eps, cursor) = (reversed(eps), end_cursor) if cursor: variables = parse_qs(urlparse(url).query)['variables'][0] variables = json.loads(variables) cache_next_page[url] = 'https://www.instagram.com/graphql/query/?{}'.format(urlencode({'query_hash': '2c5d4d8b70cad329c4a6ebe3abb6eedd', 'variables': json.dumps({'id': variables['id'], 'first': 12, 'after': cursor})})) return eps if re.match('https://www\\.instagram\\.com/[^/]+/', url): shared_data = re.search('window\\._sharedData = ([\\s\\S]+?);</script', html).group(1) shared_data = json.loads(shared_data) data = shared_data['entry_data']['ProfilePage'][0]['graphql'] user = data['user'] timeline = user['edge_owner_to_timeline_media'] eps = [] for item in timeline['edges']: eps.append(Episode(str(item['node']['shortcode']), 'https://www.instagram.com/p/{}/'.format(item['node']['shortcode']))) end_cursor = None if timeline['page_info']['has_next_page']: end_cursor = timeline['page_info']['end_cursor'] (eps, cursor) = (reversed(eps), end_cursor) if cursor: cache_next_page[url] = 'https://www.instagram.com/graphql/query/?{}'.format(urlencode({'query_hash': '2c5d4d8b70cad329c4a6ebe3abb6eedd', 'variables': json.dumps({'id': data['user']['id'], 'first': 12, 'after': cursor})})) return eps raise Exception('unknown URL: {}'.format(url))
ComicCrawler
positive
def __unpack_object(self, object):
    assert isinstance(object, dict), 'Object values must be a dict! Passed %s (%s)' % (type(object), object)
    ret = {}
    for (key, value) in object.items():
        assert isinstance(key, str)
        if isinstance(value, str):
            ret[key] = value
        elif isinstance(value, int):
            ret[key] = value
        elif isinstance(value, float):
            ret[key] = value
        elif value is None:
            ret[key] = value
        elif value in (True, False):
            ret[key] = value
        elif isinstance(value, dict):
            <DeepExtract>
            assert isinstance(value, dict), 'Object values must be a dict! Passed %s (%s)' % (type(value), value)
            ret = {}
            for (key, value) in value.items():
                assert isinstance(key, str)
                if isinstance(value, str):
                    ret[key] = value
                elif isinstance(value, int):
                    ret[key] = value
                elif isinstance(value, float):
                    ret[key] = value
                elif value is None:
                    ret[key] = value
                elif value in (True, False):
                    ret[key] = value
                elif isinstance(value, dict):
                    ret[key] = self.__unpack_object(value)
                else:
                    raise ValueError('Unknown type in object: %s (%s)' % (type(value), value))
            ret[key] = ret
            </DeepExtract>
        else:
            raise ValueError('Unknown type in object: %s (%s)' % (type(value), value))
    return ret
def __unpack_object(self, object):
    assert isinstance(object, dict), 'Object values must be a dict! Passed %s (%s)' % (type(object), object)
    ret = {}
    for (key, value) in object.items():
        assert isinstance(key, str)
        if isinstance(value, str):
            ret[key] = value
        elif isinstance(value, int):
            ret[key] = value
        elif isinstance(value, float):
            ret[key] = value
        elif value is None:
            ret[key] = value
        elif value in (True, False):
            ret[key] = value
        elif isinstance(value, dict):
            assert isinstance(value, dict), 'Object values must be a dict! Passed %s (%s)' % (type(value), value)
            ret = {}
            for (key, value) in value.items():
                assert isinstance(key, str)
                if isinstance(value, str):
                    ret[key] = value
                elif isinstance(value, int):
                    ret[key] = value
                elif isinstance(value, float):
                    ret[key] = value
                elif value is None:
                    ret[key] = value
                elif value in (True, False):
                    ret[key] = value
                elif isinstance(value, dict):
                    ret[key] = self.__unpack_object(value)
                else:
                    raise ValueError('Unknown type in object: %s (%s)' % (type(value), value))
            ret[key] = ret
        else:
            raise ValueError('Unknown type in object: %s (%s)' % (type(value), value))
    return ret
ChromeController
positive
@apply_defaults def __init__(self, task_id, owner=configuration.conf.get('operators', 'DEFAULT_OWNER'), email=None, email_on_retry=True, email_on_failure=True, retries=0, retry_delay=timedelta(seconds=300), retry_exponential_backoff=False, max_retry_delay=None, start_date=None, end_date=None, schedule_interval=None, depends_on_past=False, wait_for_downstream=False, dag=None, params=None, default_args=None, adhoc=False, priority_weight=1, weight_rule=WeightRule.DOWNSTREAM, queue=configuration.conf.get('celery', 'default_queue'), pool=None, sla=None, execution_timeout=None, on_failure_callback=None, on_success_callback=None, on_retry_callback=None, trigger_rule=TriggerRule.ALL_SUCCESS, resources=None, run_as_user=None, task_concurrency=None, executor_config=None, inlets=None, outlets=None, *args, **kwargs): if args or kwargs: warnings.warn('Invalid arguments were passed to {c}. Support for passing such arguments will be dropped in Airflow 2.0. Invalid arguments were:\n*args: {a}\n**kwargs: {k}'.format(c=self.__class__.__name__, a=args, k=kwargs), category=PendingDeprecationWarning) validate_key(task_id) self.task_id = task_id self.owner = owner self.email = email self.email_on_retry = email_on_retry self.email_on_failure = email_on_failure self.start_date = start_date if start_date and (not isinstance(start_date, datetime)): self.log.warning("start_date for %s isn't datetime.datetime", self) self.end_date = end_date if not TriggerRule.is_valid(trigger_rule): raise AirflowException("The trigger_rule must be one of {all_triggers},'{d}.{t}'; received '{tr}'.".format(all_triggers=TriggerRule.all_triggers, d=dag.dag_id if dag else '', t=task_id, tr=trigger_rule)) self.trigger_rule = trigger_rule self.depends_on_past = depends_on_past self.wait_for_downstream = wait_for_downstream if wait_for_downstream: self.depends_on_past = True if schedule_interval: self.log.warning('schedule_interval is used for %s, though it has been deprecated as a task parameter, you need to specify it as a DAG parameter instead', self) self._schedule_interval = schedule_interval self.retries = retries self.queue = queue self.pool = pool self.sla = sla self.execution_timeout = execution_timeout self.on_failure_callback = on_failure_callback self.on_success_callback = on_success_callback self.on_retry_callback = on_retry_callback if isinstance(retry_delay, timedelta): self.retry_delay = retry_delay else: self.log.debug("Retry_delay isn't timedelta object, assuming secs") self.retry_delay = timedelta(seconds=retry_delay) self.retry_exponential_backoff = retry_exponential_backoff self.max_retry_delay = max_retry_delay self.params = params or {} self.adhoc = adhoc self.priority_weight = priority_weight if not WeightRule.is_valid(weight_rule): raise AirflowException("The weight_rule must be one of {all_weight_rules},'{d}.{t}'; received '{tr}'.".format(all_weight_rules=WeightRule.all_weight_rules, d=dag.dag_id if dag else '', t=task_id, tr=weight_rule)) self.weight_rule = weight_rule self.resources = Resources(**resources or {}) self.run_as_user = run_as_user self.task_concurrency = task_concurrency self.executor_config = executor_config or {} <DeepExtract> if serialize_json: stored_value = json.dumps(value) else: stored_value = str(value) session.query(cls).filter(cls.key == key).delete() session.add(Variable(key=key, val=stored_value)) session.flush() </DeepExtract> <DeepExtract> if serialize_json: stored_value = json.dumps(value) else: stored_value = str(value) session.query(cls).filter(cls.key == key).delete() 
session.add(Variable(key=key, val=stored_value)) session.flush() </DeepExtract> if not dag and _CONTEXT_MANAGER_DAG: dag = _CONTEXT_MANAGER_DAG if dag: self.dag = dag self._log = logging.getLogger('airflow.task.operators') self.inlets = [] self.outlets = [] self.lineage_data = None self._inlets = {'auto': False, 'task_ids': [], 'datasets': []} self._outlets = {'datasets': []} if inlets: self._inlets.update(inlets) if outlets: self._outlets.update(outlets) self._comps = {'task_id', 'dag_id', 'owner', 'email', 'email_on_retry', 'retry_delay', 'retry_exponential_backoff', 'max_retry_delay', 'start_date', 'schedule_interval', 'depends_on_past', 'wait_for_downstream', 'adhoc', 'priority_weight', 'sla', 'execution_timeout', 'on_failure_callback', 'on_success_callback', 'on_retry_callback'}
@apply_defaults def __init__(self, task_id, owner=configuration.conf.get('operators', 'DEFAULT_OWNER'), email=None, email_on_retry=True, email_on_failure=True, retries=0, retry_delay=timedelta(seconds=300), retry_exponential_backoff=False, max_retry_delay=None, start_date=None, end_date=None, schedule_interval=None, depends_on_past=False, wait_for_downstream=False, dag=None, params=None, default_args=None, adhoc=False, priority_weight=1, weight_rule=WeightRule.DOWNSTREAM, queue=configuration.conf.get('celery', 'default_queue'), pool=None, sla=None, execution_timeout=None, on_failure_callback=None, on_success_callback=None, on_retry_callback=None, trigger_rule=TriggerRule.ALL_SUCCESS, resources=None, run_as_user=None, task_concurrency=None, executor_config=None, inlets=None, outlets=None, *args, **kwargs): if args or kwargs: warnings.warn('Invalid arguments were passed to {c}. Support for passing such arguments will be dropped in Airflow 2.0. Invalid arguments were:\n*args: {a}\n**kwargs: {k}'.format(c=self.__class__.__name__, a=args, k=kwargs), category=PendingDeprecationWarning) validate_key(task_id) self.task_id = task_id self.owner = owner self.email = email self.email_on_retry = email_on_retry self.email_on_failure = email_on_failure self.start_date = start_date if start_date and (not isinstance(start_date, datetime)): self.log.warning("start_date for %s isn't datetime.datetime", self) self.end_date = end_date if not TriggerRule.is_valid(trigger_rule): raise AirflowException("The trigger_rule must be one of {all_triggers},'{d}.{t}'; received '{tr}'.".format(all_triggers=TriggerRule.all_triggers, d=dag.dag_id if dag else '', t=task_id, tr=trigger_rule)) self.trigger_rule = trigger_rule self.depends_on_past = depends_on_past self.wait_for_downstream = wait_for_downstream if wait_for_downstream: self.depends_on_past = True if schedule_interval: self.log.warning('schedule_interval is used for %s, though it has been deprecated as a task parameter, you need to specify it as a DAG parameter instead', self) self._schedule_interval = schedule_interval self.retries = retries self.queue = queue self.pool = pool self.sla = sla self.execution_timeout = execution_timeout self.on_failure_callback = on_failure_callback self.on_success_callback = on_success_callback self.on_retry_callback = on_retry_callback if isinstance(retry_delay, timedelta): self.retry_delay = retry_delay else: self.log.debug("Retry_delay isn't timedelta object, assuming secs") self.retry_delay = timedelta(seconds=retry_delay) self.retry_exponential_backoff = retry_exponential_backoff self.max_retry_delay = max_retry_delay self.params = params or {} self.adhoc = adhoc self.priority_weight = priority_weight if not WeightRule.is_valid(weight_rule): raise AirflowException("The weight_rule must be one of {all_weight_rules},'{d}.{t}'; received '{tr}'.".format(all_weight_rules=WeightRule.all_weight_rules, d=dag.dag_id if dag else '', t=task_id, tr=weight_rule)) self.weight_rule = weight_rule self.resources = Resources(**resources or {}) self.run_as_user = run_as_user self.task_concurrency = task_concurrency self.executor_config = executor_config or {} if serialize_json: stored_value = json.dumps(value) else: stored_value = str(value) session.query(cls).filter(cls.key == key).delete() session.add(Variable(key=key, val=stored_value)) session.flush() if serialize_json: stored_value = json.dumps(value) else: stored_value = str(value) session.query(cls).filter(cls.key == key).delete() session.add(Variable(key=key, val=stored_value)) 
session.flush() if not dag and _CONTEXT_MANAGER_DAG: dag = _CONTEXT_MANAGER_DAG if dag: self.dag = dag self._log = logging.getLogger('airflow.task.operators') self.inlets = [] self.outlets = [] self.lineage_data = None self._inlets = {'auto': False, 'task_ids': [], 'datasets': []} self._outlets = {'datasets': []} if inlets: self._inlets.update(inlets) if outlets: self._outlets.update(outlets) self._comps = {'task_id', 'dag_id', 'owner', 'email', 'email_on_retry', 'retry_delay', 'retry_exponential_backoff', 'max_retry_delay', 'start_date', 'schedule_interval', 'depends_on_past', 'wait_for_downstream', 'adhoc', 'priority_weight', 'sla', 'execution_timeout', 'on_failure_callback', 'on_success_callback', 'on_retry_callback'}
docker-airflow
positive
def testFullyConvolutionalEndpointShapes(self):
    global_pool = False
    num_classes = 10
    <DeepExtract>
    if None in [2, 321, 321, 3]:
        inputs = tf.placeholder(tf.float32, (2, 321, 321, 3))
    else:
        inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(321), [321, 1]) + np.reshape(np.arange(321), [1, 321]), [1, 321, 321, 1]), [2, 1, 1, 3]))
    </DeepExtract>
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        <DeepExtract>
        block = resnet_v2.resnet_v2_block
        blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
        (_, end_points) = resnet_v2.resnet_v2(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, spatial_squeeze=False, reuse=reuse, scope='resnet')
        </DeepExtract>
        endpoint_to_shape = {'resnet/block1': [2, 41, 41, 4], 'resnet/block2': [2, 21, 21, 8], 'resnet/block3': [2, 11, 11, 16], 'resnet/block4': [2, 11, 11, 32]}
        for endpoint in endpoint_to_shape:
            shape = endpoint_to_shape[endpoint]
            self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapes(self):
    global_pool = False
    num_classes = 10
    if None in [2, 321, 321, 3]:
        inputs = tf.placeholder(tf.float32, (2, 321, 321, 3))
    else:
        inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(321), [321, 1]) + np.reshape(np.arange(321), [1, 321]), [1, 321, 321, 1]), [2, 1, 1, 3]))
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
        block = resnet_v2.resnet_v2_block
        blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
        (_, end_points) = resnet_v2.resnet_v2(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, spatial_squeeze=False, reuse=reuse, scope='resnet')
        endpoint_to_shape = {'resnet/block1': [2, 41, 41, 4], 'resnet/block2': [2, 21, 21, 8], 'resnet/block3': [2, 11, 11, 16], 'resnet/block4': [2, 11, 11, 32]}
        for endpoint in endpoint_to_shape:
            shape = endpoint_to_shape[endpoint]
            self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
caad_18
positive
def get_kitti_image_info(path, training=True, label_info=True, velodyne=False, calib=False, image_ids=7481, extend_matrix=True, num_worker=8, relative_path=True, with_imageshape=True): root_path = pathlib.Path(path) if not isinstance(image_ids, list): image_ids = list(range(image_ids)) def map_func(idx): image_info = {'image_idx': idx, 'pointcloud_num_features': 4} annotations = None if velodyne: <DeepExtract> image_info['velodyne_path'] = get_kitti_info_path(idx, path, 'velodyne', '.bin', training, relative_path, exist_check) </DeepExtract> <DeepExtract> image_info['img_path'] = get_kitti_info_path(idx, path, 'image_2', '.png', training, relative_path, exist_check) </DeepExtract> if with_imageshape: img_path = image_info['img_path'] if relative_path: img_path = str(root_path / img_path) image_info['img_shape'] = np.array(io.imread(img_path).shape[:2], dtype=np.int32) if label_info: <DeepExtract> label_path = get_kitti_info_path(idx, path, 'label_2', '.txt', training, relative_path, exist_check) </DeepExtract> if relative_path: label_path = str(root_path / label_path) <DeepExtract> annotations = {} annotations.update({'name': [], 'truncated': [], 'occluded': [], 'alpha': [], 'bbox': [], 'dimensions': [], 'location': [], 'rotation_y': []}) with open(label_path, 'r') as f: lines = f.readlines() content = [line.strip().split(' ') for line in lines] num_objects = len([x[0] for x in content if x[0] != 'DontCare']) annotations['name'] = np.array([x[0] for x in content]) num_gt = len(annotations['name']) annotations['truncated'] = np.array([float(x[1]) for x in content]) annotations['occluded'] = np.array([int(float(x[2])) for x in content]) annotations['alpha'] = np.array([float(x[3]) for x in content]) annotations['bbox'] = np.array([[float(info) for info in x[4:8]] for x in content]).reshape(-1, 4) annotations['dimensions'] = np.array([[float(info) for info in x[8:11]] for x in content]).reshape(-1, 3)[:, [2, 0, 1]] annotations['location'] = np.array([[float(info) for info in x[11:14]] for x in content]).reshape(-1, 3) annotations['rotation_y'] = np.array([float(x[14]) for x in content]).reshape(-1) if len(content) != 0 and len(content[0]) == 16: annotations['score'] = np.array([float(x[15]) for x in content]) else: annotations['score'] = np.zeros((annotations['bbox'].shape[0],)) index = list(range(num_objects)) + [-1] * (num_gt - num_objects) annotations['index'] = np.array(index, dtype=np.int32) annotations['group_ids'] = np.arange(num_gt, dtype=np.int32) annotations = annotations </DeepExtract> if calib: <DeepExtract> calib_path = get_kitti_info_path(idx, path, 'calib', '.txt', training, False, exist_check) </DeepExtract> with open(calib_path, 'r') as f: lines = f.readlines() P0 = np.array([float(info) for info in lines[0].split(' ')[1:13]]).reshape([3, 4]) P1 = np.array([float(info) for info in lines[1].split(' ')[1:13]]).reshape([3, 4]) P2 = np.array([float(info) for info in lines[2].split(' ')[1:13]]).reshape([3, 4]) P3 = np.array([float(info) for info in lines[3].split(' ')[1:13]]).reshape([3, 4]) if extend_matrix: <DeepExtract> P0 = np.concatenate([P0, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) P0 = P0 </DeepExtract> <DeepExtract> P1 = np.concatenate([P1, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) P1 = P1 </DeepExtract> <DeepExtract> P2 = np.concatenate([P2, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) P2 = P2 </DeepExtract> <DeepExtract> P3 = np.concatenate([P3, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) P3 = P3 </DeepExtract> image_info['calib/P0'] = P0 image_info['calib/P1'] = P1 
image_info['calib/P2'] = P2 image_info['calib/P3'] = P3 R0_rect = np.array([float(info) for info in lines[4].split(' ')[1:10]]).reshape([3, 3]) if extend_matrix: rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype) rect_4x4[3, 3] = 1.0 rect_4x4[:3, :3] = R0_rect else: rect_4x4 = R0_rect image_info['calib/R0_rect'] = rect_4x4 Tr_velo_to_cam = np.array([float(info) for info in lines[5].split(' ')[1:13]]).reshape([3, 4]) Tr_imu_to_velo = np.array([float(info) for info in lines[6].split(' ')[1:13]]).reshape([3, 4]) if extend_matrix: <DeepExtract> Tr_velo_to_cam = np.concatenate([Tr_velo_to_cam, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) Tr_velo_to_cam = Tr_velo_to_cam </DeepExtract> <DeepExtract> Tr_imu_to_velo = np.concatenate([Tr_imu_to_velo, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) Tr_imu_to_velo = Tr_imu_to_velo </DeepExtract> image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam image_info['calib/Tr_imu_to_velo'] = Tr_imu_to_velo if annotations is not None: image_info['annos'] = annotations <DeepExtract> min_height = [40, 25, 25] max_occlusion = [0, 1, 2] max_trunc = [0.15, 0.3, 0.5] annos = image_info['annos'] dims = annos['dimensions'] bbox = annos['bbox'] height = bbox[:, 3] - bbox[:, 1] occlusion = annos['occluded'] truncation = annos['truncated'] diff = [] easy_mask = np.ones((len(dims),), dtype=np.bool) moderate_mask = np.ones((len(dims),), dtype=np.bool) hard_mask = np.ones((len(dims),), dtype=np.bool) i = 0 for (h, o, t) in zip(height, occlusion, truncation): if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]: easy_mask[i] = False if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]: moderate_mask[i] = False if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]: hard_mask[i] = False i += 1 is_easy = easy_mask is_moderate = np.logical_xor(easy_mask, moderate_mask) is_hard = np.logical_xor(hard_mask, moderate_mask) for i in range(len(dims)): if is_easy[i]: diff.append(0) elif is_moderate[i]: diff.append(1) elif is_hard[i]: diff.append(2) else: diff.append(-1) annos['difficulty'] = np.array(diff, np.int32) return diff </DeepExtract> return image_info with futures.ThreadPoolExecutor(num_worker) as executor: image_infos = executor.map(map_func, image_ids) return list(image_infos)
def get_kitti_image_info(path, training=True, label_info=True, velodyne=False, calib=False, image_ids=7481, extend_matrix=True, num_worker=8, relative_path=True, with_imageshape=True): root_path = pathlib.Path(path) if not isinstance(image_ids, list): image_ids = list(range(image_ids)) def map_func(idx): image_info = {'image_idx': idx, 'pointcloud_num_features': 4} annotations = None if velodyne: image_info['velodyne_path'] = get_kitti_info_path(idx, path, 'velodyne', '.bin', training, relative_path, exist_check) image_info['img_path'] = get_kitti_info_path(idx, path, 'image_2', '.png', training, relative_path, exist_check) if with_imageshape: img_path = image_info['img_path'] if relative_path: img_path = str(root_path / img_path) image_info['img_shape'] = np.array(io.imread(img_path).shape[:2], dtype=np.int32) if label_info: label_path = get_kitti_info_path(idx, path, 'label_2', '.txt', training, relative_path, exist_check) if relative_path: label_path = str(root_path / label_path) annotations = {} annotations.update({'name': [], 'truncated': [], 'occluded': [], 'alpha': [], 'bbox': [], 'dimensions': [], 'location': [], 'rotation_y': []}) with open(label_path, 'r') as f: lines = f.readlines() content = [line.strip().split(' ') for line in lines] num_objects = len([x[0] for x in content if x[0] != 'DontCare']) annotations['name'] = np.array([x[0] for x in content]) num_gt = len(annotations['name']) annotations['truncated'] = np.array([float(x[1]) for x in content]) annotations['occluded'] = np.array([int(float(x[2])) for x in content]) annotations['alpha'] = np.array([float(x[3]) for x in content]) annotations['bbox'] = np.array([[float(info) for info in x[4:8]] for x in content]).reshape(-1, 4) annotations['dimensions'] = np.array([[float(info) for info in x[8:11]] for x in content]).reshape(-1, 3)[:, [2, 0, 1]] annotations['location'] = np.array([[float(info) for info in x[11:14]] for x in content]).reshape(-1, 3) annotations['rotation_y'] = np.array([float(x[14]) for x in content]).reshape(-1) if len(content) != 0 and len(content[0]) == 16: annotations['score'] = np.array([float(x[15]) for x in content]) else: annotations['score'] = np.zeros((annotations['bbox'].shape[0],)) index = list(range(num_objects)) + [-1] * (num_gt - num_objects) annotations['index'] = np.array(index, dtype=np.int32) annotations['group_ids'] = np.arange(num_gt, dtype=np.int32) annotations = annotations if calib: calib_path = get_kitti_info_path(idx, path, 'calib', '.txt', training, False, exist_check) with open(calib_path, 'r') as f: lines = f.readlines() P0 = np.array([float(info) for info in lines[0].split(' ')[1:13]]).reshape([3, 4]) P1 = np.array([float(info) for info in lines[1].split(' ')[1:13]]).reshape([3, 4]) P2 = np.array([float(info) for info in lines[2].split(' ')[1:13]]).reshape([3, 4]) P3 = np.array([float(info) for info in lines[3].split(' ')[1:13]]).reshape([3, 4]) if extend_matrix: P0 = np.concatenate([P0, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) P0 = P0 P1 = np.concatenate([P1, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) P1 = P1 P2 = np.concatenate([P2, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) P2 = P2 P3 = np.concatenate([P3, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) P3 = P3 image_info['calib/P0'] = P0 image_info['calib/P1'] = P1 image_info['calib/P2'] = P2 image_info['calib/P3'] = P3 R0_rect = np.array([float(info) for info in lines[4].split(' ')[1:10]]).reshape([3, 3]) if extend_matrix: rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype) rect_4x4[3, 3] = 1.0 rect_4x4[:3, :3] = R0_rect else: 
rect_4x4 = R0_rect image_info['calib/R0_rect'] = rect_4x4 Tr_velo_to_cam = np.array([float(info) for info in lines[5].split(' ')[1:13]]).reshape([3, 4]) Tr_imu_to_velo = np.array([float(info) for info in lines[6].split(' ')[1:13]]).reshape([3, 4]) if extend_matrix: Tr_velo_to_cam = np.concatenate([Tr_velo_to_cam, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) Tr_velo_to_cam = Tr_velo_to_cam Tr_imu_to_velo = np.concatenate([Tr_imu_to_velo, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) Tr_imu_to_velo = Tr_imu_to_velo image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam image_info['calib/Tr_imu_to_velo'] = Tr_imu_to_velo if annotations is not None: image_info['annos'] = annotations min_height = [40, 25, 25] max_occlusion = [0, 1, 2] max_trunc = [0.15, 0.3, 0.5] annos = image_info['annos'] dims = annos['dimensions'] bbox = annos['bbox'] height = bbox[:, 3] - bbox[:, 1] occlusion = annos['occluded'] truncation = annos['truncated'] diff = [] easy_mask = np.ones((len(dims),), dtype=np.bool) moderate_mask = np.ones((len(dims),), dtype=np.bool) hard_mask = np.ones((len(dims),), dtype=np.bool) i = 0 for (h, o, t) in zip(height, occlusion, truncation): if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]: easy_mask[i] = False if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]: moderate_mask[i] = False if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]: hard_mask[i] = False i += 1 is_easy = easy_mask is_moderate = np.logical_xor(easy_mask, moderate_mask) is_hard = np.logical_xor(hard_mask, moderate_mask) for i in range(len(dims)): if is_easy[i]: diff.append(0) elif is_moderate[i]: diff.append(1) elif is_hard[i]: diff.append(2) else: diff.append(-1) annos['difficulty'] = np.array(diff, np.int32) return diff return image_info with futures.ThreadPoolExecutor(num_worker) as executor: image_infos = executor.map(map_func, image_ids) return list(image_infos)
ebms_3dod
positive
def is_ready(self) -> bool:
    <DeepExtract>
    self.obj = self.api_client.get(name=self.name, namespace=self.namespace).to_dict()
    </DeepExtract>
    log.info(self.obj.get('status', {}).get('orbitJobOperator', {}))
    return_status = False
    if self.obj.get('status', {}).get('orbitJobOperator', {}).get('jobStatus'):
        return_status = True
    return return_status
def is_ready(self) -> bool:
    self.obj = self.api_client.get(name=self.name, namespace=self.namespace).to_dict()
    log.info(self.obj.get('status', {}).get('orbitJobOperator', {}))
    return_status = False
    if self.obj.get('status', {}).get('orbitJobOperator', {}).get('jobStatus'):
        return_status = True
    return return_status
aws-orbit-workbench
positive
def export_one_task(taskname, models, output_folder, nnunet_trainer=default_trainer, nnunet_trainer_cascade=default_cascade_trainer, plans_identifier=default_plans_identifier): <DeepExtract> trainer_output_dir = nnunet_trainer + '__' + plans_identifier trainer_output_dir_cascade = nnunet_trainer_cascade + '__' + plans_identifier for m in models: to = trainer_output_dir_cascade if m == '3d_cascade_fullres' else trainer_output_dir expected_output_folder = join(network_training_output_dir, m, taskname, to) if not isdir(expected_output_folder): if m == '3d_lowres' or m == '3d_cascade_fullres': print('Task', taskname, 'does not seem to have the cascade') continue else: raise RuntimeError('missing folder! %s' % expected_output_folder) output_here = join(output_folder, m, taskname, to) maybe_mkdir_p(output_here) copy_model(expected_output_folder, output_here) </DeepExtract> <DeepExtract> ensemble_dir = join(network_training_output_dir, 'ensembles', taskname) if not isdir(ensemble_dir): print('No ensemble directory found for task', taskname) return subd = subdirs(ensemble_dir, join=False) valid = [] for s in subd: v = check_if_valid(s, models, (nnunet_trainer, nnunet_trainer_cascade), (plans_identifier,)) if v: valid.append(s) output_ensemble = join(output_folder, 'ensembles', taskname) maybe_mkdir_p(output_ensemble) for v in valid: this_output = join(output_ensemble, v) maybe_mkdir_p(this_output) shutil.copy(join(ensemble_dir, v, 'postprocessing.json'), this_output) </DeepExtract> <DeepExtract> zipf = zipfile.ZipFile(join(output_folder, taskname + '.zip'), 'w', zipfile.ZIP_DEFLATED) for (root, dirs, files) in os.walk(join(output_folder, taskname)): for file in files: zipf.write(join(root, file), os.path.relpath(join(root, file), join(output_folder, taskname))) </DeepExtract>
def export_one_task(taskname, models, output_folder, nnunet_trainer=default_trainer, nnunet_trainer_cascade=default_cascade_trainer, plans_identifier=default_plans_identifier): trainer_output_dir = nnunet_trainer + '__' + plans_identifier trainer_output_dir_cascade = nnunet_trainer_cascade + '__' + plans_identifier for m in models: to = trainer_output_dir_cascade if m == '3d_cascade_fullres' else trainer_output_dir expected_output_folder = join(network_training_output_dir, m, taskname, to) if not isdir(expected_output_folder): if m == '3d_lowres' or m == '3d_cascade_fullres': print('Task', taskname, 'does not seem to have the cascade') continue else: raise RuntimeError('missing folder! %s' % expected_output_folder) output_here = join(output_folder, m, taskname, to) maybe_mkdir_p(output_here) copy_model(expected_output_folder, output_here) ensemble_dir = join(network_training_output_dir, 'ensembles', taskname) if not isdir(ensemble_dir): print('No ensemble directory found for task', taskname) return subd = subdirs(ensemble_dir, join=False) valid = [] for s in subd: v = check_if_valid(s, models, (nnunet_trainer, nnunet_trainer_cascade), (plans_identifier,)) if v: valid.append(s) output_ensemble = join(output_folder, 'ensembles', taskname) maybe_mkdir_p(output_ensemble) for v in valid: this_output = join(output_ensemble, v) maybe_mkdir_p(this_output) shutil.copy(join(ensemble_dir, v, 'postprocessing.json'), this_output) zipf = zipfile.ZipFile(join(output_folder, taskname + '.zip'), 'w', zipfile.ZIP_DEFLATED) for (root, dirs, files) in os.walk(join(output_folder, taskname)): for file in files: zipf.write(join(root, file), os.path.relpath(join(root, file), join(output_folder, taskname)))
CoTr
positive
def to_tlbr(self):
    """Get current position in bounding box format `(min x, miny, max x, max y)`.

    Returns
    -------
    ndarray
        The bounding box.

    """
    <DeepExtract>
    ret = self.mean[:4].copy()
    ret[2] *= ret[3]
    ret[:2] -= ret[2:] / 2
    ret = ret
    </DeepExtract>
    ret[2:] = ret[:2] + ret[2:]
    return ret
def to_tlbr(self):
    """Get current position in bounding box format `(min x, miny, max x, max y)`.

    Returns
    -------
    ndarray
        The bounding box.

    """
    ret = self.mean[:4].copy()
    ret[2] *= ret[3]
    ret[:2] -= ret[2:] / 2
    ret = ret
    ret[2:] = ret[:2] + ret[2:]
    return ret
centerpose
positive
def main(): parser = argparse.ArgumentParser(description='Example: Uncertainty estimates in classification') parser.add_argument('--batchsize', '-b', type=int, default=100, help='Number of images in each mini-batch') parser.add_argument('--epoch', '-e', type=int, default=300, help='Number of sweeps over the dataset to train') parser.add_argument('--frequency', '-f', type=int, default=-1, help='Frequency of taking a snapshot') parser.add_argument('--gpu', '-g', type=int, default=0, help='GPU ID (negative value indicates CPU)') parser.add_argument('--out', '-o', default='logs', help='Directory to output the log files') parser.add_argument('--resume', '-r', default='', help='Resume the training from snapshot') parser.add_argument('--unit', '-u', type=int, default=20, help='Number of units') parser.add_argument('--noplot', dest='plot', action='store_false', help='Disable PlotReport extension') parser.add_argument('--test_on_test', action='store_true', help='Switch to the testing phase on test dataset') parser.add_argument('--test_on_valid', action='store_true', help='Switch to the testing phase on valid dataset') parser.add_argument('--mc_iteration', type=int, default=50, help='Number of iteration of MCMC') parser.add_argument('--decay', type=float, default=-1, help='Weight of L2 regularization') parser.add_argument('--seed', type=int, default=0, help='Fix the random seed') args = parser.parse_args() os.makedirs(args.out, exist_ok=True) with fixed_seed(args.seed, strict=False): predictor = BayesianConvNet(n_units=args.unit, n_out=10) train = Dataset(phase='train', indices=np.arange(0, 1000)) valid = Dataset(phase='train', indices=np.arange(1000, 2000)) test = Dataset(phase='test') if args.test_on_test: <DeepExtract> test_iter = chainer.iterators.SerialIterator(test, args.batchsize, repeat=False, shuffle=False) chainer.serializers.load_npz(os.path.join(args.out, 'predictor.npz'), predictor) model = MCSampler(predictor, mc_iteration=args.mc_iteration, activation=partial(F.softmax, axis=1), reduce_mean=partial(F.argmax, axis=1), reduce_var=partial(F.mean, axis=1)) if args.gpu >= 0: chainer.backends.cuda.get_device_from_id(args.gpu).use() model.to_gpu() infer = Inferencer(test_iter, model, device=args.gpu) (pred, uncert) = infer.run() os.makedirs(args.out, exist_ok=True) match = pred == test.labels accuracy = np.sum(match) / len(match) arr = [uncert[match], uncert[np.logical_not(match)]] plt.rcParams['font.size'] = 18 plt.figure(figsize=(13, 5)) ax = sns.violinplot(data=arr, inner='quartile', palette='Blues', orient='h', cut=0) ax.set_xlabel('Predicted variance') ax.set_yticklabels(['Correct prediction\n(n=%d)' % len(arr[0]), 'Wrong prediction\n(n=%d)' % len(arr[1])]) plt.title('Accuracy=%.3f' % accuracy) plt.tight_layout() plt.savefig(os.path.join(args.out, 'eval.png')) plt.close() </DeepExtract> elif args.test_on_valid: <DeepExtract> test_iter = chainer.iterators.SerialIterator(valid, args.batchsize, repeat=False, shuffle=False) chainer.serializers.load_npz(os.path.join(args.out, 'predictor.npz'), predictor) model = MCSampler(predictor, mc_iteration=args.mc_iteration, activation=partial(F.softmax, axis=1), reduce_mean=partial(F.argmax, axis=1), reduce_var=partial(F.mean, axis=1)) if args.gpu >= 0: chainer.backends.cuda.get_device_from_id(args.gpu).use() model.to_gpu() infer = Inferencer(test_iter, model, device=args.gpu) (pred, uncert) = infer.run() os.makedirs(args.out, exist_ok=True) match = pred == valid.labels accuracy = np.sum(match) / len(match) arr = [uncert[match], 
uncert[np.logical_not(match)]] plt.rcParams['font.size'] = 18 plt.figure(figsize=(13, 5)) ax = sns.violinplot(data=arr, inner='quartile', palette='Blues', orient='h', cut=0) ax.set_xlabel('Predicted variance') ax.set_yticklabels(['Correct prediction\n(n=%d)' % len(arr[0]), 'Wrong prediction\n(n=%d)' % len(arr[1])]) plt.title('Accuracy=%.3f' % accuracy) plt.tight_layout() plt.savefig(os.path.join(args.out, 'eval.png')) plt.close() </DeepExtract> else: <DeepExtract> train_iter = chainer.iterators.SerialIterator(train, args.batchsize) valid_iter = chainer.iterators.SerialIterator(valid, args.batchsize, repeat=False, shuffle=False) model = Classifier(predictor) if args.gpu >= 0: chainer.backends.cuda.get_device_from_id(args.gpu).use() model.to_gpu() optimizer = chainer.optimizers.Adam() optimizer.setup(model) if args.decay > 0: optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(args.decay)) updater = training.updaters.StandardUpdater(train_iter, optimizer, device=args.gpu) trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out) trainer.extend(extensions.Evaluator(valid_iter, model, device=args.gpu)) trainer.extend(extensions.dump_graph('main/loss')) frequency = args.epoch if args.frequency == -1 else max(1, args.frequency) trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch')) trainer.extend(extensions.LogReport()) if args.plot and extensions.PlotReport.available(): trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], 'epoch', file_name='loss.png')) trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'], 'epoch', file_name='accuracy.png')) trainer.extend(extensions.PrintReport(['epoch', 'iteration', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy', 'elapsed_time'])) trainer.extend(extensions.ProgressBar()) if args.resume: chainer.serializers.load_npz(args.resume, trainer) trainer.run() chainer.serializers.save_npz(os.path.join(args.out, 'predictor.npz'), predictor) </DeepExtract>
def main(): parser = argparse.ArgumentParser(description='Example: Uncertainty estimates in classification') parser.add_argument('--batchsize', '-b', type=int, default=100, help='Number of images in each mini-batch') parser.add_argument('--epoch', '-e', type=int, default=300, help='Number of sweeps over the dataset to train') parser.add_argument('--frequency', '-f', type=int, default=-1, help='Frequency of taking a snapshot') parser.add_argument('--gpu', '-g', type=int, default=0, help='GPU ID (negative value indicates CPU)') parser.add_argument('--out', '-o', default='logs', help='Directory to output the log files') parser.add_argument('--resume', '-r', default='', help='Resume the training from snapshot') parser.add_argument('--unit', '-u', type=int, default=20, help='Number of units') parser.add_argument('--noplot', dest='plot', action='store_false', help='Disable PlotReport extension') parser.add_argument('--test_on_test', action='store_true', help='Switch to the testing phase on test dataset') parser.add_argument('--test_on_valid', action='store_true', help='Switch to the testing phase on valid dataset') parser.add_argument('--mc_iteration', type=int, default=50, help='Number of iteration of MCMC') parser.add_argument('--decay', type=float, default=-1, help='Weight of L2 regularization') parser.add_argument('--seed', type=int, default=0, help='Fix the random seed') args = parser.parse_args() os.makedirs(args.out, exist_ok=True) with fixed_seed(args.seed, strict=False): predictor = BayesianConvNet(n_units=args.unit, n_out=10) train = Dataset(phase='train', indices=np.arange(0, 1000)) valid = Dataset(phase='train', indices=np.arange(1000, 2000)) test = Dataset(phase='test') if args.test_on_test: test_iter = chainer.iterators.SerialIterator(test, args.batchsize, repeat=False, shuffle=False) chainer.serializers.load_npz(os.path.join(args.out, 'predictor.npz'), predictor) model = MCSampler(predictor, mc_iteration=args.mc_iteration, activation=partial(F.softmax, axis=1), reduce_mean=partial(F.argmax, axis=1), reduce_var=partial(F.mean, axis=1)) if args.gpu >= 0: chainer.backends.cuda.get_device_from_id(args.gpu).use() model.to_gpu() infer = Inferencer(test_iter, model, device=args.gpu) (pred, uncert) = infer.run() os.makedirs(args.out, exist_ok=True) match = pred == test.labels accuracy = np.sum(match) / len(match) arr = [uncert[match], uncert[np.logical_not(match)]] plt.rcParams['font.size'] = 18 plt.figure(figsize=(13, 5)) ax = sns.violinplot(data=arr, inner='quartile', palette='Blues', orient='h', cut=0) ax.set_xlabel('Predicted variance') ax.set_yticklabels(['Correct prediction\n(n=%d)' % len(arr[0]), 'Wrong prediction\n(n=%d)' % len(arr[1])]) plt.title('Accuracy=%.3f' % accuracy) plt.tight_layout() plt.savefig(os.path.join(args.out, 'eval.png')) plt.close() elif args.test_on_valid: test_iter = chainer.iterators.SerialIterator(valid, args.batchsize, repeat=False, shuffle=False) chainer.serializers.load_npz(os.path.join(args.out, 'predictor.npz'), predictor) model = MCSampler(predictor, mc_iteration=args.mc_iteration, activation=partial(F.softmax, axis=1), reduce_mean=partial(F.argmax, axis=1), reduce_var=partial(F.mean, axis=1)) if args.gpu >= 0: chainer.backends.cuda.get_device_from_id(args.gpu).use() model.to_gpu() infer = Inferencer(test_iter, model, device=args.gpu) (pred, uncert) = infer.run() os.makedirs(args.out, exist_ok=True) match = pred == valid.labels accuracy = np.sum(match) / len(match) arr = [uncert[match], uncert[np.logical_not(match)]] plt.rcParams['font.size'] = 18 
plt.figure(figsize=(13, 5)) ax = sns.violinplot(data=arr, inner='quartile', palette='Blues', orient='h', cut=0) ax.set_xlabel('Predicted variance') ax.set_yticklabels(['Correct prediction\n(n=%d)' % len(arr[0]), 'Wrong prediction\n(n=%d)' % len(arr[1])]) plt.title('Accuracy=%.3f' % accuracy) plt.tight_layout() plt.savefig(os.path.join(args.out, 'eval.png')) plt.close() else: train_iter = chainer.iterators.SerialIterator(train, args.batchsize) valid_iter = chainer.iterators.SerialIterator(valid, args.batchsize, repeat=False, shuffle=False) model = Classifier(predictor) if args.gpu >= 0: chainer.backends.cuda.get_device_from_id(args.gpu).use() model.to_gpu() optimizer = chainer.optimizers.Adam() optimizer.setup(model) if args.decay > 0: optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(args.decay)) updater = training.updaters.StandardUpdater(train_iter, optimizer, device=args.gpu) trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out) trainer.extend(extensions.Evaluator(valid_iter, model, device=args.gpu)) trainer.extend(extensions.dump_graph('main/loss')) frequency = args.epoch if args.frequency == -1 else max(1, args.frequency) trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch')) trainer.extend(extensions.LogReport()) if args.plot and extensions.PlotReport.available(): trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss'], 'epoch', file_name='loss.png')) trainer.extend(extensions.PlotReport(['main/accuracy', 'validation/main/accuracy'], 'epoch', file_name='accuracy.png')) trainer.extend(extensions.PrintReport(['epoch', 'iteration', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy', 'elapsed_time'])) trainer.extend(extensions.ProgressBar()) if args.resume: chainer.serializers.load_npz(args.resume, trainer) trainer.run() chainer.serializers.save_npz(os.path.join(args.out, 'predictor.npz'), predictor) </DeepExtract>
bayesian_unet
positive
def forward(self, x=None, ref_scribble_label=None, previous_frame_mask=None, normalize_nearest_neighbor_distances=True, use_local_map=True, seq_names=None, gt_ids=None, k_nearest_neighbors=1, global_map_tmp_dic=None, local_map_dics=None, interaction_num=None, start_annotated_frame=None, frame_num=None): <DeepExtract> x = self.feature_extracter(x) x = self.semantic_embedding(x) x = x </DeepExtract> (ref_frame_embedding, previous_frame_embedding, current_frame_embedding) = torch.split(x, split_size_or_sections=int(x.size(0) / 3), dim=0) <DeepExtract> global_map_tmp_dic = global_map_tmp_dic dic_tmp = {} (bs, c, h, w) = current_frame_embedding.size() scale_ref_scribble_label = torch.nn.functional.interpolate(ref_scribble_label.float(), size=(h, w), mode='nearest') scale_ref_scribble_label = scale_ref_scribble_label.int() scale_previous_frame_label = torch.nn.functional.interpolate(previous_frame_mask.float(), size=(h, w), mode='nearest') scale_previous_frame_label = scale_previous_frame_label.int() if USE_CORRELATION_COST: n_chunks = 20 else: n_chunks = 500 for n in range(bs): seq_current_frame_embedding = current_frame_embedding[n] seq_ref_frame_embedding = ref_frame_embedding[n] seq_prev_frame_embedding = previous_frame_embedding[n] seq_ref_frame_embedding = seq_ref_frame_embedding.permute(1, 2, 0) seq_current_frame_embedding = seq_current_frame_embedding.permute(1, 2, 0) seq_ref_scribble_label = scale_ref_scribble_label[n].permute(1, 2, 0) t2 = time.time() (nn_features_n, ref_obj_ids) = nearest_neighbor_features_per_object(reference_embeddings=seq_ref_frame_embedding, query_embeddings=seq_current_frame_embedding, reference_labels=seq_ref_scribble_label, k_nearest_neighbors=k_nearest_neighbors, gt_ids=gt_ids[n], n_chunks=10) if normalize_nearest_neighbor_distances: nn_features_n = (torch.sigmoid(nn_features_n) - 0.5) * 2 t3 = time.time() if seq_names[n] not in global_map_tmp_dic: global_map_tmp_dic[seq_names[n]] = torch.ones_like(nn_features_n).repeat(104, 1, 1, 1, 1) if torch.cuda.is_available(): global_map_tmp_dic[seq_names[n]] = global_map_tmp_dic[seq_names[n]].cuda() nn_features_n = torch.where(nn_features_n <= global_map_tmp_dic[seq_names[n]][frame_num[n]].unsqueeze(0), nn_features_n, global_map_tmp_dic[seq_names[n]][frame_num[n]].unsqueeze(0)) global_map_tmp_dic[seq_names[n]][frame_num[n]] = nn_features_n.detach() t4 = time.time() seq_prev_frame_embedding = seq_prev_frame_embedding.permute(1, 2, 0) seq_previous_frame_label = scale_previous_frame_label[n].permute(1, 2, 0) if use_local_map: prev_frame_nn_features_n = local_previous_frame_nearest_neighbor_features_per_object(prev_frame_embedding=seq_prev_frame_embedding, query_embedding=seq_current_frame_embedding, prev_frame_labels=seq_previous_frame_label, gt_ids=ref_obj_ids, max_distance=cfg.MODEL_MAX_LOCAL_DISTANCE) else: (prev_frame_nn_features_n, _) = nearest_neighbor_features_per_object(reference_embeddings=seq_prev_frame_embedding, query_embeddings=seq_current_frame_embedding, reference_labels=seq_previous_frame_label, k_nearest_neighbors=k_nearest_neighbors, gt_ids=gt_ids[n], n_chunks=20) prev_frame_nn_features_n = (torch.sigmoid(prev_frame_nn_features_n) - 0.5) * 2 t5 = time.time() if local_map_dics is not None: (local_map_tmp_dic, local_map_dist_dic) = local_map_dics if seq_names[n] not in local_map_dist_dic: local_map_dist_dic[seq_names[n]] = torch.zeros(104, 9) if torch.cuda.is_available(): local_map_dist_dic[seq_names[n]] = local_map_dist_dic[seq_names[n]].cuda() if seq_names[n] not in local_map_tmp_dic: 
local_map_tmp_dic[seq_names[n]] = torch.zeros_like(prev_frame_nn_features_n).unsqueeze(0).repeat(104, 9, 1, 1, 1, 1) if torch.cuda.is_available(): local_map_tmp_dic[seq_names[n]] = local_map_tmp_dic[seq_names[n]].cuda() local_map_dist_dic[seq_names[n]][frame_num[n]][interaction_num - 1] = 1.0 / abs(frame_num[n] - start_annotated_frame) local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 1] = prev_frame_nn_features_n.squeeze(0).detach() if interaction_num == 1: prev_frame_nn_features_n = local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 1] prev_frame_nn_features_n = prev_frame_nn_features_n.unsqueeze(0) elif local_map_dist_dic[seq_names[n]][frame_num[n]][interaction_num - 1] > local_map_dist_dic[seq_names[n]][frame_num[n]][interaction_num - 2]: prev_frame_nn_features_n = local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 1] prev_frame_nn_features_n = prev_frame_nn_features_n.unsqueeze(0) else: prev_frame_nn_features_n = local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 2] prev_frame_nn_features_n = prev_frame_nn_features_n.unsqueeze(0) local_map_dics = (local_map_tmp_dic, local_map_dist_dic) previous_frame_to_cat = seq_previous_frame_label.float() == ref_obj_ids.float() to_cat_current_frame_embedding = current_frame_embedding[n].unsqueeze(0).repeat((ref_obj_ids.size(0), 1, 1, 1)) to_cat_nn_feature_n = nn_features_n.squeeze(0).permute(2, 3, 0, 1) to_cat_previous_frame = previous_frame_to_cat.unsqueeze(-1).permute(2, 3, 0, 1).float() to_cat_prev_frame_nn_feature_n = prev_frame_nn_features_n.squeeze(0).permute(2, 3, 0, 1) to_cat = torch.cat((to_cat_current_frame_embedding, to_cat_nn_feature_n, to_cat_prev_frame_nn_feature_n, to_cat_previous_frame), 1) if cfg.MODEL_GLOBAL_ATTEN: cat_global_ = torch.cat((to_cat_current_frame_embedding, to_cat_nn_feature_n), 1) atten_maps = self.global_atten(cat_global_) atten_maps = torch.nn.functional.softmax(atten_maps, 0) pred_ = self.dynamic_seghead(to_cat, atten_maps) else: pred_ = self.dynamic_seghead(to_cat) pred_ = pred_.permute(1, 0, 2, 3) dic_tmp[seq_names[n]] = pred_ if local_map_dics is None: (dic, global_map_tmp_dic) = (dic_tmp, global_map_tmp_dic) else: (dic, global_map_tmp_dic) = (dic_tmp, global_map_tmp_dic, local_map_dics) </DeepExtract> return (dic, global_map_tmp_dic)
def forward(self, x=None, ref_scribble_label=None, previous_frame_mask=None, normalize_nearest_neighbor_distances=True, use_local_map=True, seq_names=None, gt_ids=None, k_nearest_neighbors=1, global_map_tmp_dic=None, local_map_dics=None, interaction_num=None, start_annotated_frame=None, frame_num=None): x = self.feature_extracter(x) x = self.semantic_embedding(x) x = x (ref_frame_embedding, previous_frame_embedding, current_frame_embedding) = torch.split(x, split_size_or_sections=int(x.size(0) / 3), dim=0) global_map_tmp_dic = global_map_tmp_dic dic_tmp = {} (bs, c, h, w) = current_frame_embedding.size() scale_ref_scribble_label = torch.nn.functional.interpolate(ref_scribble_label.float(), size=(h, w), mode='nearest') scale_ref_scribble_label = scale_ref_scribble_label.int() scale_previous_frame_label = torch.nn.functional.interpolate(previous_frame_mask.float(), size=(h, w), mode='nearest') scale_previous_frame_label = scale_previous_frame_label.int() if USE_CORRELATION_COST: n_chunks = 20 else: n_chunks = 500 for n in range(bs): seq_current_frame_embedding = current_frame_embedding[n] seq_ref_frame_embedding = ref_frame_embedding[n] seq_prev_frame_embedding = previous_frame_embedding[n] seq_ref_frame_embedding = seq_ref_frame_embedding.permute(1, 2, 0) seq_current_frame_embedding = seq_current_frame_embedding.permute(1, 2, 0) seq_ref_scribble_label = scale_ref_scribble_label[n].permute(1, 2, 0) t2 = time.time() (nn_features_n, ref_obj_ids) = nearest_neighbor_features_per_object(reference_embeddings=seq_ref_frame_embedding, query_embeddings=seq_current_frame_embedding, reference_labels=seq_ref_scribble_label, k_nearest_neighbors=k_nearest_neighbors, gt_ids=gt_ids[n], n_chunks=10) if normalize_nearest_neighbor_distances: nn_features_n = (torch.sigmoid(nn_features_n) - 0.5) * 2 t3 = time.time() if seq_names[n] not in global_map_tmp_dic: global_map_tmp_dic[seq_names[n]] = torch.ones_like(nn_features_n).repeat(104, 1, 1, 1, 1) if torch.cuda.is_available(): global_map_tmp_dic[seq_names[n]] = global_map_tmp_dic[seq_names[n]].cuda() nn_features_n = torch.where(nn_features_n <= global_map_tmp_dic[seq_names[n]][frame_num[n]].unsqueeze(0), nn_features_n, global_map_tmp_dic[seq_names[n]][frame_num[n]].unsqueeze(0)) global_map_tmp_dic[seq_names[n]][frame_num[n]] = nn_features_n.detach() t4 = time.time() seq_prev_frame_embedding = seq_prev_frame_embedding.permute(1, 2, 0) seq_previous_frame_label = scale_previous_frame_label[n].permute(1, 2, 0) if use_local_map: prev_frame_nn_features_n = local_previous_frame_nearest_neighbor_features_per_object(prev_frame_embedding=seq_prev_frame_embedding, query_embedding=seq_current_frame_embedding, prev_frame_labels=seq_previous_frame_label, gt_ids=ref_obj_ids, max_distance=cfg.MODEL_MAX_LOCAL_DISTANCE) else: (prev_frame_nn_features_n, _) = nearest_neighbor_features_per_object(reference_embeddings=seq_prev_frame_embedding, query_embeddings=seq_current_frame_embedding, reference_labels=seq_previous_frame_label, k_nearest_neighbors=k_nearest_neighbors, gt_ids=gt_ids[n], n_chunks=20) prev_frame_nn_features_n = (torch.sigmoid(prev_frame_nn_features_n) - 0.5) * 2 t5 = time.time() if local_map_dics is not None: (local_map_tmp_dic, local_map_dist_dic) = local_map_dics if seq_names[n] not in local_map_dist_dic: local_map_dist_dic[seq_names[n]] = torch.zeros(104, 9) if torch.cuda.is_available(): local_map_dist_dic[seq_names[n]] = local_map_dist_dic[seq_names[n]].cuda() if seq_names[n] not in local_map_tmp_dic: local_map_tmp_dic[seq_names[n]] = 
torch.zeros_like(prev_frame_nn_features_n).unsqueeze(0).repeat(104, 9, 1, 1, 1, 1) if torch.cuda.is_available(): local_map_tmp_dic[seq_names[n]] = local_map_tmp_dic[seq_names[n]].cuda() local_map_dist_dic[seq_names[n]][frame_num[n]][interaction_num - 1] = 1.0 / abs(frame_num[n] - start_annotated_frame) local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 1] = prev_frame_nn_features_n.squeeze(0).detach() if interaction_num == 1: prev_frame_nn_features_n = local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 1] prev_frame_nn_features_n = prev_frame_nn_features_n.unsqueeze(0) elif local_map_dist_dic[seq_names[n]][frame_num[n]][interaction_num - 1] > local_map_dist_dic[seq_names[n]][frame_num[n]][interaction_num - 2]: prev_frame_nn_features_n = local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 1] prev_frame_nn_features_n = prev_frame_nn_features_n.unsqueeze(0) else: prev_frame_nn_features_n = local_map_tmp_dic[seq_names[n]][frame_num[n]][interaction_num - 2] prev_frame_nn_features_n = prev_frame_nn_features_n.unsqueeze(0) local_map_dics = (local_map_tmp_dic, local_map_dist_dic) previous_frame_to_cat = seq_previous_frame_label.float() == ref_obj_ids.float() to_cat_current_frame_embedding = current_frame_embedding[n].unsqueeze(0).repeat((ref_obj_ids.size(0), 1, 1, 1)) to_cat_nn_feature_n = nn_features_n.squeeze(0).permute(2, 3, 0, 1) to_cat_previous_frame = previous_frame_to_cat.unsqueeze(-1).permute(2, 3, 0, 1).float() to_cat_prev_frame_nn_feature_n = prev_frame_nn_features_n.squeeze(0).permute(2, 3, 0, 1) to_cat = torch.cat((to_cat_current_frame_embedding, to_cat_nn_feature_n, to_cat_prev_frame_nn_feature_n, to_cat_previous_frame), 1) if cfg.MODEL_GLOBAL_ATTEN: cat_global_ = torch.cat((to_cat_current_frame_embedding, to_cat_nn_feature_n), 1) atten_maps = self.global_atten(cat_global_) atten_maps = torch.nn.functional.softmax(atten_maps, 0) pred_ = self.dynamic_seghead(to_cat, atten_maps) else: pred_ = self.dynamic_seghead(to_cat) pred_ = pred_.permute(1, 0, 2, 3) dic_tmp[seq_names[n]] = pred_ if local_map_dics is None: (dic, global_map_tmp_dic) = (dic_tmp, global_map_tmp_dic) else: (dic, global_map_tmp_dic) = (dic_tmp, global_map_tmp_dic, local_map_dics) return (dic, global_map_tmp_dic)
CVPR2020_MANet
positive
def get_tz_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name, transport_zone_base_url):
    <DeepExtract>
    try:
        (rc, resp) = request(manager_url + transport_zone_base_url, method='GET', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing transport zones. Error [%s]' % to_native(err))
    transport_zones = resp
    </DeepExtract>
    for transport_zone in transport_zones['results']:
        if transport_zone.__contains__('display_name') and transport_zone['display_name'] == display_name:
            return transport_zone
    return None
def get_tz_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name, transport_zone_base_url):
    try:
        (rc, resp) = request(manager_url + transport_zone_base_url, method='GET', headers=dict(Accept='application/json'), url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
    except Exception as err:
        module.fail_json(msg='Error accessing transport zones. Error [%s]' % to_native(err))
    transport_zones = resp
    for transport_zone in transport_zones['results']:
        if transport_zone.__contains__('display_name') and transport_zone['display_name'] == display_name:
            return transport_zone
    return None
ansible-for-nsxt
positive
def __del__(self):
    <DeepExtract>
    self.log_file.write_text('\n'.join(self.log))
    </DeepExtract>
    del self.log
def __del__(self):
    self.log_file.write_text('\n'.join(self.log))
    del self.log
BCML
positive
def forward(self, batched_inputs): original_images = [x['image'].to(self.device) for x in batched_inputs] images_norm = [self.normalizer(x) for x in original_images] images_norm = ImageList.from_tensors(images_norm, self.backbone.size_divisibility) features = self.backbone(images_norm.tensor) if 'instances' in batched_inputs[0]: gt_instances = [x['instances'].to(self.device) for x in batched_inputs] if self.boxinst_enabled: original_image_masks = [torch.ones_like(x[0], dtype=torch.float32) for x in original_images] for i in range(len(original_image_masks)): im_h = batched_inputs[i]['height'] pixels_removed = int(self.bottom_pixels_removed * float(original_images[i].size(1)) / float(im_h)) if pixels_removed > 0: original_image_masks[i][-pixels_removed:, :] = 0 original_images = ImageList.from_tensors(original_images, self.backbone.size_divisibility) original_image_masks = ImageList.from_tensors(original_image_masks, self.backbone.size_divisibility, pad_value=0.0) <DeepExtract> stride = self.mask_out_stride start = int(stride // 2) assert original_images.tensor.size(2) % stride == 0 assert original_images.tensor.size(3) % stride == 0 downsampled_images = F.avg_pool2d(original_images.tensor.float(), kernel_size=stride, stride=stride, padding=0)[:, [2, 1, 0]] original_image_masks.tensor = original_image_masks.tensor[:, start::stride, start::stride] for (im_i, per_im_gt_inst) in enumerate(gt_instances): images_lab = color.rgb2lab(downsampled_images[im_i].byte().permute(1, 2, 0).cpu().numpy()) images_lab = torch.as_tensor(images_lab, device=downsampled_images.device, dtype=torch.float32) images_lab = images_lab.permute(2, 0, 1)[None] images_color_similarity = get_images_color_similarity(images_lab, original_image_masks.tensor[im_i], self.pairwise_size, self.pairwise_dilation) per_im_boxes = per_im_gt_inst.gt_boxes.tensor per_im_bitmasks = [] per_im_bitmasks_full = [] for per_box in per_im_boxes: bitmask_full = torch.zeros((original_images.tensor.size(-2), original_images.tensor.size(-1)), device=self.device).float() bitmask_full[int(per_box[1]):int(per_box[3] + 1), int(per_box[0]):int(per_box[2] + 1)] = 1.0 bitmask = bitmask_full[start::stride, start::stride] assert bitmask.size(0) * stride == original_images.tensor.size(-2) assert bitmask.size(1) * stride == original_images.tensor.size(-1) per_im_bitmasks.append(bitmask) per_im_bitmasks_full.append(bitmask_full) per_im_gt_inst.gt_bitmasks = torch.stack(per_im_bitmasks, dim=0) per_im_gt_inst.gt_bitmasks_full = torch.stack(per_im_bitmasks_full, dim=0) per_im_gt_inst.image_color_similarity = torch.cat([images_color_similarity for _ in range(len(per_im_gt_inst))], dim=0) </DeepExtract> else: <DeepExtract> for per_im_gt_inst in gt_instances: if not per_im_gt_inst.has('gt_masks'): continue start = int(self.mask_out_stride // 2) if isinstance(per_im_gt_inst.get('gt_masks'), PolygonMasks): polygons = per_im_gt_inst.get('gt_masks').polygons per_im_bitmasks = [] per_im_bitmasks_full = [] for per_polygons in polygons: bitmask = polygons_to_bitmask(per_polygons, images_norm.tensor.size(-2), images_norm.tensor.size(-1)) bitmask = torch.from_numpy(bitmask).to(self.device).float() start = int(self.mask_out_stride // 2) bitmask_full = bitmask.clone() bitmask = bitmask[start::self.mask_out_stride, start::self.mask_out_stride] assert bitmask.size(0) * self.mask_out_stride == images_norm.tensor.size(-2) assert bitmask.size(1) * self.mask_out_stride == images_norm.tensor.size(-1) per_im_bitmasks.append(bitmask) per_im_bitmasks_full.append(bitmask_full) 
per_im_gt_inst.gt_bitmasks = torch.stack(per_im_bitmasks, dim=0) per_im_gt_inst.gt_bitmasks_full = torch.stack(per_im_bitmasks_full, dim=0) else: bitmasks = per_im_gt_inst.get('gt_masks').tensor (h, w) = bitmasks.size()[1:] bitmasks_full = F.pad(bitmasks, (0, images_norm.tensor.size(-1) - w, 0, images_norm.tensor.size(-2) - h), 'constant', 0) bitmasks = bitmasks_full[:, start::self.mask_out_stride, start::self.mask_out_stride] per_im_gt_inst.gt_bitmasks = bitmasks per_im_gt_inst.gt_bitmasks_full = bitmasks_full </DeepExtract> else: gt_instances = None (mask_feats, sem_losses) = self.mask_branch(features, gt_instances) (proposals, proposal_losses) = self.proposal_generator(images_norm, features, gt_instances, self.controller) if self.training: <DeepExtract> pred_instances = proposals['instances'] assert self.max_proposals == -1 or self.topk_proposals_per_im == -1, 'MAX_PROPOSALS and TOPK_PROPOSALS_PER_IM cannot be used at the same time.' if self.max_proposals != -1: if self.max_proposals < len(pred_instances): inds = torch.randperm(len(pred_instances), device=mask_feats.device).long() logger.info('clipping proposals from {} to {}'.format(len(pred_instances), self.max_proposals)) pred_instances = pred_instances[inds[:self.max_proposals]] elif self.topk_proposals_per_im != -1: num_images = len(gt_instances) kept_instances = [] for im_id in range(num_images): instances_per_im = pred_instances[pred_instances.im_inds == im_id] if len(instances_per_im) == 0: kept_instances.append(instances_per_im) continue unique_gt_inds = instances_per_im.gt_inds.unique() num_instances_per_gt = max(int(self.topk_proposals_per_im / len(unique_gt_inds)), 1) for gt_ind in unique_gt_inds: instances_per_gt = instances_per_im[instances_per_im.gt_inds == gt_ind] if len(instances_per_gt) > num_instances_per_gt: scores = instances_per_gt.logits_pred.sigmoid().max(dim=1)[0] ctrness_pred = instances_per_gt.ctrness_pred.sigmoid() inds = (scores * ctrness_pred).topk(k=num_instances_per_gt, dim=0)[1] instances_per_gt = instances_per_gt[inds] kept_instances.append(instances_per_gt) pred_instances = Instances.cat(kept_instances) pred_instances.mask_head_params = pred_instances.top_feats loss_mask = self.mask_head(mask_feats, self.mask_branch.out_stride, pred_instances, gt_instances) mask_losses = loss_mask </DeepExtract> losses = {} losses.update(sem_losses) losses.update(proposal_losses) losses.update(mask_losses) return losses else: <DeepExtract> for (im_id, per_im) in enumerate(proposals): per_im.im_inds = per_im.locations.new_ones(len(per_im), dtype=torch.long) * im_id pred_instances = Instances.cat(proposals) pred_instances.mask_head_params = pred_instances.top_feat pred_instances_w_masks = self.mask_head(mask_feats, self.mask_branch.out_stride, pred_instances) pred_instances_w_masks = pred_instances_w_masks </DeepExtract> (padded_im_h, padded_im_w) = images_norm.tensor.size()[-2:] processed_results = [] for (im_id, (input_per_image, image_size)) in enumerate(zip(batched_inputs, images_norm.image_sizes)): height = input_per_image.get('height', image_size[0]) width = input_per_image.get('width', image_size[1]) instances_per_im = pred_instances_w_masks[pred_instances_w_masks.im_inds == im_id] <DeepExtract> (scale_x, scale_y) = (width / instances_per_im.image_size[1], height / instances_per_im.image_size[0]) (resized_im_h, resized_im_w) = instances_per_im.image_size instances_per_im = Instances((height, width), **instances_per_im.get_fields()) if instances_per_im.has('pred_boxes'): output_boxes = instances_per_im.pred_boxes 
elif instances_per_im.has('proposal_boxes'): output_boxes = instances_per_im.proposal_boxes output_boxes.scale(scale_x, scale_y) output_boxes.clip(instances_per_im.image_size) instances_per_im = instances_per_im[output_boxes.nonempty()] if instances_per_im.has('pred_global_masks'): (mask_h, mask_w) = instances_per_im.pred_global_masks.size()[-2:] factor_h = padded_im_h // mask_h factor_w = padded_im_w // mask_w assert factor_h == factor_w factor = factor_h pred_global_masks = aligned_bilinear(instances_per_im.pred_global_masks, factor) pred_global_masks = pred_global_masks[:, :, :resized_im_h, :resized_im_w] pred_global_masks = F.interpolate(pred_global_masks, size=(height, width), mode='bilinear', align_corners=False) pred_global_masks = pred_global_masks[:, 0, :, :] instances_per_im.pred_masks = (pred_global_masks > mask_threshold).float() instances_per_im = instances_per_im </DeepExtract> processed_results.append({'instances': instances_per_im}) return processed_results
def forward(self, batched_inputs): original_images = [x['image'].to(self.device) for x in batched_inputs] images_norm = [self.normalizer(x) for x in original_images] images_norm = ImageList.from_tensors(images_norm, self.backbone.size_divisibility) features = self.backbone(images_norm.tensor) if 'instances' in batched_inputs[0]: gt_instances = [x['instances'].to(self.device) for x in batched_inputs] if self.boxinst_enabled: original_image_masks = [torch.ones_like(x[0], dtype=torch.float32) for x in original_images] for i in range(len(original_image_masks)): im_h = batched_inputs[i]['height'] pixels_removed = int(self.bottom_pixels_removed * float(original_images[i].size(1)) / float(im_h)) if pixels_removed > 0: original_image_masks[i][-pixels_removed:, :] = 0 original_images = ImageList.from_tensors(original_images, self.backbone.size_divisibility) original_image_masks = ImageList.from_tensors(original_image_masks, self.backbone.size_divisibility, pad_value=0.0) stride = self.mask_out_stride start = int(stride // 2) assert original_images.tensor.size(2) % stride == 0 assert original_images.tensor.size(3) % stride == 0 downsampled_images = F.avg_pool2d(original_images.tensor.float(), kernel_size=stride, stride=stride, padding=0)[:, [2, 1, 0]] original_image_masks.tensor = original_image_masks.tensor[:, start::stride, start::stride] for (im_i, per_im_gt_inst) in enumerate(gt_instances): images_lab = color.rgb2lab(downsampled_images[im_i].byte().permute(1, 2, 0).cpu().numpy()) images_lab = torch.as_tensor(images_lab, device=downsampled_images.device, dtype=torch.float32) images_lab = images_lab.permute(2, 0, 1)[None] images_color_similarity = get_images_color_similarity(images_lab, original_image_masks.tensor[im_i], self.pairwise_size, self.pairwise_dilation) per_im_boxes = per_im_gt_inst.gt_boxes.tensor per_im_bitmasks = [] per_im_bitmasks_full = [] for per_box in per_im_boxes: bitmask_full = torch.zeros((original_images.tensor.size(-2), original_images.tensor.size(-1)), device=self.device).float() bitmask_full[int(per_box[1]):int(per_box[3] + 1), int(per_box[0]):int(per_box[2] + 1)] = 1.0 bitmask = bitmask_full[start::stride, start::stride] assert bitmask.size(0) * stride == original_images.tensor.size(-2) assert bitmask.size(1) * stride == original_images.tensor.size(-1) per_im_bitmasks.append(bitmask) per_im_bitmasks_full.append(bitmask_full) per_im_gt_inst.gt_bitmasks = torch.stack(per_im_bitmasks, dim=0) per_im_gt_inst.gt_bitmasks_full = torch.stack(per_im_bitmasks_full, dim=0) per_im_gt_inst.image_color_similarity = torch.cat([images_color_similarity for _ in range(len(per_im_gt_inst))], dim=0) else: for per_im_gt_inst in gt_instances: if not per_im_gt_inst.has('gt_masks'): continue start = int(self.mask_out_stride // 2) if isinstance(per_im_gt_inst.get('gt_masks'), PolygonMasks): polygons = per_im_gt_inst.get('gt_masks').polygons per_im_bitmasks = [] per_im_bitmasks_full = [] for per_polygons in polygons: bitmask = polygons_to_bitmask(per_polygons, images_norm.tensor.size(-2), images_norm.tensor.size(-1)) bitmask = torch.from_numpy(bitmask).to(self.device).float() start = int(self.mask_out_stride // 2) bitmask_full = bitmask.clone() bitmask = bitmask[start::self.mask_out_stride, start::self.mask_out_stride] assert bitmask.size(0) * self.mask_out_stride == images_norm.tensor.size(-2) assert bitmask.size(1) * self.mask_out_stride == images_norm.tensor.size(-1) per_im_bitmasks.append(bitmask) per_im_bitmasks_full.append(bitmask_full) per_im_gt_inst.gt_bitmasks = 
torch.stack(per_im_bitmasks, dim=0) per_im_gt_inst.gt_bitmasks_full = torch.stack(per_im_bitmasks_full, dim=0) else: bitmasks = per_im_gt_inst.get('gt_masks').tensor (h, w) = bitmasks.size()[1:] bitmasks_full = F.pad(bitmasks, (0, images_norm.tensor.size(-1) - w, 0, images_norm.tensor.size(-2) - h), 'constant', 0) bitmasks = bitmasks_full[:, start::self.mask_out_stride, start::self.mask_out_stride] per_im_gt_inst.gt_bitmasks = bitmasks per_im_gt_inst.gt_bitmasks_full = bitmasks_full else: gt_instances = None (mask_feats, sem_losses) = self.mask_branch(features, gt_instances) (proposals, proposal_losses) = self.proposal_generator(images_norm, features, gt_instances, self.controller) if self.training: pred_instances = proposals['instances'] assert self.max_proposals == -1 or self.topk_proposals_per_im == -1, 'MAX_PROPOSALS and TOPK_PROPOSALS_PER_IM cannot be used at the same time.' if self.max_proposals != -1: if self.max_proposals < len(pred_instances): inds = torch.randperm(len(pred_instances), device=mask_feats.device).long() logger.info('clipping proposals from {} to {}'.format(len(pred_instances), self.max_proposals)) pred_instances = pred_instances[inds[:self.max_proposals]] elif self.topk_proposals_per_im != -1: num_images = len(gt_instances) kept_instances = [] for im_id in range(num_images): instances_per_im = pred_instances[pred_instances.im_inds == im_id] if len(instances_per_im) == 0: kept_instances.append(instances_per_im) continue unique_gt_inds = instances_per_im.gt_inds.unique() num_instances_per_gt = max(int(self.topk_proposals_per_im / len(unique_gt_inds)), 1) for gt_ind in unique_gt_inds: instances_per_gt = instances_per_im[instances_per_im.gt_inds == gt_ind] if len(instances_per_gt) > num_instances_per_gt: scores = instances_per_gt.logits_pred.sigmoid().max(dim=1)[0] ctrness_pred = instances_per_gt.ctrness_pred.sigmoid() inds = (scores * ctrness_pred).topk(k=num_instances_per_gt, dim=0)[1] instances_per_gt = instances_per_gt[inds] kept_instances.append(instances_per_gt) pred_instances = Instances.cat(kept_instances) pred_instances.mask_head_params = pred_instances.top_feats loss_mask = self.mask_head(mask_feats, self.mask_branch.out_stride, pred_instances, gt_instances) mask_losses = loss_mask losses = {} losses.update(sem_losses) losses.update(proposal_losses) losses.update(mask_losses) return losses else: for (im_id, per_im) in enumerate(proposals): per_im.im_inds = per_im.locations.new_ones(len(per_im), dtype=torch.long) * im_id pred_instances = Instances.cat(proposals) pred_instances.mask_head_params = pred_instances.top_feat pred_instances_w_masks = self.mask_head(mask_feats, self.mask_branch.out_stride, pred_instances) pred_instances_w_masks = pred_instances_w_masks (padded_im_h, padded_im_w) = images_norm.tensor.size()[-2:] processed_results = [] for (im_id, (input_per_image, image_size)) in enumerate(zip(batched_inputs, images_norm.image_sizes)): height = input_per_image.get('height', image_size[0]) width = input_per_image.get('width', image_size[1]) instances_per_im = pred_instances_w_masks[pred_instances_w_masks.im_inds == im_id] (scale_x, scale_y) = (width / instances_per_im.image_size[1], height / instances_per_im.image_size[0]) (resized_im_h, resized_im_w) = instances_per_im.image_size instances_per_im = Instances((height, width), **instances_per_im.get_fields()) if instances_per_im.has('pred_boxes'): output_boxes = instances_per_im.pred_boxes elif instances_per_im.has('proposal_boxes'): output_boxes = instances_per_im.proposal_boxes 
output_boxes.scale(scale_x, scale_y) output_boxes.clip(instances_per_im.image_size) instances_per_im = instances_per_im[output_boxes.nonempty()] if instances_per_im.has('pred_global_masks'): (mask_h, mask_w) = instances_per_im.pred_global_masks.size()[-2:] factor_h = padded_im_h // mask_h factor_w = padded_im_w // mask_w assert factor_h == factor_w factor = factor_h pred_global_masks = aligned_bilinear(instances_per_im.pred_global_masks, factor) pred_global_masks = pred_global_masks[:, :, :resized_im_h, :resized_im_w] pred_global_masks = F.interpolate(pred_global_masks, size=(height, width), mode='bilinear', align_corners=False) pred_global_masks = pred_global_masks[:, 0, :, :] instances_per_im.pred_masks = (pred_global_masks > mask_threshold).float() instances_per_im = instances_per_im processed_results.append({'instances': instances_per_im}) return processed_results
AdelaiDet
positive
def _delete(self, tree):
    """Run a DELETE statement"""
    tablename = tree.table
    <DeepExtract>
    ...
    </DeepExtract>
    kwargs = {}
    visitor = Visitor(self.reserved_words)
    if isinstance(tree.where, ConstraintExpression):
        kwargs['condition'] = tree.where.build(visitor)
        kwargs['expr_values'] = visitor.expression_values
        kwargs['alias'] = visitor.attribute_names
    return self._query_and_op(tree, table, 'delete_item', kwargs)
def _delete(self, tree):
    """Run a DELETE statement"""
    tablename = tree.table
    ...
    kwargs = {}
    visitor = Visitor(self.reserved_words)
    if isinstance(tree.where, ConstraintExpression):
        kwargs['condition'] = tree.where.build(visitor)
        kwargs['expr_values'] = visitor.expression_values
        kwargs['alias'] = visitor.attribute_names
    return self._query_and_op(tree, table, 'delete_item', kwargs)
dql
positive
@api.model
def create_order_ws(self, parsed_order, price_source, order_filename=None):
    """Same method as create_order() but callable via JSON-RPC webservice. Returns an ID to avoid this error: TypeError: sale.order(15,) is not JSON serializable"""
    <DeepExtract>
    soo = self.env['sale.order'].with_context(mail_create_nosubscribe=True)
    bdio = self.env['business.document.import']
    so_vals = self._prepare_order(parsed_order, price_source)
    order = soo.create(so_vals)
    bdio.post_create_or_update(parsed_order, order, doc_filename=order_filename)
    logger.info('Sale Order ID %d created', order.id)
    order = order
    </DeepExtract>
    return order.id
@api.model
def create_order_ws(self, parsed_order, price_source, order_filename=None):
    """Same method as create_order() but callable via JSON-RPC webservice. Returns an ID to avoid this error: TypeError: sale.order(15,) is not JSON serializable"""
    soo = self.env['sale.order'].with_context(mail_create_nosubscribe=True)
    bdio = self.env['business.document.import']
    so_vals = self._prepare_order(parsed_order, price_source)
    order = soo.create(so_vals)
    bdio.post_create_or_update(parsed_order, order, doc_filename=order_filename)
    logger.info('Sale Order ID %d created', order.id)
    order = order
    return order.id
edi
positive
def hash_message(self, ts):
    ts = SlackTS(ts)
    def calc_hash(msg):
        return sha1_hex(str(msg.ts))
    if ts in self.messages and (not self.messages[ts].hash):
        message = self.messages[ts]
        <DeepExtract>
        tshash = sha1_hex(str(message.ts))
        </DeepExtract>
        hl = 3
        shorthash = tshash[:hl]
        while any((x.startswith(shorthash) for x in self.hashed_messages)):
            hl += 1
            shorthash = tshash[:hl]
        if shorthash[:-1] in self.hashed_messages:
            col_msg = self.hashed_messages.pop(shorthash[:-1])
            col_new_hash = calc_hash(col_msg)[:hl]
            col_msg.hash = col_new_hash
            self.hashed_messages[col_new_hash] = col_msg
            <DeepExtract>
            str(col_msg.ts) = SlackTS(str(col_msg.ts))
            m = self.messages.get(str(col_msg.ts))
            if not m:
                return
            if message_json:
                m.message_json.update(message_json)
            if text:
                m.change_text(text)
            if type(m) == SlackMessage or config.thread_messages_in_channel:
                new_text = self.render(m, force=True)
                modify_buffer_line(self.channel_buffer, str(col_msg.ts), new_text)
            if type(m) == SlackThreadMessage:
                thread_channel = m.parent_message.thread_channel
                if thread_channel and thread_channel.active:
                    new_text = thread_channel.render(m, force=True)
                    modify_buffer_line(thread_channel.channel_buffer, str(col_msg.ts), new_text)
            </DeepExtract>
            if col_msg.thread_channel:
                col_msg.thread_channel.rename()
        self.hashed_messages[shorthash] = message
        message.hash = shorthash
        return shorthash
    elif ts in self.messages:
        return self.messages[ts].hash
def hash_message(self, ts):
    ts = SlackTS(ts)
    def calc_hash(msg):
        return sha1_hex(str(msg.ts))
    if ts in self.messages and (not self.messages[ts].hash):
        message = self.messages[ts]
        tshash = sha1_hex(str(message.ts))
        hl = 3
        shorthash = tshash[:hl]
        while any((x.startswith(shorthash) for x in self.hashed_messages)):
            hl += 1
            shorthash = tshash[:hl]
        if shorthash[:-1] in self.hashed_messages:
            col_msg = self.hashed_messages.pop(shorthash[:-1])
            col_new_hash = calc_hash(col_msg)[:hl]
            col_msg.hash = col_new_hash
            self.hashed_messages[col_new_hash] = col_msg
            str(col_msg.ts) = SlackTS(str(col_msg.ts))
            m = self.messages.get(str(col_msg.ts))
            if not m:
                return
            if message_json:
                m.message_json.update(message_json)
            if text:
                m.change_text(text)
            if type(m) == SlackMessage or config.thread_messages_in_channel:
                new_text = self.render(m, force=True)
                modify_buffer_line(self.channel_buffer, str(col_msg.ts), new_text)
            if type(m) == SlackThreadMessage:
                thread_channel = m.parent_message.thread_channel
                if thread_channel and thread_channel.active:
                    new_text = thread_channel.render(m, force=True)
                    modify_buffer_line(thread_channel.channel_buffer, str(col_msg.ts), new_text)
            if col_msg.thread_channel:
                col_msg.thread_channel.rename()
        self.hashed_messages[shorthash] = message
        message.hash = shorthash
        return shorthash
    elif ts in self.messages:
        return self.messages[ts].hash
awesome-dots
positive
def _sample_lanczos(im, xs, ys):
    xs = np.asanyarray(xs)
    ys = np.asanyarray(ys)
    im = np.atleast_2d(np.asanyarray(im))
    if xs.shape != ys.shape:
        raise ValueError('Shape of xs and ys must match')
    (floor_xs, floor_ys) = (np.floor(xs), np.floor(ys))
    (frac_xs, frac_ys) = (xs - floor_xs, ys - floor_ys)
    a = 3.0
    def _l(x):
        return np.sinc(x) * np.sinc(x / a)
    S = None
    for dx in np.arange(-a + 1, a + 1):
        <DeepExtract>
        Lx = np.sinc(frac_xs - dx) * np.sinc(frac_xs - dx / a)
        </DeepExtract>
        for dy in np.arange(-a + 1, a + 1):
            <DeepExtract>
            Ly = np.sinc(frac_ys - dy) * np.sinc(frac_ys - dy / a)
            </DeepExtract>
            weight = Lx * Ly
            while len(im.shape) != len(weight.shape):
                weight = np.repeat(weight[..., np.newaxis], im.shape[len(weight.shape)], len(weight.shape))
            contrib = weight * _sample_clipped(im, floor_xs + dx, floor_ys + dy)
            if S is None:
                S = contrib
            else:
                S += contrib
    return S
def _sample_lanczos(im, xs, ys):
    xs = np.asanyarray(xs)
    ys = np.asanyarray(ys)
    im = np.atleast_2d(np.asanyarray(im))
    if xs.shape != ys.shape:
        raise ValueError('Shape of xs and ys must match')
    (floor_xs, floor_ys) = (np.floor(xs), np.floor(ys))
    (frac_xs, frac_ys) = (xs - floor_xs, ys - floor_ys)
    a = 3.0
    def _l(x):
        return np.sinc(x) * np.sinc(x / a)
    S = None
    for dx in np.arange(-a + 1, a + 1):
        Lx = np.sinc(frac_xs - dx) * np.sinc(frac_xs - dx / a)
        for dy in np.arange(-a + 1, a + 1):
            Ly = np.sinc(frac_ys - dy) * np.sinc(frac_ys - dy / a)
            weight = Lx * Ly
            while len(im.shape) != len(weight.shape):
                weight = np.repeat(weight[..., np.newaxis], im.shape[len(weight.shape)], len(weight.shape))
            contrib = weight * _sample_clipped(im, floor_xs + dx, floor_ys + dy)
            if S is None:
                S = contrib
            else:
                S += contrib
    return S
dtcwt
positive
def spot_sell_long(self):
    """ Exits long position in spot account. """
    <DeepExtract>
    self.spot_coin = self.round_down(self.binance_client.get_asset_balance(asset=self.coin_name)['free'])
    </DeepExtract>
    order = self.binance_client.order_market_sell(symbol=self.symbol, quantity=self.spot_coin)
    self.add_trade(message='Sold spot long.', force=False, orderID=order['clientOrderId'])
def spot_sell_long(self):
    """ Exits long position in spot account. """
    self.spot_coin = self.round_down(self.binance_client.get_asset_balance(asset=self.coin_name)['free'])
    order = self.binance_client.order_market_sell(symbol=self.symbol, quantity=self.spot_coin)
    self.add_trade(message='Sold spot long.', force=False, orderID=order['clientOrderId'])
algobot
positive
def test_create_then_patch_topic(self):
    skeleton = TopicSkeleton.objects.get(title='Body Count')
    <DeepExtract>
    if credentials is None:
        credentials = self.get_contrib_credentials()
    if skeleton is None:
        skeleton = TopicSkeleton.objects.get(title='Body Count')
    {'title': u'Skeletonist'}['ontology_as_json'] = skeleton.ontology
    resp = self.api_client.post('/api/detective/common/v1/topic/', data={'title': u'Skeletonist'}, format='json', authentication=credentials)
    </DeepExtract>
    self.assertHttpCreated(resp)
    created_topic = json.loads(resp.content)
    self.assertIsNotNone(created_topic['ontology_as_json'])
    data = {'title': u'new title'}
    resp = self.api_client.patch('/api/detective/common/v1/topic/{pk}/'.format(pk=created_topic['id']), data=data, format='json', authentication=self.get_contrib_credentials())
    self.assertTrue(resp.status_code in [200, 202])
    updated_topic = Topic.objects.get(slug=created_topic['slug'])
    self.assertEqual(updated_topic.title, data['title'])
def test_create_then_patch_topic(self):
    skeleton = TopicSkeleton.objects.get(title='Body Count')
    if credentials is None:
        credentials = self.get_contrib_credentials()
    if skeleton is None:
        skeleton = TopicSkeleton.objects.get(title='Body Count')
    {'title': u'Skeletonist'}['ontology_as_json'] = skeleton.ontology
    resp = self.api_client.post('/api/detective/common/v1/topic/', data={'title': u'Skeletonist'}, format='json', authentication=credentials)
    self.assertHttpCreated(resp)
    created_topic = json.loads(resp.content)
    self.assertIsNotNone(created_topic['ontology_as_json'])
    data = {'title': u'new title'}
    resp = self.api_client.patch('/api/detective/common/v1/topic/{pk}/'.format(pk=created_topic['id']), data=data, format='json', authentication=self.get_contrib_credentials())
    self.assertTrue(resp.status_code in [200, 202])
    updated_topic = Topic.objects.get(slug=created_topic['slug'])
    self.assertEqual(updated_topic.title, data['title'])
detective.io
positive
@pytest.mark.django_db
def test_plan_list_delete_403_if_not_authorized(client, django_user_model):
    """Tests for 403 error for PlanListDelete if inadequate permissions."""
    <DeepExtract>
    plan_list = models.PlanList.objects.create(title=title)
    </DeepExtract>
    django_user_model.objects.create_user(username='user', password='password')
    client.login(username='user', password='password')
    response = client.get(reverse('dfs_plan_list_delete', kwargs={'plan_list_id': plan_list.id}))
    assert response.status_code == 403
@pytest.mark.django_db
def test_plan_list_delete_403_if_not_authorized(client, django_user_model):
    """Tests for 403 error for PlanListDelete if inadequate permissions."""
    plan_list = models.PlanList.objects.create(title=title)
    django_user_model.objects.create_user(username='user', password='password')
    client.login(username='user', password='password')
    response = client.get(reverse('dfs_plan_list_delete', kwargs={'plan_list_id': plan_list.id}))
    assert response.status_code == 403
django-flexible-subscriptions
positive
def _decode_group_by(groupby_type, body): if search_pb.GROUP_BY_FIELD == groupby_type: proto = search_pb.GroupByFieldResult() proto.ParseFromString(body) result_items = [] for item in proto.group_by_field_result_items: sub_group_by_results = [] sub_agg_results = [] <DeepExtract> if item.sub_group_bys_result is not None: for group_result in item.sub_group_bys_result.group_by_results: name = group_result.name items = self._decode_group_by(group_result.type, group_result.group_by_result) sub_group_by_results.append(GroupByResult(name, items)) </DeepExtract> <DeepExtract> if item.sub_aggs_result is not None: for agg_result in item.sub_aggs_result.agg_results: name = agg_result.name value = self._decode_agg(agg_result.type, agg_result.agg_result) sub_agg_results.append(AggResult(name, value)) </DeepExtract> result_item = GroupByFieldResultItem(item.key, item.row_count, sub_agg_results, sub_group_by_results) result_items.append(result_item) return result_items elif search_pb.GROUP_BY_RANGE == groupby_type: proto = search_pb.GroupByRangeResult() proto.ParseFromString(body) result_items = [] for item in proto.group_by_range_result_items: sub_group_by_results = [] sub_agg_results = [] <DeepExtract> if item.sub_group_bys_result is not None: for group_result in item.sub_group_bys_result.group_by_results: name = group_result.name items = self._decode_group_by(group_result.type, group_result.group_by_result) sub_group_by_results.append(GroupByResult(name, items)) </DeepExtract> <DeepExtract> if item.sub_aggs_result is not None: for agg_result in item.sub_aggs_result.agg_results: name = agg_result.name value = self._decode_agg(agg_result.type, agg_result.agg_result) sub_agg_results.append(AggResult(name, value)) </DeepExtract> result_item = GroupByRangeResultItem(item.range_from, item.range_to, item.row_count, sub_agg_results, sub_group_by_results) result_items.append(result_item) return result_items elif search_pb.GROUP_BY_FILTER == groupby_type: proto = search_pb.GroupByFilterResult() proto.ParseFromString(body) result_items = [] for item in proto.group_by_filter_result_items: sub_group_by_results = [] sub_agg_results = [] <DeepExtract> if item.sub_group_bys_result is not None: for group_result in item.sub_group_bys_result.group_by_results: name = group_result.name items = self._decode_group_by(group_result.type, group_result.group_by_result) sub_group_by_results.append(GroupByResult(name, items)) </DeepExtract> <DeepExtract> if item.sub_aggs_result is not None: for agg_result in item.sub_aggs_result.agg_results: name = agg_result.name value = self._decode_agg(agg_result.type, agg_result.agg_result) sub_agg_results.append(AggResult(name, value)) </DeepExtract> result_item = GroupByFilterResultItem(item.row_count, sub_agg_results, sub_group_by_results) result_items.append(result_item) return result_items elif search_pb.GROUP_BY_GEO_DISTANCE == groupby_type: proto = search_pb.GroupByGeoDistanceResult() proto.ParseFromString(body) result_items = [] for item in proto.group_by_geo_distance_result_items: sub_group_by_results = [] sub_agg_results = [] <DeepExtract> if item.sub_group_bys_result is not None: for group_result in item.sub_group_bys_result.group_by_results: name = group_result.name items = self._decode_group_by(group_result.type, group_result.group_by_result) sub_group_by_results.append(GroupByResult(name, items)) </DeepExtract> <DeepExtract> if item.sub_aggs_result is not None: for agg_result in item.sub_aggs_result.agg_results: name = agg_result.name value = self._decode_agg(agg_result.type, 
agg_result.agg_result) sub_agg_results.append(AggResult(name, value)) </DeepExtract> result_item = GroupByGeoDistanceResultItem(item.range_from, item.range_to, item.row_count, sub_agg_results, sub_group_by_results) result_items.append(result_item) return result_items elif search_pb.GROUP_BY_HISTOGRAM == groupby_type: proto = search_pb.GroupByHistogramResult() proto.ParseFromString(body) result_items = [] for item in proto.group_by_histogra_items: sub_group_by_results = [] sub_agg_results = [] <DeepExtract> if item.sub_group_bys_result is not None: for group_result in item.sub_group_bys_result.group_by_results: name = group_result.name items = self._decode_group_by(group_result.type, group_result.group_by_result) sub_group_by_results.append(GroupByResult(name, items)) </DeepExtract> <DeepExtract> if item.sub_aggs_result is not None: for agg_result in item.sub_aggs_result.agg_results: name = agg_result.name value = self._decode_agg(agg_result.type, agg_result.agg_result) sub_agg_results.append(AggResult(name, value)) </DeepExtract> result_item = GroupByHistogramResultItem(self._decode_column_value(item.key), item.value, sub_agg_results, sub_group_by_results) result_items.append(result_item) return result_items else: raise OTSClientError('unsupport group by type:%s' % str(groupby_type))
def _decode_group_by(groupby_type, body): if search_pb.GROUP_BY_FIELD == groupby_type: proto = search_pb.GroupByFieldResult() proto.ParseFromString(body) result_items = [] for item in proto.group_by_field_result_items: sub_group_by_results = [] sub_agg_results = [] if item.sub_group_bys_result is not None: for group_result in item.sub_group_bys_result.group_by_results: name = group_result.name items = self._decode_group_by(group_result.type, group_result.group_by_result) sub_group_by_results.append(GroupByResult(name, items)) if item.sub_aggs_result is not None: for agg_result in item.sub_aggs_result.agg_results: name = agg_result.name value = self._decode_agg(agg_result.type, agg_result.agg_result) sub_agg_results.append(AggResult(name, value)) result_item = GroupByFieldResultItem(item.key, item.row_count, sub_agg_results, sub_group_by_results) result_items.append(result_item) return result_items elif search_pb.GROUP_BY_RANGE == groupby_type: proto = search_pb.GroupByRangeResult() proto.ParseFromString(body) result_items = [] for item in proto.group_by_range_result_items: sub_group_by_results = [] sub_agg_results = [] if item.sub_group_bys_result is not None: for group_result in item.sub_group_bys_result.group_by_results: name = group_result.name items = self._decode_group_by(group_result.type, group_result.group_by_result) sub_group_by_results.append(GroupByResult(name, items)) if item.sub_aggs_result is not None: for agg_result in item.sub_aggs_result.agg_results: name = agg_result.name value = self._decode_agg(agg_result.type, agg_result.agg_result) sub_agg_results.append(AggResult(name, value)) result_item = GroupByRangeResultItem(item.range_from, item.range_to, item.row_count, sub_agg_results, sub_group_by_results) result_items.append(result_item) return result_items elif search_pb.GROUP_BY_FILTER == groupby_type: proto = search_pb.GroupByFilterResult() proto.ParseFromString(body) result_items = [] for item in proto.group_by_filter_result_items: sub_group_by_results = [] sub_agg_results = [] if item.sub_group_bys_result is not None: for group_result in item.sub_group_bys_result.group_by_results: name = group_result.name items = self._decode_group_by(group_result.type, group_result.group_by_result) sub_group_by_results.append(GroupByResult(name, items)) if item.sub_aggs_result is not None: for agg_result in item.sub_aggs_result.agg_results: name = agg_result.name value = self._decode_agg(agg_result.type, agg_result.agg_result) sub_agg_results.append(AggResult(name, value)) result_item = GroupByFilterResultItem(item.row_count, sub_agg_results, sub_group_by_results) result_items.append(result_item) return result_items elif search_pb.GROUP_BY_GEO_DISTANCE == groupby_type: proto = search_pb.GroupByGeoDistanceResult() proto.ParseFromString(body) result_items = [] for item in proto.group_by_geo_distance_result_items: sub_group_by_results = [] sub_agg_results = [] if item.sub_group_bys_result is not None: for group_result in item.sub_group_bys_result.group_by_results: name = group_result.name items = self._decode_group_by(group_result.type, group_result.group_by_result) sub_group_by_results.append(GroupByResult(name, items)) if item.sub_aggs_result is not None: for agg_result in item.sub_aggs_result.agg_results: name = agg_result.name value = self._decode_agg(agg_result.type, agg_result.agg_result) sub_agg_results.append(AggResult(name, value)) result_item = GroupByGeoDistanceResultItem(item.range_from, item.range_to, item.row_count, sub_agg_results, sub_group_by_results) 
result_items.append(result_item) return result_items elif search_pb.GROUP_BY_HISTOGRAM == groupby_type: proto = search_pb.GroupByHistogramResult() proto.ParseFromString(body) result_items = [] for item in proto.group_by_histogra_items: sub_group_by_results = [] sub_agg_results = [] if item.sub_group_bys_result is not None: for group_result in item.sub_group_bys_result.group_by_results: name = group_result.name items = self._decode_group_by(group_result.type, group_result.group_by_result) sub_group_by_results.append(GroupByResult(name, items)) if item.sub_aggs_result is not None: for agg_result in item.sub_aggs_result.agg_results: name = agg_result.name value = self._decode_agg(agg_result.type, agg_result.agg_result) sub_agg_results.append(AggResult(name, value)) result_item = GroupByHistogramResultItem(self._decode_column_value(item.key), item.value, sub_agg_results, sub_group_by_results) result_items.append(result_item) return result_items else: raise OTSClientError('unsupport group by type:%s' % str(groupby_type))
aliyun-tablestore-python-sdk
positive
def test_fac_ui(self):
    <DeepExtract>
    self = Mpfr_t()
    mpfr_init2(self, 53)
    x = self
    </DeepExtract>
    mpfr_fac_ui(x, 4, MPFR_RNDN)
    self.assertEqual(mpfr_get_d(x, MPFR_RNDN), 24.0)
    mpfr_fac_ui(x, 5, MPFR_RNDN)
    self.assertEqual(mpfr_get_d(x, MPFR_RNDN), 120.0)
    mpfr_fac_ui(x, 6, MPFR_RNDN)
    self.assertEqual(mpfr_get_d(x, MPFR_RNDN), 720.0)
def test_fac_ui(self):
    self = Mpfr_t()
    mpfr_init2(self, 53)
    x = self
    mpfr_fac_ui(x, 4, MPFR_RNDN)
    self.assertEqual(mpfr_get_d(x, MPFR_RNDN), 24.0)
    mpfr_fac_ui(x, 5, MPFR_RNDN)
    self.assertEqual(mpfr_get_d(x, MPFR_RNDN), 120.0)
    mpfr_fac_ui(x, 6, MPFR_RNDN)
    self.assertEqual(mpfr_get_d(x, MPFR_RNDN), 720.0)
bigfloat
positive
def __init__(self, cache=False):
    if cache:
        self._cache = []
        self._cache_lock = _thread.allocate_lock()
        <DeepExtract>
        if self._cache is not None:
            self._cache = []
            self._cache_complete = False
            self._cache_gen = self._iter()
            if self._cache_lock.locked():
                self._cache_lock.release()
        self._len = None
        </DeepExtract>
    else:
        self._cache = None
        self._cache_complete = False
        self._len = None
def __init__(self, cache=False):
    if cache:
        self._cache = []
        self._cache_lock = _thread.allocate_lock()
        if self._cache is not None:
            self._cache = []
            self._cache_complete = False
            self._cache_gen = self._iter()
            if self._cache_lock.locked():
                self._cache_lock.release()
        self._len = None
    else:
        self._cache = None
        self._cache_complete = False
        self._len = None
Android-Free-Forensic-Toolkit
positive
def ResNet101V1_d(include_top=True, weights=None, input_shape=None, pooling=None, classes=1000, weight_decay=0.0001, norm_fn=None, **kwargs):
    def stack_fn(x):
        <DeepExtract>
        x = block1(x, 64, stride=1, avg_down=True, norm_fn=norm_fn, name='conv2' + '_block1', weight_decay=weight_decay)
        for i in range(2, 3 + 1):
            x = block1(x, 64, conv_shortcut=False, avg_down=True, norm_fn=norm_fn, name='conv2' + '_block' + str(i), weight_decay=weight_decay)
        x = x
        </DeepExtract>
        <DeepExtract>
        x = block1(x, 128, stride=stride1, avg_down=True, norm_fn=norm_fn, name='conv3' + '_block1', weight_decay=weight_decay)
        for i in range(2, 4 + 1):
            x = block1(x, 128, conv_shortcut=False, avg_down=True, norm_fn=norm_fn, name='conv3' + '_block' + str(i), weight_decay=weight_decay)
        x = x
        </DeepExtract>
        <DeepExtract>
        x = block1(x, 256, stride=stride1, avg_down=True, norm_fn=norm_fn, name='conv4' + '_block1', weight_decay=weight_decay)
        for i in range(2, 23 + 1):
            x = block1(x, 256, conv_shortcut=False, avg_down=True, norm_fn=norm_fn, name='conv4' + '_block' + str(i), weight_decay=weight_decay)
        x = x
        </DeepExtract>
        <DeepExtract>
        x = block1(x, 512, stride=stride1, avg_down=True, norm_fn=norm_fn, name='conv5' + '_block1', weight_decay=weight_decay)
        for i in range(2, 3 + 1):
            x = block1(x, 512, conv_shortcut=False, avg_down=True, norm_fn=norm_fn, name='conv5' + '_block' + str(i), weight_decay=weight_decay)
        x = x
        </DeepExtract>
        return x
    return ResNet(stack_fn, norm_fn, False, True, 'resnet101v1_d', include_top, weights, input_shape, pooling, classes, deep_stem=True, **kwargs)
def ResNet101V1_d(include_top=True, weights=None, input_shape=None, pooling=None, classes=1000, weight_decay=0.0001, norm_fn=None, **kwargs): def stack_fn(x): x = block1(x, 64, stride=1, avg_down=True, norm_fn=norm_fn, name='conv2' + '_block1', weight_decay=weight_decay) for i in range(2, 3 + 1): x = block1(x, 64, conv_shortcut=False, avg_down=True, norm_fn=norm_fn, name='conv2' + '_block' + str(i), weight_decay=weight_decay) x = x x = block1(x, 128, stride=stride1, avg_down=True, norm_fn=norm_fn, name='conv3' + '_block1', weight_decay=weight_decay) for i in range(2, 4 + 1): x = block1(x, 128, conv_shortcut=False, avg_down=True, norm_fn=norm_fn, name='conv3' + '_block' + str(i), weight_decay=weight_decay) x = x x = block1(x, 256, stride=stride1, avg_down=True, norm_fn=norm_fn, name='conv4' + '_block1', weight_decay=weight_decay) for i in range(2, 23 + 1): x = block1(x, 256, conv_shortcut=False, avg_down=True, norm_fn=norm_fn, name='conv4' + '_block' + str(i), weight_decay=weight_decay) x = x x = block1(x, 512, stride=stride1, avg_down=True, norm_fn=norm_fn, name='conv5' + '_block1', weight_decay=weight_decay) for i in range(2, 3 + 1): x = block1(x, 512, conv_shortcut=False, avg_down=True, norm_fn=norm_fn, name='conv5' + '_block' + str(i), weight_decay=weight_decay) x = x return x return ResNet(stack_fn, norm_fn, False, True, 'resnet101v1_d', include_top, weights, input_shape, pooling, classes, deep_stem=True, **kwargs)
deep-learning-models
positive
def floatValue(self): localctx = cqlParser.FloatValueContext(self, self._ctx, self.state) <DeepExtract> if hasattr(localctx, 'enterFloatValue'): localctx.enterFloatValue(self) </DeepExtract> try: self.enterOuterAlt(localctx, 1) self.state = 28 self.match(cqlParser.FLOAT) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: <DeepExtract> if hasattr(listener, 'exitFloatValue'): listener.exitFloatValue(self) </DeepExtract> return localctx
def floatValue(self): localctx = cqlParser.FloatValueContext(self, self._ctx, self.state) if hasattr(localctx, 'enterFloatValue'): localctx.enterFloatValue(self) try: self.enterOuterAlt(localctx, 1) self.state = 28 self.match(cqlParser.FLOAT) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: if hasattr(listener, 'exitFloatValue'): listener.exitFloatValue(self) return localctx
cascade-server
positive
def evaluate(model, g, features, labels, mask, loss_func):
    model.eval()
    with torch.no_grad():
        logits = model(g, features)
    loss = loss_func(logits[mask], labels[mask])
    <DeepExtract>
    (_, indices) = torch.max(logits[mask], dim=1)
    prediction = indices.long().cpu().numpy()
    labels[mask] = labels[mask].cpu().numpy()
    accuracy = (prediction == labels[mask]).sum() / len(prediction)
    micro_f1 = f1_score(labels[mask], prediction, average='micro')
    macro_f1 = f1_score(labels[mask], prediction, average='macro')
    (accuracy, micro_f1, macro_f1) = (accuracy, micro_f1, macro_f1)
    </DeepExtract>
    return (loss, accuracy, micro_f1, macro_f1)
def evaluate(model, g, features, labels, mask, loss_func):
    model.eval()
    with torch.no_grad():
        logits = model(g, features)
    loss = loss_func(logits[mask], labels[mask])
    (_, indices) = torch.max(logits[mask], dim=1)
    prediction = indices.long().cpu().numpy()
    labels[mask] = labels[mask].cpu().numpy()
    accuracy = (prediction == labels[mask]).sum() / len(prediction)
    micro_f1 = f1_score(labels[mask], prediction, average='micro')
    macro_f1 = f1_score(labels[mask], prediction, average='macro')
    (accuracy, micro_f1, macro_f1) = (accuracy, micro_f1, macro_f1)
    return (loss, accuracy, micro_f1, macro_f1)
AutoGL
positive
def load_resources(self, resources=None): if resources is None: <DeepExtract> res_list = [] for resource in self._schema['resource']: res_list.append(resource) resources = res_list </DeepExtract> for resource in resources: <DeepExtract> if resource not in self.resources: self.resources[resource] = {} resources = Resource.objects.filter(manager=self.manager(), kind=resource).only('id', 'name', 'kind', 'status') for item in resources: self.resources[resource]['id{}'.format(item.id)] = {'id': 'id{}'.format(item.id), 'name': item.name, 'kind': item.kind, 'status': item.status} </DeepExtract> count = len(self.resources.get(resource, {})) logger.info('Loaded {} {} resources'.format(count, resource))
def load_resources(self, resources=None): if resources is None: res_list = [] for resource in self._schema['resource']: res_list.append(resource) resources = res_list for resource in resources: if resource not in self.resources: self.resources[resource] = {} resources = Resource.objects.filter(manager=self.manager(), kind=resource).only('id', 'name', 'kind', 'status') for item in resources: self.resources[resource]['id{}'.format(item.id)] = {'id': 'id{}'.format(item.id), 'name': item.name, 'kind': item.kind, 'status': item.status} count = len(self.resources.get(resource, {})) logger.info('Loaded {} {} resources'.format(count, resource))
architect-api
positive
def _main(args): print('args: ', args) start_all = time.time() start = time.time() <DeepExtract> print('Loading model: %s' % args.model_path) net = get_infer(args) model_type = args.model_type if net is None: print('ERROR: Uknown model type: ', model_type) print('Please use the supported types: ', model_type_dict.keys()) sys.exit(-1) net = net </DeepExtract> dura = time.time() - start print('Loading model time cost: %f seconds.' % dura) eval_sets = [it.strip() for it in args.eval_sets.strip().split(',')] merge_sets = [] for ieval_set in eval_sets: if ieval_set not in eval_sets_dict: print('unknown eval set %s, pass' % ieval_set) continue print('\nExtract on %s...' % ieval_set) start = time.time() evaluator = eval(eval_sets_dict[ieval_set])(args) ret = evaluator.extract_embedding(net) dura = (time.time() - start) / 60.0 print('\nExtract on %s done! Time cost: %fm' % (ieval_set, dura)) if not ret: print('extract failed') continue eval_info = {'eval_set': ieval_set, 'args': args} <DeepExtract> r = requests.post('http://127.0.0.1:%s/eval' % eval_info['args'].port, data=eval_info).json() if r['success']: print('eval set %s evaluate process append success!' % eval_info['eval_set']) print('current evaluate tasks:') print(r['tasks']) else: print('eval set %s evaluate process append failed, please restart manally using command [ python -c "from argparse import Namespace; import requests; r=requests.post(\'http://127.0.0.1:%d/eval\', data=%s).json(); print(\'success:\', r[\'success\']);" ]' % (eval_info['eval_set'], eval_info['args'].port, eval_info)) </DeepExtract> merge_sets.append(ieval_set) if len(merge_sets) > 0: merge_info = {'eval_set': ','.join(merge_sets), 'args': args, 'net_scale_type': net._net_scale_type} <DeepExtract> r = requests.post('http://127.0.0.1:%d/merge' % merge_info['args'].port, data=merge_info).json() if r['success']: print('start merge process success %s' % merge_info['eval_set']) else: print('merge %s process falied, please restart manally using command [ python -c "from argparse import Namespace; import requests; r=requests.post(\'http://127.0.0.1:%d/merge\', data=%s).json(); print(\'success:\', r[\'success\']);" ]' % (merge_info['eval_set'], merge_info['args'].port, merge_info)) </DeepExtract> dura_all = (time.time() - start_all) / 60.0 print('Total extract time cost: %fm' % dura_all)
def _main(args): print('args: ', args) start_all = time.time() start = time.time() print('Loading model: %s' % args.model_path) net = get_infer(args) model_type = args.model_type if net is None: print('ERROR: Uknown model type: ', model_type) print('Please use the supported types: ', model_type_dict.keys()) sys.exit(-1) net = net dura = time.time() - start print('Loading model time cost: %f seconds.' % dura) eval_sets = [it.strip() for it in args.eval_sets.strip().split(',')] merge_sets = [] for ieval_set in eval_sets: if ieval_set not in eval_sets_dict: print('unknown eval set %s, pass' % ieval_set) continue print('\nExtract on %s...' % ieval_set) start = time.time() evaluator = eval(eval_sets_dict[ieval_set])(args) ret = evaluator.extract_embedding(net) dura = (time.time() - start) / 60.0 print('\nExtract on %s done! Time cost: %fm' % (ieval_set, dura)) if not ret: print('extract failed') continue eval_info = {'eval_set': ieval_set, 'args': args} r = requests.post('http://127.0.0.1:%s/eval' % eval_info['args'].port, data=eval_info).json() if r['success']: print('eval set %s evaluate process append success!' % eval_info['eval_set']) print('current evaluate tasks:') print(r['tasks']) else: print('eval set %s evaluate process append failed, please restart manally using command [ python -c "from argparse import Namespace; import requests; r=requests.post(\'http://127.0.0.1:%d/eval\', data=%s).json(); print(\'success:\', r[\'success\']);" ]' % (eval_info['eval_set'], eval_info['args'].port, eval_info)) merge_sets.append(ieval_set) if len(merge_sets) > 0: merge_info = {'eval_set': ','.join(merge_sets), 'args': args, 'net_scale_type': net._net_scale_type} r = requests.post('http://127.0.0.1:%d/merge' % merge_info['args'].port, data=merge_info).json() if r['success']: print('start merge process success %s' % merge_info['eval_set']) else: print('merge %s process falied, please restart manally using command [ python -c "from argparse import Namespace; import requests; r=requests.post(\'http://127.0.0.1:%d/merge\', data=%s).json(); print(\'success:\', r[\'success\']);" ]' % (merge_info['eval_set'], merge_info['args'].port, merge_info)) dura_all = (time.time() - start_all) / 60.0 print('Total extract time cost: %fm' % dura_all)
cavaface.pytorch
positive
def process_message(message: Message, env: Environment) -> Evm: """ Executes a call to create a smart contract. Parameters ---------- message : Transaction specific items. env : External items required for EVM execution. Returns ------- evm: :py:class:`~ethereum.homestead.vm.Evm` Items containing execution specific objects """ if message.depth > STACK_DEPTH_LIMIT: raise StackDepthLimitError('Stack depth limit reached') begin_transaction(env.state) touch_account(env.state, message.current_target) if message.should_transfer_value and message.value != 0: move_ether(env.state, message.caller, message.current_target, message.value) <DeepExtract> code = message.code valid_jump_destinations = get_valid_jump_destinations(code) evm = Evm(pc=Uint(0), stack=[], memory=bytearray(), code=code, gas_left=message.gas, env=env, valid_jump_destinations=valid_jump_destinations, logs=(), refund_counter=U256(0), running=True, message=message, output=b'', accounts_to_delete=set(), has_erred=False) try: if evm.message.code_address in PRE_COMPILED_CONTRACTS: evm_trace(evm, evm.message.code_address) PRE_COMPILED_CONTRACTS[evm.message.code_address](evm) evm = evm while evm.running and evm.pc < len(evm.code): try: op = Ops(evm.code[evm.pc]) except ValueError: raise InvalidOpcode(evm.code[evm.pc]) evm_trace(evm, op) op_implementation[op](evm) except ExceptionalHalt: evm.gas_left = U256(0) evm.has_erred = True evm = evm </DeepExtract> if evm.has_erred: rollback_transaction(env.state) else: commit_transaction(env.state) return evm
def process_message(message: Message, env: Environment) -> Evm: """ Executes a call to create a smart contract. Parameters ---------- message : Transaction specific items. env : External items required for EVM execution. Returns ------- evm: :py:class:`~ethereum.homestead.vm.Evm` Items containing execution specific objects """ if message.depth > STACK_DEPTH_LIMIT: raise StackDepthLimitError('Stack depth limit reached') begin_transaction(env.state) touch_account(env.state, message.current_target) if message.should_transfer_value and message.value != 0: move_ether(env.state, message.caller, message.current_target, message.value) code = message.code valid_jump_destinations = get_valid_jump_destinations(code) evm = Evm(pc=Uint(0), stack=[], memory=bytearray(), code=code, gas_left=message.gas, env=env, valid_jump_destinations=valid_jump_destinations, logs=(), refund_counter=U256(0), running=True, message=message, output=b'', accounts_to_delete=set(), has_erred=False) try: if evm.message.code_address in PRE_COMPILED_CONTRACTS: evm_trace(evm, evm.message.code_address) PRE_COMPILED_CONTRACTS[evm.message.code_address](evm) evm = evm while evm.running and evm.pc < len(evm.code): try: op = Ops(evm.code[evm.pc]) except ValueError: raise InvalidOpcode(evm.code[evm.pc]) evm_trace(evm, op) op_implementation[op](evm) except ExceptionalHalt: evm.gas_left = U256(0) evm.has_erred = True evm = evm if evm.has_erred: rollback_transaction(env.state) else: commit_transaction(env.state) return evm
eth1.0-specs
positive
def sleep(self):
    """ Sleep between retry attempts using an exponential backoff.
    By default, the backoff factor is 0 and this method will return immediately.
    """
    <DeepExtract>
    if self._observed_errors <= 1:
        backoff = 0
    backoff_value = self.backoff_factor * 2 ** (self._observed_errors - 1)
    backoff = min(self.BACKOFF_MAX, backoff_value)
    </DeepExtract>
    if backoff <= 0:
        return
    time.sleep(backoff)
def sleep(self):
    """ Sleep between retry attempts using an exponential backoff.
    By default, the backoff factor is 0 and this method will return immediately.
    """
    if self._observed_errors <= 1:
        backoff = 0
    backoff_value = self.backoff_factor * 2 ** (self._observed_errors - 1)
    backoff = min(self.BACKOFF_MAX, backoff_value)
    if backoff <= 0:
        return
    time.sleep(backoff)
dirfuzz
positive
def _make_sibling_of_root_node(node, target, position): """ Moves ``node``, making it a sibling of the given ``target`` root node as specified by ``position``. ``node`` will be modified to reflect its new tree state in the database. Since we use tree ids to reduce the number of rows affected by tree mangement during insertion and deletion, root nodes are not true siblings; thus, making an item a sibling of a root node is a special case which involves shuffling tree ids around. """ if node == target: raise InvalidMove(_('A node may not be made a sibling of itself.')) opts = self.model._meta tree_id = getattr(node, self.tree_id_attr) target_tree_id = getattr(target, self.tree_id_attr) if node.is_child_node(): if position == 'left': space_target = target_tree_id - 1 new_tree_id = target_tree_id elif position == 'right': space_target = target_tree_id new_tree_id = target_tree_id + 1 else: raise ValueError(_('An invalid position was given: %s.') % position) <DeepExtract> opts = self.model._meta cursor = connection.cursor() cursor.execute('\n UPDATE %(table)s\n SET %(tree_id)s = %(tree_id)s + 1\n WHERE %(tree_id)s > %%s' % {'table': qn(opts.db_table), 'tree_id': qn(opts.get_field(self.tree_id_attr).column)}, [space_target]) </DeepExtract> if tree_id > space_target: setattr(node, self.tree_id_attr, tree_id + 1) <DeepExtract> left = getattr(node, self.left_attr) right = getattr(node, self.right_attr) level = getattr(node, self.level_attr) tree_id = getattr(node, self.tree_id_attr) if not new_tree_id: new_tree_id = self._get_next_tree_id() left_right_change = left - 1 self._inter_tree_move_and_close_gap(node, level, left_right_change, new_tree_id) setattr(node, self.left_attr, left - left_right_change) setattr(node, self.right_attr, right - left_right_change) setattr(node, self.level_attr, 0) setattr(node, self.tree_id_attr, new_tree_id) setattr(node, self.parent_attr, None) </DeepExtract> else: if position == 'left': if target_tree_id > tree_id: left_sibling = target.get_previous_sibling() if node == left_sibling: return new_tree_id = getattr(left_sibling, self.tree_id_attr) (lower_bound, upper_bound) = (tree_id, new_tree_id) shift = -1 else: new_tree_id = target_tree_id (lower_bound, upper_bound) = (new_tree_id, tree_id) shift = 1 elif position == 'right': if target_tree_id > tree_id: new_tree_id = target_tree_id (lower_bound, upper_bound) = (tree_id, target_tree_id) shift = -1 else: right_sibling = target.get_next_sibling() if node == right_sibling: return new_tree_id = getattr(right_sibling, self.tree_id_attr) (lower_bound, upper_bound) = (new_tree_id, tree_id) shift = 1 else: raise ValueError(_('An invalid position was given: %s.') % position) root_sibling_query = '\n UPDATE %(table)s\n SET %(tree_id)s = CASE\n WHEN %(tree_id)s = %%s\n THEN %%s\n ELSE %(tree_id)s + %%s END\n WHERE %(tree_id)s >= %%s AND %(tree_id)s <= %%s' % {'table': qn(opts.db_table), 'tree_id': qn(opts.get_field(self.tree_id_attr).column)} cursor = connection.cursor() cursor.execute(root_sibling_query, [tree_id, new_tree_id, shift, lower_bound, upper_bound]) setattr(node, self.tree_id_attr, new_tree_id)
def _make_sibling_of_root_node(node, target, position): """ Moves ``node``, making it a sibling of the given ``target`` root node as specified by ``position``. ``node`` will be modified to reflect its new tree state in the database. Since we use tree ids to reduce the number of rows affected by tree mangement during insertion and deletion, root nodes are not true siblings; thus, making an item a sibling of a root node is a special case which involves shuffling tree ids around. """ if node == target: raise InvalidMove(_('A node may not be made a sibling of itself.')) opts = self.model._meta tree_id = getattr(node, self.tree_id_attr) target_tree_id = getattr(target, self.tree_id_attr) if node.is_child_node(): if position == 'left': space_target = target_tree_id - 1 new_tree_id = target_tree_id elif position == 'right': space_target = target_tree_id new_tree_id = target_tree_id + 1 else: raise ValueError(_('An invalid position was given: %s.') % position) opts = self.model._meta cursor = connection.cursor() cursor.execute('\n UPDATE %(table)s\n SET %(tree_id)s = %(tree_id)s + 1\n WHERE %(tree_id)s > %%s' % {'table': qn(opts.db_table), 'tree_id': qn(opts.get_field(self.tree_id_attr).column)}, [space_target]) if tree_id > space_target: setattr(node, self.tree_id_attr, tree_id + 1) left = getattr(node, self.left_attr) right = getattr(node, self.right_attr) level = getattr(node, self.level_attr) tree_id = getattr(node, self.tree_id_attr) if not new_tree_id: new_tree_id = self._get_next_tree_id() left_right_change = left - 1 self._inter_tree_move_and_close_gap(node, level, left_right_change, new_tree_id) setattr(node, self.left_attr, left - left_right_change) setattr(node, self.right_attr, right - left_right_change) setattr(node, self.level_attr, 0) setattr(node, self.tree_id_attr, new_tree_id) setattr(node, self.parent_attr, None) else: if position == 'left': if target_tree_id > tree_id: left_sibling = target.get_previous_sibling() if node == left_sibling: return new_tree_id = getattr(left_sibling, self.tree_id_attr) (lower_bound, upper_bound) = (tree_id, new_tree_id) shift = -1 else: new_tree_id = target_tree_id (lower_bound, upper_bound) = (new_tree_id, tree_id) shift = 1 elif position == 'right': if target_tree_id > tree_id: new_tree_id = target_tree_id (lower_bound, upper_bound) = (tree_id, target_tree_id) shift = -1 else: right_sibling = target.get_next_sibling() if node == right_sibling: return new_tree_id = getattr(right_sibling, self.tree_id_attr) (lower_bound, upper_bound) = (new_tree_id, tree_id) shift = 1 else: raise ValueError(_('An invalid position was given: %s.') % position) root_sibling_query = '\n UPDATE %(table)s\n SET %(tree_id)s = CASE\n WHEN %(tree_id)s = %%s\n THEN %%s\n ELSE %(tree_id)s + %%s END\n WHERE %(tree_id)s >= %%s AND %(tree_id)s <= %%s' % {'table': qn(opts.db_table), 'tree_id': qn(opts.get_field(self.tree_id_attr).column)} cursor = connection.cursor() cursor.execute(root_sibling_query, [tree_id, new_tree_id, shift, lower_bound, upper_bound]) setattr(node, self.tree_id_attr, new_tree_id)
colab
positive
def get_payloads(self, obj):
    payloads = []
    for payload in obj.payloads:
        <DeepExtract>
        schema = _schemas.get(payload.type, None)
        </DeepExtract>
        if schema is not None:
            result = schema().dump(payload)
            payloads.append(result.data)
        else:
            print('Unsupported PayloadType: {}'.format(payload.type))
    return payloads
def get_payloads(self, obj):
    payloads = []
    for payload in obj.payloads:
        schema = _schemas.get(payload.type, None)
        if schema is not None:
            result = schema().dump(payload)
            payloads.append(result.data)
        else:
            print('Unsupported PayloadType: {}'.format(payload.type))
    return payloads
commandment
positive
def build_foreign_key_map_from_json(table): with open(table) as f: data = json.load(f) tables = {} for entry in data: <DeepExtract> cols_orig = entry['column_names_original'] tables_orig = entry['table_names_original'] cols = [] for col_orig in cols_orig: if col_orig[0] >= 0: t = tables_orig[col_orig[0]] c = col_orig[1] cols.append('__' + t.lower() + '.' + c.lower() + '__') else: cols.append('__all__') def keyset_in_list(k1, k2, k_list): for k_set in k_list: if k1 in k_set or k2 in k_set: tables[entry['db_id']] = k_set new_k_set = set() k_list.append(new_k_set) tables[entry['db_id']] = new_k_set foreign_key_list = [] foreign_keys = entry['foreign_keys'] for fkey in foreign_keys: (key1, key2) = fkey key_set = keyset_in_list(key1, key2, foreign_key_list) key_set.add(key1) key_set.add(key2) foreign_key_map = {} for key_set in foreign_key_list: sorted_list = sorted(list(key_set)) midx = sorted_list[0] for idx in sorted_list: foreign_key_map[cols[idx]] = cols[midx] tables[entry['db_id']] = foreign_key_map </DeepExtract> return tables
def build_foreign_key_map_from_json(table): with open(table) as f: data = json.load(f) tables = {} for entry in data: cols_orig = entry['column_names_original'] tables_orig = entry['table_names_original'] cols = [] for col_orig in cols_orig: if col_orig[0] >= 0: t = tables_orig[col_orig[0]] c = col_orig[1] cols.append('__' + t.lower() + '.' + c.lower() + '__') else: cols.append('__all__') def keyset_in_list(k1, k2, k_list): for k_set in k_list: if k1 in k_set or k2 in k_set: tables[entry['db_id']] = k_set new_k_set = set() k_list.append(new_k_set) tables[entry['db_id']] = new_k_set foreign_key_list = [] foreign_keys = entry['foreign_keys'] for fkey in foreign_keys: (key1, key2) = fkey key_set = keyset_in_list(key1, key2, foreign_key_list) key_set.add(key1) key_set.add(key2) foreign_key_map = {} for key_set in foreign_key_list: sorted_list = sorted(list(key_set)) midx = sorted_list[0] for idx in sorted_list: foreign_key_map[cols[idx]] = cols[midx] tables[entry['db_id']] = foreign_key_map return tables
editsql
positive
def aligned_keypoint(self, face_idx): assert face_idx < len(self) keypoint = self.keypoints[face_idx].copy().astype(float) if self.align_faces: matrix = self.rotation_matrices[face_idx] keypoint = np.pad(keypoint, ((0, 0), (0, 1)), constant_values=1) keypoint = keypoint.dot(matrix.T) <DeepExtract> assert face_idx < len(self) tight_bbox = self.bbox_XYXY[face_idx] expanded_bbox = expand_bbox(tight_bbox, self.im.shape, simple_expand=self.simple_expand, default_to_simple=True, expansion_factor=0.35) width = expanded_bbox[2] - expanded_bbox[0] height = expanded_bbox[3] - expanded_bbox[1] assert width == height expanded_bbox = expanded_bbox </DeepExtract> keypoint[:, 0] -= expanded_bbox[0] keypoint[:, 1] -= expanded_bbox[1] w = expanded_bbox[2] - expanded_bbox[0] keypoint /= w keypoint[keypoint < 0] = 0 keypoint[keypoint > 1] = 1 keypoint = torch.from_numpy(keypoint).view(1, -1) return keypoint
def aligned_keypoint(self, face_idx): assert face_idx < len(self) keypoint = self.keypoints[face_idx].copy().astype(float) if self.align_faces: matrix = self.rotation_matrices[face_idx] keypoint = np.pad(keypoint, ((0, 0), (0, 1)), constant_values=1) keypoint = keypoint.dot(matrix.T) assert face_idx < len(self) tight_bbox = self.bbox_XYXY[face_idx] expanded_bbox = expand_bbox(tight_bbox, self.im.shape, simple_expand=self.simple_expand, default_to_simple=True, expansion_factor=0.35) width = expanded_bbox[2] - expanded_bbox[0] height = expanded_bbox[3] - expanded_bbox[1] assert width == height expanded_bbox = expanded_bbox keypoint[:, 0] -= expanded_bbox[0] keypoint[:, 1] -= expanded_bbox[1] w = expanded_bbox[2] - expanded_bbox[0] keypoint /= w keypoint[keypoint < 0] = 0 keypoint[keypoint > 1] = 1 keypoint = torch.from_numpy(keypoint).view(1, -1) return keypoint
DeepPrivacy
positive
def __init__(self, translation=(0.0, 0.0), rotation=0.0, scale=(1, 1)):
    <DeepExtract>
    self.translation = (float(*translation), float(newy))
    </DeepExtract>
    <DeepExtract>
    self.rotation = float(rotation)
    </DeepExtract>
    <DeepExtract>
    self.scale = (float(*scale), float(newy))
    </DeepExtract>
def __init__(self, translation=(0.0, 0.0), rotation=0.0, scale=(1, 1)):
    self.translation = (float(*translation), float(newy))
    self.rotation = float(rotation)
    self.scale = (float(*scale), float(newy))
epciclr2020
positive
def reads_relation(self, C, trans, empty): rel = [] (state, N) = trans <DeepExtract> g = self.lr_goto_cache.get((id(C[state]), N), None) if g: g = g s = self.lr_goto_cache.get(N, None) if not s: s = {} self.lr_goto_cache[N] = s gs = [] for p in C[state]: n = p.lr_next if n and n.lr_before == N: s1 = s.get(id(n), None) if not s1: s1 = {} s[id(n)] = s1 gs.append(n) s = s1 g = s.get('$end', None) if not g: if gs: g = self.lr0_closure(gs) s['$end'] = g else: s['$end'] = gs self.lr_goto_cache[id(C[state]), N] = g g = g </DeepExtract> j = self.lr0_cidhash.get(id(g), -1) for p in g: if p.lr_index < p.len - 1: a = p.prod[p.lr_index + 1] if a in empty: rel.append((j, a)) return rel
def reads_relation(self, C, trans, empty): rel = [] (state, N) = trans g = self.lr_goto_cache.get((id(C[state]), N), None) if g: g = g s = self.lr_goto_cache.get(N, None) if not s: s = {} self.lr_goto_cache[N] = s gs = [] for p in C[state]: n = p.lr_next if n and n.lr_before == N: s1 = s.get(id(n), None) if not s1: s1 = {} s[id(n)] = s1 gs.append(n) s = s1 g = s.get('$end', None) if not g: if gs: g = self.lr0_closure(gs) s['$end'] = g else: s['$end'] = gs self.lr_goto_cache[id(C[state]), N] = g g = g j = self.lr0_cidhash.get(id(g), -1) for p in g: if p.lr_index < p.len - 1: a = p.prod[p.lr_index + 1] if a in empty: rel.append((j, a)) return rel
asp
positive
def get_unet_small2(nClasses, input_height=128, input_width=128, n_filters=16, dropout=0.1, batchnorm=True, n_channels=3): input_img = Input(shape=(input_height, input_width, n_channels)) <DeepExtract> x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(input_img) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c1 = x </DeepExtract> p1 = MaxPooling2D((2, 2))(c1) p1 = Dropout(dropout)(p1) <DeepExtract> x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(p1) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c2 = x </DeepExtract> u3 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c2) u3 = concatenate([u3, c1]) u3 = Dropout(dropout)(u3) <DeepExtract> x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(u3) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c3 = x </DeepExtract> outputs = Conv2D(1, (1, 1), activation='relu')(c3) model = Model(inputs=[input_img], outputs=[outputs]) return model
def get_unet_small2(nClasses, input_height=128, input_width=128, n_filters=16, dropout=0.1, batchnorm=True, n_channels=3): input_img = Input(shape=(input_height, input_width, n_channels)) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(input_img) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c1 = x p1 = MaxPooling2D((2, 2))(c1) p1 = Dropout(dropout)(p1) x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(p1) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c2 = x u3 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c2) u3 = concatenate([u3, c1]) u3 = Dropout(dropout)(u3) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(u3) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c3 = x outputs = Conv2D(1, (1, 1), activation='relu')(c3) model = Model(inputs=[input_img], outputs=[outputs]) return model
activefire
positive
def main(args=None): if len(sys.argv) == 1: args = ['-h'] if len(sys.argv) == 2: args = [sys.argv[1], '-h'] args = parse_arguments().parse_args(args) hm = heatmapper.heatmapper() if not isinstance(args.matrixFile, list): hm.read_matrix_file(args.matrixFile) if args.command == 'info': <DeepExtract> print('Groups:') for group in hm.matrix.group_labels: print('\t{0}'.format(group)) print('Samples:') for sample in hm.matrix.sample_labels: print('\t{0}'.format(sample)) </DeepExtract> elif args.command == 'dataRange': <DeepExtract> print('Samples\tMin\tMax\tMedian\t10th\t90th') for (i, sample) in enumerate(hm.matrix.sample_labels): start = hm.matrix.sample_boundaries[i] end = hm.matrix.sample_boundaries[i + 1] sample_matrix = hm.matrix.matrix[..., start:end] print('{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(sample, np.amin(sample_matrix), np.amax(sample_matrix), np.ma.median(sample_matrix), np.percentile(sample_matrix, 10), np.percentile(sample_matrix, 90))) </DeepExtract> elif args.command == 'subset': <DeepExtract> bounds = hm.parameters['sample_boundaries'] if args.samples is None: sIdx = np.arange(0, hm.matrix.matrix.shape[1]) else: o = list() for sample in args.samples: if sample not in hm.matrix.sample_labels: sys.exit("Error: '{0}' is not a valid sample\n".format(sample)) idx = hm.matrix.sample_labels.index(sample) o.extend(range(bounds[idx], bounds[idx + 1])) sIdx = o </DeepExtract> <DeepExtract> bounds = hm.parameters['group_boundaries'] if args.groups is None: (gIdx, gBounds) = (range(0, hm.matrix.matrix.shape[0]), np.array(bounds)) else: o = list() obounds = [0] for group in args.groups: if group not in hm.matrix.group_labels: sys.exit("Error: '{0}' is not a valid group\n".format(group)) idx = hm.matrix.group_labels.index(group) o.extend(range(bounds[idx], bounds[idx + 1])) obounds.append(bounds[idx + 1] - bounds[idx]) (gIdx, gBounds) = (o, np.cumsum(obounds)) </DeepExtract> <DeepExtract> out = [] for x in gIdx: reg = hm.matrix.regions[x] if isinstance(reg, dict): starts = reg['start'].split(',') starts = [int(x) for x in starts] ends = reg['end'].split(',') ends = [int(x) for x in ends] regs = [(x, y) for (x, y) in zip(starts, ends)] out.append([reg['chrom'], regs, reg['name'], 0, reg['strand'], reg['score']]) else: out.append(reg) hm.matrix.regions = out </DeepExtract> hm.matrix.matrix = hm.matrix.matrix[gIdx, :] hm.matrix.matrix = hm.matrix.matrix[:, sIdx] if args.samples is None: args.samples = hm.matrix.sample_labels hm.matrix.sample_boundaries = hm.matrix.sample_boundaries[0:len(args.samples) + 1] hm.matrix.group_boundaries = gBounds.tolist() keepIdx = set() for (_, sample) in enumerate(hm.matrix.sample_labels): if sample in args.samples: keepIdx.add(_) for param in hm.special_params: hm.parameters[param] = [v for (k, v) in enumerate(hm.parameters[param]) if k in keepIdx] hm.matrix.sample_labels = args.samples if args.groups is None: args.groups = hm.matrix.group_labels hm.matrix.group_labels = args.groups hm.save_matrix(args.outFileName) elif args.command == 'filterStrand': <DeepExtract> bounds = [0] regions = [] keep = [] for region in hm.matrix.regions: if region[4] == args.strand: keep.append(True) regions.append(region) else: keep.append(False) keep = np.array(keep) for idx in range(1, len(hm.matrix.group_boundaries)): i = int(np.sum(keep[hm.matrix.group_boundaries[idx - 1]:hm.matrix.group_boundaries[idx]])) bounds.append(bounds[idx - 1] + i) hm.matrix.group_boundaries = bounds hm.matrix.matrix = hm.matrix.matrix[keep, :] hm.matrix.regions = regions </DeepExtract> 
hm.save_matrix(args.outFileName) elif args.command == 'filterValues': <DeepExtract> bounds = [0] regions = [] keep = [] if args.min is None: args.min = -np.inf if args.max is None: args.max = np.inf np.warnings.filterwarnings('ignore') for (i, (x, y)) in enumerate(zip(np.nanmin(hm.matrix.matrix, axis=1), np.nanmax(hm.matrix.matrix, axis=1))): if np.isnan(x) or (x >= args.min and y <= args.max): keep.append(True) regions.append(hm.matrix.regions[i]) else: keep.append(False) keep = np.array(keep) for idx in range(1, len(hm.matrix.group_boundaries)): i = int(np.sum(keep[hm.matrix.group_boundaries[idx - 1]:hm.matrix.group_boundaries[idx]])) bounds.append(bounds[idx - 1] + i) hm.matrix.group_boundaries = bounds hm.matrix.matrix = hm.matrix.matrix[keep, :] hm.matrix.regions = regions </DeepExtract> hm.save_matrix(args.outFileName) elif args.command == 'rbind': <DeepExtract> hm2 = heatmapper.heatmapper() hm.read_matrix_file(args.matrixFile[0]) for idx in range(1, len(args.matrixFile)): hm2.read_matrix_file(args.matrixFile[idx]) for (idx, group) in enumerate(hm2.parameters['group_labels']): if group in hm.parameters['group_labels']: insertMatrix(hm, hm2, group) else: appendMatrix(hm, hm2, group) hm.parameters['group_labels'].append(group) hm.matrix.group_labels = hm.parameters['group_labels'] hm.matrix.group_boundaries = hm.parameters['group_boundaries'] </DeepExtract> hm.save_matrix(args.outFileName) elif args.command == 'cbind': <DeepExtract> hm2 = heatmapper.heatmapper() hm.read_matrix_file(args.matrixFile[0]) d = dict({x: dict() for x in hm.parameters['group_labels']}) for (idx, group) in enumerate(hm.parameters['group_labels']): s = hm.parameters['group_boundaries'][idx] e = hm.parameters['group_boundaries'][idx + 1] for (idx2, reg) in enumerate(hm.matrix.regions[s:e]): d[group][reg[2]] = idx2 + s for idx in range(1, len(args.matrixFile)): hm2.read_matrix_file(args.matrixFile[idx]) hm.parameters['sample_labels'].extend(hm2.parameters['sample_labels']) lens = [x + hm.parameters['sample_boundaries'][-1] for x in hm2.parameters['sample_boundaries']][1:] hm.parameters['sample_boundaries'].extend(lens) ncol = hm.matrix.matrix.shape[1] hm.matrix.matrix = np.hstack((hm.matrix.matrix, np.empty(hm2.matrix.matrix.shape))) hm.matrix.matrix[:, ncol:] = np.NAN for (idx2, group) in enumerate(hm2.parameters['group_labels']): if group not in d: continue s = hm2.parameters['group_boundaries'][idx2] e = hm2.parameters['group_boundaries'][idx2 + 1] for (idx3, reg) in enumerate(hm2.matrix.regions[s:e]): if reg[2] not in d[group]: continue hm.matrix.matrix[d[group][reg[2]], ncol:] = hm2.matrix.matrix[s + idx3, :] for s in hm.special_params: hm.parameters[s].extend(hm2.parameters[s]) hm.matrix.sample_labels = hm.parameters['sample_labels'] hm.matrix.sample_boundaries = hm.parameters['sample_boundaries'] </DeepExtract> hm.save_matrix(args.outFileName) elif args.command == 'sort': <DeepExtract> labels = dict() regions = [] defaultGroup = None if len(args.regionsFileName) == 1: defaultGroup = 'genes' for fname in args.regionsFileName: fp = dti.openPossiblyCompressed(fname) line = dti.getNext(fp) labelColumn = None while line.startswith('#'): if not labelColumn: labelColumn = dti.getLabel(line) line = dti.getNext(fp) while line.startswith('track '): line = dti.getNext(fp) subtract = 0 if labelColumn is not None: subtract = 1 cols = line.strip().split('\t') if len(cols) - subtract < 3: raise RuntimeError('{0} does not seem to be a recognized file type!'.format(fname)) elif len(cols) - subtract <= 6: loadBED(line, fp, 
fname, labelColumn, labels, regions, defaultGroup) elif len(cols) and dti.seemsLikeGTF(cols): loadGTF(line, fp, fname, labels, regions, args.transcriptID, args.transcript_id_designator, defaultGroup) else: loadBED(line, fp, fname, labelColumn, labels, regions, defaultGroup) fp.close() s1 = set(hm.parameters['group_labels']) if verbose: for e in labels: if e not in s1: sys.exit("The computeMatrix output is missing the '{}' region group. It has {} but the specified regions have {}.\n".format(e, s1, labels.keys())) d = dict() pos = 0 groupSizes = dict() for (idx, label) in enumerate(hm.parameters['group_labels']): s = hm.parameters['group_boundaries'][idx] e = hm.parameters['group_boundaries'][idx + 1] if label not in labels: continue d[label] = dict() groupSize = 0 for reg in hm.matrix.regions[s:e]: d[label][reg[2]] = pos pos += 1 groupSize += 1 groupSizes[label] = groupSize labelsList = [''] * len(labels) for (k, v) in labels.items(): labelsList[v] = k order = [] boundaries = [0] for (idx, label) in enumerate(labelsList): _ = [''] * len(regions[idx]) for (k, v) in regions[idx].items(): _[v] = k sz = 0 for name in _: if name not in d[label]: if verbose: sys.stderr.write('Skipping {}, due to being absent in the computeMatrix output.\n'.format(name)) continue sz += 1 order.append(d[label][name]) if sz == 0 and verbose: sys.exit('The region group {} had no matching entries!\n'.format(label)) boundaries.append(sz + boundaries[-1]) hm.matrix.regions = [hm.matrix.regions[i] for i in order] order = np.array(order) hm.matrix.matrix = hm.matrix.matrix[order, :] hm.parameters['group_labels'] = labelsList hm.matrix.group_labels = labelsList hm.parameters['group_boundaries'] = boundaries hm.matrix.group_boundaries = boundaries </DeepExtract> hm.save_matrix(args.outFileName) elif args.command == 'relabel': <DeepExtract> if args.groupLabels: if len(args.groupLabels) != len(hm.matrix.group_labels): sys.exit('You specified {} group labels, but {} are required.\n'.format(len(args.groupLabels), len(hm.matrix.group_labels))) hm.matrix.group_labels = args.groupLabels if args.sampleLabels: if len(args.sampleLabels) != len(hm.matrix.sample_labels): sys.exit('You specified {} sample labels, but {} are required.\n'.format(len(args.sampleLabels), len(hm.matrix.sample_labels))) hm.matrix.sample_labels = args.sampleLabels </DeepExtract> hm.save_matrix(args.outFileName) else: sys.exit('Unknown command {0}!\n'.format(args.command))
def main(args=None): if len(sys.argv) == 1: args = ['-h'] if len(sys.argv) == 2: args = [sys.argv[1], '-h'] args = parse_arguments().parse_args(args) hm = heatmapper.heatmapper() if not isinstance(args.matrixFile, list): hm.read_matrix_file(args.matrixFile) if args.command == 'info': print('Groups:') for group in hm.matrix.group_labels: print('\t{0}'.format(group)) print('Samples:') for sample in hm.matrix.sample_labels: print('\t{0}'.format(sample)) elif args.command == 'dataRange': print('Samples\tMin\tMax\tMedian\t10th\t90th') for (i, sample) in enumerate(hm.matrix.sample_labels): start = hm.matrix.sample_boundaries[i] end = hm.matrix.sample_boundaries[i + 1] sample_matrix = hm.matrix.matrix[..., start:end] print('{0}\t{1}\t{2}\t{3}\t{4}\t{5}'.format(sample, np.amin(sample_matrix), np.amax(sample_matrix), np.ma.median(sample_matrix), np.percentile(sample_matrix, 10), np.percentile(sample_matrix, 90))) elif args.command == 'subset': bounds = hm.parameters['sample_boundaries'] if args.samples is None: sIdx = np.arange(0, hm.matrix.matrix.shape[1]) else: o = list() for sample in args.samples: if sample not in hm.matrix.sample_labels: sys.exit("Error: '{0}' is not a valid sample\n".format(sample)) idx = hm.matrix.sample_labels.index(sample) o.extend(range(bounds[idx], bounds[idx + 1])) sIdx = o bounds = hm.parameters['group_boundaries'] if args.groups is None: (gIdx, gBounds) = (range(0, hm.matrix.matrix.shape[0]), np.array(bounds)) else: o = list() obounds = [0] for group in args.groups: if group not in hm.matrix.group_labels: sys.exit("Error: '{0}' is not a valid group\n".format(group)) idx = hm.matrix.group_labels.index(group) o.extend(range(bounds[idx], bounds[idx + 1])) obounds.append(bounds[idx + 1] - bounds[idx]) (gIdx, gBounds) = (o, np.cumsum(obounds)) out = [] for x in gIdx: reg = hm.matrix.regions[x] if isinstance(reg, dict): starts = reg['start'].split(',') starts = [int(x) for x in starts] ends = reg['end'].split(',') ends = [int(x) for x in ends] regs = [(x, y) for (x, y) in zip(starts, ends)] out.append([reg['chrom'], regs, reg['name'], 0, reg['strand'], reg['score']]) else: out.append(reg) hm.matrix.regions = out hm.matrix.matrix = hm.matrix.matrix[gIdx, :] hm.matrix.matrix = hm.matrix.matrix[:, sIdx] if args.samples is None: args.samples = hm.matrix.sample_labels hm.matrix.sample_boundaries = hm.matrix.sample_boundaries[0:len(args.samples) + 1] hm.matrix.group_boundaries = gBounds.tolist() keepIdx = set() for (_, sample) in enumerate(hm.matrix.sample_labels): if sample in args.samples: keepIdx.add(_) for param in hm.special_params: hm.parameters[param] = [v for (k, v) in enumerate(hm.parameters[param]) if k in keepIdx] hm.matrix.sample_labels = args.samples if args.groups is None: args.groups = hm.matrix.group_labels hm.matrix.group_labels = args.groups hm.save_matrix(args.outFileName) elif args.command == 'filterStrand': bounds = [0] regions = [] keep = [] for region in hm.matrix.regions: if region[4] == args.strand: keep.append(True) regions.append(region) else: keep.append(False) keep = np.array(keep) for idx in range(1, len(hm.matrix.group_boundaries)): i = int(np.sum(keep[hm.matrix.group_boundaries[idx - 1]:hm.matrix.group_boundaries[idx]])) bounds.append(bounds[idx - 1] + i) hm.matrix.group_boundaries = bounds hm.matrix.matrix = hm.matrix.matrix[keep, :] hm.matrix.regions = regions hm.save_matrix(args.outFileName) elif args.command == 'filterValues': bounds = [0] regions = [] keep = [] if args.min is None: args.min = -np.inf if args.max is None: args.max = np.inf 
np.warnings.filterwarnings('ignore') for (i, (x, y)) in enumerate(zip(np.nanmin(hm.matrix.matrix, axis=1), np.nanmax(hm.matrix.matrix, axis=1))): if np.isnan(x) or (x >= args.min and y <= args.max): keep.append(True) regions.append(hm.matrix.regions[i]) else: keep.append(False) keep = np.array(keep) for idx in range(1, len(hm.matrix.group_boundaries)): i = int(np.sum(keep[hm.matrix.group_boundaries[idx - 1]:hm.matrix.group_boundaries[idx]])) bounds.append(bounds[idx - 1] + i) hm.matrix.group_boundaries = bounds hm.matrix.matrix = hm.matrix.matrix[keep, :] hm.matrix.regions = regions hm.save_matrix(args.outFileName) elif args.command == 'rbind': hm2 = heatmapper.heatmapper() hm.read_matrix_file(args.matrixFile[0]) for idx in range(1, len(args.matrixFile)): hm2.read_matrix_file(args.matrixFile[idx]) for (idx, group) in enumerate(hm2.parameters['group_labels']): if group in hm.parameters['group_labels']: insertMatrix(hm, hm2, group) else: appendMatrix(hm, hm2, group) hm.parameters['group_labels'].append(group) hm.matrix.group_labels = hm.parameters['group_labels'] hm.matrix.group_boundaries = hm.parameters['group_boundaries'] hm.save_matrix(args.outFileName) elif args.command == 'cbind': hm2 = heatmapper.heatmapper() hm.read_matrix_file(args.matrixFile[0]) d = dict({x: dict() for x in hm.parameters['group_labels']}) for (idx, group) in enumerate(hm.parameters['group_labels']): s = hm.parameters['group_boundaries'][idx] e = hm.parameters['group_boundaries'][idx + 1] for (idx2, reg) in enumerate(hm.matrix.regions[s:e]): d[group][reg[2]] = idx2 + s for idx in range(1, len(args.matrixFile)): hm2.read_matrix_file(args.matrixFile[idx]) hm.parameters['sample_labels'].extend(hm2.parameters['sample_labels']) lens = [x + hm.parameters['sample_boundaries'][-1] for x in hm2.parameters['sample_boundaries']][1:] hm.parameters['sample_boundaries'].extend(lens) ncol = hm.matrix.matrix.shape[1] hm.matrix.matrix = np.hstack((hm.matrix.matrix, np.empty(hm2.matrix.matrix.shape))) hm.matrix.matrix[:, ncol:] = np.NAN for (idx2, group) in enumerate(hm2.parameters['group_labels']): if group not in d: continue s = hm2.parameters['group_boundaries'][idx2] e = hm2.parameters['group_boundaries'][idx2 + 1] for (idx3, reg) in enumerate(hm2.matrix.regions[s:e]): if reg[2] not in d[group]: continue hm.matrix.matrix[d[group][reg[2]], ncol:] = hm2.matrix.matrix[s + idx3, :] for s in hm.special_params: hm.parameters[s].extend(hm2.parameters[s]) hm.matrix.sample_labels = hm.parameters['sample_labels'] hm.matrix.sample_boundaries = hm.parameters['sample_boundaries'] hm.save_matrix(args.outFileName) elif args.command == 'sort': labels = dict() regions = [] defaultGroup = None if len(args.regionsFileName) == 1: defaultGroup = 'genes' for fname in args.regionsFileName: fp = dti.openPossiblyCompressed(fname) line = dti.getNext(fp) labelColumn = None while line.startswith('#'): if not labelColumn: labelColumn = dti.getLabel(line) line = dti.getNext(fp) while line.startswith('track '): line = dti.getNext(fp) subtract = 0 if labelColumn is not None: subtract = 1 cols = line.strip().split('\t') if len(cols) - subtract < 3: raise RuntimeError('{0} does not seem to be a recognized file type!'.format(fname)) elif len(cols) - subtract <= 6: loadBED(line, fp, fname, labelColumn, labels, regions, defaultGroup) elif len(cols) and dti.seemsLikeGTF(cols): loadGTF(line, fp, fname, labels, regions, args.transcriptID, args.transcript_id_designator, defaultGroup) else: loadBED(line, fp, fname, labelColumn, labels, regions, defaultGroup) fp.close() 
s1 = set(hm.parameters['group_labels']) if verbose: for e in labels: if e not in s1: sys.exit("The computeMatrix output is missing the '{}' region group. It has {} but the specified regions have {}.\n".format(e, s1, labels.keys())) d = dict() pos = 0 groupSizes = dict() for (idx, label) in enumerate(hm.parameters['group_labels']): s = hm.parameters['group_boundaries'][idx] e = hm.parameters['group_boundaries'][idx + 1] if label not in labels: continue d[label] = dict() groupSize = 0 for reg in hm.matrix.regions[s:e]: d[label][reg[2]] = pos pos += 1 groupSize += 1 groupSizes[label] = groupSize labelsList = [''] * len(labels) for (k, v) in labels.items(): labelsList[v] = k order = [] boundaries = [0] for (idx, label) in enumerate(labelsList): _ = [''] * len(regions[idx]) for (k, v) in regions[idx].items(): _[v] = k sz = 0 for name in _: if name not in d[label]: if verbose: sys.stderr.write('Skipping {}, due to being absent in the computeMatrix output.\n'.format(name)) continue sz += 1 order.append(d[label][name]) if sz == 0 and verbose: sys.exit('The region group {} had no matching entries!\n'.format(label)) boundaries.append(sz + boundaries[-1]) hm.matrix.regions = [hm.matrix.regions[i] for i in order] order = np.array(order) hm.matrix.matrix = hm.matrix.matrix[order, :] hm.parameters['group_labels'] = labelsList hm.matrix.group_labels = labelsList hm.parameters['group_boundaries'] = boundaries hm.matrix.group_boundaries = boundaries hm.save_matrix(args.outFileName) elif args.command == 'relabel': if args.groupLabels: if len(args.groupLabels) != len(hm.matrix.group_labels): sys.exit('You specified {} group labels, but {} are required.\n'.format(len(args.groupLabels), len(hm.matrix.group_labels))) hm.matrix.group_labels = args.groupLabels if args.sampleLabels: if len(args.sampleLabels) != len(hm.matrix.sample_labels): sys.exit('You specified {} sample labels, but {} are required.\n'.format(len(args.sampleLabels), len(hm.matrix.sample_labels))) hm.matrix.sample_labels = args.sampleLabels hm.save_matrix(args.outFileName) else: sys.exit('Unknown command {0}!\n'.format(args.command))
deepTools
positive
def __exit__(self, exctype, excinst, exctb):
    setattr(get_settings, 'settings', self._settings)
    <DeepExtract>
    gc.collect()
    for wrap in {a for a in gc.get_objects() if isinstance(a, functools._lru_cache_wrapper)}:
        wrap.cache_clear()
    </DeepExtract>
    (get_data_dir() / 'tmp_settings.json').unlink()
def __exit__(self, exctype, excinst, exctb):
    setattr(get_settings, 'settings', self._settings)
    gc.collect()
    for wrap in {a for a in gc.get_objects() if isinstance(a, functools._lru_cache_wrapper)}:
        wrap.cache_clear()
    (get_data_dir() / 'tmp_settings.json').unlink()
BCML
positive
def __init__(self, **kwargs):
    <DeepExtract>
    param_names = cls.class_param_names(hidden=True)
    params = dict(((name, getattr(cls, name)) for name in param_names))
    </DeepExtract>
    defined_params = set(kwargs.keys())
    invalid_params = defined_params - set(params.keys())
    if invalid_params:
        raise ParameterError('{cls} does not accept parameter(s): {keys}'.format(cls=repr(type(self)), keys=', '.join(sorted(invalid_params))))
    for (name, param) in params.items():
        if name in kwargs:
            value = param.cast(kwargs[name])
        else:
            value = copy(param.default)
        setattr(self, name, value)
    <DeepExtract>
    pass
    </DeepExtract>
def __init__(self, **kwargs):
    param_names = cls.class_param_names(hidden=True)
    params = dict(((name, getattr(cls, name)) for name in param_names))
    defined_params = set(kwargs.keys())
    invalid_params = defined_params - set(params.keys())
    if invalid_params:
        raise ParameterError('{cls} does not accept parameter(s): {keys}'.format(cls=repr(type(self)), keys=', '.join(sorted(invalid_params))))
    for (name, param) in params.items():
        if name in kwargs:
            value = param.cast(kwargs[name])
        else:
            value = copy(param.default)
        setattr(self, name, value)
    pass
cqparts
positive
def get_listeners(self, event_name: str | None=None) -> list[Listener] | dict[str, list[Listener]]:
    if event_name is not None:
        if event_name not in self._listeners:
            return []
        if event_name not in self._sorted:
            <DeepExtract>
            self._sorted[event_name] = []
            for (_, listeners) in sorted(self._listeners[event_name].items(), key=lambda t: -t[0]):
                for listener in listeners:
                    self._sorted[event_name].append(listener)
            </DeepExtract>
        return self._sorted[event_name]
    for event_name in self._listeners.keys():
        if event_name not in self._sorted:
            <DeepExtract>
            self._sorted[event_name] = []
            for (_, listeners) in sorted(self._listeners[event_name].items(), key=lambda t: -t[0]):
                for listener in listeners:
                    self._sorted[event_name].append(listener)
            </DeepExtract>
    return self._sorted
def get_listeners(self, event_name: str | None=None) -> list[Listener] | dict[str, list[Listener]]:
    if event_name is not None:
        if event_name not in self._listeners:
            return []
        if event_name not in self._sorted:
            self._sorted[event_name] = []
            for (_, listeners) in sorted(self._listeners[event_name].items(), key=lambda t: -t[0]):
                for listener in listeners:
                    self._sorted[event_name].append(listener)
        return self._sorted[event_name]
    for event_name in self._listeners.keys():
        if event_name not in self._sorted:
            self._sorted[event_name] = []
            for (_, listeners) in sorted(self._listeners[event_name].items(), key=lambda t: -t[0]):
                for listener in listeners:
                    self._sorted[event_name].append(listener)
    return self._sorted
cleo
positive
def forward(self, input):
    """
    :param input:
    :return: reconstructed input, obs_distribution_params, latent_distribution_params
    """
    <DeepExtract>
    raise NotImplementedError()
    </DeepExtract>
    <DeepExtract>
    raise NotImplementedError()
    </DeepExtract>
    <DeepExtract>
    raise NotImplementedError()
    </DeepExtract>
    return (reconstructions, obs_distribution_params, latent_distribution_params)
def forward(self, input):
    """
    :param input:
    :return: reconstructed input, obs_distribution_params, latent_distribution_params
    """
    raise NotImplementedError()
    raise NotImplementedError()
    raise NotImplementedError()
    return (reconstructions, obs_distribution_params, latent_distribution_params)
DoorGym
positive
def ensure_homogeneous_effect_for_none_X_effect(self, estimator):
    """
    when the effect_covariates is equal to list(), we require same effect across different covariates.
    """
    <DeepExtract>
    n = 100
    beta = 0.4
    X = pd.Series(np.random.normal(size=n))
    a = pd.Series([0] * (n // 2) + [1] * (n // 2))
    y = X.mul(beta)
    data = {'X': X.to_frame(), 'a': a, 'y': y, 'beta': beta}
    </DeepExtract>
    estimator = estimator.__class__(outcome_model=estimator.outcome_model, treatment_model=estimator.treatment_model, effect_model=estimator.effect_model, effect_covariates=list())
    estimator.fit(data['X'], data['a'], data['y'])
    estimated_effect = estimator.estimate_individual_effect(data['X'])
    np.testing.assert_array_equal(estimated_effect[0], estimated_effect)
def ensure_homogeneous_effect_for_none_X_effect(self, estimator):
    """
    when the effect_covariates is equal to list(), we require same effect across different covariates.
    """
    n = 100
    beta = 0.4
    X = pd.Series(np.random.normal(size=n))
    a = pd.Series([0] * (n // 2) + [1] * (n // 2))
    y = X.mul(beta)
    data = {'X': X.to_frame(), 'a': a, 'y': y, 'beta': beta}
    estimator = estimator.__class__(outcome_model=estimator.outcome_model, treatment_model=estimator.treatment_model, effect_model=estimator.effect_model, effect_covariates=list())
    estimator.fit(data['X'], data['a'], data['y'])
    estimated_effect = estimator.estimate_individual_effect(data['X'])
    np.testing.assert_array_equal(estimated_effect[0], estimated_effect)
causallib
positive
def draw_selected_state(ctx):
    if self._selection.item in (CHARACTER, COMPONENT):
        self._symbol.draw(ctx, self._hover_pos)
    elif self._selection.item in (OBJECT, RECT):
        self._selection.startpos = self._drag_startpos
        self._selection.endpos = self._drag_endpos
        self._selection.maxpos = self.max_pos_grid
        self._selection.draw(ctx)
        <DeepExtract>
        ctx.save()
        for ref in self._objects:
            if ref.symbol.has_pickpoint:
                if self._show_symbol_pickpoints and ref.symbol.is_symbol or (self._show_line_pickpoints and ref.symbol.is_line) or (self._show_text_pickpoints and ref.symbol.is_text):
                    ctx.set_source_rgb(1, 0, 0)
                    ctx.select_font_face('monospace', cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
                    pos = ref.symbol.pickpoint_pos.view_xy()
                    y_xbase = pos.y + Preferences.values['FONTSIZE']
                    ctx.move_to(pos.x, y_xbase)
                    ctx.show_text(MARK_CHAR)
        ctx.restore()
        </DeepExtract>
    elif self._selection.item == OBJECTS:
        <DeepExtract>
        ctx.save()
        for ref in self._objects:
            if ref.symbol.has_pickpoint:
                if self._show_symbol_pickpoints and ref.symbol.is_symbol or (self._show_line_pickpoints and ref.symbol.is_line) or (self._show_text_pickpoints and ref.symbol.is_text):
                    ctx.set_source_rgb(1, 0, 0)
                    ctx.select_font_face('monospace', cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
                    pos = ref.symbol.pickpoint_pos.view_xy()
                    y_xbase = pos.y + Preferences.values['FONTSIZE']
                    ctx.move_to(pos.x, y_xbase)
                    ctx.show_text(MARK_CHAR)
        ctx.restore()
        </DeepExtract>
        <DeepExtract>
        ctx.save()
        for ref in self._objects:
            ctx.set_source_rgb(1, 0, 0)
            ctx.select_font_face('monospace', cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
            offset = self._hover_pos - ref.startpos.view_xy()
            pos = ref.symbol.startpos.view_xy() + offset
            ref.symbol.draw(ctx, pos)
        ctx.restore()
        </DeepExtract>
def draw_selected_state(ctx):
    if self._selection.item in (CHARACTER, COMPONENT):
        self._symbol.draw(ctx, self._hover_pos)
    elif self._selection.item in (OBJECT, RECT):
        self._selection.startpos = self._drag_startpos
        self._selection.endpos = self._drag_endpos
        self._selection.maxpos = self.max_pos_grid
        self._selection.draw(ctx)
        ctx.save()
        for ref in self._objects:
            if ref.symbol.has_pickpoint:
                if self._show_symbol_pickpoints and ref.symbol.is_symbol or (self._show_line_pickpoints and ref.symbol.is_line) or (self._show_text_pickpoints and ref.symbol.is_text):
                    ctx.set_source_rgb(1, 0, 0)
                    ctx.select_font_face('monospace', cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
                    pos = ref.symbol.pickpoint_pos.view_xy()
                    y_xbase = pos.y + Preferences.values['FONTSIZE']
                    ctx.move_to(pos.x, y_xbase)
                    ctx.show_text(MARK_CHAR)
        ctx.restore()
    elif self._selection.item == OBJECTS:
        ctx.save()
        for ref in self._objects:
            if ref.symbol.has_pickpoint:
                if self._show_symbol_pickpoints and ref.symbol.is_symbol or (self._show_line_pickpoints and ref.symbol.is_line) or (self._show_text_pickpoints and ref.symbol.is_text):
                    ctx.set_source_rgb(1, 0, 0)
                    ctx.select_font_face('monospace', cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
                    pos = ref.symbol.pickpoint_pos.view_xy()
                    y_xbase = pos.y + Preferences.values['FONTSIZE']
                    ctx.move_to(pos.x, y_xbase)
                    ctx.show_text(MARK_CHAR)
        ctx.restore()
        ctx.save()
        for ref in self._objects:
            ctx.set_source_rgb(1, 0, 0)
            ctx.select_font_face('monospace', cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
            offset = self._hover_pos - ref.startpos.view_xy()
            pos = ref.symbol.startpos.view_xy() + offset
            ref.symbol.draw(ctx, pos)
        ctx.restore()
AACircuit
positive
def inception_v2(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.8, min_depth=16, depth_multiplier=1.0, prediction_fn=slim.softmax, spatial_squeeze=True, reuse=None, scope='InceptionV2'): """Inception v2 model for classification. Constructs an Inception v2 network for classification as described in http://arxiv.org/abs/1502.03167. The default image size used to train this network is 224x224. Args: inputs: a tensor of shape [batch_size, height, width, channels]. num_classes: number of predicted classes. is_training: whether is training or not. dropout_keep_prob: the percentage of activation values that are retained. min_depth: Minimum depth value (number of channels) for all convolution ops. Enforced when depth_multiplier < 1, and not an active constraint when depth_multiplier >= 1. depth_multiplier: Float multiplier for the depth (number of channels) for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. prediction_fn: a function to get predictions out of logits. spatial_squeeze: if True, logits is of shape [B, C], if false logits is of shape [B, 1, 1, C], where B is batch_size and C is number of classes. reuse: whether or not the network and its variables should be reused. To be able to reuse 'scope' must be given. scope: Optional variable_scope. Returns: logits: the pre-softmax activations, a tensor of size [batch_size, num_classes] end_points: a dictionary from components of the network to the corresponding activation. Raises: ValueError: if final_endpoint is not set to one of the predefined values, or depth_multiplier <= 0 """ if depth_multiplier <= 0: raise ValueError('depth_multiplier is not greater than zero.') with tf.variable_scope(scope, 'InceptionV2', [inputs, num_classes], reuse=reuse) as scope: with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training): <DeepExtract> end_points = {} if depth_multiplier <= 0: raise ValueError('depth_multiplier is not greater than zero.') depth = lambda d: max(int(d * depth_multiplier), min_depth) if data_format != 'NHWC' and data_format != 'NCHW': raise ValueError('data_format must be either NHWC or NCHW.') if data_format == 'NCHW' and use_separable_conv: raise ValueError('separable convolution only supports NHWC layout. 
NCHW data format can only be used when use_separable_conv is False.') concat_dim = 3 if data_format == 'NHWC' else 1 with tf.variable_scope(scope, 'InceptionV2', [inputs]): with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME', data_format=data_format): end_point = 'Conv2d_1a_7x7' if use_separable_conv: depthwise_multiplier = min(int(depth(64) / 3), 8) net = slim.separable_conv2d(inputs, depth(64), [7, 7], depth_multiplier=depthwise_multiplier, stride=2, padding='SAME', weights_initializer=trunc_normal(1.0), scope=end_point) else: net = slim.conv2d(inputs, depth(64), [7, 7], stride=2, weights_initializer=trunc_normal(1.0), scope=end_point) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'MaxPool_2a_3x3' net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Conv2d_2b_1x1' net = slim.conv2d(net, depth(64), [1, 1], scope=end_point, weights_initializer=trunc_normal(0.1)) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Conv2d_2c_3x3' net = slim.conv2d(net, depth(192), [3, 3], scope=end_point) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'MaxPool_3a_3x3' net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_3b' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(64), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(32), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_3c' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(64), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = 
tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_4a' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_1a_3x3') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_4b' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_4c' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(128), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_4d' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, 
depth(160), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(160), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(160), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(96), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_4e' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(160), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(192), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(192), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(96), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_5a' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_1a_3x3') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_5b' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(160), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], 
weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_5c' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) raise ValueError('Unknown final endpoint %s' % final_endpoint) </DeepExtract> with tf.variable_scope('Logits'): <DeepExtract> shape = net.get_shape().as_list() if shape[1] is None or shape[2] is None: kernel_size_out = [7, 7] else: kernel_size_out = [min(shape[1], [7, 7][0]), min(shape[2], [7, 7][1])] [7, 7] = kernel_size_out </DeepExtract> net = slim.avg_pool2d(net, kernel_size, padding='VALID', scope='AvgPool_1a_{}x{}'.format(*kernel_size)) net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b') logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='Conv2d_1c_1x1') if spatial_squeeze: logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze') end_points['Logits'] = logits end_points['Predictions'] = prediction_fn(logits, scope='Predictions') return (logits, end_points)
def inception_v2(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.8, min_depth=16, depth_multiplier=1.0, prediction_fn=slim.softmax, spatial_squeeze=True, reuse=None, scope='InceptionV2'): """Inception v2 model for classification. Constructs an Inception v2 network for classification as described in http://arxiv.org/abs/1502.03167. The default image size used to train this network is 224x224. Args: inputs: a tensor of shape [batch_size, height, width, channels]. num_classes: number of predicted classes. is_training: whether is training or not. dropout_keep_prob: the percentage of activation values that are retained. min_depth: Minimum depth value (number of channels) for all convolution ops. Enforced when depth_multiplier < 1, and not an active constraint when depth_multiplier >= 1. depth_multiplier: Float multiplier for the depth (number of channels) for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. prediction_fn: a function to get predictions out of logits. spatial_squeeze: if True, logits is of shape [B, C], if false logits is of shape [B, 1, 1, C], where B is batch_size and C is number of classes. reuse: whether or not the network and its variables should be reused. To be able to reuse 'scope' must be given. scope: Optional variable_scope. Returns: logits: the pre-softmax activations, a tensor of size [batch_size, num_classes] end_points: a dictionary from components of the network to the corresponding activation. Raises: ValueError: if final_endpoint is not set to one of the predefined values, or depth_multiplier <= 0 """ if depth_multiplier <= 0: raise ValueError('depth_multiplier is not greater than zero.') with tf.variable_scope(scope, 'InceptionV2', [inputs, num_classes], reuse=reuse) as scope: with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training): end_points = {} if depth_multiplier <= 0: raise ValueError('depth_multiplier is not greater than zero.') depth = lambda d: max(int(d * depth_multiplier), min_depth) if data_format != 'NHWC' and data_format != 'NCHW': raise ValueError('data_format must be either NHWC or NCHW.') if data_format == 'NCHW' and use_separable_conv: raise ValueError('separable convolution only supports NHWC layout. 
NCHW data format can only be used when use_separable_conv is False.') concat_dim = 3 if data_format == 'NHWC' else 1 with tf.variable_scope(scope, 'InceptionV2', [inputs]): with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME', data_format=data_format): end_point = 'Conv2d_1a_7x7' if use_separable_conv: depthwise_multiplier = min(int(depth(64) / 3), 8) net = slim.separable_conv2d(inputs, depth(64), [7, 7], depth_multiplier=depthwise_multiplier, stride=2, padding='SAME', weights_initializer=trunc_normal(1.0), scope=end_point) else: net = slim.conv2d(inputs, depth(64), [7, 7], stride=2, weights_initializer=trunc_normal(1.0), scope=end_point) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'MaxPool_2a_3x3' net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Conv2d_2b_1x1' net = slim.conv2d(net, depth(64), [1, 1], scope=end_point, weights_initializer=trunc_normal(0.1)) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Conv2d_2c_3x3' net = slim.conv2d(net, depth(192), [3, 3], scope=end_point) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'MaxPool_3a_3x3' net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_3b' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(64), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(32), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_3c' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(64), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = 
tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_4a' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_1a_3x3') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_4b' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_4c' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(128), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_4d' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, 
depth(160), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(160), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(160), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(96), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_4e' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(160), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(192), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(192), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(96), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_5a' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_1a_3x3') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_5b' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(160), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], 
weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) end_point = 'Mixed_5c' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: (net, end_points) = (net, end_points) raise ValueError('Unknown final endpoint %s' % final_endpoint) with tf.variable_scope('Logits'): shape = net.get_shape().as_list() if shape[1] is None or shape[2] is None: kernel_size_out = [7, 7] else: kernel_size_out = [min(shape[1], [7, 7][0]), min(shape[2], [7, 7][1])] [7, 7] = kernel_size_out net = slim.avg_pool2d(net, kernel_size, padding='VALID', scope='AvgPool_1a_{}x{}'.format(*kernel_size)) net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b') logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='Conv2d_1c_1x1') if spatial_squeeze: logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze') end_points['Logits'] = logits end_points['Predictions'] = prediction_fn(logits, scope='Predictions') return (logits, end_points)
CAAD2018
positive
def __init__(self, path, param_set, data_set, base='base', minimize=True): self.param_set = param_set if not isinstance(data_set, DataSet): data_set = DataSet(data_set) self.data_set = data_set self.base = base self.paramtag = base + '_' + param_set self.datatag = data_set.tag self.name = self.paramtag + '_' + self.datatag self.batchPath = path self.relativePath = self.paramtag + os.sep + self.datatag + os.sep self.chainPath = path + self.relativePath self.chainRoot = self.chainPath + self.name self.distPath = self.chainPath + 'dist' + os.sep self.distRoot = self.distPath + self.name self.isImportanceJob = False self.importanceItems = [] self.want_minimize = minimize self.result_converge = None self.group = None self.parent = None self.dist_settings = copy.copy(data_set.dist_settings) <DeepExtract> (self.normed_name, self.normed_params, self.normed_data) = self.makeNormedName() </DeepExtract> self.iniFile_path = input_folder self.iniFile_ext = '.yaml' self.scriptFile_path = script_folder self.logFile_path = log_folder
def __init__(self, path, param_set, data_set, base='base', minimize=True): self.param_set = param_set if not isinstance(data_set, DataSet): data_set = DataSet(data_set) self.data_set = data_set self.base = base self.paramtag = base + '_' + param_set self.datatag = data_set.tag self.name = self.paramtag + '_' + self.datatag self.batchPath = path self.relativePath = self.paramtag + os.sep + self.datatag + os.sep self.chainPath = path + self.relativePath self.chainRoot = self.chainPath + self.name self.distPath = self.chainPath + 'dist' + os.sep self.distRoot = self.distPath + self.name self.isImportanceJob = False self.importanceItems = [] self.want_minimize = minimize self.result_converge = None self.group = None self.parent = None self.dist_settings = copy.copy(data_set.dist_settings) (self.normed_name, self.normed_params, self.normed_data) = self.makeNormedName() self.iniFile_path = input_folder self.iniFile_ext = '.yaml' self.scriptFile_path = script_folder self.logFile_path = log_folder
cobaya
positive
def main(): <DeepExtract> parser = argparse.ArgumentParser(description='Analyze Json Log') subparsers = parser.add_subparsers(dest='task', help='task parser') add_plot_parser(subparsers) add_time_parser(subparsers) args = parser.parse_args() args = args </DeepExtract> json_logs = args.json_logs for json_log in json_logs: assert json_log.endswith('.json') <DeepExtract> log_dicts = [dict() for _ in json_logs] for (json_log, log_dict) in zip(json_logs, log_dicts): with open(json_log, 'r') as log_file: for l in log_file: log = json.loads(l.strip()) epoch = log.pop('epoch') if epoch not in log_dict: log_dict[epoch] = defaultdict(list) for (k, v) in log.items(): log_dict[epoch][k].append(v) log_dicts = log_dicts </DeepExtract> eval(args.task)(log_dicts, args)
def main(): parser = argparse.ArgumentParser(description='Analyze Json Log') subparsers = parser.add_subparsers(dest='task', help='task parser') add_plot_parser(subparsers) add_time_parser(subparsers) args = parser.parse_args() args = args json_logs = args.json_logs for json_log in json_logs: assert json_log.endswith('.json') log_dicts = [dict() for _ in json_logs] for (json_log, log_dict) in zip(json_logs, log_dicts): with open(json_log, 'r') as log_file: for l in log_file: log = json.loads(l.strip()) epoch = log.pop('epoch') if epoch not in log_dict: log_dict[epoch] = defaultdict(list) for (k, v) in log.items(): log_dict[epoch][k].append(v) log_dicts = log_dicts eval(args.task)(log_dicts, args)
AE_TextSpotter
positive
def UpdateUser(self, user, ssh_keys): """Update a Linux user with authorized SSH keys. Args: user: string, the name of the Linux user account. ssh_keys: list, the SSH key strings associated with the user. Returns: bool, True if the user account updated successfully. """ if not bool(USER_REGEX.match(user)): self.logger.warning('Invalid user account name %s.', user) return False if not self._GetUser(user): if not (self._AddUser(user) and self._UpdateUserGroups(user, self.groups)): return False if not self._UpdateSudoer(user, sudoer=True): return False <DeepExtract> try: pw_entry = pwd.getpwnam(user) except KeyError: pw_entry = None </DeepExtract> if pw_entry and os.path.basename(pw_entry.pw_shell) == 'nologin': message = 'Not updating user %s. User set `nologin` as login shell.' self.logger.debug(message, user) return True try: <DeepExtract> pw_entry = self._GetUser(user) if not pw_entry: return uid = pw_entry.pw_uid gid = pw_entry.pw_gid home_dir = pw_entry.pw_dir ssh_dir = os.path.join(home_dir, '.ssh') authorized_keys_file = os.path.join(ssh_dir, 'authorized_keys') if os.path.islink(ssh_dir) or os.path.islink(authorized_keys_file): self.logger.warning('Not updating authorized keys for user %s. File is a symlink.', user) return if not os.path.exists(home_dir): file_utils.SetPermissions(home_dir, mode=493, uid=uid, gid=gid, mkdir=True) file_utils.SetPermissions(ssh_dir, mode=448, uid=uid, gid=gid, mkdir=True) prefix = self.logger.name + '-' with tempfile.NamedTemporaryFile(mode='w', prefix=prefix, delete=True) as updated_keys: updated_keys_file = updated_keys.name if os.path.exists(authorized_keys_file): lines = open(authorized_keys_file).readlines() else: lines = [] google_lines = set() for (i, line) in enumerate(lines): if line.startswith(self.google_comment): google_lines.update([i, i + 1]) for (i, line) in enumerate(lines): if i not in google_lines and line: line += '\n' if not line.endswith('\n') else '' updated_keys.write(line) for ssh_key in ssh_keys: ssh_key += '\n' if not ssh_key.endswith('\n') else '' updated_keys.write('%s\n' % self.google_comment) updated_keys.write(ssh_key) updated_keys.flush() shutil.copy(updated_keys_file, authorized_keys_file) file_utils.SetPermissions(authorized_keys_file, mode=384, uid=uid, gid=gid) </DeepExtract> except (IOError, OSError) as e: message = 'Could not update the authorized keys file for user %s. %s.' self.logger.warning(message, user, str(e)) return False else: return True
def UpdateUser(self, user, ssh_keys): """Update a Linux user with authorized SSH keys. Args: user: string, the name of the Linux user account. ssh_keys: list, the SSH key strings associated with the user. Returns: bool, True if the user account updated successfully. """ if not bool(USER_REGEX.match(user)): self.logger.warning('Invalid user account name %s.', user) return False if not self._GetUser(user): if not (self._AddUser(user) and self._UpdateUserGroups(user, self.groups)): return False if not self._UpdateSudoer(user, sudoer=True): return False try: pw_entry = pwd.getpwnam(user) except KeyError: pw_entry = None if pw_entry and os.path.basename(pw_entry.pw_shell) == 'nologin': message = 'Not updating user %s. User set `nologin` as login shell.' self.logger.debug(message, user) return True try: pw_entry = self._GetUser(user) if not pw_entry: return uid = pw_entry.pw_uid gid = pw_entry.pw_gid home_dir = pw_entry.pw_dir ssh_dir = os.path.join(home_dir, '.ssh') authorized_keys_file = os.path.join(ssh_dir, 'authorized_keys') if os.path.islink(ssh_dir) or os.path.islink(authorized_keys_file): self.logger.warning('Not updating authorized keys for user %s. File is a symlink.', user) return if not os.path.exists(home_dir): file_utils.SetPermissions(home_dir, mode=493, uid=uid, gid=gid, mkdir=True) file_utils.SetPermissions(ssh_dir, mode=448, uid=uid, gid=gid, mkdir=True) prefix = self.logger.name + '-' with tempfile.NamedTemporaryFile(mode='w', prefix=prefix, delete=True) as updated_keys: updated_keys_file = updated_keys.name if os.path.exists(authorized_keys_file): lines = open(authorized_keys_file).readlines() else: lines = [] google_lines = set() for (i, line) in enumerate(lines): if line.startswith(self.google_comment): google_lines.update([i, i + 1]) for (i, line) in enumerate(lines): if i not in google_lines and line: line += '\n' if not line.endswith('\n') else '' updated_keys.write(line) for ssh_key in ssh_keys: ssh_key += '\n' if not ssh_key.endswith('\n') else '' updated_keys.write('%s\n' % self.google_comment) updated_keys.write(ssh_key) updated_keys.flush() shutil.copy(updated_keys_file, authorized_keys_file) file_utils.SetPermissions(authorized_keys_file, mode=384, uid=uid, gid=gid) except (IOError, OSError) as e: message = 'Could not update the authorized keys file for user %s. %s.' self.logger.warning(message, user, str(e)) return False else: return True
compute-image-packages
positive
def write_to_shared_memory(index, value, shared_memory, space): """Write the observation of a single environment into shared memory. Parameters ---------- index : int Index of the environment (must be in `[0, num_envs)`). value : sample from `space` Observation of the single environment to write to shared memory. shared_memory : dict, tuple, or `multiprocessing.Array` instance Shared object across processes. This contains the observations from the vectorized environment. This object is created with `create_shared_memory`. space : `gym.spaces.Space` instance Observation space of a single environment in the vectorized environment. Returns ------- `None` """ if isinstance(space, _BaseGymSpaces): <DeepExtract> size = int(np.prod(space.shape)) destination = np.frombuffer(shared_memory.get_obj(), dtype=space.dtype) np.copyto(destination[index * size:(index + 1) * size], np.asarray(value, dtype=space.dtype).flatten()) </DeepExtract> elif isinstance(space, Tuple): <DeepExtract> for (value, memory, subspace) in zip(value, shared_memory, space.spaces): write_to_shared_memory(index, value, memory, subspace) </DeepExtract> elif isinstance(space, Dict): <DeepExtract> for (key, subspace) in space.spaces.items(): write_to_shared_memory(index, value[key], shared_memory[key], subspace) </DeepExtract> else: raise NotImplementedError()
def write_to_shared_memory(index, value, shared_memory, space): """Write the observation of a single environment into shared memory. Parameters ---------- index : int Index of the environment (must be in `[0, num_envs)`). value : sample from `space` Observation of the single environment to write to shared memory. shared_memory : dict, tuple, or `multiprocessing.Array` instance Shared object across processes. This contains the observations from the vectorized environment. This object is created with `create_shared_memory`. space : `gym.spaces.Space` instance Observation space of a single environment in the vectorized environment. Returns ------- `None` """ if isinstance(space, _BaseGymSpaces): size = int(np.prod(space.shape)) destination = np.frombuffer(shared_memory.get_obj(), dtype=space.dtype) np.copyto(destination[index * size:(index + 1) * size], np.asarray(value, dtype=space.dtype).flatten()) elif isinstance(space, Tuple): for (value, memory, subspace) in zip(value, shared_memory, space.spaces): write_to_shared_memory(index, value, memory, subspace) elif isinstance(space, Dict): for (key, subspace) in space.spaces.items(): write_to_shared_memory(index, value[key], shared_memory[key], subspace) else: raise NotImplementedError()
DQN-DDPG_Stock_Trading
positive
def OnTextCreditsChange(self, event): warning_minutes = self.options.setdefault('credits_warning', 10) ctrl = event.GetEventObject() try: value = int(ctrl.GetValue()) last_frame = self.framecount - 1 credits_seconds = (last_frame - value) / self.framerate if value > last_frame or credits_seconds > warning_minutes * 60: ctrl.SetForegroundColour(wx.RED) else: ctrl.SetForegroundColour(self.windowTextColor) except ValueError: ctrl.SetForegroundColour(wx.RED) ctrl.Refresh() <DeepExtract> try: commandline = self.presets[self.ctrlDict['preset'].GetStringSelection()] except KeyError: self.ctrlDict['commandline'].SetValue('') return '' exeOptions = self.options.setdefault('exe_options', {}) templist = [] for line in commandline.strip().split('\n'): splitline = line.split(None, 1) if len(splitline) == 2: (first, rest) = splitline else: (first, rest) = (splitline[0], '') try: extra = exeOptions[first.lower()]['extra'] if extra.strip() == '': extra = None except KeyError: extra = None if extra is None: templist.append('%s %s' % (first, rest.replace('$extra_options', ''))) elif rest.count('$extra_options') > 0: templist.append('%s %s' % (first, rest.replace('$extra_options', extra))) else: templist.append('%s %s %s' % (first, extra, rest)) commandline = '\n'.join(templist) + '\n' keyInfo = (('video_bitrate', self.ctrlDict), ('video_quality', self.ctrlDict), ('video_input', self.ctrlDict), ('video_output', self.ctrlDict), ('audio_input', self.bitrateDialog.ctrlDict), ('audio_bitrate', self.bitrateDialog.ctrlDict), ('subtitles_input', self.bitrateDialog.ctrlDict), ('container', self.bitrateDialog.ctrlDict), ('audio_format', self.bitrateDialog.ctrlDict), ('credits_frame', self.ctrlDict), ('par_x', self.ctrlDict), ('par_y', self.ctrlDict)) replaceDict = {} for (key, keydict) in keyInfo: value = keydict[key].GetValue() try: value = value.strip() except: pass if value != '': replaceDict[key] = value replaceDict['last_frame'] = self.framecount - 1 audioname = self.bitrateDialog.ctrlDict['audio_input'].GetValue() try: replaceDict['audio_delay'] = audioname.lower().split('delay', 1)[1].split('ms')[0].strip() except: replaceDict['audio_delay'] = 0 try: replaceDict['par'] = '%.4f' % (float(replaceDict['par_x']) / float(replaceDict['par_y'])) except: pass template = string.Template(commandline) commandline = template.safe_substitute(replaceDict) self.ctrlDict['commandline'].SetValue(commandline) return commandline </DeepExtract> event.Skip()
def OnTextCreditsChange(self, event): warning_minutes = self.options.setdefault('credits_warning', 10) ctrl = event.GetEventObject() try: value = int(ctrl.GetValue()) last_frame = self.framecount - 1 credits_seconds = (last_frame - value) / self.framerate if value > last_frame or credits_seconds > warning_minutes * 60: ctrl.SetForegroundColour(wx.RED) else: ctrl.SetForegroundColour(self.windowTextColor) except ValueError: ctrl.SetForegroundColour(wx.RED) ctrl.Refresh() try: commandline = self.presets[self.ctrlDict['preset'].GetStringSelection()] except KeyError: self.ctrlDict['commandline'].SetValue('') return '' exeOptions = self.options.setdefault('exe_options', {}) templist = [] for line in commandline.strip().split('\n'): splitline = line.split(None, 1) if len(splitline) == 2: (first, rest) = splitline else: (first, rest) = (splitline[0], '') try: extra = exeOptions[first.lower()]['extra'] if extra.strip() == '': extra = None except KeyError: extra = None if extra is None: templist.append('%s %s' % (first, rest.replace('$extra_options', ''))) elif rest.count('$extra_options') > 0: templist.append('%s %s' % (first, rest.replace('$extra_options', extra))) else: templist.append('%s %s %s' % (first, extra, rest)) commandline = '\n'.join(templist) + '\n' keyInfo = (('video_bitrate', self.ctrlDict), ('video_quality', self.ctrlDict), ('video_input', self.ctrlDict), ('video_output', self.ctrlDict), ('audio_input', self.bitrateDialog.ctrlDict), ('audio_bitrate', self.bitrateDialog.ctrlDict), ('subtitles_input', self.bitrateDialog.ctrlDict), ('container', self.bitrateDialog.ctrlDict), ('audio_format', self.bitrateDialog.ctrlDict), ('credits_frame', self.ctrlDict), ('par_x', self.ctrlDict), ('par_y', self.ctrlDict)) replaceDict = {} for (key, keydict) in keyInfo: value = keydict[key].GetValue() try: value = value.strip() except: pass if value != '': replaceDict[key] = value replaceDict['last_frame'] = self.framecount - 1 audioname = self.bitrateDialog.ctrlDict['audio_input'].GetValue() try: replaceDict['audio_delay'] = audioname.lower().split('delay', 1)[1].split('ms')[0].strip() except: replaceDict['audio_delay'] = 0 try: replaceDict['par'] = '%.4f' % (float(replaceDict['par_x']) / float(replaceDict['par_y'])) except: pass template = string.Template(commandline) commandline = template.safe_substitute(replaceDict) self.ctrlDict['commandline'].SetValue(commandline) return commandline event.Skip()
AvsPmod
positive
@property def ok(self): try: <DeepExtract> http_error_msg = '' if 400 <= self.status_code < 500: http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason) elif 500 <= self.status_code < 600: http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason) if http_error_msg: raise HTTPError(http_error_msg, response=self) </DeepExtract> except RequestException: return False return True
@property def ok(self): try: http_error_msg = '' if 400 <= self.status_code < 500: http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason) elif 500 <= self.status_code < 600: http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason) if http_error_msg: raise HTTPError(http_error_msg, response=self) except RequestException: return False return True
Crunchyroll-XML-Decoder
positive
def conv_key_list(dict): ret = {} for key in dict: if not dict[key]: continue try: <DeepExtract> if len(key) == 1: newkey = (ord(key), 0) elif key.startswith('C-'): (k, m) = convkey(key[2:]) newkey = (k & 31, m) elif key.startswith('M-'): (k, m) = self.convkey(key[2:]) newkey = (k, 1) elif key == 'KEY_RETURN': newkey = (10, 0) else: newkey = (getattr(curses, key), 0) </DeepExtract> except AttributeError: continue if type(dict[key]) != type([]): ret[newkey] = [dict[key]] else: ret[newkey] = dict[key] return ret
def conv_key_list(dict): ret = {} for key in dict: if not dict[key]: continue try: if len(key) == 1: newkey = (ord(key), 0) elif key.startswith('C-'): (k, m) = convkey(key[2:]) newkey = (k & 31, m) elif key.startswith('M-'): (k, m) = self.convkey(key[2:]) newkey = (k, 1) elif key == 'KEY_RETURN': newkey = (10, 0) else: newkey = (getattr(curses, key), 0) except AttributeError: continue if type(dict[key]) != type([]): ret[newkey] = [dict[key]] else: ret[newkey] = dict[key] return ret
Canto
positive
def main(args=None): """Perform necessary tasks to create and/or publish a new release""" parser = argparse.ArgumentParser(usage=print_help()) parser.add_argument('goal', help='Supported goals: {prepare-changelog | prepare | publish}', type=str, choices={'prepare-changelog', 'prepare', 'publish'}) parser.add_argument('--version', help='the new release version', type=str, required=True) parser.add_argument('--dev-version', help='the new development version', type=str, required=False) parser.add_argument('--beta', help='the release beta number', type=str, required=False) parser.add_argument('--rc', help='the release candidate number', type=str, required=False) parser.add_argument('--org', help='the github org or username to use', type=str, required=False) parser.add_argument('--branch', help='the branch name to use', type=str, required=False) args = parser.parse_args() if args.beta and args.rc: <DeepExtract> return 'create-release.py [ prepare | publish ] --version VERSION\n \n DESCRIPTION\n Creates Elyra release based on git commit hash or from HEAD.\n \n create release prepare-changelog --version 1.3.0 [--beta 0] [--rc 0]\n This will prepare the release changelog and make it ready for review on the release workdir.\n\n create-release.py prepare --version 1.3.0 --dev-version 1.4.0 [--beta 0] [--rc 0]\n This will prepare a release candidate, build it locally and make it ready for review on the release workdir.\n \n Note: that one can either use a beta or rc modifier for the release, but not both.\n\n create-release.py publish --version 1.3.0 [--beta 0] [--rc 0]\n This will build a previously prepared release, and publish the artifacts to public repositories.\n \n Required software dependencies for building and publishing a release:\n - Git\n - Node\n - Twine\n - Yarn\n \n Required configurations for publishing a release:\n - GPG with signing key configured\n \n \n ' </DeepExtract> sys.exit(1) global config try: <DeepExtract> if not dependency_exists('git'): raise DependencyException('Please install git https://git-scm.com/downloads') if not dependency_exists('node'): raise DependencyException('Please install node.js 18+ https://nodejs.org/') if not dependency_exists('yarn'): raise DependencyException('Please install yarn https://classic.yarnpkg.com/') if not dependency_exists('twine'): raise DependencyException('Please install twine https://twine.readthedocs.io/en/latest/#installation') </DeepExtract> <DeepExtract> pass </DeepExtract> <DeepExtract> if not args: raise ValueError('Invalid command line arguments') v = re.search(VERSION_REG_EX, elyra._version.__version__) configuration = {'goal': args.goal, 'git_url': f'git@github.com:{args.org or DEFAULT_GIT_ORG}/elyra.git', 'git_branch': args.branch or DEFAULT_GIT_BRANCH, 'git_hash': 'HEAD', 'git_user_name': check_output(['git', 'config', 'user.name']), 'git_user_email': check_output(['git', 'config', 'user.email']), 'base_dir': os.getcwd(), 'work_dir': os.path.join(os.getcwd(), DEFAULT_BUILD_DIR), 'source_dir': os.path.join(os.getcwd(), DEFAULT_BUILD_DIR, 'elyra'), 'old_version': elyra._version.__version__, 'old_npm_version': f"{v['major']}.{v['minor']}.{v['patch']}-dev", 'new_version': args.version if (not args.rc or not str.isdigit(args.rc)) and (not args.beta or not str.isdigit(args.beta)) else f'{args.version}rc{args.rc}' if args.rc else f'{args.version}b{args.beta}', 'new_npm_version': args.version if (not args.rc or not str.isdigit(args.rc)) and (not args.beta or not str.isdigit(args.beta)) else f'{args.version}-rc.{args.rc}' if args.rc 
else f'{args.version}-beta.{args.beta}', 'rc': args.rc, 'beta': args.beta, 'dev_version': f'{args.dev_version}.dev0', 'dev_npm_version': f'{args.dev_version}-dev', 'tag': f'v{args.version}' if (not args.rc or not str.isdigit(args.rc)) and (not args.beta or not str.isdigit(args.beta)) else f'v{args.version}rc{args.rc}' if args.rc else f'v{args.version}b{args.beta}', 'pre_release': True if args.rc or args.beta else False} global config config = SimpleNamespace(**configuration) </DeepExtract> <DeepExtract> global config print('') print('-----------------------------------------------------------------') print('--------------------- Release configuration ---------------------') print('-----------------------------------------------------------------') print(f'Goal \t\t\t -> {config.goal}') print(f'Git URL \t\t -> {config.git_url}') print(f'Git Branch \t\t -> {config.git_branch}') print(f'Git reference \t\t -> {config.git_hash}') print(f'Git user \t\t -> {config.git_user_name}') print(f'Git user email \t\t -> {config.git_user_email}') print(f'Work dir \t\t -> {config.work_dir}') print(f'Source dir \t\t -> {config.source_dir}') print(f'Old Version \t\t -> {config.old_version}') print(f'Old NPM Version \t -> {config.old_npm_version}') print(f'New Version \t\t -> {config.new_version}') print(f'New NPN Version \t -> {config.new_npm_version}') if config.rc is not None: print(f'RC number \t\t -> {config.rc}') if config.beta is not None: print(f'Beta number \t\t -> {config.beta}') print(f'Dev Version \t\t -> {config.dev_version}') print(f'Dev NPM Version \t -> {config.dev_npm_version}') print(f'Release Tag \t\t -> {config.tag}') print('-----------------------------------------------------------------') print('') </DeepExtract> if config.goal == 'prepare-changelog': <DeepExtract> global config print(f'Generating changelog for release {config.new_version}') print('') checkout_code() generate_changelog() check_run(['git', 'commit', '-a', '-m', f'Update changelog for release {config.new_version}'], cwd=config.source_dir) </DeepExtract> print('') print('') print(f'Changelog for release version: {config.new_version} is ready for review at {config.source_dir}') print('After you are done, push the reviewed changelog to github.') print('') print('') elif config.goal == 'prepare': if not args.dev_version: <DeepExtract> return 'create-release.py [ prepare | publish ] --version VERSION\n \n DESCRIPTION\n Creates Elyra release based on git commit hash or from HEAD.\n \n create release prepare-changelog --version 1.3.0 [--beta 0] [--rc 0]\n This will prepare the release changelog and make it ready for review on the release workdir.\n\n create-release.py prepare --version 1.3.0 --dev-version 1.4.0 [--beta 0] [--rc 0]\n This will prepare a release candidate, build it locally and make it ready for review on the release workdir.\n \n Note: that one can either use a beta or rc modifier for the release, but not both.\n\n create-release.py publish --version 1.3.0 [--beta 0] [--rc 0]\n This will build a previously prepared release, and publish the artifacts to public repositories.\n \n Required software dependencies for building and publishing a release:\n - Git\n - Node\n - Twine\n - Yarn\n \n Required configurations for publishing a release:\n - GPG with signing key configured\n \n \n ' </DeepExtract> sys.exit() <DeepExtract> global config print(f'Processing release from {config.old_version} to {config.new_version} ') print('') checkout_code() prepare_changelog() update_version_to_release() check_run(['git', 'commit', 
'-a', '-m', f'Release v{config.new_version}'], cwd=config.source_dir) check_run(['git', 'tag', config.tag], cwd=config.source_dir) build_server() build_release() show_release_artifacts() update_version_to_dev() check_run(['git', 'commit', '-a', '-m', f'Prepare for next development iteration'], cwd=config.source_dir) prepare_extensions_release() prepare_runtime_extensions_package_release() </DeepExtract> print('') print('') print(f'Release version: {config.new_version} is ready for review') print('After you are done, run the script again to [publish] the release.') print('') print('') elif args.goal == 'publish': <DeepExtract> global config files_to_publish = [f'{config.source_dir}/dist/elyra-{config.new_version}-py3-none-any.whl', f'{config.source_dir}/dist/elyra-{config.new_version}.tar.gz', f'{config.source_dir}/dist/elyra_server-{config.new_version}-py3-none-any.whl', f'{config.source_dir}/dist/elyra_server-{config.new_version}.tar.gz', f'{config.work_dir}/airflow-notebook/dist/airflow_notebook-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/airflow-notebook/dist/airflow-notebook-{config.new_version}.tar.gz', f'{config.work_dir}/kfp-notebook/dist/kfp_notebook-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/kfp-notebook/dist/kfp-notebook-{config.new_version}.tar.gz', f'{config.work_dir}/elyra-code-snippet-extension/dist/elyra_code_snippet_extension-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/elyra-code-snippet-extension/dist/elyra-code-snippet-extension-{config.new_version}.tar.gz', f'{config.work_dir}/elyra-code-viewer-extension/dist/elyra_code_viewer_extension-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/elyra-code-viewer-extension/dist/elyra-code-viewer-extension-{config.new_version}.tar.gz', f'{config.work_dir}/elyra-pipeline-editor-extension/dist/elyra_pipeline_editor_extension-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/elyra-pipeline-editor-extension/dist/elyra-pipeline-editor-extension-{config.new_version}.tar.gz', f'{config.work_dir}/elyra-python-editor-extension/dist/elyra_python_editor_extension-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/elyra-python-editor-extension/dist/elyra-python-editor-extension-{config.new_version}.tar.gz', f'{config.work_dir}/elyra-r-editor-extension/dist/elyra_r_editor_extension-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/elyra-r-editor-extension/dist/elyra-r-editor-extension-{config.new_version}.tar.gz', f'{config.work_dir}/elyra-scala-editor-extension/dist/elyra_scala_editor_extension-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/elyra-scala-editor-extension/dist/elyra-scala-editor-extension-{config.new_version}.tar.gz'] print('-----------------------------------------------------------------') print('---------------------- Publishing to PyPI -----------------------') print('-----------------------------------------------------------------') for file in files_to_publish: if not os.path.exists(file): raise MissingReleaseArtifactException(f'Missing release file: {file}') for file in files_to_publish: print(f'Publishing: {file}') check_run(['twine', 'upload', '--sign', file], cwd=os.getcwd()) print('-----------------------------------------------------------------') print('--------------- Pushing Release and Tag to git ------------------') print('-----------------------------------------------------------------') print() print('Pushing release to git') check_run(['git', 'push'], cwd=config.source_dir) print('Pushing release tag to git') 
check_run(['git', 'push', '--tags'], cwd=config.source_dir) print('-----------------------------------------------------------------') print('--------------- Preparing to push npm packages ------------------') print('-----------------------------------------------------------------') print() print(f'Checking out release tag {config.tag}') check_run(['git', 'checkout', config.tag], cwd=config.source_dir) check_run(['git', 'status'], cwd=config.source_dir) print('-----------------------------------------------------------------') print('-------------------- Pushing npm packages -----------------------') print('-----------------------------------------------------------------') print() print(f'publishing npm packages') check_run(['lerna', 'publish', '--yes', 'from-package', '--no-git-tag-version', '--no-verify-access', '--no-push'], cwd=config.source_dir) print('-----------------------------------------------------------------') print('-------------------- Pushing container images -------------------') print('-----------------------------------------------------------------') print() if not config.pre_release: print(f'Pushing container images') is_latest = config.git_branch == 'main' check_run(['git', 'checkout', f'tags/v{config.new_version}'], cwd=config.source_dir, capture_output=False) check_run(['make', 'publish-container-images', f'IMAGE_IS_LATEST={is_latest}'], cwd=config.source_dir) check_run(['git', 'checkout', 'main'], cwd=config.source_dir, capture_output=False) </DeepExtract> else: <DeepExtract> return 'create-release.py [ prepare | publish ] --version VERSION\n \n DESCRIPTION\n Creates Elyra release based on git commit hash or from HEAD.\n \n create release prepare-changelog --version 1.3.0 [--beta 0] [--rc 0]\n This will prepare the release changelog and make it ready for review on the release workdir.\n\n create-release.py prepare --version 1.3.0 --dev-version 1.4.0 [--beta 0] [--rc 0]\n This will prepare a release candidate, build it locally and make it ready for review on the release workdir.\n \n Note: that one can either use a beta or rc modifier for the release, but not both.\n\n create-release.py publish --version 1.3.0 [--beta 0] [--rc 0]\n This will build a previously prepared release, and publish the artifacts to public repositories.\n \n Required software dependencies for building and publishing a release:\n - Git\n - Node\n - Twine\n - Yarn\n \n Required configurations for publishing a release:\n - GPG with signing key configured\n \n \n ' </DeepExtract> sys.exit() except Exception as ex: raise RuntimeError(f'Error performing release {args.version}') from ex
def main(args=None): """Perform necessary tasks to create and/or publish a new release""" parser = argparse.ArgumentParser(usage=print_help()) parser.add_argument('goal', help='Supported goals: {prepare-changelog | prepare | publish}', type=str, choices={'prepare-changelog', 'prepare', 'publish'}) parser.add_argument('--version', help='the new release version', type=str, required=True) parser.add_argument('--dev-version', help='the new development version', type=str, required=False) parser.add_argument('--beta', help='the release beta number', type=str, required=False) parser.add_argument('--rc', help='the release candidate number', type=str, required=False) parser.add_argument('--org', help='the github org or username to use', type=str, required=False) parser.add_argument('--branch', help='the branch name to use', type=str, required=False) args = parser.parse_args() if args.beta and args.rc: return 'create-release.py [ prepare | publish ] --version VERSION\n \n DESCRIPTION\n Creates Elyra release based on git commit hash or from HEAD.\n \n create release prepare-changelog --version 1.3.0 [--beta 0] [--rc 0]\n This will prepare the release changelog and make it ready for review on the release workdir.\n\n create-release.py prepare --version 1.3.0 --dev-version 1.4.0 [--beta 0] [--rc 0]\n This will prepare a release candidate, build it locally and make it ready for review on the release workdir.\n \n Note: that one can either use a beta or rc modifier for the release, but not both.\n\n create-release.py publish --version 1.3.0 [--beta 0] [--rc 0]\n This will build a previously prepared release, and publish the artifacts to public repositories.\n \n Required software dependencies for building and publishing a release:\n - Git\n - Node\n - Twine\n - Yarn\n \n Required configurations for publishing a release:\n - GPG with signing key configured\n \n \n ' sys.exit(1) global config try: if not dependency_exists('git'): raise DependencyException('Please install git https://git-scm.com/downloads') if not dependency_exists('node'): raise DependencyException('Please install node.js 18+ https://nodejs.org/') if not dependency_exists('yarn'): raise DependencyException('Please install yarn https://classic.yarnpkg.com/') if not dependency_exists('twine'): raise DependencyException('Please install twine https://twine.readthedocs.io/en/latest/#installation') pass if not args: raise ValueError('Invalid command line arguments') v = re.search(VERSION_REG_EX, elyra._version.__version__) configuration = {'goal': args.goal, 'git_url': f'git@github.com:{args.org or DEFAULT_GIT_ORG}/elyra.git', 'git_branch': args.branch or DEFAULT_GIT_BRANCH, 'git_hash': 'HEAD', 'git_user_name': check_output(['git', 'config', 'user.name']), 'git_user_email': check_output(['git', 'config', 'user.email']), 'base_dir': os.getcwd(), 'work_dir': os.path.join(os.getcwd(), DEFAULT_BUILD_DIR), 'source_dir': os.path.join(os.getcwd(), DEFAULT_BUILD_DIR, 'elyra'), 'old_version': elyra._version.__version__, 'old_npm_version': f"{v['major']}.{v['minor']}.{v['patch']}-dev", 'new_version': args.version if (not args.rc or not str.isdigit(args.rc)) and (not args.beta or not str.isdigit(args.beta)) else f'{args.version}rc{args.rc}' if args.rc else f'{args.version}b{args.beta}', 'new_npm_version': args.version if (not args.rc or not str.isdigit(args.rc)) and (not args.beta or not str.isdigit(args.beta)) else f'{args.version}-rc.{args.rc}' if args.rc else f'{args.version}-beta.{args.beta}', 'rc': args.rc, 'beta': args.beta, 'dev_version': 
f'{args.dev_version}.dev0', 'dev_npm_version': f'{args.dev_version}-dev', 'tag': f'v{args.version}' if (not args.rc or not str.isdigit(args.rc)) and (not args.beta or not str.isdigit(args.beta)) else f'v{args.version}rc{args.rc}' if args.rc else f'v{args.version}b{args.beta}', 'pre_release': True if args.rc or args.beta else False} global config config = SimpleNamespace(**configuration) global config print('') print('-----------------------------------------------------------------') print('--------------------- Release configuration ---------------------') print('-----------------------------------------------------------------') print(f'Goal \t\t\t -> {config.goal}') print(f'Git URL \t\t -> {config.git_url}') print(f'Git Branch \t\t -> {config.git_branch}') print(f'Git reference \t\t -> {config.git_hash}') print(f'Git user \t\t -> {config.git_user_name}') print(f'Git user email \t\t -> {config.git_user_email}') print(f'Work dir \t\t -> {config.work_dir}') print(f'Source dir \t\t -> {config.source_dir}') print(f'Old Version \t\t -> {config.old_version}') print(f'Old NPM Version \t -> {config.old_npm_version}') print(f'New Version \t\t -> {config.new_version}') print(f'New NPN Version \t -> {config.new_npm_version}') if config.rc is not None: print(f'RC number \t\t -> {config.rc}') if config.beta is not None: print(f'Beta number \t\t -> {config.beta}') print(f'Dev Version \t\t -> {config.dev_version}') print(f'Dev NPM Version \t -> {config.dev_npm_version}') print(f'Release Tag \t\t -> {config.tag}') print('-----------------------------------------------------------------') print('') if config.goal == 'prepare-changelog': global config print(f'Generating changelog for release {config.new_version}') print('') checkout_code() generate_changelog() check_run(['git', 'commit', '-a', '-m', f'Update changelog for release {config.new_version}'], cwd=config.source_dir) print('') print('') print(f'Changelog for release version: {config.new_version} is ready for review at {config.source_dir}') print('After you are done, push the reviewed changelog to github.') print('') print('') elif config.goal == 'prepare': if not args.dev_version: return 'create-release.py [ prepare | publish ] --version VERSION\n \n DESCRIPTION\n Creates Elyra release based on git commit hash or from HEAD.\n \n create release prepare-changelog --version 1.3.0 [--beta 0] [--rc 0]\n This will prepare the release changelog and make it ready for review on the release workdir.\n\n create-release.py prepare --version 1.3.0 --dev-version 1.4.0 [--beta 0] [--rc 0]\n This will prepare a release candidate, build it locally and make it ready for review on the release workdir.\n \n Note: that one can either use a beta or rc modifier for the release, but not both.\n\n create-release.py publish --version 1.3.0 [--beta 0] [--rc 0]\n This will build a previously prepared release, and publish the artifacts to public repositories.\n \n Required software dependencies for building and publishing a release:\n - Git\n - Node\n - Twine\n - Yarn\n \n Required configurations for publishing a release:\n - GPG with signing key configured\n \n \n ' sys.exit() global config print(f'Processing release from {config.old_version} to {config.new_version} ') print('') checkout_code() prepare_changelog() update_version_to_release() check_run(['git', 'commit', '-a', '-m', f'Release v{config.new_version}'], cwd=config.source_dir) check_run(['git', 'tag', config.tag], cwd=config.source_dir) build_server() build_release() show_release_artifacts() 
update_version_to_dev() check_run(['git', 'commit', '-a', '-m', f'Prepare for next development iteration'], cwd=config.source_dir) prepare_extensions_release() prepare_runtime_extensions_package_release() print('') print('') print(f'Release version: {config.new_version} is ready for review') print('After you are done, run the script again to [publish] the release.') print('') print('') elif args.goal == 'publish': global config files_to_publish = [f'{config.source_dir}/dist/elyra-{config.new_version}-py3-none-any.whl', f'{config.source_dir}/dist/elyra-{config.new_version}.tar.gz', f'{config.source_dir}/dist/elyra_server-{config.new_version}-py3-none-any.whl', f'{config.source_dir}/dist/elyra_server-{config.new_version}.tar.gz', f'{config.work_dir}/airflow-notebook/dist/airflow_notebook-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/airflow-notebook/dist/airflow-notebook-{config.new_version}.tar.gz', f'{config.work_dir}/kfp-notebook/dist/kfp_notebook-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/kfp-notebook/dist/kfp-notebook-{config.new_version}.tar.gz', f'{config.work_dir}/elyra-code-snippet-extension/dist/elyra_code_snippet_extension-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/elyra-code-snippet-extension/dist/elyra-code-snippet-extension-{config.new_version}.tar.gz', f'{config.work_dir}/elyra-code-viewer-extension/dist/elyra_code_viewer_extension-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/elyra-code-viewer-extension/dist/elyra-code-viewer-extension-{config.new_version}.tar.gz', f'{config.work_dir}/elyra-pipeline-editor-extension/dist/elyra_pipeline_editor_extension-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/elyra-pipeline-editor-extension/dist/elyra-pipeline-editor-extension-{config.new_version}.tar.gz', f'{config.work_dir}/elyra-python-editor-extension/dist/elyra_python_editor_extension-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/elyra-python-editor-extension/dist/elyra-python-editor-extension-{config.new_version}.tar.gz', f'{config.work_dir}/elyra-r-editor-extension/dist/elyra_r_editor_extension-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/elyra-r-editor-extension/dist/elyra-r-editor-extension-{config.new_version}.tar.gz', f'{config.work_dir}/elyra-scala-editor-extension/dist/elyra_scala_editor_extension-{config.new_version}-py3-none-any.whl', f'{config.work_dir}/elyra-scala-editor-extension/dist/elyra-scala-editor-extension-{config.new_version}.tar.gz'] print('-----------------------------------------------------------------') print('---------------------- Publishing to PyPI -----------------------') print('-----------------------------------------------------------------') for file in files_to_publish: if not os.path.exists(file): raise MissingReleaseArtifactException(f'Missing release file: {file}') for file in files_to_publish: print(f'Publishing: {file}') check_run(['twine', 'upload', '--sign', file], cwd=os.getcwd()) print('-----------------------------------------------------------------') print('--------------- Pushing Release and Tag to git ------------------') print('-----------------------------------------------------------------') print() print('Pushing release to git') check_run(['git', 'push'], cwd=config.source_dir) print('Pushing release tag to git') check_run(['git', 'push', '--tags'], cwd=config.source_dir) print('-----------------------------------------------------------------') print('--------------- Preparing to push npm packages ------------------') 
print('-----------------------------------------------------------------') print() print(f'Checking out release tag {config.tag}') check_run(['git', 'checkout', config.tag], cwd=config.source_dir) check_run(['git', 'status'], cwd=config.source_dir) print('-----------------------------------------------------------------') print('-------------------- Pushing npm packages -----------------------') print('-----------------------------------------------------------------') print() print(f'publishing npm packages') check_run(['lerna', 'publish', '--yes', 'from-package', '--no-git-tag-version', '--no-verify-access', '--no-push'], cwd=config.source_dir) print('-----------------------------------------------------------------') print('-------------------- Pushing container images -------------------') print('-----------------------------------------------------------------') print() if not config.pre_release: print(f'Pushing container images') is_latest = config.git_branch == 'main' check_run(['git', 'checkout', f'tags/v{config.new_version}'], cwd=config.source_dir, capture_output=False) check_run(['make', 'publish-container-images', f'IMAGE_IS_LATEST={is_latest}'], cwd=config.source_dir) check_run(['git', 'checkout', 'main'], cwd=config.source_dir, capture_output=False) else: return 'create-release.py [ prepare | publish ] --version VERSION\n \n DESCRIPTION\n Creates Elyra release based on git commit hash or from HEAD.\n \n create release prepare-changelog --version 1.3.0 [--beta 0] [--rc 0]\n This will prepare the release changelog and make it ready for review on the release workdir.\n\n create-release.py prepare --version 1.3.0 --dev-version 1.4.0 [--beta 0] [--rc 0]\n This will prepare a release candidate, build it locally and make it ready for review on the release workdir.\n \n Note: that one can either use a beta or rc modifier for the release, but not both.\n\n create-release.py publish --version 1.3.0 [--beta 0] [--rc 0]\n This will build a previously prepared release, and publish the artifacts to public repositories.\n \n Required software dependencies for building and publishing a release:\n - Git\n - Node\n - Twine\n - Yarn\n \n Required configurations for publishing a release:\n - GPG with signing key configured\n \n \n ' sys.exit() except Exception as ex: raise RuntimeError(f'Error performing release {args.version}') from ex
elyra
positive
def helper(k, start, buf, result): if len(buf) == k: result.append(list(buf)) return result for i in range(start, len(nums) - (k - len(buf)) + 1): buf.append(nums[i]) <DeepExtract> if len(buf) == k: result.append(list(buf)) return result for i in range(i + 1, len(nums) - (k - len(buf)) + 1): buf.append(nums[i]) helper(k, i + 1, buf, result) buf.pop() return result </DeepExtract> buf.pop() return result
def helper(k, start, buf, result): if len(buf) == k: result.append(list(buf)) return result for i in range(start, len(nums) - (k - len(buf)) + 1): buf.append(nums[i]) if len(buf) == k: result.append(list(buf)) return result for i in range(i + 1, len(nums) - (k - len(buf)) + 1): buf.append(nums[i]) helper(k, i + 1, buf, result) buf.pop() return result buf.pop() return result
Algorithm-Implementations
positive
def extract(self): """Destructively rips this element out of the tree.""" if self.parent is not None: del self.parent.contents[self.parent.index(self)] <DeepExtract> last_child = self while hasattr(last_child, 'contents') and last_child.contents: last_child = last_child.contents[-1] last_child = last_child </DeepExtract> next_element = last_child.next_element if self.previous_element is not None: self.previous_element.next_element = next_element if next_element is not None: next_element.previous_element = self.previous_element self.previous_element = None last_child.next_element = None self.parent = None if self.previous_sibling is not None: self.previous_sibling.next_sibling = self.next_sibling if self.next_sibling is not None: self.next_sibling.previous_sibling = self.previous_sibling self.previous_sibling = self.next_sibling = None return self
def extract(self): """Destructively rips this element out of the tree.""" if self.parent is not None: del self.parent.contents[self.parent.index(self)] last_child = self while hasattr(last_child, 'contents') and last_child.contents: last_child = last_child.contents[-1] last_child = last_child next_element = last_child.next_element if self.previous_element is not None: self.previous_element.next_element = next_element if next_element is not None: next_element.previous_element = self.previous_element self.previous_element = None last_child.next_element = None self.parent = None if self.previous_sibling is not None: self.previous_sibling.next_sibling = self.next_sibling if self.next_sibling is not None: self.next_sibling.previous_sibling = self.previous_sibling self.previous_sibling = self.next_sibling = None return self
alp
positive
def _parse_properties(response, result_class): """ Extracts out resource properties and metadata information. Ignores the standard http headers. """ if response is None or response.headers is None: return None props = result_class() for (key, value) in response.headers.items(): info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key) if info: if info[0] is None: setattr(props, info[1], info[2](value)) else: attr = getattr(props, info[0]) setattr(attr, info[1], info[2](value)) if hasattr(props, 'blob_type') and props.blob_type == 'PageBlob' and hasattr(props, 'blob_tier') and (props.blob_tier is not None): <DeepExtract> props.blob_tier = _to_str(props.blob_tier).upper() if props.blob_tier is not None else None </DeepExtract> return props
def _parse_properties(response, result_class): """ Extracts out resource properties and metadata information. Ignores the standard http headers. """ if response is None or response.headers is None: return None props = result_class() for (key, value) in response.headers.items(): info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key) if info: if info[0] is None: setattr(props, info[1], info[2](value)) else: attr = getattr(props, info[0]) setattr(attr, info[1], info[2](value)) if hasattr(props, 'blob_type') and props.blob_type == 'PageBlob' and hasattr(props, 'blob_tier') and (props.blob_tier is not None): props.blob_tier = _to_str(props.blob_tier).upper() if props.blob_tier is not None else None return props
azure-cosmos-table-python
positive
def _client_setup(materials_provider, table_name, table_index, ciphertext_item, attribute_actions): cmp = materials_provider() <DeepExtract> client = MagicMock(__class__=botocore.client.BaseClient) client.get_item.return_value = {'Item': ciphertext_item.copy()} client.batch_get_item.return_value = {'Responses': {table_name: [ciphertext_item.copy()]}} client = client </DeepExtract> table_info = TableInfo(name=table_name, primary_index=TableIndex(partition=table_index['partition'], sort=table_index.get('sort', None))) item_key = {table_info.primary_index.partition: ciphertext_item[table_info.primary_index.partition]} if table_info.primary_index.sort is not None: item_key[table_info.primary_index.sort] = ciphertext_item[table_info.primary_index.sort] e_client = EncryptedClient(client=client, materials_provider=cmp, attribute_actions=attribute_actions, auto_refresh_table_indexes=False) e_client._table_info_cache._all_tables_info[table_name] = table_info return (e_client, dict_to_ddb(item_key))
def _client_setup(materials_provider, table_name, table_index, ciphertext_item, attribute_actions): cmp = materials_provider() client = MagicMock(__class__=botocore.client.BaseClient) client.get_item.return_value = {'Item': ciphertext_item.copy()} client.batch_get_item.return_value = {'Responses': {table_name: [ciphertext_item.copy()]}} client = client table_info = TableInfo(name=table_name, primary_index=TableIndex(partition=table_index['partition'], sort=table_index.get('sort', None))) item_key = {table_info.primary_index.partition: ciphertext_item[table_info.primary_index.partition]} if table_info.primary_index.sort is not None: item_key[table_info.primary_index.sort] = ciphertext_item[table_info.primary_index.sort] e_client = EncryptedClient(client=client, materials_provider=cmp, attribute_actions=attribute_actions, auto_refresh_table_indexes=False) e_client._table_info_cache._all_tables_info[table_name] = table_info return (e_client, dict_to_ddb(item_key))
aws-dynamodb-encryption-python
positive
def _add_gt_annotations(entry): """Add ground truth annotation metadata to an roidb entry.""" ann_ids = self.COCO.getAnnIds(imgIds=entry['id'], iscrowd=None) objs = self.COCO.loadAnns(ann_ids) valid_objs = [] valid_segms = [] width = entry['width'] height = entry['height'] for obj in objs: if isinstance(obj['segmentation'], list): obj['segmentation'] = [p for p in obj['segmentation'] if len(p) >= 6] if obj['area'] < cfg.TRAIN.GT_MIN_AREA: continue if 'ignore' in obj and obj['ignore'] == 1: continue (x1, y1, x2, y2) = box_utils.xywh_to_xyxy(obj['bbox']) (x1, y1, x2, y2) = box_utils.clip_xyxy_to_image(x1, y1, x2, y2, height, width) if obj['area'] > 0 and x2 > x1 and (y2 > y1): obj['clean_bbox'] = [x1, y1, x2, y2] valid_objs.append(obj) valid_segms.append(obj['segmentation']) num_valid_objs = len(valid_objs) boxes = np.zeros((num_valid_objs, 4), dtype=entry['boxes'].dtype) gt_classes = np.zeros(num_valid_objs, dtype=entry['gt_classes'].dtype) gt_overlaps = np.zeros((num_valid_objs, self.num_classes), dtype=entry['gt_overlaps'].dtype) seg_areas = np.zeros(num_valid_objs, dtype=entry['seg_areas'].dtype) is_crowd = np.zeros(num_valid_objs, dtype=entry['is_crowd'].dtype) box_to_gt_ind_map = np.zeros(num_valid_objs, dtype=entry['box_to_gt_ind_map'].dtype) if self.keypoints is not None: gt_keypoints = np.zeros((num_valid_objs, 3, self.num_keypoints), dtype=entry['gt_keypoints'].dtype) im_has_visible_keypoints = False for (ix, obj) in enumerate(valid_objs): cls = self.json_category_id_to_contiguous_id[obj['category_id']] boxes[ix, :] = obj['clean_bbox'] gt_classes[ix] = cls seg_areas[ix] = obj['area'] is_crowd[ix] = obj['iscrowd'] box_to_gt_ind_map[ix] = ix if self.keypoints is not None: <DeepExtract> if 'keypoints' not in obj: gt_keypoints[ix, :, :] = None kp = np.array(obj['keypoints']) x = kp[0::3] y = kp[1::3] v = kp[2::3] num_keypoints = len(obj['keypoints']) / 3 assert num_keypoints == self.num_keypoints gt_kps = np.ones((3, self.num_keypoints), dtype=np.int32) for i in range(self.num_keypoints): gt_kps[0, i] = x[i] gt_kps[1, i] = y[i] gt_kps[2, i] = v[i] gt_keypoints[ix, :, :] = gt_kps </DeepExtract> if np.sum(gt_keypoints[ix, 2, :]) > 0: im_has_visible_keypoints = True if obj['iscrowd']: gt_overlaps[ix, :] = -1.0 else: gt_overlaps[ix, cls] = 1.0 entry['boxes'] = np.append(entry['boxes'], boxes, axis=0) entry['segms'].extend(valid_segms) entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes) entry['seg_areas'] = np.append(entry['seg_areas'], seg_areas) entry['gt_overlaps'] = np.append(entry['gt_overlaps'].toarray(), gt_overlaps, axis=0) entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps']) entry['is_crowd'] = np.append(entry['is_crowd'], is_crowd) entry['box_to_gt_ind_map'] = np.append(entry['box_to_gt_ind_map'], box_to_gt_ind_map) if self.keypoints is not None: entry['gt_keypoints'] = np.append(entry['gt_keypoints'], gt_keypoints, axis=0) entry['has_visible_keypoints'] = im_has_visible_keypoints
def _add_gt_annotations(entry): """Add ground truth annotation metadata to an roidb entry.""" ann_ids = self.COCO.getAnnIds(imgIds=entry['id'], iscrowd=None) objs = self.COCO.loadAnns(ann_ids) valid_objs = [] valid_segms = [] width = entry['width'] height = entry['height'] for obj in objs: if isinstance(obj['segmentation'], list): obj['segmentation'] = [p for p in obj['segmentation'] if len(p) >= 6] if obj['area'] < cfg.TRAIN.GT_MIN_AREA: continue if 'ignore' in obj and obj['ignore'] == 1: continue (x1, y1, x2, y2) = box_utils.xywh_to_xyxy(obj['bbox']) (x1, y1, x2, y2) = box_utils.clip_xyxy_to_image(x1, y1, x2, y2, height, width) if obj['area'] > 0 and x2 > x1 and (y2 > y1): obj['clean_bbox'] = [x1, y1, x2, y2] valid_objs.append(obj) valid_segms.append(obj['segmentation']) num_valid_objs = len(valid_objs) boxes = np.zeros((num_valid_objs, 4), dtype=entry['boxes'].dtype) gt_classes = np.zeros(num_valid_objs, dtype=entry['gt_classes'].dtype) gt_overlaps = np.zeros((num_valid_objs, self.num_classes), dtype=entry['gt_overlaps'].dtype) seg_areas = np.zeros(num_valid_objs, dtype=entry['seg_areas'].dtype) is_crowd = np.zeros(num_valid_objs, dtype=entry['is_crowd'].dtype) box_to_gt_ind_map = np.zeros(num_valid_objs, dtype=entry['box_to_gt_ind_map'].dtype) if self.keypoints is not None: gt_keypoints = np.zeros((num_valid_objs, 3, self.num_keypoints), dtype=entry['gt_keypoints'].dtype) im_has_visible_keypoints = False for (ix, obj) in enumerate(valid_objs): cls = self.json_category_id_to_contiguous_id[obj['category_id']] boxes[ix, :] = obj['clean_bbox'] gt_classes[ix] = cls seg_areas[ix] = obj['area'] is_crowd[ix] = obj['iscrowd'] box_to_gt_ind_map[ix] = ix if self.keypoints is not None: if 'keypoints' not in obj: gt_keypoints[ix, :, :] = None kp = np.array(obj['keypoints']) x = kp[0::3] y = kp[1::3] v = kp[2::3] num_keypoints = len(obj['keypoints']) / 3 assert num_keypoints == self.num_keypoints gt_kps = np.ones((3, self.num_keypoints), dtype=np.int32) for i in range(self.num_keypoints): gt_kps[0, i] = x[i] gt_kps[1, i] = y[i] gt_kps[2, i] = v[i] gt_keypoints[ix, :, :] = gt_kps if np.sum(gt_keypoints[ix, 2, :]) > 0: im_has_visible_keypoints = True if obj['iscrowd']: gt_overlaps[ix, :] = -1.0 else: gt_overlaps[ix, cls] = 1.0 entry['boxes'] = np.append(entry['boxes'], boxes, axis=0) entry['segms'].extend(valid_segms) entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes) entry['seg_areas'] = np.append(entry['seg_areas'], seg_areas) entry['gt_overlaps'] = np.append(entry['gt_overlaps'].toarray(), gt_overlaps, axis=0) entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps']) entry['is_crowd'] = np.append(entry['is_crowd'], is_crowd) entry['box_to_gt_ind_map'] = np.append(entry['box_to_gt_ind_map'], box_to_gt_ind_map) if self.keypoints is not None: entry['gt_keypoints'] = np.append(entry['gt_keypoints'], gt_keypoints, axis=0) entry['has_visible_keypoints'] = im_has_visible_keypoints
AIC2018_iamai
positive
def get_devices(self): """ Return name of devices """ <DeepExtract> if not self.initialized: if is_macos: try: pynvx.cudaInit() except RuntimeError: self.initialized = True return else: try: pynvml.nvmlInit() except pynvml.NVMLError_LibraryNotFound: self.initialized = True return self.initialized = True self.get_device_count() self.get_handles() </DeepExtract> if is_macos: names = [pynvx.cudaGetName(handle, ignore=True) for handle in self.handles] else: names = [pynvml.nvmlDeviceGetName(handle).decode('utf-8') for handle in self.handles] return names
def get_devices(self): """ Return name of devices """ if not self.initialized: if is_macos: try: pynvx.cudaInit() except RuntimeError: self.initialized = True return else: try: pynvml.nvmlInit() except pynvml.NVMLError_LibraryNotFound: self.initialized = True return self.initialized = True self.get_device_count() self.get_handles() if is_macos: names = [pynvx.cudaGetName(handle, ignore=True) for handle in self.handles] else: names = [pynvml.nvmlDeviceGetName(handle).decode('utf-8') for handle in self.handles] return names
DeepFakeTutorial
positive
def check_many_encrypted_items(actual, expected, attribute_actions, transformer=_nop_transformer): assert len(actual) == len(expected) for actual_item in actual: <DeepExtract> expected_item = [i for i in expected if i['partition_attribute'] == actual_item['partition_attribute'] and i['sort_attribute'] == actual_item['sort_attribute']] assert len(expected_item) == 1 expected_item = expected_item[0] </DeepExtract> <DeepExtract> ciphertext_attributes = set(transformer(actual_item).keys()) plaintext_attributes = set(transformer(expected_item).keys()) if attribute_actions.take_no_actions: assert ciphertext_attributes == plaintext_attributes else: assert ciphertext_attributes == plaintext_attributes.union(_reserved_attributes) for (name, value) in transformer(actual_item).items(): if name in _reserved_attributes: continue if attribute_actions.action(name) is CryptoAction.ENCRYPT_AND_SIGN: assert isinstance(value, Binary) assert value != transformer(expected_item)[name] else: assert value == transformer(expected_item)[name] </DeepExtract>
def check_many_encrypted_items(actual, expected, attribute_actions, transformer=_nop_transformer): assert len(actual) == len(expected) for actual_item in actual: expected_item = [i for i in expected if i['partition_attribute'] == actual_item['partition_attribute'] and i['sort_attribute'] == actual_item['sort_attribute']] assert len(expected_item) == 1 expected_item = expected_item[0] ciphertext_attributes = set(transformer(actual_item).keys()) plaintext_attributes = set(transformer(expected_item).keys()) if attribute_actions.take_no_actions: assert ciphertext_attributes == plaintext_attributes else: assert ciphertext_attributes == plaintext_attributes.union(_reserved_attributes) for (name, value) in transformer(actual_item).items(): if name in _reserved_attributes: continue if attribute_actions.action(name) is CryptoAction.ENCRYPT_AND_SIGN: assert isinstance(value, Binary) assert value != transformer(expected_item)[name] else: assert value == transformer(expected_item)[name]
aws-dynamodb-encryption-python
positive
def _manual_test_getch(): <DeepExtract> sys.stdout.write('\n') sys.stdout.flush() </DeepExtract> keys = 'a b c ENTER ESC'.split() for key in keys: if key in globals(): value = globals()[key][0] else: value = key <DeepExtract> sys.stdout.write(f"Press key '{key}': ") sys.stdout.flush() </DeepExtract> key = getch() if key == value: <DeepExtract> sys.stdout.write('OK\n') sys.stdout.flush() </DeepExtract> else: <DeepExtract> sys.stdout.write(f'FAILED: getch() returned {key} (hex {dumpkey(key)})\n') sys.stdout.flush() </DeepExtract>
def _manual_test_getch(): sys.stdout.write('\n') sys.stdout.flush() keys = 'a b c ENTER ESC'.split() for key in keys: if key in globals(): value = globals()[key][0] else: value = key sys.stdout.write(f"Press key '{key}': ") sys.stdout.flush() key = getch() if key == value: sys.stdout.write('OK\n') sys.stdout.flush() else: sys.stdout.write(f'FAILED: getch() returned {key} (hex {dumpkey(key)})\n') sys.stdout.flush()
DragonPy
positive
def get_protectionGoSet(self): reg = self.protectionGoSet_ref['read'] <DeepExtract> (self.protectionGoSet, error) = self.i2c.readU8(reg) </DeepExtract> if self.debug & error: print('Error reading address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
def get_protectionGoSet(self): reg = self.protectionGoSet_ref['read'] (self.protectionGoSet, error) = self.i2c.readU8(reg) if self.debug & error: print('Error reading address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
choreograph-git
positive
def forward(self, obs, compute_pi=True, compute_log_pi=True, detach_encoder=False): obs = self.encoder(obs, detach=detach_encoder) (mu, log_std) = self.trunk(obs).chunk(2, dim=-1) log_std = torch.tanh(log_std) log_std = self.log_std_min + 0.5 * (self.log_std_max - self.log_std_min) * (log_std + 1) self.outputs['mu'] = mu self.outputs['std'] = log_std.exp() if compute_pi: std = log_std.exp() noise = torch.randn_like(mu) pi = mu + noise * std else: pi = None entropy = None if compute_log_pi: <DeepExtract> residual = (-0.5 * noise.pow(2) - log_std).sum(-1, keepdim=True) log_pi = residual - 0.5 * np.log(2 * np.pi) * noise.size(-1) </DeepExtract> else: log_pi = None <DeepExtract> mu = torch.tanh(mu) if pi is not None: pi = torch.tanh(pi) if log_pi is not None: log_pi -= torch.log(F.relu(1 - pi.pow(2)) + 1e-06).sum(-1, keepdim=True) (mu, pi, log_pi) = (mu, pi, log_pi) </DeepExtract> return (mu, pi, log_pi, log_std)
def forward(self, obs, compute_pi=True, compute_log_pi=True, detach_encoder=False): obs = self.encoder(obs, detach=detach_encoder) (mu, log_std) = self.trunk(obs).chunk(2, dim=-1) log_std = torch.tanh(log_std) log_std = self.log_std_min + 0.5 * (self.log_std_max - self.log_std_min) * (log_std + 1) self.outputs['mu'] = mu self.outputs['std'] = log_std.exp() if compute_pi: std = log_std.exp() noise = torch.randn_like(mu) pi = mu + noise * std else: pi = None entropy = None if compute_log_pi: residual = (-0.5 * noise.pow(2) - log_std).sum(-1, keepdim=True) log_pi = residual - 0.5 * np.log(2 * np.pi) * noise.size(-1) else: log_pi = None mu = torch.tanh(mu) if pi is not None: pi = torch.tanh(pi) if log_pi is not None: log_pi -= torch.log(F.relu(1 - pi.pow(2)) + 1e-06).sum(-1, keepdim=True) (mu, pi, log_pi) = (mu, pi, log_pi) return (mu, pi, log_pi, log_std)
deep_bisim4control
positive
def fit(self, pyg_data, train_iters=200, initialize=True, verbose=False, patience=500, **kwargs): """Train the ChebNet model, when idx_val is not None, pick the best model according to the validation loss. Parameters ---------- pyg_data : pytorch geometric dataset object train_iters : int number of training epochs initialize : bool whether to initialize parameters before training verbose : bool whether to show verbose logs patience : int patience for early stopping, only valid when `idx_val` is given """ self.device = self.conv1.weight.device if initialize: <DeepExtract> self.conv1.reset_parameters() self.conv2.reset_parameters() </DeepExtract> self.data = pyg_data[0].to(self.device) <DeepExtract> if verbose: print('=== training ChebNet model ===') optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay) labels = self.data.y (train_mask, val_mask) = (self.data.train_mask, self.data.val_mask) early_stopping = patience best_loss_val = 100 for i in range(train_iters): self.train() optimizer.zero_grad() output = self.forward(self.data) loss_train = F.nll_loss(output[train_mask], labels[train_mask]) loss_train.backward() optimizer.step() if verbose and i % 10 == 0: print('Epoch {}, training loss: {}'.format(i, loss_train.item())) self.eval() output = self.forward(self.data) loss_val = F.nll_loss(output[val_mask], labels[val_mask]) if best_loss_val > loss_val: best_loss_val = loss_val self.output = output weights = deepcopy(self.state_dict()) patience = early_stopping else: patience -= 1 if i > early_stopping and patience <= 0: break if verbose: print('=== early stopping at {0}, loss_val = {1} ==='.format(i, best_loss_val)) self.load_state_dict(weights) </DeepExtract>
def fit(self, pyg_data, train_iters=200, initialize=True, verbose=False, patience=500, **kwargs): """Train the ChebNet model, when idx_val is not None, pick the best model according to the validation loss. Parameters ---------- pyg_data : pytorch geometric dataset object train_iters : int number of training epochs initialize : bool whether to initialize parameters before training verbose : bool whether to show verbose logs patience : int patience for early stopping, only valid when `idx_val` is given """ self.device = self.conv1.weight.device if initialize: self.conv1.reset_parameters() self.conv2.reset_parameters() self.data = pyg_data[0].to(self.device) if verbose: print('=== training ChebNet model ===') optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay) labels = self.data.y (train_mask, val_mask) = (self.data.train_mask, self.data.val_mask) early_stopping = patience best_loss_val = 100 for i in range(train_iters): self.train() optimizer.zero_grad() output = self.forward(self.data) loss_train = F.nll_loss(output[train_mask], labels[train_mask]) loss_train.backward() optimizer.step() if verbose and i % 10 == 0: print('Epoch {}, training loss: {}'.format(i, loss_train.item())) self.eval() output = self.forward(self.data) loss_val = F.nll_loss(output[val_mask], labels[val_mask]) if best_loss_val > loss_val: best_loss_val = loss_val self.output = output weights = deepcopy(self.state_dict()) patience = early_stopping else: patience -= 1 if i > early_stopping and patience <= 0: break if verbose: print('=== early stopping at {0}, loss_val = {1} ==='.format(i, best_loss_val)) self.load_state_dict(weights)
DeepRobust
positive
def adjust_crop_region(crop_region, target): keep_indies_ = torch.zeros(len(target), dtype=torch.bool) while True: <DeepExtract> inter_xmin = torch.max(crop_region[0], target.bbox[:, 0]) inter_ymin = torch.max(crop_region[1], target.bbox[:, 1]) inter_xmax = torch.min(crop_region[2], target.bbox[:, 2]) inter_ymax = torch.min(crop_region[3], target.bbox[:, 3]) inter_width = torch.max(torch.Tensor([0]), inter_xmax - inter_xmin) inter_height = torch.max(torch.Tensor([0]), inter_ymax - inter_ymin) inter_area = inter_width * inter_height </DeepExtract> keep_indies = inter_area > 0 if torch.sum(keep_indies) == 0: return (None, None) keep_target = target[keep_indies] if keep_indies.equal(keep_indies_): return (crop_region, keep_target) keep_bbox = keep_target.bbox crop_xmin = torch.min(crop_region[0], torch.min(keep_bbox[:, 0])) crop_ymin = torch.min(crop_region[1], torch.min(keep_bbox[:, 1])) crop_xmax = torch.max(crop_region[2], torch.max(keep_bbox[:, 2])) crop_ymax = torch.max(crop_region[3], torch.max(keep_bbox[:, 3])) crop_region = torch.Tensor([crop_xmin, crop_ymin, crop_xmax, crop_ymax]) keep_indies_ = keep_indies
def adjust_crop_region(crop_region, target): keep_indies_ = torch.zeros(len(target), dtype=torch.bool) while True: inter_xmin = torch.max(crop_region[0], target.bbox[:, 0]) inter_ymin = torch.max(crop_region[1], target.bbox[:, 1]) inter_xmax = torch.min(crop_region[2], target.bbox[:, 2]) inter_ymax = torch.min(crop_region[3], target.bbox[:, 3]) inter_width = torch.max(torch.Tensor([0]), inter_xmax - inter_xmin) inter_height = torch.max(torch.Tensor([0]), inter_ymax - inter_ymin) inter_area = inter_width * inter_height keep_indies = inter_area > 0 if torch.sum(keep_indies) == 0: return (None, None) keep_target = target[keep_indies] if keep_indies.equal(keep_indies_): return (crop_region, keep_target) keep_bbox = keep_target.bbox crop_xmin = torch.min(crop_region[0], torch.min(keep_bbox[:, 0])) crop_ymin = torch.min(crop_region[1], torch.min(keep_bbox[:, 1])) crop_xmax = torch.max(crop_region[2], torch.max(keep_bbox[:, 2])) crop_ymax = torch.max(crop_region[3], torch.max(keep_bbox[:, 3])) crop_region = torch.Tensor([crop_xmin, crop_ymin, crop_xmax, crop_ymax]) keep_indies_ = keep_indies
bezier_curve_text_spotting
positive
def flash(filepath, addr=0, close=True): collect() <DeepExtract> if filepath.startswith('http://') or filepath.startswith('/http:/'): dstream = open_web(filepath) else: dstream = open(filepath, 'rb') </DeepExtract> if dstream: <DeepExtract> if filepath.lower().endswith('.gz'): import uzlib flash_stream(uzlib.DecompIO(dstream, 31), addr) else: flash_stream(dstream, addr) </DeepExtract> if close: <DeepExtract> sdr(b' ') sir_idle(b'\xff', 100, 1) sir_idle(b'&', 2, 200) sir_idle(b'\xff', 2, 1) sir(b'y') sdr_idle(b'\x00\x00\x00', 2, 100) spi_jtag_off() send_tms(1, 6) led.off() bitbang_jtag_off() </DeepExtract> return status return False
def flash(filepath, addr=0, close=True): collect() if filepath.startswith('http://') or filepath.startswith('/http:/'): dstream = open_web(filepath) else: dstream = open(filepath, 'rb') if dstream: if filepath.lower().endswith('.gz'): import uzlib flash_stream(uzlib.DecompIO(dstream, 31), addr) else: flash_stream(dstream, addr) if close: sdr(b' ') sir_idle(b'\xff', 100, 1) sir_idle(b'&', 2, 200) sir_idle(b'\xff', 2, 1) sir(b'y') sdr_idle(b'\x00\x00\x00', 2, 100) spi_jtag_off() send_tms(1, 6) led.off() bitbang_jtag_off() return status return False
esp32ecp5
positive
def _loss(self, input, target): logit = self(input) <DeepExtract> base_loss = self.base_weight * nn.L1Loss()(logit, target) y_c_features = self.vgg(target) y_hat_features = self.vgg(logit) y_hat_gram = [self.gram(fmap) for fmap in y_hat_features] x_gram = [self.gram(fmap) for fmap in y_c_features] style_loss = 0 for j in range(4): style_loss += self.style_weight * nn.functional.mse_loss(y_hat_gram[j], x_gram[j]) recon = y_c_features[1] recon_hat = y_hat_features[1] content_loss = self.content_weight * nn.L1Loss()(recon_hat, recon) diff_i = torch.sum(torch.abs(logit[:, :, :, 1:] - logit[:, :, :, :-1])) diff_j = torch.sum(torch.abs(logit[:, :, 1:, :] - logit[:, :, :-1, :])) tv_loss = self.tv_weight * (diff_i + diff_j) total_loss = base_loss + style_loss + content_loss + tv_loss loss = total_loss </DeepExtract> return loss
def _loss(self, input, target): logit = self(input) base_loss = self.base_weight * nn.L1Loss()(logit, target) y_c_features = self.vgg(target) y_hat_features = self.vgg(logit) y_hat_gram = [self.gram(fmap) for fmap in y_hat_features] x_gram = [self.gram(fmap) for fmap in y_c_features] style_loss = 0 for j in range(4): style_loss += self.style_weight * nn.functional.mse_loss(y_hat_gram[j], x_gram[j]) recon = y_c_features[1] recon_hat = y_hat_features[1] content_loss = self.content_weight * nn.L1Loss()(recon_hat, recon) diff_i = torch.sum(torch.abs(logit[:, :, :, 1:] - logit[:, :, :, :-1])) diff_j = torch.sum(torch.abs(logit[:, :, 1:, :] - logit[:, :, :-1, :])) tv_loss = self.tv_weight * (diff_i + diff_j) total_loss = base_loss + style_loss + content_loss + tv_loss loss = total_loss return loss
AGD
positive
def create_cluster(self, clustername, username, image, user_info, setting): if self.is_cluster(clustername, username): return [False, 'cluster:%s already exists' % clustername] if self.imgmgr.get_image_size(image) + 100 > int(setting['disk']): return [False, 'the size of disk is not big enough for the image'] clustersize = int(self.defaultsize) logger.info('starting cluster %s with %d containers for %s' % (clustername, int(clustersize), username)) workers = self.nodemgr.get_base_nodeips() image_json = json.dumps(image) groupname = json.loads(user_info)['data']['group'] groupquota = json.loads(user_info)['data']['groupinfo'] uid = json.loads(user_info)['data']['id'] if len(workers) == 0: logger.warning('no workers to start containers, start cluster failed') return [False, 'no workers are running'] if not self.networkmgr.has_user(username): ipnum = int(groupquota['vnode']) + 3 cidr = 32 - math.ceil(math.log(ipnum, 2)) self.networkmgr.add_user(username, cidr=cidr, isshared=True if str(groupname) == 'fundation' else False) if self.distributedgw == 'False': [success, message] = self.networkmgr.setup_usrgw(groupquota['input_rate_limit'], groupquota['output_rate_limit'], username, uid, self.nodemgr) if not success: return [False, message] elif not self.networkmgr.has_usrgw(username): self.networkmgr.usrgws[username] = self.networkmgr.masterip self.networkmgr.dump_usrgw(username) [status, result] = self.networkmgr.acquire_userips_cidr(username, clustersize) gateway = self.networkmgr.get_usergw(username) logger.info('create cluster with gateway : %s' % gateway) self.networkmgr.printpools() if not status: logger.info('create cluster failed: %s' % result) return [False, result] ips = result <DeepExtract> self.clusterid_locks.acquire() clusterid = self.etcd.getkey('vcluster/nextid')[1] self.etcd.setkey('vcluster/nextid', str(int(clusterid) + 1)) self.clusterid_locks.release() clusterid = int(clusterid) </DeepExtract> clusterpath = self.fspath + '/global/users/' + username + '/clusters/' + clustername hostpath = self.fspath + '/global/users/' + username + '/hosts/' + str(clusterid) + '.hosts' hosts = '127.0.0.1\tlocalhost\n' proxy_server_ip = '' proxy_public_ip = '' containers = [] for i in range(0, clustersize): workerip = workers[random.randint(0, len(workers) - 1)] oneworker = self.nodemgr.ip_to_rpc(workerip) if self.distributedgw == 'True' and i == 0 and (not self.networkmgr.has_usrgw(username)): [success, message] = self.networkmgr.setup_usrgw(groupquota['input_rate_limit'], groupquota['output_rate_limit'], username, uid, self.nodemgr, workerip) if not success: return [False, message] if i == 0: self.networkmgr.load_usrgw(username) proxy_server_ip = self.networkmgr.usrgws[username] [status, proxy_public_ip] = self.etcd.getkey('machines/publicIP/' + proxy_server_ip) if not status: logger.error('Fail to get proxy_public_ip %s.' 
% proxy_server_ip) return [False, 'Fail to get proxy server public IP.'] lxc_name = username + '-' + str(clusterid) + '-' + str(i) hostname = 'host-' + str(i) logger.info('create container with : name-%s, username-%s, clustername-%s, clusterid-%s, hostname-%s, ip-%s, gateway-%s, image-%s' % (lxc_name, username, clustername, str(clusterid), hostname, ips[i], gateway, image_json)) [success, message] = oneworker.create_container(lxc_name, proxy_public_ip, username, uid, json.dumps(setting), clustername, str(clusterid), str(i), hostname, ips[i], gateway, image_json) if success is False: self.networkmgr.release_userips(username, ips[i]) logger.info('container create failed, so vcluster create failed') return [False, message] logger.info('container create success') hosts = hosts + ips[i].split('/')[0] + '\t' + hostname + '\t' + hostname + '.' + clustername + '\n' containers.append(Container(lxc_name, hostname, ips[i], workerip, image['name'], datetime.datetime.now(), setting)) hostfile = open(hostpath, 'w') hostfile.write(hosts) hostfile.close() vcluster = VCluster(clusterid, clustername, username, 'stopped', clustersize, clustersize, proxy_server_ip, proxy_public_ip) for con in containers: vcluster.containers.append(con) db.session.add(vcluster) <DeepExtract> try: db.session.commit() except Exception as err: db.session.rollback() logger.error(traceback.format_exc()) return False return True </DeepExtract> return [True, str(vcluster)]
def create_cluster(self, clustername, username, image, user_info, setting): if self.is_cluster(clustername, username): return [False, 'cluster:%s already exists' % clustername] if self.imgmgr.get_image_size(image) + 100 > int(setting['disk']): return [False, 'the size of disk is not big enough for the image'] clustersize = int(self.defaultsize) logger.info('starting cluster %s with %d containers for %s' % (clustername, int(clustersize), username)) workers = self.nodemgr.get_base_nodeips() image_json = json.dumps(image) groupname = json.loads(user_info)['data']['group'] groupquota = json.loads(user_info)['data']['groupinfo'] uid = json.loads(user_info)['data']['id'] if len(workers) == 0: logger.warning('no workers to start containers, start cluster failed') return [False, 'no workers are running'] if not self.networkmgr.has_user(username): ipnum = int(groupquota['vnode']) + 3 cidr = 32 - math.ceil(math.log(ipnum, 2)) self.networkmgr.add_user(username, cidr=cidr, isshared=True if str(groupname) == 'fundation' else False) if self.distributedgw == 'False': [success, message] = self.networkmgr.setup_usrgw(groupquota['input_rate_limit'], groupquota['output_rate_limit'], username, uid, self.nodemgr) if not success: return [False, message] elif not self.networkmgr.has_usrgw(username): self.networkmgr.usrgws[username] = self.networkmgr.masterip self.networkmgr.dump_usrgw(username) [status, result] = self.networkmgr.acquire_userips_cidr(username, clustersize) gateway = self.networkmgr.get_usergw(username) logger.info('create cluster with gateway : %s' % gateway) self.networkmgr.printpools() if not status: logger.info('create cluster failed: %s' % result) return [False, result] ips = result self.clusterid_locks.acquire() clusterid = self.etcd.getkey('vcluster/nextid')[1] self.etcd.setkey('vcluster/nextid', str(int(clusterid) + 1)) self.clusterid_locks.release() clusterid = int(clusterid) clusterpath = self.fspath + '/global/users/' + username + '/clusters/' + clustername hostpath = self.fspath + '/global/users/' + username + '/hosts/' + str(clusterid) + '.hosts' hosts = '127.0.0.1\tlocalhost\n' proxy_server_ip = '' proxy_public_ip = '' containers = [] for i in range(0, clustersize): workerip = workers[random.randint(0, len(workers) - 1)] oneworker = self.nodemgr.ip_to_rpc(workerip) if self.distributedgw == 'True' and i == 0 and (not self.networkmgr.has_usrgw(username)): [success, message] = self.networkmgr.setup_usrgw(groupquota['input_rate_limit'], groupquota['output_rate_limit'], username, uid, self.nodemgr, workerip) if not success: return [False, message] if i == 0: self.networkmgr.load_usrgw(username) proxy_server_ip = self.networkmgr.usrgws[username] [status, proxy_public_ip] = self.etcd.getkey('machines/publicIP/' + proxy_server_ip) if not status: logger.error('Fail to get proxy_public_ip %s.' 
% proxy_server_ip) return [False, 'Fail to get proxy server public IP.'] lxc_name = username + '-' + str(clusterid) + '-' + str(i) hostname = 'host-' + str(i) logger.info('create container with : name-%s, username-%s, clustername-%s, clusterid-%s, hostname-%s, ip-%s, gateway-%s, image-%s' % (lxc_name, username, clustername, str(clusterid), hostname, ips[i], gateway, image_json)) [success, message] = oneworker.create_container(lxc_name, proxy_public_ip, username, uid, json.dumps(setting), clustername, str(clusterid), str(i), hostname, ips[i], gateway, image_json) if success is False: self.networkmgr.release_userips(username, ips[i]) logger.info('container create failed, so vcluster create failed') return [False, message] logger.info('container create success') hosts = hosts + ips[i].split('/')[0] + '\t' + hostname + '\t' + hostname + '.' + clustername + '\n' containers.append(Container(lxc_name, hostname, ips[i], workerip, image['name'], datetime.datetime.now(), setting)) hostfile = open(hostpath, 'w') hostfile.write(hosts) hostfile.close() vcluster = VCluster(clusterid, clustername, username, 'stopped', clustersize, clustersize, proxy_server_ip, proxy_public_ip) for con in containers: vcluster.containers.append(con) db.session.add(vcluster) try: db.session.commit() except Exception as err: db.session.rollback() logger.error(traceback.format_exc()) return False return True return [True, str(vcluster)]
docklet
positive
def get_module_authors(self): """Grep the authors out of the module docstrings""" if 'author' in self.docs or 'authors' in self.docs: _authors = self.docs.get('author') or self.docs.get('authors') if _authors is None: return [] if not isinstance(_authors, list): _authors = [_authors] logins = set() for author in _authors: <DeepExtract> if author is None: _logins = [] authors = set() if author is None: _logins = authors if 'ansible core team' in author.lower(): authors.add('ansible') elif '@' in author: authors.update(re.findall('(?<!\\w)@([\\w-]+)(?![\\w.])', author)) elif 'github.com/' in author: idx = author.find('github.com/') author = author[idx + 11:] authors.add(author.replace(')', '')) elif '(' in author and len(author.split()) == 3: idx = author.find('(') author = author[idx + 1:] authors.add(author.replace(')', '')) for email in re.findall('[<(]([^@]+@[^)>]+)[)>]', author): github_id = self.email_cache.get(email) if github_id: authors.add(github_id) _logins = list(authors) </DeepExtract> if _logins: logins = logins.union(_logins) return list(logins) else: return []
def get_module_authors(self): """Grep the authors out of the module docstrings""" if 'author' in self.docs or 'authors' in self.docs: _authors = self.docs.get('author') or self.docs.get('authors') if _authors is None: return [] if not isinstance(_authors, list): _authors = [_authors] logins = set() for author in _authors: if author is None: _logins = [] authors = set() if author is None: _logins = authors if 'ansible core team' in author.lower(): authors.add('ansible') elif '@' in author: authors.update(re.findall('(?<!\\w)@([\\w-]+)(?![\\w.])', author)) elif 'github.com/' in author: idx = author.find('github.com/') author = author[idx + 11:] authors.add(author.replace(')', '')) elif '(' in author and len(author.split()) == 3: idx = author.find('(') author = author[idx + 1:] authors.add(author.replace(')', '')) for email in re.findall('[<(]([^@]+@[^)>]+)[)>]', author): github_id = self.email_cache.get(email) if github_id: authors.add(github_id) _logins = list(authors) if _logins: logins = logins.union(_logins) return list(logins) else: return []
ansibullbot
positive
def main(): parser = argparse.ArgumentParser(description='Converts prodigal GFF to canonical GFF3') parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input file to be read') parser.add_argument('-o', '--output_file', type=str, required=True, help='Path to an output file to be created') args = parser.parse_args() ofh = open(args.output_file, 'wt') ofh.write('##gff-version 3\n') for line in open(args.input_file): if line.startswith('#'): m = re.search('gff-version', line) if not m: ofh.write(line) continue cols = line.split('\t') if len(cols) == 9: m = re.match('ID=(.+?);(.+)', cols[8]) (id, annotation) = m.groups() <DeepExtract> cols[2] = 'gene' ofh.write('\t'.join(cols[:-1]) + '\t{0}\n'.format('ID={0};{1}'.format(id, annotation))) </DeepExtract> <DeepExtract> cols[2] = 'mRNA' ofh.write('\t'.join(cols[:-1]) + '\t{0}\n'.format('ID={0}_mRNA;Parent={0}'.format(id))) </DeepExtract> <DeepExtract> cols[2] = 'CDS' ofh.write('\t'.join(cols[:-1]) + '\t{0}\n'.format('ID={0}_cds;Parent={0}_mRNA'.format(id))) </DeepExtract> <DeepExtract> cols[2] = 'exon' ofh.write('\t'.join(cols[:-1]) + '\t{0}\n'.format('ID={0}_exon;Parent={0}_mRNA'.format(id))) </DeepExtract> <DeepExtract> cols[2] = 'polypeptide' ofh.write('\t'.join(cols[:-1]) + '\t{0}\n'.format('ID={0}_polypeptide;Parent={0}_mRNA'.format(id))) </DeepExtract>
def main(): parser = argparse.ArgumentParser(description='Converts prodigal GFF to canonical GFF3') parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input file to be read') parser.add_argument('-o', '--output_file', type=str, required=True, help='Path to an output file to be created') args = parser.parse_args() ofh = open(args.output_file, 'wt') ofh.write('##gff-version 3\n') for line in open(args.input_file): if line.startswith('#'): m = re.search('gff-version', line) if not m: ofh.write(line) continue cols = line.split('\t') if len(cols) == 9: m = re.match('ID=(.+?);(.+)', cols[8]) (id, annotation) = m.groups() cols[2] = 'gene' ofh.write('\t'.join(cols[:-1]) + '\t{0}\n'.format('ID={0};{1}'.format(id, annotation))) cols[2] = 'mRNA' ofh.write('\t'.join(cols[:-1]) + '\t{0}\n'.format('ID={0}_mRNA;Parent={0}'.format(id))) cols[2] = 'CDS' ofh.write('\t'.join(cols[:-1]) + '\t{0}\n'.format('ID={0}_cds;Parent={0}_mRNA'.format(id))) cols[2] = 'exon' ofh.write('\t'.join(cols[:-1]) + '\t{0}\n'.format('ID={0}_exon;Parent={0}_mRNA'.format(id))) cols[2] = 'polypeptide' ofh.write('\t'.join(cols[:-1]) + '\t{0}\n'.format('ID={0}_polypeptide;Parent={0}_mRNA'.format(id)))
biocode
positive
@property def _shaft(self): span = XY(self.span_width, self.span_height) _dir = self.edge.direction <DeepExtract> renderer = noderenderer.get(self.edge.node1.shape) if hasattr(renderer, 'render'): node1 = renderer(self.edge.node1, self) else: node1 = self.cell(self.edge.node1) </DeepExtract> <DeepExtract> cell1 = self.spreadsheet.node(self.edge.node1, False) </DeepExtract> <DeepExtract> renderer = noderenderer.get(self.edge.node2.shape) if hasattr(renderer, 'render'): node2 = renderer(self.edge.node2, self) else: node2 = self.cell(self.edge.node2) </DeepExtract> <DeepExtract> cell2 = self.spreadsheet.node(self.edge.node2, False) </DeepExtract> shaft = EdgeLines() if _dir == 'right': shaft.moveTo(node1.right) if self.edge.skipped: shaft.lineTo(cell1.right.x + span.x // 2, cell1.right.y) shaft.lineTo(cell1.right.x + span.x // 2, cell1.bottomright.y + span.y // 2) shaft.lineTo(cell2.left.x - span.x // 4, cell2.bottomright.y + span.y // 2) shaft.lineTo(cell2.left.x - span.x // 4, cell2.left.y) shaft.lineTo(node2.left) elif _dir == 'right-up': shaft.moveTo(node1.right) if self.edge.skipped: shaft.lineTo(cell1.right.x + span.x // 2, cell1.right.y) shaft.lineTo(cell1.right.x + span.x // 2, cell2.bottomleft.y + span.y // 2) shaft.lineTo(cell2.left.x - span.x // 4, cell2.bottomleft.y + span.y // 2) shaft.lineTo(cell2.left.x - span.x // 4, cell2.left.y) else: shaft.lineTo(cell2.left.x - span.x // 4, cell1.right.y) shaft.lineTo(cell2.left.x - span.x // 4, cell2.left.y) shaft.lineTo(node2.left) elif _dir == 'right-down': shaft.moveTo(node1.right) shaft.lineTo(cell1.right.x + span.x // 2, cell1.right.y) if self.edge.skipped: shaft.lineTo(cell1.right.x + span.x // 2, cell2.topleft.y - span.y // 2) shaft.lineTo(cell2.left.x - span.x // 4, cell2.topleft.y - span.y // 2) shaft.lineTo(cell2.left.x - span.x // 4, cell2.left.y) else: shaft.lineTo(cell1.right.x + span.x // 2, cell2.left.y) shaft.lineTo(node2.left) elif _dir == 'up': if self.edge.skipped: shaft.moveTo(node1.right) shaft.lineTo(cell1.right.x + span.x // 4, cell1.right.y) shaft.lineTo(cell1.right.x + span.x // 4, cell2.bottom.y + span.y // 2) shaft.lineTo(cell2.bottom.x, cell2.bottom.y + span.y // 2) else: shaft.moveTo(node1.top) shaft.lineTo(node2.bottom) elif _dir in ('left-up', 'left', 'same'): shaft.moveTo(node1.right) shaft.lineTo(cell1.right.x + span.x // 4, cell1.right.y) shaft.lineTo(cell1.right.x + span.x // 4, cell2.top.y - span.y // 2 + span.y // 8) shaft.lineTo(cell2.top.x, cell2.top.y - span.y // 2 + span.y // 8) shaft.lineTo(node2.top) elif _dir == 'left-down': if self.edge.skipped: shaft.moveTo(node1.right) shaft.lineTo(cell1.right.x + span.x // 2, cell1.right.y) shaft.lineTo(cell1.right.x + span.x // 2, cell2.top.y - span.y // 2) shaft.lineTo(cell2.top.x, cell2.top.y - span.y // 2) else: shaft.moveTo(node1.bottom) shaft.lineTo(cell1.bottom.x, cell2.top.y - span.y // 2) shaft.lineTo(cell2.top.x, cell2.top.y - span.y // 2) shaft.lineTo(node2.top) elif _dir == 'down': if self.edge.skipped: shaft.moveTo(node1.right) shaft.lineTo(cell1.right.x + span.x // 2, cell1.right.y) shaft.lineTo(cell1.right.x + span.x // 2, cell2.top.y - span.y // 2 + span.y // 8) shaft.lineTo(cell2.top.x, cell2.top.y - span.y // 2 + span.y // 8) else: shaft.moveTo(node1.bottom) shaft.lineTo(node2.top) return shaft
@property def _shaft(self): span = XY(self.span_width, self.span_height) _dir = self.edge.direction renderer = noderenderer.get(self.edge.node1.shape) if hasattr(renderer, 'render'): node1 = renderer(self.edge.node1, self) else: node1 = self.cell(self.edge.node1) cell1 = self.spreadsheet.node(self.edge.node1, False) renderer = noderenderer.get(self.edge.node2.shape) if hasattr(renderer, 'render'): node2 = renderer(self.edge.node2, self) else: node2 = self.cell(self.edge.node2) cell2 = self.spreadsheet.node(self.edge.node2, False) shaft = EdgeLines() if _dir == 'right': shaft.moveTo(node1.right) if self.edge.skipped: shaft.lineTo(cell1.right.x + span.x // 2, cell1.right.y) shaft.lineTo(cell1.right.x + span.x // 2, cell1.bottomright.y + span.y // 2) shaft.lineTo(cell2.left.x - span.x // 4, cell2.bottomright.y + span.y // 2) shaft.lineTo(cell2.left.x - span.x // 4, cell2.left.y) shaft.lineTo(node2.left) elif _dir == 'right-up': shaft.moveTo(node1.right) if self.edge.skipped: shaft.lineTo(cell1.right.x + span.x // 2, cell1.right.y) shaft.lineTo(cell1.right.x + span.x // 2, cell2.bottomleft.y + span.y // 2) shaft.lineTo(cell2.left.x - span.x // 4, cell2.bottomleft.y + span.y // 2) shaft.lineTo(cell2.left.x - span.x // 4, cell2.left.y) else: shaft.lineTo(cell2.left.x - span.x // 4, cell1.right.y) shaft.lineTo(cell2.left.x - span.x // 4, cell2.left.y) shaft.lineTo(node2.left) elif _dir == 'right-down': shaft.moveTo(node1.right) shaft.lineTo(cell1.right.x + span.x // 2, cell1.right.y) if self.edge.skipped: shaft.lineTo(cell1.right.x + span.x // 2, cell2.topleft.y - span.y // 2) shaft.lineTo(cell2.left.x - span.x // 4, cell2.topleft.y - span.y // 2) shaft.lineTo(cell2.left.x - span.x // 4, cell2.left.y) else: shaft.lineTo(cell1.right.x + span.x // 2, cell2.left.y) shaft.lineTo(node2.left) elif _dir == 'up': if self.edge.skipped: shaft.moveTo(node1.right) shaft.lineTo(cell1.right.x + span.x // 4, cell1.right.y) shaft.lineTo(cell1.right.x + span.x // 4, cell2.bottom.y + span.y // 2) shaft.lineTo(cell2.bottom.x, cell2.bottom.y + span.y // 2) else: shaft.moveTo(node1.top) shaft.lineTo(node2.bottom) elif _dir in ('left-up', 'left', 'same'): shaft.moveTo(node1.right) shaft.lineTo(cell1.right.x + span.x // 4, cell1.right.y) shaft.lineTo(cell1.right.x + span.x // 4, cell2.top.y - span.y // 2 + span.y // 8) shaft.lineTo(cell2.top.x, cell2.top.y - span.y // 2 + span.y // 8) shaft.lineTo(node2.top) elif _dir == 'left-down': if self.edge.skipped: shaft.moveTo(node1.right) shaft.lineTo(cell1.right.x + span.x // 2, cell1.right.y) shaft.lineTo(cell1.right.x + span.x // 2, cell2.top.y - span.y // 2) shaft.lineTo(cell2.top.x, cell2.top.y - span.y // 2) else: shaft.moveTo(node1.bottom) shaft.lineTo(cell1.bottom.x, cell2.top.y - span.y // 2) shaft.lineTo(cell2.top.x, cell2.top.y - span.y // 2) shaft.lineTo(node2.top) elif _dir == 'down': if self.edge.skipped: shaft.moveTo(node1.right) shaft.lineTo(cell1.right.x + span.x // 2, cell1.right.y) shaft.lineTo(cell1.right.x + span.x // 2, cell2.top.y - span.y // 2 + span.y // 8) shaft.lineTo(cell2.top.x, cell2.top.y - span.y // 2 + span.y // 8) else: shaft.moveTo(node1.bottom) shaft.lineTo(node2.top) return shaft
blockdiag
positive
def projection_matrix(point, normal, direction=None, perspective=None, pseudo=False): """Return matrix to project onto plane defined by point and normal. Using either perspective point, projection direction, or none of both. If pseudo is True, perspective projections will preserve relative depth such that Perspective = dot(Orthogonal, PseudoPerspective). >>> P = projection_matrix([0, 0, 0], [1, 0, 0]) >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) True >>> point = numpy.random.random(3) - 0.5 >>> normal = numpy.random.random(3) - 0.5 >>> direct = numpy.random.random(3) - 0.5 >>> persp = numpy.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> P1 = projection_matrix(point, normal, direction=direct) >>> P2 = projection_matrix(point, normal, perspective=persp) >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> is_same_transform(P2, numpy.dot(P0, P3)) True >>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0]) >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20 >>> v0[3] = 1 >>> v1 = numpy.dot(P, v0) >>> numpy.allclose(v1[1], v0[1]) True >>> numpy.allclose(v1[0], 3-v1[1]) True """ M = numpy.identity(4) point = numpy.array(point[:3], dtype=numpy.float64, copy=False) <DeepExtract> if out is None: normal[:3] = numpy.array(normal[:3], dtype=numpy.float64, copy=True) if normal[:3].ndim == 1: normal[:3] /= math.sqrt(numpy.dot(normal[:3], normal[:3])) normal = normal[:3] else: if out is not normal[:3]: out[:] = numpy.array(normal[:3], copy=False) normal[:3] = out length = numpy.atleast_1d(numpy.sum(normal[:3] * normal[:3], axis)) numpy.sqrt(length, length) if axis is not None: length = numpy.expand_dims(length, axis) normal[:3] /= length if out is None: normal = normal[:3] </DeepExtract> if perspective is not None: perspective = numpy.array(perspective[:3], dtype=numpy.float64, copy=False) M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective - point, normal) M[:3, :3] -= numpy.outer(perspective, normal) if pseudo: M[:3, :3] -= numpy.outer(normal, normal) M[:3, 3] = numpy.dot(point, normal) * (perspective + normal) else: M[:3, 3] = numpy.dot(point, normal) * perspective M[3, :3] = -normal M[3, 3] = numpy.dot(perspective, normal) elif direction is not None: direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) scale = numpy.dot(direction, normal) M[:3, :3] -= numpy.outer(direction, normal) / scale M[:3, 3] = direction * (numpy.dot(point, normal) / scale) else: M[:3, :3] -= numpy.outer(normal, normal) M[:3, 3] = numpy.dot(point, normal) * normal return M
def projection_matrix(point, normal, direction=None, perspective=None, pseudo=False): """Return matrix to project onto plane defined by point and normal. Using either perspective point, projection direction, or none of both. If pseudo is True, perspective projections will preserve relative depth such that Perspective = dot(Orthogonal, PseudoPerspective). >>> P = projection_matrix([0, 0, 0], [1, 0, 0]) >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) True >>> point = numpy.random.random(3) - 0.5 >>> normal = numpy.random.random(3) - 0.5 >>> direct = numpy.random.random(3) - 0.5 >>> persp = numpy.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> P1 = projection_matrix(point, normal, direction=direct) >>> P2 = projection_matrix(point, normal, perspective=persp) >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> is_same_transform(P2, numpy.dot(P0, P3)) True >>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0]) >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20 >>> v0[3] = 1 >>> v1 = numpy.dot(P, v0) >>> numpy.allclose(v1[1], v0[1]) True >>> numpy.allclose(v1[0], 3-v1[1]) True """ M = numpy.identity(4) point = numpy.array(point[:3], dtype=numpy.float64, copy=False) if out is None: normal[:3] = numpy.array(normal[:3], dtype=numpy.float64, copy=True) if normal[:3].ndim == 1: normal[:3] /= math.sqrt(numpy.dot(normal[:3], normal[:3])) normal = normal[:3] else: if out is not normal[:3]: out[:] = numpy.array(normal[:3], copy=False) normal[:3] = out length = numpy.atleast_1d(numpy.sum(normal[:3] * normal[:3], axis)) numpy.sqrt(length, length) if axis is not None: length = numpy.expand_dims(length, axis) normal[:3] /= length if out is None: normal = normal[:3] if perspective is not None: perspective = numpy.array(perspective[:3], dtype=numpy.float64, copy=False) M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective - point, normal) M[:3, :3] -= numpy.outer(perspective, normal) if pseudo: M[:3, :3] -= numpy.outer(normal, normal) M[:3, 3] = numpy.dot(point, normal) * (perspective + normal) else: M[:3, 3] = numpy.dot(point, normal) * perspective M[3, :3] = -normal M[3, 3] = numpy.dot(perspective, normal) elif direction is not None: direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) scale = numpy.dot(direction, normal) M[:3, :3] -= numpy.outer(direction, normal) / scale M[:3, 3] = direction * (numpy.dot(point, normal) / scale) else: M[:3, :3] -= numpy.outer(normal, normal) M[:3, 3] = numpy.dot(point, normal) * normal return M
deformation_aware_embedding
positive
def test_confirmed_and_non_collaborative(self): <DeepExtract> self.project = prepare_project(DOCUMENT_CLASSIFICATION, collaborative_annotation=collaborative) self.example1 = mommy.make('ExportedExample', project=self.project.item, text='example1') self.example2 = mommy.make('ExportedExample', project=self.project.item, text='example2') self.category1 = mommy.make('ExportedCategory', example=self.example1, user=self.project.admin) self.category2 = mommy.make('ExportedCategory', example=self.example1, user=self.project.annotator) self.comment1 = mommy.make('ExportedComment', example=self.example1, user=self.project.admin) self.comment2 = mommy.make('ExportedComment', example=self.example1, user=self.project.annotator) mommy.make('ExampleState', example=self.example1, confirmed_by=self.project.admin) self.data1 = self.data_to_text(self.example1) self.data2 = self.data_to_text(self.example2) </DeepExtract> <DeepExtract> file = export_dataset(self.project.id, 'JSONL', True) if self.project.item.collaborative_annotation: dataset = pd.read_json(file, lines=True).to_dict(orient='records') else: dataset = read_zip_content(file) os.remove(file) datasets = dataset </DeepExtract> expected_datasets = {self.project.admin.username: [{**self.data1, 'label': [self.category1.to_string()], 'Comments': [self.comment1.to_string()]}]} for (username, dataset) in expected_datasets.items(): self.assertEqual(datasets[username], dataset)
def test_confirmed_and_non_collaborative(self): self.project = prepare_project(DOCUMENT_CLASSIFICATION, collaborative_annotation=collaborative) self.example1 = mommy.make('ExportedExample', project=self.project.item, text='example1') self.example2 = mommy.make('ExportedExample', project=self.project.item, text='example2') self.category1 = mommy.make('ExportedCategory', example=self.example1, user=self.project.admin) self.category2 = mommy.make('ExportedCategory', example=self.example1, user=self.project.annotator) self.comment1 = mommy.make('ExportedComment', example=self.example1, user=self.project.admin) self.comment2 = mommy.make('ExportedComment', example=self.example1, user=self.project.annotator) mommy.make('ExampleState', example=self.example1, confirmed_by=self.project.admin) self.data1 = self.data_to_text(self.example1) self.data2 = self.data_to_text(self.example2) file = export_dataset(self.project.id, 'JSONL', True) if self.project.item.collaborative_annotation: dataset = pd.read_json(file, lines=True).to_dict(orient='records') else: dataset = read_zip_content(file) os.remove(file) datasets = dataset expected_datasets = {self.project.admin.username: [{**self.data1, 'label': [self.category1.to_string()], 'Comments': [self.comment1.to_string()]}]} for (username, dataset) in expected_datasets.items(): self.assertEqual(datasets[username], dataset)
doccano
positive
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, norm_layer=nn.BatchNorm2d): super(ResNet, self).__init__() self.inplanes = 64 self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = norm_layer(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) <DeepExtract> downsample = None if stride != 1 or self.inplanes != 64 * block.expansion: downsample = nn.Sequential(conv1x1(self.inplanes, 64 * block.expansion, stride), norm_layer(64 * block.expansion)) layers = [] layers.append(block(self.inplanes, 64, stride, downsample)) self.inplanes = 64 * block.expansion for _ in range(1, layers[0]): layers.append(block(self.inplanes, 64)) self.layer1 = nn.Sequential(*layers) </DeepExtract> <DeepExtract> downsample = None if 2 != 1 or self.inplanes != 128 * block.expansion: downsample = nn.Sequential(conv1x1(self.inplanes, 128 * block.expansion, 2), norm_layer(128 * block.expansion)) layers = [] layers.append(block(self.inplanes, 128, 2, downsample)) self.inplanes = 128 * block.expansion for _ in range(1, layers[1]): layers.append(block(self.inplanes, 128)) self.layer2 = nn.Sequential(*layers) </DeepExtract> <DeepExtract> downsample = None if 2 != 1 or self.inplanes != 256 * block.expansion: downsample = nn.Sequential(conv1x1(self.inplanes, 256 * block.expansion, 2), norm_layer(256 * block.expansion)) layers = [] layers.append(block(self.inplanes, 256, 2, downsample)) self.inplanes = 256 * block.expansion for _ in range(1, layers[2]): layers.append(block(self.inplanes, 256)) self.layer3 = nn.Sequential(*layers) </DeepExtract> <DeepExtract> downsample = None if 2 != 1 or self.inplanes != 512 * block.expansion: downsample = nn.Sequential(conv1x1(self.inplanes, 512 * block.expansion, 2), norm_layer(512 * block.expansion)) layers = [] layers.append(block(self.inplanes, 512, 2, downsample)) self.inplanes = 512 * block.expansion for _ in range(1, layers[3]): layers.append(block(self.inplanes, 512)) self.layer4 = nn.Sequential(*layers) </DeepExtract> self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) if zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): nn.init.constant_(m.bn3.weight, 0) elif isinstance(m, BasicBlock): nn.init.constant_(m.bn2.weight, 0)
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, norm_layer=nn.BatchNorm2d): super(ResNet, self).__init__() self.inplanes = 64 self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = norm_layer(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) downsample = None if stride != 1 or self.inplanes != 64 * block.expansion: downsample = nn.Sequential(conv1x1(self.inplanes, 64 * block.expansion, stride), norm_layer(64 * block.expansion)) layers = [] layers.append(block(self.inplanes, 64, stride, downsample)) self.inplanes = 64 * block.expansion for _ in range(1, layers[0]): layers.append(block(self.inplanes, 64)) self.layer1 = nn.Sequential(*layers) downsample = None if 2 != 1 or self.inplanes != 128 * block.expansion: downsample = nn.Sequential(conv1x1(self.inplanes, 128 * block.expansion, 2), norm_layer(128 * block.expansion)) layers = [] layers.append(block(self.inplanes, 128, 2, downsample)) self.inplanes = 128 * block.expansion for _ in range(1, layers[1]): layers.append(block(self.inplanes, 128)) self.layer2 = nn.Sequential(*layers) downsample = None if 2 != 1 or self.inplanes != 256 * block.expansion: downsample = nn.Sequential(conv1x1(self.inplanes, 256 * block.expansion, 2), norm_layer(256 * block.expansion)) layers = [] layers.append(block(self.inplanes, 256, 2, downsample)) self.inplanes = 256 * block.expansion for _ in range(1, layers[2]): layers.append(block(self.inplanes, 256)) self.layer3 = nn.Sequential(*layers) downsample = None if 2 != 1 or self.inplanes != 512 * block.expansion: downsample = nn.Sequential(conv1x1(self.inplanes, 512 * block.expansion, 2), norm_layer(512 * block.expansion)) layers = [] layers.append(block(self.inplanes, 512, 2, downsample)) self.inplanes = 512 * block.expansion for _ in range(1, layers[3]): layers.append(block(self.inplanes, 512)) self.layer4 = nn.Sequential(*layers) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) if zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): nn.init.constant_(m.bn3.weight, 0) elif isinstance(m, BasicBlock): nn.init.constant_(m.bn2.weight, 0)
awesome-semantic-segmentation-pytorch
positive
def get_historical(self, asset, quote, timestamp, no_cache=False): name = None for data_source in self.data_source_priority(asset): <DeepExtract> if data_source.upper() in self.data_sources: if asset in self.data_sources[data_source.upper()].assets: date = timestamp.strftime('%Y-%m-%d') pair = asset + '/' + quote if not no_cache: if pair in self.data_sources[data_source.upper()].prices and date in self.data_sources[data_source.upper()].prices[pair]: (price, name, url) = (self.data_sources[data_source.upper()].prices[pair][date]['price'], self.data_sources[data_source.upper()].assets[asset]['name'], self.data_sources[data_source.upper()].prices[pair][date]['url']) self.data_sources[data_source.upper()].get_historical(asset, quote, timestamp) if pair in self.data_sources[data_source.upper()].prices and date in self.data_sources[data_source.upper()].prices[pair]: (price, name, url) = (self.data_sources[data_source.upper()].prices[pair][date]['price'], self.data_sources[data_source.upper()].assets[asset]['name'], self.data_sources[data_source.upper()].prices[pair][date]['url']) (price, name, url) = (None, self.data_sources[data_source.upper()].assets[asset]['name'], None) (price, name, url) = (None, None, None) raise UnexpectedDataSourceError(data_source, DataSourceBase) </DeepExtract> if price is not None: if config.debug: print('%sprice: %s, 1 %s=%s %s via %s (%s)' % (Fore.YELLOW, timestamp.strftime('%Y-%m-%d'), asset, '{:0,f}'.format(price.normalize()), quote, self.data_sources[data_source.upper()].name(), name)) if self.price_tool: print('%s1 %s=%s %s %svia %s (%s)' % (Fore.YELLOW, asset, '{:0,f}'.format(price.normalize()), quote, Fore.CYAN, self.data_sources[data_source.upper()].name(), name)) return (price, name, self.data_sources[data_source.upper()].name(), url) return (None, name, None, None)
def get_historical(self, asset, quote, timestamp, no_cache=False): name = None for data_source in self.data_source_priority(asset): if data_source.upper() in self.data_sources: if asset in self.data_sources[data_source.upper()].assets: date = timestamp.strftime('%Y-%m-%d') pair = asset + '/' + quote if not no_cache: if pair in self.data_sources[data_source.upper()].prices and date in self.data_sources[data_source.upper()].prices[pair]: (price, name, url) = (self.data_sources[data_source.upper()].prices[pair][date]['price'], self.data_sources[data_source.upper()].assets[asset]['name'], self.data_sources[data_source.upper()].prices[pair][date]['url']) self.data_sources[data_source.upper()].get_historical(asset, quote, timestamp) if pair in self.data_sources[data_source.upper()].prices and date in self.data_sources[data_source.upper()].prices[pair]: (price, name, url) = (self.data_sources[data_source.upper()].prices[pair][date]['price'], self.data_sources[data_source.upper()].assets[asset]['name'], self.data_sources[data_source.upper()].prices[pair][date]['url']) (price, name, url) = (None, self.data_sources[data_source.upper()].assets[asset]['name'], None) (price, name, url) = (None, None, None) raise UnexpectedDataSourceError(data_source, DataSourceBase) if price is not None: if config.debug: print('%sprice: %s, 1 %s=%s %s via %s (%s)' % (Fore.YELLOW, timestamp.strftime('%Y-%m-%d'), asset, '{:0,f}'.format(price.normalize()), quote, self.data_sources[data_source.upper()].name(), name)) if self.price_tool: print('%s1 %s=%s %s %svia %s (%s)' % (Fore.YELLOW, asset, '{:0,f}'.format(price.normalize()), quote, Fore.CYAN, self.data_sources[data_source.upper()].name(), name)) return (price, name, self.data_sources[data_source.upper()].name(), url) return (None, name, None, None)
BittyTax
positive
def migrate_state(self, user_model, entity_slug: str, itemtxs_qs: QuerySet=None, force_migrate: bool=False, commit: bool=True, void: bool=False, je_date: Union[str, date, datetime]=None, verify_journal_entries: bool=True, raise_exception: bool=True, **kwargs): if self.can_migrate() or force_migrate: (txs_qs, txs_digest) = self.ledger.digest(user_model=user_model, entity_slug=entity_slug, process_groups=True, process_roles=False, process_ratios=False, signs=False, by_unit=True) digest_data = txs_digest['tx_digest']['accounts'] current_ledger_state = {(a['account_uuid'], a['unit_uuid'], a['balance_type']): a['balance'] for a in digest_data} item_data = list(self.get_migration_data(queryset=itemtxs_qs)) cogs_adjustment = defaultdict(lambda : Decimal('0.00')) inventory_adjustment = defaultdict(lambda : Decimal('0.00')) <DeepExtract> if self.accrue: progress = self.progress if not self.amount_due: progress = 0 progress = (self.amount_paid or 0) / self.amount_due </DeepExtract> if isinstance(self, lazy_loader.get_bill_model()): for item in item_data: account_uuid_expense = item.get('item_model__expense_account__uuid') account_uuid_inventory = item.get('item_model__inventory_account__uuid') if account_uuid_expense: item['account_uuid'] = account_uuid_expense item['account_balance_type'] = item.get('item_model__expense_account__balance_type') elif account_uuid_inventory: item['account_uuid'] = account_uuid_inventory item['account_balance_type'] = item.get('item_model__inventory_account__balance_type') elif isinstance(self, lazy_loader.get_invoice_model()): for item in item_data: account_uuid_earnings = item.get('item_model__earnings_account__uuid') account_uuid_cogs = item.get('item_model__cogs_account__uuid') account_uuid_inventory = item.get('item_model__inventory_account__uuid') if account_uuid_earnings: item['account_uuid'] = account_uuid_earnings item['account_balance_type'] = item.get('item_model__earnings_account__balance_type') if account_uuid_cogs and account_uuid_inventory: try: irq = item.get('item_model__inventory_received') irv = item.get('item_model__inventory_received_value') tot_amt = 0 if irq is not None and irv is not None and (irq != 0): qty = item.get('quantity', Decimal('0.00')) if not isinstance(qty, Decimal): qty = Decimal.from_float(qty) cogs_unit_cost = irv / irq tot_amt = round(cogs_unit_cost * qty, 2) except ZeroDivisionError: tot_amt = 0 if tot_amt != 0: cogs_adjustment[account_uuid_cogs, item.get('entity_unit__uuid'), item.get('item_model__cogs_account__balance_type')] += tot_amt * progress inventory_adjustment[account_uuid_inventory, item.get('entity_unit__uuid'), item.get('item_model__inventory_account__balance_type')] -= tot_amt * progress item_data_gb = groupby(item_data, key=lambda a: (a['account_uuid'], a['entity_unit__uuid'], a['account_balance_type'])) progress_item_idx = {idx: round(sum((a['account_unit_total'] for a in ad)) * progress, 2) for (idx, ad) in item_data_gb} ua_gen = list(((k[1], v) for (k, v) in progress_item_idx.items())) ua_gen.sort(key=lambda a: str(a[0]) if a[0] else '') unit_amounts = {u: sum((a[1] for a in l)) for (u, l) in groupby(ua_gen, key=lambda x: x[0])} total_amount = sum(unit_amounts.values()) unit_percents = {k: v / total_amount if progress and total_amount else Decimal('0.00') for (k, v) in unit_amounts.items()} if not void: <DeepExtract> new_state = {'amount_paid': self.get_amount_cash(), 'amount_receivable': self.get_amount_prepaid(), 'amount_unearned': self.get_amount_unearned(), 'amount_earned': self.get_amount_earned()} if 
commit: self.update_state(new_state) new_state = new_state </DeepExtract> else: <DeepExtract> void_state = {'amount_paid': Decimal.from_float(0.0), 'amount_receivable': Decimal.from_float(0.0), 'amount_unearned': Decimal.from_float(0.0), 'amount_earned': Decimal.from_float(0.0)} if commit: self.update_state(void_state) new_state = void_state </DeepExtract> <DeepExtract> running_alloc = 0 SPLIT_LEN = len(unit_percents) - 1 split_results = dict() for (i, (u, p)) in enumerate(unit_percents.items()): if i == SPLIT_LEN: split_results[self.cash_account_id, u, 'debit'] = new_state['amount_paid'] - running_alloc else: alloc = round(p * new_state['amount_paid'], 2) split_results[self.cash_account_id, u, 'debit'] = alloc running_alloc += alloc amount_paid_split = split_results </DeepExtract> <DeepExtract> running_alloc = 0 SPLIT_LEN = len(unit_percents) - 1 split_results = dict() for (i, (u, p)) in enumerate(unit_percents.items()): if i == SPLIT_LEN: split_results[self.prepaid_account_id, u, 'debit'] = new_state['amount_receivable'] - running_alloc else: alloc = round(p * new_state['amount_receivable'], 2) split_results[self.prepaid_account_id, u, 'debit'] = alloc running_alloc += alloc amount_prepaid_split = split_results </DeepExtract> <DeepExtract> running_alloc = 0 SPLIT_LEN = len(unit_percents) - 1 split_results = dict() for (i, (u, p)) in enumerate(unit_percents.items()): if i == SPLIT_LEN: split_results[self.unearned_account_id, u, 'credit'] = new_state['amount_unearned'] - running_alloc else: alloc = round(p * new_state['amount_unearned'], 2) split_results[self.unearned_account_id, u, 'credit'] = alloc running_alloc += alloc amount_unearned_split = split_results </DeepExtract> new_ledger_state = dict() new_ledger_state.update(amount_paid_split) new_ledger_state.update(amount_prepaid_split) new_ledger_state.update(amount_unearned_split) if inventory_adjustment and cogs_adjustment: new_ledger_state.update(cogs_adjustment) new_ledger_state.update(inventory_adjustment) new_ledger_state.update(progress_item_idx) idx_keys = set(list(current_ledger_state) + list(new_ledger_state)) diff_idx = {k: new_ledger_state.get(k, Decimal('0.00')) - current_ledger_state.get(k, Decimal('0.00')) for k in idx_keys} diff_idx = {k: v for (k, v) in diff_idx.items() if v} if commit: JournalEntryModel = lazy_loader.get_journal_entry_model() TransactionModel = lazy_loader.get_transaction_model() unit_uuids = list(set((k[1] for k in idx_keys))) if je_date: je_date = validate_io_date(dt=je_date) now_timestamp = localtime() if not je_date else je_date je_list = {u: JournalEntryModel(entity_unit_id=u, timestamp=now_timestamp, description=self.get_migrate_state_desc(), origin='migration', ledger_id=self.ledger_id) for u in unit_uuids} for (u, je) in je_list.items(): je.clean(verify=False) txs_list = [(unit_uuid, TransactionModel(journal_entry=je_list.get(unit_uuid), amount=abs(round(amt, 2)), tx_type=self.get_tx_type(acc_bal_type=bal_type, adjustment_amount=amt), account_id=acc_uuid, description=self.get_migrate_state_desc())) for ((acc_uuid, unit_uuid, bal_type), amt) in diff_idx.items() if amt] for (unit_uuid, tx) in txs_list: tx.clean() for uid in unit_uuids: balance_tx_data(tx_data=[tx for (ui, tx) in txs_list if uid == ui], perform_correction=True) txs = [tx for (ui, tx) in txs_list] balance_tx_data(tx_data=txs, perform_correction=True) TransactionModel.objects.bulk_create(txs) if verify_journal_entries: for (_, je) in je_list.items(): je.clean(verify=True) if je.is_verified(): je.mark_as_posted(commit=False, 
verify=False, raise_exception=True) je.mark_as_locked(commit=False, raise_exception=True) if all([je.is_verified() for (_, je) in je_list.items()]): JournalEntryModel.objects.bulk_update(objs=[je for (_, je) in je_list.items()], fields=['posted', 'locked', 'activity']) return (item_data, digest_data) elif raise_exception: raise ValidationError(f'{self.REL_NAME_PREFIX.upper()} state migration not allowed')
def migrate_state(self, user_model, entity_slug: str, itemtxs_qs: QuerySet=None, force_migrate: bool=False, commit: bool=True, void: bool=False, je_date: Union[str, date, datetime]=None, verify_journal_entries: bool=True, raise_exception: bool=True, **kwargs): if self.can_migrate() or force_migrate: (txs_qs, txs_digest) = self.ledger.digest(user_model=user_model, entity_slug=entity_slug, process_groups=True, process_roles=False, process_ratios=False, signs=False, by_unit=True) digest_data = txs_digest['tx_digest']['accounts'] current_ledger_state = {(a['account_uuid'], a['unit_uuid'], a['balance_type']): a['balance'] for a in digest_data} item_data = list(self.get_migration_data(queryset=itemtxs_qs)) cogs_adjustment = defaultdict(lambda : Decimal('0.00')) inventory_adjustment = defaultdict(lambda : Decimal('0.00')) if self.accrue: progress = self.progress if not self.amount_due: progress = 0 progress = (self.amount_paid or 0) / self.amount_due if isinstance(self, lazy_loader.get_bill_model()): for item in item_data: account_uuid_expense = item.get('item_model__expense_account__uuid') account_uuid_inventory = item.get('item_model__inventory_account__uuid') if account_uuid_expense: item['account_uuid'] = account_uuid_expense item['account_balance_type'] = item.get('item_model__expense_account__balance_type') elif account_uuid_inventory: item['account_uuid'] = account_uuid_inventory item['account_balance_type'] = item.get('item_model__inventory_account__balance_type') elif isinstance(self, lazy_loader.get_invoice_model()): for item in item_data: account_uuid_earnings = item.get('item_model__earnings_account__uuid') account_uuid_cogs = item.get('item_model__cogs_account__uuid') account_uuid_inventory = item.get('item_model__inventory_account__uuid') if account_uuid_earnings: item['account_uuid'] = account_uuid_earnings item['account_balance_type'] = item.get('item_model__earnings_account__balance_type') if account_uuid_cogs and account_uuid_inventory: try: irq = item.get('item_model__inventory_received') irv = item.get('item_model__inventory_received_value') tot_amt = 0 if irq is not None and irv is not None and (irq != 0): qty = item.get('quantity', Decimal('0.00')) if not isinstance(qty, Decimal): qty = Decimal.from_float(qty) cogs_unit_cost = irv / irq tot_amt = round(cogs_unit_cost * qty, 2) except ZeroDivisionError: tot_amt = 0 if tot_amt != 0: cogs_adjustment[account_uuid_cogs, item.get('entity_unit__uuid'), item.get('item_model__cogs_account__balance_type')] += tot_amt * progress inventory_adjustment[account_uuid_inventory, item.get('entity_unit__uuid'), item.get('item_model__inventory_account__balance_type')] -= tot_amt * progress item_data_gb = groupby(item_data, key=lambda a: (a['account_uuid'], a['entity_unit__uuid'], a['account_balance_type'])) progress_item_idx = {idx: round(sum((a['account_unit_total'] for a in ad)) * progress, 2) for (idx, ad) in item_data_gb} ua_gen = list(((k[1], v) for (k, v) in progress_item_idx.items())) ua_gen.sort(key=lambda a: str(a[0]) if a[0] else '') unit_amounts = {u: sum((a[1] for a in l)) for (u, l) in groupby(ua_gen, key=lambda x: x[0])} total_amount = sum(unit_amounts.values()) unit_percents = {k: v / total_amount if progress and total_amount else Decimal('0.00') for (k, v) in unit_amounts.items()} if not void: new_state = {'amount_paid': self.get_amount_cash(), 'amount_receivable': self.get_amount_prepaid(), 'amount_unearned': self.get_amount_unearned(), 'amount_earned': self.get_amount_earned()} if commit: self.update_state(new_state) new_state 
= new_state else: void_state = {'amount_paid': Decimal.from_float(0.0), 'amount_receivable': Decimal.from_float(0.0), 'amount_unearned': Decimal.from_float(0.0), 'amount_earned': Decimal.from_float(0.0)} if commit: self.update_state(void_state) new_state = void_state running_alloc = 0 SPLIT_LEN = len(unit_percents) - 1 split_results = dict() for (i, (u, p)) in enumerate(unit_percents.items()): if i == SPLIT_LEN: split_results[self.cash_account_id, u, 'debit'] = new_state['amount_paid'] - running_alloc else: alloc = round(p * new_state['amount_paid'], 2) split_results[self.cash_account_id, u, 'debit'] = alloc running_alloc += alloc amount_paid_split = split_results running_alloc = 0 SPLIT_LEN = len(unit_percents) - 1 split_results = dict() for (i, (u, p)) in enumerate(unit_percents.items()): if i == SPLIT_LEN: split_results[self.prepaid_account_id, u, 'debit'] = new_state['amount_receivable'] - running_alloc else: alloc = round(p * new_state['amount_receivable'], 2) split_results[self.prepaid_account_id, u, 'debit'] = alloc running_alloc += alloc amount_prepaid_split = split_results running_alloc = 0 SPLIT_LEN = len(unit_percents) - 1 split_results = dict() for (i, (u, p)) in enumerate(unit_percents.items()): if i == SPLIT_LEN: split_results[self.unearned_account_id, u, 'credit'] = new_state['amount_unearned'] - running_alloc else: alloc = round(p * new_state['amount_unearned'], 2) split_results[self.unearned_account_id, u, 'credit'] = alloc running_alloc += alloc amount_unearned_split = split_results new_ledger_state = dict() new_ledger_state.update(amount_paid_split) new_ledger_state.update(amount_prepaid_split) new_ledger_state.update(amount_unearned_split) if inventory_adjustment and cogs_adjustment: new_ledger_state.update(cogs_adjustment) new_ledger_state.update(inventory_adjustment) new_ledger_state.update(progress_item_idx) idx_keys = set(list(current_ledger_state) + list(new_ledger_state)) diff_idx = {k: new_ledger_state.get(k, Decimal('0.00')) - current_ledger_state.get(k, Decimal('0.00')) for k in idx_keys} diff_idx = {k: v for (k, v) in diff_idx.items() if v} if commit: JournalEntryModel = lazy_loader.get_journal_entry_model() TransactionModel = lazy_loader.get_transaction_model() unit_uuids = list(set((k[1] for k in idx_keys))) if je_date: je_date = validate_io_date(dt=je_date) now_timestamp = localtime() if not je_date else je_date je_list = {u: JournalEntryModel(entity_unit_id=u, timestamp=now_timestamp, description=self.get_migrate_state_desc(), origin='migration', ledger_id=self.ledger_id) for u in unit_uuids} for (u, je) in je_list.items(): je.clean(verify=False) txs_list = [(unit_uuid, TransactionModel(journal_entry=je_list.get(unit_uuid), amount=abs(round(amt, 2)), tx_type=self.get_tx_type(acc_bal_type=bal_type, adjustment_amount=amt), account_id=acc_uuid, description=self.get_migrate_state_desc())) for ((acc_uuid, unit_uuid, bal_type), amt) in diff_idx.items() if amt] for (unit_uuid, tx) in txs_list: tx.clean() for uid in unit_uuids: balance_tx_data(tx_data=[tx for (ui, tx) in txs_list if uid == ui], perform_correction=True) txs = [tx for (ui, tx) in txs_list] balance_tx_data(tx_data=txs, perform_correction=True) TransactionModel.objects.bulk_create(txs) if verify_journal_entries: for (_, je) in je_list.items(): je.clean(verify=True) if je.is_verified(): je.mark_as_posted(commit=False, verify=False, raise_exception=True) je.mark_as_locked(commit=False, raise_exception=True) if all([je.is_verified() for (_, je) in je_list.items()]): 
JournalEntryModel.objects.bulk_update(objs=[je for (_, je) in je_list.items()], fields=['posted', 'locked', 'activity']) return (item_data, digest_data) elif raise_exception: raise ValidationError(f'{self.REL_NAME_PREFIX.upper()} state migration not allowed')
django-ledger
positive
def __init__(self, optimizer, swa_start=None, swa_freq=None, swa_lr=None): """Implements Stochastic Weight Averaging (SWA). Stochastic Weight Averaging was proposed in `Averaging Weights Leads to Wider Optima and Better Generalization`_ by Pavel Izmailov, Dmitrii Podoprikhin, Timur Garipov, Dmitry Vetrov and Andrew Gordon Wilson (UAI 2018). SWA is implemented as a wrapper class taking optimizer instance as input and applying SWA on top of that optimizer. SWA can be used in two modes: automatic and manual. In the automatic mode SWA running averages are automatically updated every :attr:`swa_freq` steps after :attr:`swa_start` steps of optimization. If :attr:`swa_lr` is provided, the learning rate of the optimizer is reset to :attr:`swa_lr` at every step starting from :attr:`swa_start`. To use SWA in automatic mode provide values for both :attr:`swa_start` and :attr:`swa_freq` arguments. Alternatively, in the manual mode, use :meth:`update_swa` or :meth:`update_swa_group` methods to update the SWA running averages. In the end of training use `swap_swa_sgd` method to set the optimized variables to the computed averages. Args: optimizer (torch.optim.Optimizer): optimizer to use with SWA swa_start (int): number of steps before starting to apply SWA in automatic mode; if None, manual mode is selected (default: None) swa_freq (int): number of steps between subsequent updates of SWA running averages in automatic mode; if None, manual mode is selected (default: None) swa_lr (float): learning rate to use starting from step swa_start in automatic mode; if None, learning rate is not changed (default: None) Examples: >>> # automatic mode >>> base_opt = torch.optim.SGD(model.parameters(), lr=0.1) >>> opt = torchcontrib.optim.SWA( >>> base_opt, swa_start=10, swa_freq=5, swa_lr=0.05) >>> for _ in range(100): >>> opt.zero_grad() >>> loss_fn(model(input), target).backward() >>> opt.step() >>> opt.swap_swa_sgd() >>> # manual mode >>> opt = torchcontrib.optim.SWA(base_opt) >>> for i in range(100): >>> opt.zero_grad() >>> loss_fn(model(input), target).backward() >>> opt.step() >>> if i > 10 and i % 5 == 0: >>> opt.update_swa() >>> opt.swap_swa_sgd() .. note:: SWA does not support parameter-specific values of :attr:`swa_start`, :attr:`swa_freq` or :attr:`swa_lr`. In automatic mode SWA uses the same :attr:`swa_start`, :attr:`swa_freq` and :attr:`swa_lr` for all parameter groups. If needed, use manual mode with :meth:`update_swa_group` to use different update schedules for different parameter groups. .. note:: Call :meth:`swap_swa_sgd` in the end of training to use the computed running averages. .. note:: If you are using SWA to optimize the parameters of a Neural Network containing Batch Normalization layers, you need to update the :attr:`running_mean` and :attr:`running_var` statistics of the Batch Normalization module. You can do so by using `torchcontrib.optim.swa.bn_update` utility. .. note:: See the blogpost https://pytorch.org/blog/stochastic-weight-averaging-in-pytorch/ for an extended description of this SWA implementation. .. note:: The repo https://github.com/izmailovpavel/contrib_swa_examples contains examples of using this SWA implementation. .. _Averaging Weights Leads to Wider Optima and Better Generalization: https://arxiv.org/abs/1803.05407 .. 
_Improving Consistency-Based Semi-Supervised Learning with Weight Averaging: https://arxiv.org/abs/1806.05594 """ <DeepExtract> params = [self, swa_start] params_none = [param is None for param in params] if not all(params_none) and any(params_none): warnings.warn('Some of swa_start, swa_freq is None, ignoring other') for (i, param) in enumerate(params): if param is not None and (not isinstance(param, int)): params[i] = int(param) warnings.warn('Casting swa_start, swa_freq to int') (self._auto_mode, (self.swa_start, self.swa_freq)) = (not any(params_none), params) </DeepExtract> self.swa_lr = swa_lr if self._auto_mode: if swa_start < 0: raise ValueError('Invalid swa_start: {}'.format(swa_start)) if swa_freq < 1: raise ValueError('Invalid swa_freq: {}'.format(swa_freq)) else: if self.swa_lr is not None: warnings.warn('Some of swa_start, swa_freq is None, ignoring swa_lr') self.swa_lr = None self.swa_start = None self.swa_freq = None if self.swa_lr is not None and self.swa_lr < 0: raise ValueError('Invalid SWA learning rate: {}'.format(swa_lr)) self.optimizer = optimizer self.defaults = self.optimizer.defaults self.param_groups = self.optimizer.param_groups self.state = defaultdict(dict) self.opt_state = self.optimizer.state for group in self.param_groups: group['n_avg'] = 0 group['step_counter'] = 0
def __init__(self, optimizer, swa_start=None, swa_freq=None, swa_lr=None): """Implements Stochastic Weight Averaging (SWA). Stochastic Weight Averaging was proposed in `Averaging Weights Leads to Wider Optima and Better Generalization`_ by Pavel Izmailov, Dmitrii Podoprikhin, Timur Garipov, Dmitry Vetrov and Andrew Gordon Wilson (UAI 2018). SWA is implemented as a wrapper class taking optimizer instance as input and applying SWA on top of that optimizer. SWA can be used in two modes: automatic and manual. In the automatic mode SWA running averages are automatically updated every :attr:`swa_freq` steps after :attr:`swa_start` steps of optimization. If :attr:`swa_lr` is provided, the learning rate of the optimizer is reset to :attr:`swa_lr` at every step starting from :attr:`swa_start`. To use SWA in automatic mode provide values for both :attr:`swa_start` and :attr:`swa_freq` arguments. Alternatively, in the manual mode, use :meth:`update_swa` or :meth:`update_swa_group` methods to update the SWA running averages. In the end of training use `swap_swa_sgd` method to set the optimized variables to the computed averages. Args: optimizer (torch.optim.Optimizer): optimizer to use with SWA swa_start (int): number of steps before starting to apply SWA in automatic mode; if None, manual mode is selected (default: None) swa_freq (int): number of steps between subsequent updates of SWA running averages in automatic mode; if None, manual mode is selected (default: None) swa_lr (float): learning rate to use starting from step swa_start in automatic mode; if None, learning rate is not changed (default: None) Examples: >>> # automatic mode >>> base_opt = torch.optim.SGD(model.parameters(), lr=0.1) >>> opt = torchcontrib.optim.SWA( >>> base_opt, swa_start=10, swa_freq=5, swa_lr=0.05) >>> for _ in range(100): >>> opt.zero_grad() >>> loss_fn(model(input), target).backward() >>> opt.step() >>> opt.swap_swa_sgd() >>> # manual mode >>> opt = torchcontrib.optim.SWA(base_opt) >>> for i in range(100): >>> opt.zero_grad() >>> loss_fn(model(input), target).backward() >>> opt.step() >>> if i > 10 and i % 5 == 0: >>> opt.update_swa() >>> opt.swap_swa_sgd() .. note:: SWA does not support parameter-specific values of :attr:`swa_start`, :attr:`swa_freq` or :attr:`swa_lr`. In automatic mode SWA uses the same :attr:`swa_start`, :attr:`swa_freq` and :attr:`swa_lr` for all parameter groups. If needed, use manual mode with :meth:`update_swa_group` to use different update schedules for different parameter groups. .. note:: Call :meth:`swap_swa_sgd` in the end of training to use the computed running averages. .. note:: If you are using SWA to optimize the parameters of a Neural Network containing Batch Normalization layers, you need to update the :attr:`running_mean` and :attr:`running_var` statistics of the Batch Normalization module. You can do so by using `torchcontrib.optim.swa.bn_update` utility. .. note:: See the blogpost https://pytorch.org/blog/stochastic-weight-averaging-in-pytorch/ for an extended description of this SWA implementation. .. note:: The repo https://github.com/izmailovpavel/contrib_swa_examples contains examples of using this SWA implementation. .. _Averaging Weights Leads to Wider Optima and Better Generalization: https://arxiv.org/abs/1803.05407 .. 
_Improving Consistency-Based Semi-Supervised Learning with Weight Averaging: https://arxiv.org/abs/1806.05594 """ params = [self, swa_start] params_none = [param is None for param in params] if not all(params_none) and any(params_none): warnings.warn('Some of swa_start, swa_freq is None, ignoring other') for (i, param) in enumerate(params): if param is not None and (not isinstance(param, int)): params[i] = int(param) warnings.warn('Casting swa_start, swa_freq to int') (self._auto_mode, (self.swa_start, self.swa_freq)) = (not any(params_none), params) self.swa_lr = swa_lr if self._auto_mode: if swa_start < 0: raise ValueError('Invalid swa_start: {}'.format(swa_start)) if swa_freq < 1: raise ValueError('Invalid swa_freq: {}'.format(swa_freq)) else: if self.swa_lr is not None: warnings.warn('Some of swa_start, swa_freq is None, ignoring swa_lr') self.swa_lr = None self.swa_start = None self.swa_freq = None if self.swa_lr is not None and self.swa_lr < 0: raise ValueError('Invalid SWA learning rate: {}'.format(swa_lr)) self.optimizer = optimizer self.defaults = self.optimizer.defaults self.param_groups = self.optimizer.param_groups self.state = defaultdict(dict) self.opt_state = self.optimizer.state for group in self.param_groups: group['n_avg'] = 0 group['step_counter'] = 0
BERT-SDA
positive
def _set_ellipse(self, instance, value): super(CircularRippleBehavior, self)._set_ellipse(instance, value) if self.ellipse.size[0] > self.width * 0.6 and (not self.fading_out): <DeepExtract> rc = self.ripple_color if not self.fading_out: Animation.cancel_all(self, 'ripple_color') anim = Animation(ripple_color=[rc[0], rc[1], rc[2], 0.0], t=self.ripple_func_out, duration=self.ripple_duration_out) anim.bind(on_complete=self.anim_complete) self.fading_out = True anim.start(self) </DeepExtract> self.ellipse.pos = (self.center_x - self.ripple_rad / 2.0, self.center_y - self.ripple_rad / 2.0)
def _set_ellipse(self, instance, value): super(CircularRippleBehavior, self)._set_ellipse(instance, value) if self.ellipse.size[0] > self.width * 0.6 and (not self.fading_out): rc = self.ripple_color if not self.fading_out: Animation.cancel_all(self, 'ripple_color') anim = Animation(ripple_color=[rc[0], rc[1], rc[2], 0.0], t=self.ripple_func_out, duration=self.ripple_duration_out) anim.bind(on_complete=self.anim_complete) self.fading_out = True anim.start(self) self.ellipse.pos = (self.center_x - self.ripple_rad / 2.0, self.center_y - self.ripple_rad / 2.0)
CoPilot
positive
def _build_network_base(images, normal_cell, reduction_cell, num_classes, hparams, is_training): """Constructs a AmoebaNet image model.""" if hparams.get('use_bp16', False) and hparams.get('use_tpu', False): images = tf.cast(images, dtype=tf.bfloat16) end_points = {} filter_scaling_rate = 2 reduction_indices = network_utils.calc_reduction_layers(hparams.num_cells, hparams.num_reduction_layers) stem_cell = reduction_cell if hparams.stem_type == 'imagenet': <DeepExtract> num_stem_filters = hparams.stem_reduction_size with tf.variable_scope('stem', custom_getter=network_utils.bp16_getter): net = slim.conv2d(images, num_stem_filters, [3, 3], stride=2, scope='conv0', padding='VALID') net = network_utils.batch_norm(net, scope='conv0_bn') tf.logging.info('imagenet_stem shape after conv2d_bn: {}'.format(net.shape)) cell_outputs = [None, net] filter_scaling = 1.0 / filter_scaling_rate ** hparams.num_stem_cells for cell_num in range(hparams.num_stem_cells): net = stem_cell(net, scope='cell_stem_{}'.format(cell_num), filter_scaling=filter_scaling, stride=2, prev_layer=cell_outputs[-2], cell_num=cell_num) cell_outputs.append(net) filter_scaling *= filter_scaling_rate tf.logging.info('imagenet_stem shape at reductionlayer{}: {}'.format(cell_num, net.shape)) (net, cell_outputs) = (net, cell_outputs) </DeepExtract> else: <DeepExtract> num_stem_filters = hparams.stem_reduction_size with tf.variable_scope('stem', custom_getter=network_utils.bp16_getter): net = slim.conv2d(images, num_stem_filters, [3, 3], stride=1, scope='conv0', padding='VALID') net = network_utils.batch_norm(net, scope='conv0_bn') tf.logging.info('basic_stem shape: {}'.format(net.shape)) (net, cell_outputs) = (net, [None, net]) </DeepExtract> aux_head_cell_idxes = [] if len(reduction_indices) >= 2: aux_head_cell_idxes.append(reduction_indices[1] - 1) filter_scaling = 1.0 true_cell_num = hparams.num_stem_cells for cell_num in range(hparams.num_cells): tf.logging.info('Current cell num: {}'.format(true_cell_num)) if cell_num in reduction_indices: filter_scaling *= filter_scaling_rate net = reduction_cell(net, scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)), filter_scaling=filter_scaling, stride=2, prev_layer=cell_outputs[-2], cell_num=true_cell_num) cell_outputs.append(net) tf.logging.info('Reduction cell shape atlayer{}: {}'.format(true_cell_num, net.shape)) true_cell_num += 1 net = normal_cell(net, scope='cell_{}'.format(cell_num), filter_scaling=filter_scaling, stride=1, prev_layer=cell_outputs[-2], cell_num=true_cell_num) if hparams.use_aux_head and cell_num in aux_head_cell_idxes and num_classes and is_training: aux_net = tf.nn.relu(net) <DeepExtract> aux_scaling = 1.0 if hasattr(hparams, 'aux_scaling'): aux_scaling = hparams.aux_scaling tf.logging.info('aux scaling: {}'.format(aux_scaling)) with tf.variable_scope('aux_{}'.format(cell_num), custom_getter=network_utils.bp16_getter): aux_logits = tf.identity(aux_net) with tf.variable_scope('aux_logits'): aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3, padding='VALID') aux_logits = slim.conv2d(aux_logits, int(128 * aux_scaling), [1, 1], scope='proj') aux_logits = network_utils.batch_norm(aux_logits, scope='aux_bn0') aux_logits = tf.nn.relu(aux_logits) shape = aux_logits.shape if hparams.data_format == 'NHWC': shape = shape[1:3] else: shape = shape[2:4] aux_logits = slim.conv2d(aux_logits, int(768 * aux_scaling), shape, padding='VALID') aux_logits = network_utils.batch_norm(aux_logits, scope='aux_bn1') aux_logits = tf.nn.relu(aux_logits) aux_logits = 
tf.contrib.layers.flatten(aux_logits) aux_logits = slim.fully_connected(aux_logits, num_classes) end_point_name = 'aux_logits' if 'aux_logits' not in end_points else 'aux_logits_2' end_points[end_point_name] = tf.cast(aux_logits, tf.float32) </DeepExtract> cell_outputs.append(net) tf.logging.info('Normalnet shape atlayer {}: {}'.format(true_cell_num, net.shape)) true_cell_num += 1 with tf.variable_scope('final_layer', custom_getter=network_utils.bp16_getter): net = tf.nn.relu(net) net = network_utils.global_avg_pool(net) end_points['global_pool'] = net if not num_classes: return (net, end_points) net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout') logits = slim.fully_connected(net, num_classes) logits = tf.cast(logits, tf.float32) predictions = tf.nn.softmax(logits, name='predictions') end_points['logits'] = logits end_points['predictions'] = predictions end_points['cell_outputs'] = cell_outputs return (logits, end_points)
def _build_network_base(images, normal_cell, reduction_cell, num_classes, hparams, is_training): """Constructs a AmoebaNet image model.""" if hparams.get('use_bp16', False) and hparams.get('use_tpu', False): images = tf.cast(images, dtype=tf.bfloat16) end_points = {} filter_scaling_rate = 2 reduction_indices = network_utils.calc_reduction_layers(hparams.num_cells, hparams.num_reduction_layers) stem_cell = reduction_cell if hparams.stem_type == 'imagenet': num_stem_filters = hparams.stem_reduction_size with tf.variable_scope('stem', custom_getter=network_utils.bp16_getter): net = slim.conv2d(images, num_stem_filters, [3, 3], stride=2, scope='conv0', padding='VALID') net = network_utils.batch_norm(net, scope='conv0_bn') tf.logging.info('imagenet_stem shape after conv2d_bn: {}'.format(net.shape)) cell_outputs = [None, net] filter_scaling = 1.0 / filter_scaling_rate ** hparams.num_stem_cells for cell_num in range(hparams.num_stem_cells): net = stem_cell(net, scope='cell_stem_{}'.format(cell_num), filter_scaling=filter_scaling, stride=2, prev_layer=cell_outputs[-2], cell_num=cell_num) cell_outputs.append(net) filter_scaling *= filter_scaling_rate tf.logging.info('imagenet_stem shape at reductionlayer{}: {}'.format(cell_num, net.shape)) (net, cell_outputs) = (net, cell_outputs) else: num_stem_filters = hparams.stem_reduction_size with tf.variable_scope('stem', custom_getter=network_utils.bp16_getter): net = slim.conv2d(images, num_stem_filters, [3, 3], stride=1, scope='conv0', padding='VALID') net = network_utils.batch_norm(net, scope='conv0_bn') tf.logging.info('basic_stem shape: {}'.format(net.shape)) (net, cell_outputs) = (net, [None, net]) aux_head_cell_idxes = [] if len(reduction_indices) >= 2: aux_head_cell_idxes.append(reduction_indices[1] - 1) filter_scaling = 1.0 true_cell_num = hparams.num_stem_cells for cell_num in range(hparams.num_cells): tf.logging.info('Current cell num: {}'.format(true_cell_num)) if cell_num in reduction_indices: filter_scaling *= filter_scaling_rate net = reduction_cell(net, scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)), filter_scaling=filter_scaling, stride=2, prev_layer=cell_outputs[-2], cell_num=true_cell_num) cell_outputs.append(net) tf.logging.info('Reduction cell shape atlayer{}: {}'.format(true_cell_num, net.shape)) true_cell_num += 1 net = normal_cell(net, scope='cell_{}'.format(cell_num), filter_scaling=filter_scaling, stride=1, prev_layer=cell_outputs[-2], cell_num=true_cell_num) if hparams.use_aux_head and cell_num in aux_head_cell_idxes and num_classes and is_training: aux_net = tf.nn.relu(net) aux_scaling = 1.0 if hasattr(hparams, 'aux_scaling'): aux_scaling = hparams.aux_scaling tf.logging.info('aux scaling: {}'.format(aux_scaling)) with tf.variable_scope('aux_{}'.format(cell_num), custom_getter=network_utils.bp16_getter): aux_logits = tf.identity(aux_net) with tf.variable_scope('aux_logits'): aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3, padding='VALID') aux_logits = slim.conv2d(aux_logits, int(128 * aux_scaling), [1, 1], scope='proj') aux_logits = network_utils.batch_norm(aux_logits, scope='aux_bn0') aux_logits = tf.nn.relu(aux_logits) shape = aux_logits.shape if hparams.data_format == 'NHWC': shape = shape[1:3] else: shape = shape[2:4] aux_logits = slim.conv2d(aux_logits, int(768 * aux_scaling), shape, padding='VALID') aux_logits = network_utils.batch_norm(aux_logits, scope='aux_bn1') aux_logits = tf.nn.relu(aux_logits) aux_logits = tf.contrib.layers.flatten(aux_logits) aux_logits = 
slim.fully_connected(aux_logits, num_classes) end_point_name = 'aux_logits' if 'aux_logits' not in end_points else 'aux_logits_2' end_points[end_point_name] = tf.cast(aux_logits, tf.float32) cell_outputs.append(net) tf.logging.info('Normalnet shape atlayer {}: {}'.format(true_cell_num, net.shape)) true_cell_num += 1 with tf.variable_scope('final_layer', custom_getter=network_utils.bp16_getter): net = tf.nn.relu(net) net = network_utils.global_avg_pool(net) end_points['global_pool'] = net if not num_classes: return (net, end_points) net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout') logits = slim.fully_connected(net, num_classes) logits = tf.cast(logits, tf.float32) predictions = tf.nn.softmax(logits, name='predictions') end_points['logits'] = logits end_points['predictions'] = predictions end_points['cell_outputs'] = cell_outputs return (logits, end_points)
DAPPLE
positive
def has_permission(self, request, view): token = request.auth if not token: return False if hasattr(token, 'scope'): <DeepExtract> try: required_alternate_scopes = getattr(view, 'required_alternate_scopes') except AttributeError: raise ImproperlyConfigured('TokenMatchesOASRequirements requires the view to define the required_alternate_scopes attribute') </DeepExtract> m = request.method.upper() if m in required_alternate_scopes: log.debug('Required scopes alternatives to access resource: {0}'.format(required_alternate_scopes[m])) for alt in required_alternate_scopes[m]: if token.is_valid(alt): return True return False else: log.warning('no scope alternates defined for method {0}'.format(m)) return False assert False, 'TokenMatchesOASRequirements requires the`oauth2_provider.rest_framework.OAuth2Authentication` authentication class to be used.'
def has_permission(self, request, view): token = request.auth if not token: return False if hasattr(token, 'scope'): try: required_alternate_scopes = getattr(view, 'required_alternate_scopes') except AttributeError: raise ImproperlyConfigured('TokenMatchesOASRequirements requires the view to define the required_alternate_scopes attribute') m = request.method.upper() if m in required_alternate_scopes: log.debug('Required scopes alternatives to access resource: {0}'.format(required_alternate_scopes[m])) for alt in required_alternate_scopes[m]: if token.is_valid(alt): return True return False else: log.warning('no scope alternates defined for method {0}'.format(m)) return False assert False, 'TokenMatchesOASRequirements requires the`oauth2_provider.rest_framework.OAuth2Authentication` authentication class to be used.'
django-oauth-toolkit
positive
def parse(self): """Return the actual text contained within an HTML document. Implemented using :py:mod:`BeautifulSoup <bs4>` (https://www.crummy.com/software/BeautifulSoup/). """ url = urllib.parse.urlparse(self.url) if self.url else None <DeepExtract> try: soup = bs4.BeautifulSoup(self.text, 'lxml') except ValueError: soup = bs4.BeautifulSoup(self.text) </DeepExtract> if not soup.body: return '' <DeepExtract> if 'mirror_hints' not in self._args: return func = lambda attr: attr and any((hint in attr for hint in self._args['mirror_hints'])) if soup.find_all(href=func) or soup.find_all(src=func): raise ParserExclusionError() </DeepExtract> body = soup.body if url and url.netloc == 'web.archive.org' and url.path.endswith('.pdf'): playback = body.find(id='playback') if playback and 'src' in playback.attrs: raise ParserRedirectError(playback.attrs['src']) <DeepExtract> is_comment = lambda text: isinstance(text, bs4.element.Comment) for comment in body.find_all(text=is_comment): comment.extract() for tag in self.hidden_tags: for element in body.find_all(tag): element.extract() content = '\n'.join(body.stripped_strings) </DeepExtract> if url and url.netloc.endswith('.blogspot.com') and (not content): <DeepExtract> match = re.search("'postId': '(\\d+)'", self.text) if not match: content = '' post_id = match.group(1) url = 'https://%s/feeds/posts/default/%s?' % (url.netloc, post_id) params = {'alt': 'json', 'v': '2', 'dynamicviews': '1', 'rewriteforssl': 'true'} raw = self._open(url + urllib.parse.urlencode(params), allow_content_types=['application/json']) if raw is None: content = '' try: parsed = json.loads(raw) except ValueError: content = '' try: text = parsed['entry']['content']['$t'] except KeyError: content = '' soup = self._get_soup(text) content = self._clean_soup(soup.body) </DeepExtract> return content
def parse(self): """Return the actual text contained within an HTML document. Implemented using :py:mod:`BeautifulSoup <bs4>` (https://www.crummy.com/software/BeautifulSoup/). """ url = urllib.parse.urlparse(self.url) if self.url else None try: soup = bs4.BeautifulSoup(self.text, 'lxml') except ValueError: soup = bs4.BeautifulSoup(self.text) if not soup.body: return '' if 'mirror_hints' not in self._args: return func = lambda attr: attr and any((hint in attr for hint in self._args['mirror_hints'])) if soup.find_all(href=func) or soup.find_all(src=func): raise ParserExclusionError() body = soup.body if url and url.netloc == 'web.archive.org' and url.path.endswith('.pdf'): playback = body.find(id='playback') if playback and 'src' in playback.attrs: raise ParserRedirectError(playback.attrs['src']) is_comment = lambda text: isinstance(text, bs4.element.Comment) for comment in body.find_all(text=is_comment): comment.extract() for tag in self.hidden_tags: for element in body.find_all(tag): element.extract() content = '\n'.join(body.stripped_strings) if url and url.netloc.endswith('.blogspot.com') and (not content): match = re.search("'postId': '(\\d+)'", self.text) if not match: content = '' post_id = match.group(1) url = 'https://%s/feeds/posts/default/%s?' % (url.netloc, post_id) params = {'alt': 'json', 'v': '2', 'dynamicviews': '1', 'rewriteforssl': 'true'} raw = self._open(url + urllib.parse.urlencode(params), allow_content_types=['application/json']) if raw is None: content = '' try: parsed = json.loads(raw) except ValueError: content = '' try: text = parsed['entry']['content']['$t'] except KeyError: content = '' soup = self._get_soup(text) content = self._clean_soup(soup.body) return content
earwigbot
positive