column   dtype           range / classes
before   stringlengths   0 to 955k
after    stringlengths   0 to 877k
repo     stringlengths   1 to 74
type     stringclasses   1 value
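Each row pairs a before snippet (code containing <DeepExtract> ... </DeepExtract> markers around inlined helper code) with the corresponding after snippet (the same code with the markers stripped), plus the source repo and a type label (every row shown here is "positive"). Below is a minimal sketch of how such a dataset could be loaded and inspected with the Hugging Face datasets library; the dataset identifier used is a placeholder, not the actual published name.

from datasets import load_dataset  # assumes the Hugging Face `datasets` package is installed

# Hypothetical identifier: substitute the real dataset path.
ds = load_dataset("user/deepextract-pairs", split="train")

row = ds[0]
print(row["repo"], row["type"])   # e.g. "3DFasterRCNN_LungNoduleDetector", "positive"
print(row["before"][:300])        # code containing <DeepExtract> ... </DeepExtract> markers
print(row["after"][:300])         # the same code with the markers removed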
def _malloc(self, size): i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): <DeepExtract> mask = mmap.PAGESIZE - 1 length = max(self._size, size) + mask & ~mask </DeepExtract> self._size *= 2 info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[arena, start] del self._stop_to_block[arena, stop] return block
def _malloc(self, size): i = bisect.bisect_left(self._lengths, size) if i == len(self._lengths): mask = mmap.PAGESIZE - 1 length = max(self._size, size) + mask & ~mask self._size *= 2 info('allocating a new mmap of length %d', length) arena = Arena(length) self._arenas.append(arena) return (arena, 0, length) else: length = self._lengths[i] seq = self._len_to_seq[length] block = seq.pop() if not seq: del self._len_to_seq[length], self._lengths[i] (arena, start, stop) = block del self._start_to_block[arena, start] del self._stop_to_block[arena, stop] return block
3DFasterRCNN_LungNoduleDetector
positive
def build_temp_dir(prefix='test-attributecode-'): """ Create and return a new unique empty directory created in base_dir. """ location = tempfile.mkdtemp(prefix=prefix) <DeepExtract> if not os.path.exists(location): os.makedirs(location) os.chmod(location, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH) </DeepExtract> return location
def build_temp_dir(prefix='test-attributecode-'): """ Create and return a new unique empty directory created in base_dir. """ location = tempfile.mkdtemp(prefix=prefix) if not os.path.exists(location): os.makedirs(location) os.chmod(location, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH) return location
aboutcode-toolkit
positive
def slot_tabview_change(self, index): if index not in (0, 1): return status_prev: str = self.view_status_label.text() if index == 0: <DeepExtract> self.view_status_label.setText(self.view_status_label_analysis_cache) </DeepExtract> self.view_status_label_rulegen_cache = status_prev self.view_reset_button.setText('Reset Selections') elif index == 1: <DeepExtract> self.view_status_label.setText(self.view_status_label_rulegen_cache) </DeepExtract> self.view_status_label_analysis_cache = status_prev self.view_reset_button.setText('Clear')
def slot_tabview_change(self, index): if index not in (0, 1): return status_prev: str = self.view_status_label.text() if index == 0: self.view_status_label.setText(self.view_status_label_analysis_cache) self.view_status_label_rulegen_cache = status_prev self.view_reset_button.setText('Reset Selections') elif index == 1: self.view_status_label.setText(self.view_status_label_rulegen_cache) self.view_status_label_analysis_cache = status_prev self.view_reset_button.setText('Clear')
capa
positive
def read_input(): ncases = int(input()) for case in range(1, ncases + 1): n = int(input()) way = input() <DeepExtract> if way[0] == way[-1]: if way[0] == 'E': for (i, char) in enumerate(way): if way[i] == way[i + 1] and way[i] == 'S': y = sum((ch == 'S' for ch in way[:i])) + 1 path = 'S' * y + 'E' * n - 1 + 'S' * (n - 1 - y) elif way[0] == 'S': for (i, char) in enumerate(way): if way[i] == way[i + 1] and way[i] == 'E': y = sum((ch == 'E' for ch in way[:i])) + 1 path = 'E' * y + 'S' * n - 1 + 'E' * (n - 1 - y) elif way[0] == 'E': path = 'S' * n - 1 + 'E' * n - 1 elif way[0] == 'S': path = 'E' * n - 1 + 'S' * n - 1 </DeepExtract> print('CASE #{}: {}'.format(case, path))
def read_input(): ncases = int(input()) for case in range(1, ncases + 1): n = int(input()) way = input() if way[0] == way[-1]: if way[0] == 'E': for (i, char) in enumerate(way): if way[i] == way[i + 1] and way[i] == 'S': y = sum((ch == 'S' for ch in way[:i])) + 1 path = 'S' * y + 'E' * n - 1 + 'S' * (n - 1 - y) elif way[0] == 'S': for (i, char) in enumerate(way): if way[i] == way[i + 1] and way[i] == 'E': y = sum((ch == 'E' for ch in way[:i])) + 1 path = 'E' * y + 'S' * n - 1 + 'E' * (n - 1 - y) elif way[0] == 'E': path = 'S' * n - 1 + 'E' * n - 1 elif way[0] == 'S': path = 'E' * n - 1 + 'S' * n - 1 print('CASE #{}: {}'.format(case, path))
algorithms
positive
def _batch_action(self): async def batch_action(settings: ModelView.schemes.BatchSettings, user: ModelView.schemes.User=Security(utils.authorization.auth_dependency, scopes=self.scopes['batch_action'])): <DeepExtract> if settings.command in self.custom_commands: query = self.custom_commands[settings.command](self.orm_model) if settings.command == 'delete': query = self.orm_model.delete </DeepExtract> if query is None: raise HTTPException(status_code=404, detail='Batch command not found') if self.orm_model != models.User and user: query = query.where(self.orm_model.user_id == user.id) query = query.where(self.orm_model.id.in_(settings.ids)) if self.custom_methods.get('batch_action'): await self.custom_methods['batch_action'](query, settings, user) else: await query.gino.status() return True return batch_action
def _batch_action(self): async def batch_action(settings: ModelView.schemes.BatchSettings, user: ModelView.schemes.User=Security(utils.authorization.auth_dependency, scopes=self.scopes['batch_action'])): if settings.command in self.custom_commands: query = self.custom_commands[settings.command](self.orm_model) if settings.command == 'delete': query = self.orm_model.delete if query is None: raise HTTPException(status_code=404, detail='Batch command not found') if self.orm_model != models.User and user: query = query.where(self.orm_model.user_id == user.id) query = query.where(self.orm_model.id.in_(settings.ids)) if self.custom_methods.get('batch_action'): await self.custom_methods['batch_action'](query, settings, user) else: await query.gino.status() return True return batch_action
bitcart
positive
def __init__(self, parameter): """ :param parameter: the parts of a tplarg. """ <DeepExtract> sep = '|' parameters = [] cur = 0 for (s, e) in findMatchingBraces(parameter): par = parameter[cur:s].split(sep) if par: if parameters: parameters[-1] += par[0] if len(par) > 1: parameters.extend(par[1:]) else: parameters = par elif not parameters: parameters = [''] parameters[-1] += parameter[s:e] cur = e par = parameter[cur:].split(sep) if par: if parameters: parameters[-1] += par[0] if len(par) > 1: parameters.extend(par[1:]) else: parameters = par parts = parameters </DeepExtract> self.name = Template.parse(parts[0]) if len(parts) > 1: self.default = Template.parse(parts[1]) else: self.default = None
def __init__(self, parameter): """ :param parameter: the parts of a tplarg. """ sep = '|' parameters = [] cur = 0 for (s, e) in findMatchingBraces(parameter): par = parameter[cur:s].split(sep) if par: if parameters: parameters[-1] += par[0] if len(par) > 1: parameters.extend(par[1:]) else: parameters = par elif not parameters: parameters = [''] parameters[-1] += parameter[s:e] cur = e par = parameter[cur:].split(sep) if par: if parameters: parameters[-1] += par[0] if len(par) > 1: parameters.extend(par[1:]) else: parameters = par parts = parameters self.name = Template.parse(parts[0]) if len(parts) > 1: self.default = Template.parse(parts[1]) else: self.default = None
DistillBERT
positive
def unpack_directory(data): <DeepExtract> header = struct_unpack(HEADER_FORMAT, data)[0] </DeepExtract> numTables = header['numTables'] data = data[HEADER_SIZE:] directory = [] for index in range(numTables): <DeepExtract> (keys, format_string) = _struct_get_format(DIRECTORY_FORMAT) size = struct.calcsize(format_string) values = struct.unpack(format_string, data[:size]) unpacked = {} for (index, key) in enumerate(keys): value = values[index] unpacked[key] = value (table, data) = (unpacked, data[size:]) </DeepExtract> directory.append(table) return directory
def unpack_directory(data): header = struct_unpack(HEADER_FORMAT, data)[0] numTables = header['numTables'] data = data[HEADER_SIZE:] directory = [] for index in range(numTables): (keys, format_string) = _struct_get_format(DIRECTORY_FORMAT) size = struct.calcsize(format_string) values = struct.unpack(format_string, data[:size]) unpacked = {} for (index, key) in enumerate(keys): value = values[index] unpacked[key] = value (table, data) = (unpacked, data[size:]) directory.append(table) return directory
django-gateone
positive
def format_json(lib): import json summary = lib.summarize() non_users = [] for u in summary['non_users']: non_users.append(u.to_dict()) non_users.sort(key=lambda x: x['path']) users = [] for (u, usage) in summary['users']: symbols = [s.to_dict() for s in usage] symbols.sort(key=lambda x: x['name']) users.append({'user': u.to_dict(), 'used_symbols': symbols}) users.sort(key=lambda x: x['user']['path']) unused_symbols = [] for s in summary['unused_symbols']: unused_symbols.append(s.to_dict()) unused_symbols.sort(key=lambda x: x['name']) excluded_symbols = [] for s in summary['excluded_symbols']: excluded_symbols.append(s.to_dict()) excluded_symbols.sort(key=lambda x: x['name']) used_symbols = {} for (s, user) in summary['used_symbols'].items(): lst = used_symbols.setdefault(s.name, []) for u in user: if isinstance(u, User): lst.append(('binary', u.path)) elif isinstance(u, tuple): <DeepExtract> u[1] = list(u[1]) lines = [] with open(u[0], encoding='utf-8') as f: for (i, line) in enumerate(f): if i in u[1]: lines.append((i, line.strip())) u[1].remove(i) if not u[1]: break lines = lines </DeepExtract> lst.append(('source', lines)) lst.sort() report = {'non_users': non_users, 'users': users, 'unused_symbols': unused_symbols, 'excluded_symbols': excluded_symbols, 'used_symbols': used_symbols} json.dump(report, sys.stdout, indent=2, sort_keys=True)
def format_json(lib): import json summary = lib.summarize() non_users = [] for u in summary['non_users']: non_users.append(u.to_dict()) non_users.sort(key=lambda x: x['path']) users = [] for (u, usage) in summary['users']: symbols = [s.to_dict() for s in usage] symbols.sort(key=lambda x: x['name']) users.append({'user': u.to_dict(), 'used_symbols': symbols}) users.sort(key=lambda x: x['user']['path']) unused_symbols = [] for s in summary['unused_symbols']: unused_symbols.append(s.to_dict()) unused_symbols.sort(key=lambda x: x['name']) excluded_symbols = [] for s in summary['excluded_symbols']: excluded_symbols.append(s.to_dict()) excluded_symbols.sort(key=lambda x: x['name']) used_symbols = {} for (s, user) in summary['used_symbols'].items(): lst = used_symbols.setdefault(s.name, []) for u in user: if isinstance(u, User): lst.append(('binary', u.path)) elif isinstance(u, tuple): u[1] = list(u[1]) lines = [] with open(u[0], encoding='utf-8') as f: for (i, line) in enumerate(f): if i in u[1]: lines.append((i, line.strip())) u[1].remove(i) if not u[1]: break lines = lines lst.append(('source', lines)) lst.sort() report = {'non_users': non_users, 'users': users, 'unused_symbols': unused_symbols, 'excluded_symbols': excluded_symbols, 'used_symbols': used_symbols} json.dump(report, sys.stdout, indent=2, sort_keys=True)
barbieri-playground
positive
def test_delete_question_with_essay_question(self): EssayQuestion.objects.create(question_id=1, assignment=Assignment.objects.get(assignment_id=1), title='Evolvers', description='Write an essay about the Evolvers.') kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'} <DeepExtract> client = Client() client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD) client = client </DeepExtract> response = client.post('/teacher/course/1/assignment/1/delete_question', {'question_id': 1, 'question_type': settings.ESSAY_QUESTION_TYPE}, **kwargs) self.assertEqual(response.status_code, 200) json_string = response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(array['message'], 'question was deleted') self.assertEqual(array['status'], 'success')
def test_delete_question_with_essay_question(self): EssayQuestion.objects.create(question_id=1, assignment=Assignment.objects.get(assignment_id=1), title='Evolvers', description='Write an essay about the Evolvers.') kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'} client = Client() client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD) client = client response = client.post('/teacher/course/1/assignment/1/delete_question', {'question_id': 1, 'question_type': settings.ESSAY_QUESTION_TYPE}, **kwargs) self.assertEqual(response.status_code, 200) json_string = response.content.decode(encoding='UTF-8') array = json.loads(json_string) self.assertEqual(array['message'], 'question was deleted') self.assertEqual(array['status'], 'success')
academicstoday-django
positive
def findPathsUtil(maze, m, n, i, j, path, indx): global allPaths global storePaths if i == m - 1: for k in range(j, n): path[indx + k - j] = maze[i][k] storePaths += ''.join(path) + '|' allPaths.append(path) return if j == n - 1: for k in range(i, m): path[indx + k - i] = maze[k][j] storePaths += ''.join(path) + '|' allPaths.append(path) return path[indx] = maze[i][j] <DeepExtract> global allPaths global storePaths if i + 1 == m - 1: for k in range(j, n): path[indx + 1 + k - j] = maze[i + 1][k] storePaths += ''.join(path) + '|' allPaths.append(path) return if j == n - 1: for k in range(i + 1, m): path[indx + 1 + k - i + 1] = maze[k][j] storePaths += ''.join(path) + '|' allPaths.append(path) return path[indx + 1] = maze[i + 1][j] findPathsUtil(maze, m, n, i + 1 + 1, j, path, indx + 1 + 1) findPathsUtil(maze, m, n, i + 1, j + 1, path, indx + 1 + 1) </DeepExtract> <DeepExtract> global allPaths global storePaths if i == m - 1: for k in range(j + 1, n): path[indx + 1 + k - j + 1] = maze[i][k] storePaths += ''.join(path) + '|' allPaths.append(path) return if j + 1 == n - 1: for k in range(i, m): path[indx + 1 + k - i] = maze[k][j + 1] storePaths += ''.join(path) + '|' allPaths.append(path) return path[indx + 1] = maze[i][j + 1] findPathsUtil(maze, m, n, i + 1, j + 1, path, indx + 1 + 1) findPathsUtil(maze, m, n, i, j + 1 + 1, path, indx + 1 + 1) </DeepExtract>
def findPathsUtil(maze, m, n, i, j, path, indx): global allPaths global storePaths if i == m - 1: for k in range(j, n): path[indx + k - j] = maze[i][k] storePaths += ''.join(path) + '|' allPaths.append(path) return if j == n - 1: for k in range(i, m): path[indx + k - i] = maze[k][j] storePaths += ''.join(path) + '|' allPaths.append(path) return path[indx] = maze[i][j] global allPaths global storePaths if i + 1 == m - 1: for k in range(j, n): path[indx + 1 + k - j] = maze[i + 1][k] storePaths += ''.join(path) + '|' allPaths.append(path) return if j == n - 1: for k in range(i + 1, m): path[indx + 1 + k - i + 1] = maze[k][j] storePaths += ''.join(path) + '|' allPaths.append(path) return path[indx + 1] = maze[i + 1][j] findPathsUtil(maze, m, n, i + 1 + 1, j, path, indx + 1 + 1) findPathsUtil(maze, m, n, i + 1, j + 1, path, indx + 1 + 1) global allPaths global storePaths if i == m - 1: for k in range(j + 1, n): path[indx + 1 + k - j + 1] = maze[i][k] storePaths += ''.join(path) + '|' allPaths.append(path) return if j + 1 == n - 1: for k in range(i, m): path[indx + 1 + k - i] = maze[k][j + 1] storePaths += ''.join(path) + '|' allPaths.append(path) return path[indx + 1] = maze[i][j + 1] findPathsUtil(maze, m, n, i + 1, j + 1, path, indx + 1 + 1) findPathsUtil(maze, m, n, i, j + 1 + 1, path, indx + 1 + 1)
Competitive-Coding-Platforms
positive
@pytest.mark.parametrize('n, shape, grid', [([0], (1, 1, 1), (3, 4, 1)), ([1, 14], (10, 20, 30), (3, 1, 5)), ([14, 14, 14], (10, 20, 30), (1, 4, 5))]) def test_getDiscretisation_bools(n, shape, grid): <DeepExtract> coords = np.concatenate([np.linspace(0, s, len(n)).reshape(-1, 1) for s in shape], axis=1) species = np.array(n) (atoms, species) = (coords, species) </DeepExtract> x = [np.linspace(0, 0.1, g) for g in grid] for b1 in (True, False): for b2 in (True, False): f = get_discretisation(atoms, species, x, pointwise=b1, FT=b2, **params) assert f.shape == grid
@pytest.mark.parametrize('n, shape, grid', [([0], (1, 1, 1), (3, 4, 1)), ([1, 14], (10, 20, 30), (3, 1, 5)), ([14, 14, 14], (10, 20, 30), (1, 4, 5))]) def test_getDiscretisation_bools(n, shape, grid): coords = np.concatenate([np.linspace(0, s, len(n)).reshape(-1, 1) for s in shape], axis=1) species = np.array(n) (atoms, species) = (coords, species) x = [np.linspace(0, 0.1, g) for g in grid] for b1 in (True, False): for b2 in (True, False): f = get_discretisation(atoms, species, x, pointwise=b1, FT=b2, **params) assert f.shape == grid
diffsims
positive
def start_lock_delay(self): """ Setup the lock delay timer based on user prefs - if there is no delay, or if idle locking isn't enabled, we run the callback immediately, or simply return, respectively. """ if not settings.get_idle_lock_enabled(): return if not utils.user_can_lock(): return lock_delay = settings.get_idle_lock_delay() if lock_delay == 0: <DeepExtract> DEBUG("manager: locking after delay ('lock-delay')") self.set_locked(True) return False </DeepExtract> else: trackers.timer_tracker_get().start_seconds('idle-lock-delay', lock_delay, self.on_lock_delay_timeout)
def start_lock_delay(self): """ Setup the lock delay timer based on user prefs - if there is no delay, or if idle locking isn't enabled, we run the callback immediately, or simply return, respectively. """ if not settings.get_idle_lock_enabled(): return if not utils.user_can_lock(): return lock_delay = settings.get_idle_lock_delay() if lock_delay == 0: DEBUG("manager: locking after delay ('lock-delay')") self.set_locked(True) return False else: trackers.timer_tracker_get().start_seconds('idle-lock-delay', lock_delay, self.on_lock_delay_timeout)
cinnamon-screensaver
positive
def __init__(self, data): """ Initialise the DataHfProvider class with the `data` being a supported data container (currently python dictionary or HDF5 file). Let `nf` denote the number of Fock spin orbitals (i.e. the sum of both the alpha and the beta orbitals) and `nb` the number of basis functions. With `array` we indicate either a `np.array` or an HDF5 dataset. The following keys are required in the container: 1. **restricted** (`bool`): `True` for a restricted SCF calculation, `False` otherwise 2. **conv_tol** (`float`): Tolerance value used for SCF convergence, should be roughly equivalent to l2 norm of the Pulay error. 3. **orbcoeff_fb** (`.array` with dtype `float`, size `(nf, nb)`): SCF orbital coefficients, i.e. the uniform transform from the basis to the molecular orbitals. 4. **occupation_f** (`array` with dtype `float`, size `(nf, )`: Occupation number for each SCF orbitals (i.e. diagonal of the HF density matrix in the SCF orbital basis). 5. **orben_f** (`array` with dtype `float`, size `(nf, )`: SCF orbital energies 6. **fock_ff** (`array` with dtype `float`, size `(nf, nf)`: Fock matrix in SCF orbital basis. Notice, the full matrix is expected also for restricted calculations. 7. **eri_phys_asym_ffff** (`array` with dtype `float`, size `(nf, nf, nf, nf)`: Antisymmetrised electron-repulsion integral tensor in the SCF orbital basis, using the Physicists' indexing convention, i.e. that the index tuple `(i,j,k,l)` refers to the integral :math:`\\langle ij || kl \\rangle`, i.e. .. math:: \\int_\\Omega \\int_\\Omega d r_1 d r_2 \\frac{ \\phi_i(r_1) \\phi_j(r_2) \\phi_k(r_1) \\phi_l(r_2)}{|r_1 - r_2|} - \\int_\\Omega \\int_\\Omega d r_1 d r_2 \\frac{ \\phi_i(r_1) \\phi_j(r_2) \\phi_l(r_1) \\phi_k(r_2)}{|r_1 - r_2|} The full tensor (including zero blocks) is expected. As an alternative to `eri_phys_asym_ffff`, the user may provide 8. **eri_ffff** (`array` with dtype `float`, size `(nf, nf, nf, nf)`: Electron-repulsion integral tensor in chemists' notation. The index tuple `(i,j,k,l)` thus refers to the integral :math:`(ij|kl)`, which is .. math:: \\int_\\Omega \\int_\\Omega d r_1 d r_2 \\frac{\\phi_i(r_1) \\phi_j(r_1) \\phi_k(r_2) \\phi_l(r_2)}{|r_1 - r_2|} Notice, that no antisymmetrisation has been applied in this tensor. The above keys define the least set of quantities to start a calculation in `adcc`. In order to have access to properties such as dipole moments or to get the correct state energies, further keys are highly recommended to be provided as well. 9. **energy_scf** (`float`): Final total SCF energy of both electronic and nuclear energy terms. (default: `0.0`) 10. **multipoles**: Container with electric and nuclear multipole moments. Can be another dictionary or simply an HDF5 group. - **elec_1** (`array`, size `(3, nb, nb)`): Electric dipole moment integrals in the atomic orbital basis (i.e. the discretisation basis with `nb` elements). First axis indicates cartesian component (x, y, z). - **nuc_0** (`float`): Total nuclear charge - **nuc_1** (`array` size `(3, )`: Nuclear dipole moment The defaults for all entries are all-zero multipoles. 11. **spin_multiplicity** (`int`): The spin mulitplicity of the HF ground state described by the data. A value of `0` (for unknown) should be supplied for unrestricted calculations. (default: 1 for restricted and 0 for unrestricted calculations) A descriptive string for the backend can be supplied optionally as well. In case of using a python `dict` as the data container, this should be done using the key `backend`. 
For an HDF5 file, this should be done using the attribute `backend`. Defaults based on the filename are generated. Parameters ---------- data : dict or h5py.File Dictionary containing the HartreeFock data to use. For the required keys see details above. """ super().__init__() self.data = data if isinstance(data, dict): self.__backend = data.get('backend', 'dict') elif isinstance(data, h5py.File): if 'r' not in data.mode: raise ValueError('Passed h5py.File stream (filename: {}) not readable.'.format(data.filename)) self.__backend = data.attrs.get('backend', '<HDF5 file "{}">'.format(data.filename)) else: raise TypeError('Can only deal with data objects of type dict or h5py.File.') if data['orbcoeff_fb'].shape[0] % 2 != 0: raise ValueError('orbcoeff_fb first axis should have even length') <DeepExtract> nb = self.data['orbcoeff_fb'].shape[1] </DeepExtract> nf = 2 * self.get_n_orbs_alpha() checks = [('orbcoeff_fb', (nf, nb)), ('occupation_f', (nf,)), ('orben_f', (nf,)), ('fock_ff', (nf, nf)), ('eri_ffff', (nf, nf, nf, nf)), ('eri_phys_asym_ffff', (nf, nf, nf, nf))] for (key, exshape) in checks: if key not in data: continue if data[key].shape != exshape: raise ValueError('Shape mismatch for key {}: Expected {}, but got {}.'.format(key, exshape, data[key].shape)) opprov = DataOperatorIntegralProvider(self.__backend) mmp = data.get('multipoles', {}) if 'elec_1' in mmp: if mmp['elec_1'].shape != (3, nb, nb): raise ValueError('multipoles/elec_1 is expected to have shape ' + str((3, nb, nb)) + ' not ' + str(mmp['elec_1'].shape)) opprov.electric_dipole = np.asarray(mmp['elec_1']) magm = data.get('magnetic_moments', {}) if 'mag_1' in magm: if magm['mag_1'].shape != (3, nb, nb): raise ValueError('magnetic_moments/mag_1 is expected to have shape ' + str((3, nb, nb)) + ' not ' + str(magm['mag_1'].shape)) opprov.magnetic_dipole = np.asarray(magm['mag_1']) derivs = data.get('derivatives', {}) if 'nabla' in derivs: if derivs['nabla'].shape != (3, nb, nb): raise ValueError('derivatives/nabla is expected to have shape ' + str((3, nb, nb)) + ' not ' + str(derivs['nabla'].shape)) opprov.nabla = np.asarray(derivs['nabla']) self.operator_integral_provider = opprov
def __init__(self, data): """ Initialise the DataHfProvider class with the `data` being a supported data container (currently python dictionary or HDF5 file). Let `nf` denote the number of Fock spin orbitals (i.e. the sum of both the alpha and the beta orbitals) and `nb` the number of basis functions. With `array` we indicate either a `np.array` or an HDF5 dataset. The following keys are required in the container: 1. **restricted** (`bool`): `True` for a restricted SCF calculation, `False` otherwise 2. **conv_tol** (`float`): Tolerance value used for SCF convergence, should be roughly equivalent to l2 norm of the Pulay error. 3. **orbcoeff_fb** (`.array` with dtype `float`, size `(nf, nb)`): SCF orbital coefficients, i.e. the uniform transform from the basis to the molecular orbitals. 4. **occupation_f** (`array` with dtype `float`, size `(nf, )`: Occupation number for each SCF orbitals (i.e. diagonal of the HF density matrix in the SCF orbital basis). 5. **orben_f** (`array` with dtype `float`, size `(nf, )`: SCF orbital energies 6. **fock_ff** (`array` with dtype `float`, size `(nf, nf)`: Fock matrix in SCF orbital basis. Notice, the full matrix is expected also for restricted calculations. 7. **eri_phys_asym_ffff** (`array` with dtype `float`, size `(nf, nf, nf, nf)`: Antisymmetrised electron-repulsion integral tensor in the SCF orbital basis, using the Physicists' indexing convention, i.e. that the index tuple `(i,j,k,l)` refers to the integral :math:`\\langle ij || kl \\rangle`, i.e. .. math:: \\int_\\Omega \\int_\\Omega d r_1 d r_2 \\frac{ \\phi_i(r_1) \\phi_j(r_2) \\phi_k(r_1) \\phi_l(r_2)}{|r_1 - r_2|} - \\int_\\Omega \\int_\\Omega d r_1 d r_2 \\frac{ \\phi_i(r_1) \\phi_j(r_2) \\phi_l(r_1) \\phi_k(r_2)}{|r_1 - r_2|} The full tensor (including zero blocks) is expected. As an alternative to `eri_phys_asym_ffff`, the user may provide 8. **eri_ffff** (`array` with dtype `float`, size `(nf, nf, nf, nf)`: Electron-repulsion integral tensor in chemists' notation. The index tuple `(i,j,k,l)` thus refers to the integral :math:`(ij|kl)`, which is .. math:: \\int_\\Omega \\int_\\Omega d r_1 d r_2 \\frac{\\phi_i(r_1) \\phi_j(r_1) \\phi_k(r_2) \\phi_l(r_2)}{|r_1 - r_2|} Notice, that no antisymmetrisation has been applied in this tensor. The above keys define the least set of quantities to start a calculation in `adcc`. In order to have access to properties such as dipole moments or to get the correct state energies, further keys are highly recommended to be provided as well. 9. **energy_scf** (`float`): Final total SCF energy of both electronic and nuclear energy terms. (default: `0.0`) 10. **multipoles**: Container with electric and nuclear multipole moments. Can be another dictionary or simply an HDF5 group. - **elec_1** (`array`, size `(3, nb, nb)`): Electric dipole moment integrals in the atomic orbital basis (i.e. the discretisation basis with `nb` elements). First axis indicates cartesian component (x, y, z). - **nuc_0** (`float`): Total nuclear charge - **nuc_1** (`array` size `(3, )`: Nuclear dipole moment The defaults for all entries are all-zero multipoles. 11. **spin_multiplicity** (`int`): The spin mulitplicity of the HF ground state described by the data. A value of `0` (for unknown) should be supplied for unrestricted calculations. (default: 1 for restricted and 0 for unrestricted calculations) A descriptive string for the backend can be supplied optionally as well. In case of using a python `dict` as the data container, this should be done using the key `backend`. 
For an HDF5 file, this should be done using the attribute `backend`. Defaults based on the filename are generated. Parameters ---------- data : dict or h5py.File Dictionary containing the HartreeFock data to use. For the required keys see details above. """ super().__init__() self.data = data if isinstance(data, dict): self.__backend = data.get('backend', 'dict') elif isinstance(data, h5py.File): if 'r' not in data.mode: raise ValueError('Passed h5py.File stream (filename: {}) not readable.'.format(data.filename)) self.__backend = data.attrs.get('backend', '<HDF5 file "{}">'.format(data.filename)) else: raise TypeError('Can only deal with data objects of type dict or h5py.File.') if data['orbcoeff_fb'].shape[0] % 2 != 0: raise ValueError('orbcoeff_fb first axis should have even length') nb = self.data['orbcoeff_fb'].shape[1] nf = 2 * self.get_n_orbs_alpha() checks = [('orbcoeff_fb', (nf, nb)), ('occupation_f', (nf,)), ('orben_f', (nf,)), ('fock_ff', (nf, nf)), ('eri_ffff', (nf, nf, nf, nf)), ('eri_phys_asym_ffff', (nf, nf, nf, nf))] for (key, exshape) in checks: if key not in data: continue if data[key].shape != exshape: raise ValueError('Shape mismatch for key {}: Expected {}, but got {}.'.format(key, exshape, data[key].shape)) opprov = DataOperatorIntegralProvider(self.__backend) mmp = data.get('multipoles', {}) if 'elec_1' in mmp: if mmp['elec_1'].shape != (3, nb, nb): raise ValueError('multipoles/elec_1 is expected to have shape ' + str((3, nb, nb)) + ' not ' + str(mmp['elec_1'].shape)) opprov.electric_dipole = np.asarray(mmp['elec_1']) magm = data.get('magnetic_moments', {}) if 'mag_1' in magm: if magm['mag_1'].shape != (3, nb, nb): raise ValueError('magnetic_moments/mag_1 is expected to have shape ' + str((3, nb, nb)) + ' not ' + str(magm['mag_1'].shape)) opprov.magnetic_dipole = np.asarray(magm['mag_1']) derivs = data.get('derivatives', {}) if 'nabla' in derivs: if derivs['nabla'].shape != (3, nb, nb): raise ValueError('derivatives/nabla is expected to have shape ' + str((3, nb, nb)) + ' not ' + str(derivs['nabla'].shape)) opprov.nabla = np.asarray(derivs['nabla']) self.operator_integral_provider = opprov
adcc
positive
def dpMain(*args): """ Main function. Check existen nodes and call the scripted function. """ callAction = False <DeepExtract> selList = cmds.ls(selection=True) if selList: for item in selList: if self.dpCheckAllGrp(item): self.allGrp = item for item in selList: relativeList = cmds.listRelatives(item, allParents=True, type='transform') while relativeList: if self.dpCheckAllGrp(relativeList[0]): self.allGrp = relativeList[0] relativeList = cmds.listRelatives(relativeList[0], allParents=True, type='transform') self.allGrp = False </DeepExtract> if self.allGrp: callAction = True else: <DeepExtract> allGrpNodeList = [] allNodeList = cmds.ls(selection=False, type='transform') for nodeName in allNodeList: allGrp = self.dpCheckAllGrp(nodeName) if allGrp: allGrpNodeList.append(allGrp) allGrpList = allGrpNodeList </DeepExtract> if allGrpList: if len(allGrpList) > 1: self.allGrp = cmds.confirmDialog(title=self.langDic[self.langName]['m166_selAllControls'], message=self.langDic[self.langName]['m168_wichAllGrp'], button=allGrpList) else: <DeepExtract> if cmds.objExists(self.allGrp): if cmds.objExists(self.allGrp + '.' + self.masterAttr): if cmds.getAttr(self.allGrp + '.' + self.masterAttr) == 1: self.allGrp = self.allGrp self.allGrp = False </DeepExtract> if self.allGrp: callAction = True else: <DeepExtract> allNodeList = cmds.ls(selection=False) if allNodeList: for item in allNodeList: if self.dpCheckAllGrp(item): self.allGrp = item self.allGrp = False </DeepExtract> if self.allGrp: callAction = True if callAction: <DeepExtract> ctrlsToSelectList = [] if cmds.objExists(self.allGrp + '.' + self.ctrlsAttr): ctrlsAttr = cmds.getAttr(self.allGrp + '.' + self.ctrlsAttr) if ctrlsAttr: currentNamespace = '' if ':' in self.allGrp: currentNamespace = self.allGrp[:self.allGrp.find(':')] ctrlsList = ctrlsAttr.split(';') if ctrlsList: for ctrlName in ctrlsList: if ctrlName: if currentNamespace: ctrlsToSelectList.append(currentNamespace + ':' + ctrlName) else: ctrlsToSelectList.append(ctrlName) cmds.select(ctrlsToSelectList) print(self.langDic[self.langName]['m169_selectedCtrls'] + str(ctrlsToSelectList)) else: mel.eval('warning "' + self.langDic[self.langName]['e019_notFoundAllGrp'] + '";') </DeepExtract> else: mel.eval('warning "' + self.langDic[self.langName]['e019_notFoundAllGrp'] + '";')
def dpMain(*args): """ Main function. Check existen nodes and call the scripted function. """ callAction = False selList = cmds.ls(selection=True) if selList: for item in selList: if self.dpCheckAllGrp(item): self.allGrp = item for item in selList: relativeList = cmds.listRelatives(item, allParents=True, type='transform') while relativeList: if self.dpCheckAllGrp(relativeList[0]): self.allGrp = relativeList[0] relativeList = cmds.listRelatives(relativeList[0], allParents=True, type='transform') self.allGrp = False if self.allGrp: callAction = True else: allGrpNodeList = [] allNodeList = cmds.ls(selection=False, type='transform') for nodeName in allNodeList: allGrp = self.dpCheckAllGrp(nodeName) if allGrp: allGrpNodeList.append(allGrp) allGrpList = allGrpNodeList if allGrpList: if len(allGrpList) > 1: self.allGrp = cmds.confirmDialog(title=self.langDic[self.langName]['m166_selAllControls'], message=self.langDic[self.langName]['m168_wichAllGrp'], button=allGrpList) else: if cmds.objExists(self.allGrp): if cmds.objExists(self.allGrp + '.' + self.masterAttr): if cmds.getAttr(self.allGrp + '.' + self.masterAttr) == 1: self.allGrp = self.allGrp self.allGrp = False if self.allGrp: callAction = True else: allNodeList = cmds.ls(selection=False) if allNodeList: for item in allNodeList: if self.dpCheckAllGrp(item): self.allGrp = item self.allGrp = False if self.allGrp: callAction = True if callAction: ctrlsToSelectList = [] if cmds.objExists(self.allGrp + '.' + self.ctrlsAttr): ctrlsAttr = cmds.getAttr(self.allGrp + '.' + self.ctrlsAttr) if ctrlsAttr: currentNamespace = '' if ':' in self.allGrp: currentNamespace = self.allGrp[:self.allGrp.find(':')] ctrlsList = ctrlsAttr.split(';') if ctrlsList: for ctrlName in ctrlsList: if ctrlName: if currentNamespace: ctrlsToSelectList.append(currentNamespace + ':' + ctrlName) else: ctrlsToSelectList.append(ctrlName) cmds.select(ctrlsToSelectList) print(self.langDic[self.langName]['m169_selectedCtrls'] + str(ctrlsToSelectList)) else: mel.eval('warning "' + self.langDic[self.langName]['e019_notFoundAllGrp'] + '";') else: mel.eval('warning "' + self.langDic[self.langName]['e019_notFoundAllGrp'] + '";')
dpAutoRigSystem
positive
def _core_network(self, l_p, h_p, x_t): """ Parameters: x_t - 28x28 image l_p - 2x1 focus vector h_p - 256x1 vector Returns: h_t, 256x1 vector """ <DeepExtract> sensor_output = self._refined_glimpse_sensor(x_t, l_p) sensor_output = T.flatten(sensor_output) h_g = self._relu(T.dot(sensor_output, self.W_g0)) h_l = self._relu(T.dot(l_p, self.W_g1)) g = self._relu(T.dot(h_g, self.W_g2_hg) + T.dot(h_l, self.W_g2_hl)) g_t = g </DeepExtract> h_t = self._tanh(T.dot(g_t, self.W_h_g) + T.dot(h_p, self.W_h) + self.B_h) <DeepExtract> l_t = T.dot(h_t, self.W_l) </DeepExtract> if not self.disable_reinforce: sampled_l_t = self._sample_gaussian(l_t, self.cov) <DeepExtract> norm2d_var = 1.0 / T.sqrt((2 * np.pi) ** 2 * self.cov_det_var) * T.exp(-0.5 * (disconnected_grad(sampled_l_t) - l_t).T.dot(self.cov_inv_var).dot(disconnected_grad(sampled_l_t) - l_t)) sampled_pdf = norm2d_var </DeepExtract> wl_grad = T.grad(T.log(sampled_pdf), self.W_l) else: sampled_l_t = l_t wl_grad = self.W_l if self.random_glimpse and self.disable_reinforce: sampled_l_t = self.srng.uniform((2,), low=-1.7, high=1.7) <DeepExtract> z = self._relu(T.dot(h_t, self.W_a) + self.B_a) a_t = self._softmax(z) </DeepExtract> return (sampled_l_t, h_t, a_t, wl_grad)
def _core_network(self, l_p, h_p, x_t): """ Parameters: x_t - 28x28 image l_p - 2x1 focus vector h_p - 256x1 vector Returns: h_t, 256x1 vector """ sensor_output = self._refined_glimpse_sensor(x_t, l_p) sensor_output = T.flatten(sensor_output) h_g = self._relu(T.dot(sensor_output, self.W_g0)) h_l = self._relu(T.dot(l_p, self.W_g1)) g = self._relu(T.dot(h_g, self.W_g2_hg) + T.dot(h_l, self.W_g2_hl)) g_t = g h_t = self._tanh(T.dot(g_t, self.W_h_g) + T.dot(h_p, self.W_h) + self.B_h) l_t = T.dot(h_t, self.W_l) if not self.disable_reinforce: sampled_l_t = self._sample_gaussian(l_t, self.cov) norm2d_var = 1.0 / T.sqrt((2 * np.pi) ** 2 * self.cov_det_var) * T.exp(-0.5 * (disconnected_grad(sampled_l_t) - l_t).T.dot(self.cov_inv_var).dot(disconnected_grad(sampled_l_t) - l_t)) sampled_pdf = norm2d_var wl_grad = T.grad(T.log(sampled_pdf), self.W_l) else: sampled_l_t = l_t wl_grad = self.W_l if self.random_glimpse and self.disable_reinforce: sampled_l_t = self.srng.uniform((2,), low=-1.7, high=1.7) z = self._relu(T.dot(h_t, self.W_a) + self.B_a) a_t = self._softmax(z) return (sampled_l_t, h_t, a_t, wl_grad)
deepy
positive
def check_device_state(self, device_id, state_name): <DeepExtract> devices = requests.get('https://{host_uri}/{device_list_endpoint}'.format(host_uri=self.HOST_URI, device_list_endpoint=self.DEVICE_LIST_ENDPOINT), headers={'MyQApplicationId': self.APP_ID, 'SecurityToken': self.myq_security_token}) devices = devices.json()['Devices'] </DeepExtract> for dev in devices: if str(dev['MyQDeviceId']) == str(device_id): for attribute in dev['Attributes']: if attribute['AttributeDisplayName'] == state_name: door_state = attribute['Value'] return door_state
def check_device_state(self, device_id, state_name): devices = requests.get('https://{host_uri}/{device_list_endpoint}'.format(host_uri=self.HOST_URI, device_list_endpoint=self.DEVICE_LIST_ENDPOINT), headers={'MyQApplicationId': self.APP_ID, 'SecurityToken': self.myq_security_token}) devices = devices.json()['Devices'] for dev in devices: if str(dev['MyQDeviceId']) == str(device_id): for attribute in dev['Attributes']: if attribute['AttributeDisplayName'] == state_name: door_state = attribute['Value'] return door_state
Alexa-MyQGarage
positive
def test(): <DeepExtract> cfg = {'out_planes': [200, 400, 800], 'num_blocks': [4, 8, 4], 'groups': 2} net = ShuffleNet(cfg) </DeepExtract> x = torch.randn(1, 3, 32, 32) y = net(x) print(y)
def test(): cfg = {'out_planes': [200, 400, 800], 'num_blocks': [4, 8, 4], 'groups': 2} net = ShuffleNet(cfg) x = torch.randn(1, 3, 32, 32) y = net(x) print(y)
dhp
positive
def next(self): """ Default implementation for built-in backtrader method. Defines one step environment routine; Handles order execution logic according to action received. Note that orders can only be submitted for data_lines in action_space (assets). `self.action` attr. is updated by btgym.server._BTgymAnalyzer, and `None` actions are emitted while doing `skip_frame` loop. """ <DeepExtract> current_value = self.env.broker.get_value() norm_state = self.get_normalisation() positions = [self.env.broker.getposition(data) for data in self.datas] exposure = sum([abs(pos.size) for pos in positions]) self.normalizer = 1 / np.clip(norm_state.up_interval - norm_state.low_interval, 1e-08, None) for (key, method) in self.collection_get_broker_stat_methods.items(): update = method(current_value=current_value, positions=positions, exposure=exposure, lower_bound=norm_state.low_interval, upper_bound=norm_state.up_interval, normalizer=self.normalizer) self.broker_stat[key] = np.concatenate([self.broker_stat[key][1:], np.asarray([float(update)])]) self.trade_just_closed = False self.trade_result = 0 </DeepExtract> if '_skip_this' in self.action.keys(): if self.action_repeated < self.num_action_repeats: self.next_process_fn(self.action_to_repeat) self.action_repeated += 1 else: self.next_process_fn(self.action) self.action_repeated = 0 self.action_to_repeat = self.action
def next(self): """ Default implementation for built-in backtrader method. Defines one step environment routine; Handles order execution logic according to action received. Note that orders can only be submitted for data_lines in action_space (assets). `self.action` attr. is updated by btgym.server._BTgymAnalyzer, and `None` actions are emitted while doing `skip_frame` loop. """ current_value = self.env.broker.get_value() norm_state = self.get_normalisation() positions = [self.env.broker.getposition(data) for data in self.datas] exposure = sum([abs(pos.size) for pos in positions]) self.normalizer = 1 / np.clip(norm_state.up_interval - norm_state.low_interval, 1e-08, None) for (key, method) in self.collection_get_broker_stat_methods.items(): update = method(current_value=current_value, positions=positions, exposure=exposure, lower_bound=norm_state.low_interval, upper_bound=norm_state.up_interval, normalizer=self.normalizer) self.broker_stat[key] = np.concatenate([self.broker_stat[key][1:], np.asarray([float(update)])]) self.trade_just_closed = False self.trade_result = 0 if '_skip_this' in self.action.keys(): if self.action_repeated < self.num_action_repeats: self.next_process_fn(self.action_to_repeat) self.action_repeated += 1 else: self.next_process_fn(self.action) self.action_repeated = 0 self.action_to_repeat = self.action
btgym
positive
def tree_is_perfect_match(self): """ Returns True if self.trees is a singleton that perfectly matches the words in the utterances (with certain simplifactions to each to accommodate different notation and information). """ if len(self.trees) != 1: return False <DeepExtract> tree_lems = self.tree_lemmas() tree_lems = [x for x in tree_lems if x[1] not in {'-NONE-', '-DFL-'}] tree_lems = [(re.sub('-$', '', x[0]), x[1]) for x in tree_lems] tree_lems = tree_lems </DeepExtract> <DeepExtract> pos_lems = self.pos_lemmas() pos_lems = [x for x in pos_lems if x and len(x) == 2] nontree_nodes = ('^PRP^BES', '^FW', '^MD', '^MD^RB', '^PRP^VBZ', '^WP$', '^NN^HVS', 'NN|VBG', '^DT^BES', '^MD^VB', '^DT^JJ', '^PRP^HVS', '^NN^POS', '^WP^BES', '^NN^BES', 'NN|CD', '^WDT', '^VB^PRP') pos_lems = [x for x in pos_lems if x[1] not in nontree_nodes] pos_lems = [x for x in pos_lems if x[0] != '--'] pos_lems = [(re.sub('-$', '', x[0]), x[1]) for x in pos_lems] pos_lems = pos_lems </DeepExtract> if pos_lems == tree_lems: return True else: return False
def tree_is_perfect_match(self): """ Returns True if self.trees is a singleton that perfectly matches the words in the utterances (with certain simplifactions to each to accommodate different notation and information). """ if len(self.trees) != 1: return False tree_lems = self.tree_lemmas() tree_lems = [x for x in tree_lems if x[1] not in {'-NONE-', '-DFL-'}] tree_lems = [(re.sub('-$', '', x[0]), x[1]) for x in tree_lems] tree_lems = tree_lems pos_lems = self.pos_lemmas() pos_lems = [x for x in pos_lems if x and len(x) == 2] nontree_nodes = ('^PRP^BES', '^FW', '^MD', '^MD^RB', '^PRP^VBZ', '^WP$', '^NN^HVS', 'NN|VBG', '^DT^BES', '^MD^VB', '^DT^JJ', '^PRP^HVS', '^NN^POS', '^WP^BES', '^NN^BES', 'NN|CD', '^WDT', '^VB^PRP') pos_lems = [x for x in pos_lems if x[1] not in nontree_nodes] pos_lems = [x for x in pos_lems if x[0] != '--'] pos_lems = [(re.sub('-$', '', x[0]), x[1]) for x in pos_lems] pos_lems = pos_lems if pos_lems == tree_lems: return True else: return False
dialog-processing
positive
def mergeSort(alist): length = len(alist) mid = length // 2 if length > 1: left = alist[:mid] right = alist[mid:] <DeepExtract> length = len(left) mid = length // 2 if length > 1: left = left[:mid] right = left[mid:] mergeSort(left) mergeSort(right) merge(left, left, right) </DeepExtract> <DeepExtract> length = len(right) mid = length // 2 if length > 1: left = right[:mid] right = right[mid:] mergeSort(left) mergeSort(right) merge(right, left, right) </DeepExtract> <DeepExtract> l = 0 r = 0 i = 0 L_len = len(left) R_len = len(right) while l < L_len and r < R_len: if left[l] < right[r]: alist[i] = left[l] i += 1 l += 1 else: alist[i] = right[r] i += 1 r += 1 while l < L_len: alist[i] = left[l] i += 1 l += 1 while r < R_len: alist[i] = right[r] i += 1 r += 1 </DeepExtract>
def mergeSort(alist): length = len(alist) mid = length // 2 if length > 1: left = alist[:mid] right = alist[mid:] length = len(left) mid = length // 2 if length > 1: left = left[:mid] right = left[mid:] mergeSort(left) mergeSort(right) merge(left, left, right) length = len(right) mid = length // 2 if length > 1: left = right[:mid] right = right[mid:] mergeSort(left) mergeSort(right) merge(right, left, right) l = 0 r = 0 i = 0 L_len = len(left) R_len = len(right) while l < L_len and r < R_len: if left[l] < right[r]: alist[i] = left[l] i += 1 l += 1 else: alist[i] = right[r] i += 1 r += 1 while l < L_len: alist[i] = left[l] i += 1 l += 1 while r < R_len: alist[i] = right[r] i += 1 r += 1
168206
positive
def test_epoch_end(self, outputs: List[Any]) -> None: averaged_epoch_loss = sum([output['loss'] for output in outputs]) / len(outputs) self.log(f'{self.TEST_METRICS_PREFIX}_loss', averaged_epoch_loss, on_step=False, prog_bar=True, on_epoch=True) <DeepExtract> metrics = self._head.get_metrics(True) </DeepExtract> for (key, val) in metrics.items(): if key.startswith('_'): metric_name = self.TEST_METRICS_PREFIX + key else: metric_name = self.TEST_METRICS_PREFIX + '_' + key self.log(metric_name, val, on_step=False, prog_bar=not key.startswith('_'), on_epoch=True)
def test_epoch_end(self, outputs: List[Any]) -> None: averaged_epoch_loss = sum([output['loss'] for output in outputs]) / len(outputs) self.log(f'{self.TEST_METRICS_PREFIX}_loss', averaged_epoch_loss, on_step=False, prog_bar=True, on_epoch=True) metrics = self._head.get_metrics(True) for (key, val) in metrics.items(): if key.startswith('_'): metric_name = self.TEST_METRICS_PREFIX + key else: metric_name = self.TEST_METRICS_PREFIX + '_' + key self.log(metric_name, val, on_step=False, prog_bar=not key.startswith('_'), on_epoch=True)
biome-text
positive
@cache_page(1800) def by_arch(request): <DeepExtract> qs = Package.objects.select_related().values('arch__name', 'repo__name').annotate(count=Count('pk'), csize=Sum('compressed_size'), isize=Sum('installed_size'), flagged=Count('flag_date')).order_by() arches = Arch.objects.values_list('name', flat=True) repos = Repo.objects.values_list('name', flat=True) def build_map(name, arch, repo): key = '%s:%s' % (repo or '', arch or '') data = {'key': key, 'name': name, 'arch': arch, 'repo': repo, 'data': []} arch_groups = {a: build_map(a, a, None) for a in arches} repo_groups = {r: build_map(r, None, r) for r in repos} for row in qs: arch = row['arch__name'] repo = row['repo__name'] values = {'arch': arch, 'repo': repo, 'name': '%s (%s)' % (repo, arch), 'key': '%s:%s' % (repo, arch), 'csize': row['csize'], 'isize': row['isize'], 'count': row['count'], 'flagged': row['flagged']} arch_groups[arch]['data'].append(values) repo_groups[repo]['data'].append(values) data = {'by_arch': {'name': 'Architectures', 'data': list(arch_groups.values())}, 'by_repo': {'name': 'Repositories', 'data': list(repo_groups.values())}} data = data </DeepExtract> to_json = json.dumps(data['by_arch'], ensure_ascii=False) return HttpResponse(to_json, content_type='application/json')
@cache_page(1800) def by_arch(request): qs = Package.objects.select_related().values('arch__name', 'repo__name').annotate(count=Count('pk'), csize=Sum('compressed_size'), isize=Sum('installed_size'), flagged=Count('flag_date')).order_by() arches = Arch.objects.values_list('name', flat=True) repos = Repo.objects.values_list('name', flat=True) def build_map(name, arch, repo): key = '%s:%s' % (repo or '', arch or '') data = {'key': key, 'name': name, 'arch': arch, 'repo': repo, 'data': []} arch_groups = {a: build_map(a, a, None) for a in arches} repo_groups = {r: build_map(r, None, r) for r in repos} for row in qs: arch = row['arch__name'] repo = row['repo__name'] values = {'arch': arch, 'repo': repo, 'name': '%s (%s)' % (repo, arch), 'key': '%s:%s' % (repo, arch), 'csize': row['csize'], 'isize': row['isize'], 'count': row['count'], 'flagged': row['flagged']} arch_groups[arch]['data'].append(values) repo_groups[repo]['data'].append(values) data = {'by_arch': {'name': 'Architectures', 'data': list(arch_groups.values())}, 'by_repo': {'name': 'Repositories', 'data': list(repo_groups.values())}} data = data to_json = json.dumps(data['by_arch'], ensure_ascii=False) return HttpResponse(to_json, content_type='application/json')
archweb
positive
def enum_host_info(self): <DeepExtract> try: ldapConnection = ldap_impacket.LDAPConnection('ldap://%s' % self.host) resp = ldapConnection.search(scope=ldapasn1_impacket.Scope('baseObject'), attributes=['defaultNamingContext', 'dnsHostName'], sizeLimit=0) for item in resp: if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True: continue target = None targetDomain = None baseDN = None try: for attribute in item['attributes']: if str(attribute['type']) == 'defaultNamingContext': baseDN = str(attribute['vals'][0]) targetDomain = sub(',DC=', '.', baseDN[baseDN.lower().find('dc='):], flags=I)[3:] if str(attribute['type']) == 'dnsHostName': target = str(attribute['vals'][0]) except Exception as e: logging.debug('Exception:', exc_info=True) logging.debug('Skipping item, cannot process due to error %s' % str(e)) except OSError as e: (self.target, self.targetDomain, self.baseDN) = [None, None, None] (self.target, self.targetDomain, self.baseDN) = [target, targetDomain, baseDN] </DeepExtract> self.hostname = self.target self.domain = self.targetDomain if self.args.no_smb: self.domain = self.args.domain else: self.local_ip = self.conn.getSMBServer().get_socket().getsockname()[0] try: self.conn.login('', '') except Exception as e: if 'STATUS_NOT_SUPPORTED' in str(e): self.no_ntlm = True pass if not self.no_ntlm: self.domain = self.conn.getServerDNSDomainName() self.hostname = self.conn.getServerName() self.server_os = self.conn.getServerOS() self.signing = self.conn.isSigningRequired() if self.smbv1 else self.conn._SMBConnection._Connection['RequireSigning'] <DeepExtract> try: stringBinding = 'ncacn_ip_tcp:{}[135]'.format(self.host) transport = DCERPCTransportFactory(stringBinding) transport.set_connect_timeout(5) dce = transport.get_dce_rpc() if self.args.kerberos: dce.set_auth_type(RPC_C_AUTHN_GSS_NEGOTIATE) dce.connect() try: dce.bind(MSRPC_UUID_PORTMAP, transfer_syntax=('71710533-BEBA-4937-8319-B5DBEF9CCC36', '1.0')) except (DCERPCException, e): if str(e).find('syntaxes_not_supported') >= 0: dce.disconnect() self.os_arch = 32 else: dce.disconnect() self.os_arch = 64 except Exception as e: logging.debug('Error retrieving os arch of {}: {}'.format(self.host, str(e))) self.os_arch = 0 </DeepExtract> self.output_filename = os.path.expanduser('~/.cme/logs/{}_{}_{}'.format(self.hostname, self.host, datetime.now().strftime('%Y-%m-%d_%H%M%S'))) self.output_filename = self.output_filename.replace(':', '-') if not self.domain: self.domain = self.hostname try: "plaintext_login\n DC's seem to want us to logoff first, windows workstations sometimes reset the connection\n (go home Windows, you're drunk)\n " self.conn.logoff() except: pass if self.args.domain: self.domain = self.args.domain if self.args.local_auth: self.domain = self.hostname <DeepExtract> if not self.args.no_smb: if self.create_smbv1_conn(): return True elif self.create_smbv3_conn(): return True return False else: return True </DeepExtract>
def enum_host_info(self): try: ldapConnection = ldap_impacket.LDAPConnection('ldap://%s' % self.host) resp = ldapConnection.search(scope=ldapasn1_impacket.Scope('baseObject'), attributes=['defaultNamingContext', 'dnsHostName'], sizeLimit=0) for item in resp: if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True: continue target = None targetDomain = None baseDN = None try: for attribute in item['attributes']: if str(attribute['type']) == 'defaultNamingContext': baseDN = str(attribute['vals'][0]) targetDomain = sub(',DC=', '.', baseDN[baseDN.lower().find('dc='):], flags=I)[3:] if str(attribute['type']) == 'dnsHostName': target = str(attribute['vals'][0]) except Exception as e: logging.debug('Exception:', exc_info=True) logging.debug('Skipping item, cannot process due to error %s' % str(e)) except OSError as e: (self.target, self.targetDomain, self.baseDN) = [None, None, None] (self.target, self.targetDomain, self.baseDN) = [target, targetDomain, baseDN] self.hostname = self.target self.domain = self.targetDomain if self.args.no_smb: self.domain = self.args.domain else: self.local_ip = self.conn.getSMBServer().get_socket().getsockname()[0] try: self.conn.login('', '') except Exception as e: if 'STATUS_NOT_SUPPORTED' in str(e): self.no_ntlm = True pass if not self.no_ntlm: self.domain = self.conn.getServerDNSDomainName() self.hostname = self.conn.getServerName() self.server_os = self.conn.getServerOS() self.signing = self.conn.isSigningRequired() if self.smbv1 else self.conn._SMBConnection._Connection['RequireSigning'] try: stringBinding = 'ncacn_ip_tcp:{}[135]'.format(self.host) transport = DCERPCTransportFactory(stringBinding) transport.set_connect_timeout(5) dce = transport.get_dce_rpc() if self.args.kerberos: dce.set_auth_type(RPC_C_AUTHN_GSS_NEGOTIATE) dce.connect() try: dce.bind(MSRPC_UUID_PORTMAP, transfer_syntax=('71710533-BEBA-4937-8319-B5DBEF9CCC36', '1.0')) except (DCERPCException, e): if str(e).find('syntaxes_not_supported') >= 0: dce.disconnect() self.os_arch = 32 else: dce.disconnect() self.os_arch = 64 except Exception as e: logging.debug('Error retrieving os arch of {}: {}'.format(self.host, str(e))) self.os_arch = 0 self.output_filename = os.path.expanduser('~/.cme/logs/{}_{}_{}'.format(self.hostname, self.host, datetime.now().strftime('%Y-%m-%d_%H%M%S'))) self.output_filename = self.output_filename.replace(':', '-') if not self.domain: self.domain = self.hostname try: "plaintext_login\n DC's seem to want us to logoff first, windows workstations sometimes reset the connection\n (go home Windows, you're drunk)\n " self.conn.logoff() except: pass if self.args.domain: self.domain = self.args.domain if self.args.local_auth: self.domain = self.hostname if not self.args.no_smb: if self.create_smbv1_conn(): return True elif self.create_smbv3_conn(): return True return False else: return True
CrackMapExec
positive
def run(self): self.buffer += 'digraph G {' self.buffer += DOT_STYLE if isinstance(self.g, DiGraph): for edge in self.g.edges: <DeepExtract> labels = '' if edge.kind is not None: data = '' if edge.data is None else str(edge.data) labels = '[label="%s - %s"]' % (edge.kind, data) nid1 = self.get_node_id(edge.source) nid2 = self.get_node_id(edge.dest) self.buffer += '%s -> %s %s;\n' % (nid1, nid2, labels) </DeepExtract> elif isinstance(self.g, Tree): root = self.g.root worklist = [root] while worklist: current = worklist.pop(0) if current.has_children(): num_children = current.num_children() i = 0 while i < num_children: child = current.children[i] if child is None: i += 1 continue <DeepExtract> nid1 = self.get_node_id(current) nid2 = self.get_node_id(child) self.buffer += '%s -> %s;\n' % (nid1, nid2) </DeepExtract> worklist.insert(0, child) i += 1 else: <DeepExtract> if current not in self.node_ids: self.node_ids[current] = 'node_%d' % current.gid self.add_node(current, self.node_ids[current]) nid = self.node_ids[current] </DeepExtract> self.buffer += '}\n'
def run(self): self.buffer += 'digraph G {' self.buffer += DOT_STYLE if isinstance(self.g, DiGraph): for edge in self.g.edges: labels = '' if edge.kind is not None: data = '' if edge.data is None else str(edge.data) labels = '[label="%s - %s"]' % (edge.kind, data) nid1 = self.get_node_id(edge.source) nid2 = self.get_node_id(edge.dest) self.buffer += '%s -> %s %s;\n' % (nid1, nid2, labels) elif isinstance(self.g, Tree): root = self.g.root worklist = [root] while worklist: current = worklist.pop(0) if current.has_children(): num_children = current.num_children() i = 0 while i < num_children: child = current.children[i] if child is None: i += 1 continue nid1 = self.get_node_id(current) nid2 = self.get_node_id(child) self.buffer += '%s -> %s;\n' % (nid1, nid2) worklist.insert(0, child) i += 1 else: if current not in self.node_ids: self.node_ids[current] = 'node_%d' % current.gid self.add_node(current, self.node_ids[current]) nid = self.node_ids[current] self.buffer += '}\n'
equip
positive
@force_fp32(apply_to='cls_score') def _merge_score(self, cls_score): """ Do softmax in each bin. Decay the score of normal classes with the score of fg. From v1. """ num_proposals = cls_score.shape[0] <DeepExtract> new_preds = [] num_bins = self.pred_slice.shape[0] for i in range(num_bins): start = self.pred_slice[i, 0] length = self.pred_slice[i, 1] sliced_pred = cls_score.narrow(1, start, length) new_preds.append(sliced_pred) new_preds = new_preds </DeepExtract> new_scores = [F.softmax(pred, dim=1) for pred in new_preds] bg_score = new_scores[0] fg_score = new_scores[1:] fg_merge = torch.zeros((num_proposals, self.num_classes)).cuda() merge = torch.zeros((num_proposals, self.num_classes)).cuda() for (i, split) in enumerate(self.fg_splits): fg_merge[:, split] = fg_score[i][:, 1:] weight = bg_score.narrow(1, 1, 1) fg_merge = weight * fg_merge merge[:, 0] = bg_score[:, 0] merge[:, 1:] = fg_merge[:, 1:] return merge
@force_fp32(apply_to='cls_score') def _merge_score(self, cls_score): """ Do softmax in each bin. Decay the score of normal classes with the score of fg. From v1. """ num_proposals = cls_score.shape[0] new_preds = [] num_bins = self.pred_slice.shape[0] for i in range(num_bins): start = self.pred_slice[i, 0] length = self.pred_slice[i, 1] sliced_pred = cls_score.narrow(1, start, length) new_preds.append(sliced_pred) new_preds = new_preds new_scores = [F.softmax(pred, dim=1) for pred in new_preds] bg_score = new_scores[0] fg_score = new_scores[1:] fg_merge = torch.zeros((num_proposals, self.num_classes)).cuda() merge = torch.zeros((num_proposals, self.num_classes)).cuda() for (i, split) in enumerate(self.fg_splits): fg_merge[:, split] = fg_score[i][:, 1:] weight = bg_score.narrow(1, 1, 1) fg_merge = weight * fg_merge merge[:, 0] = bg_score[:, 0] merge[:, 1:] = fg_merge[:, 1:] return merge
BalancedGroupSoftmax
positive
def __init__(self, kvs, delete_on_exit=True): <DeepExtract> (fd, fname) = tempfile.mkstemp('.mat', prefix='ao_', dir=dir) os.close(fd) if contents is not None: make_file(fname, contents) self.fname = os.path.abspath(fname) </DeepExtract> self.delete_on_exit = delete_on_exit scipy.io.savemat(self.fname, kvs)
def __init__(self, kvs, delete_on_exit=True): (fd, fname) = tempfile.mkstemp('.mat', prefix='ao_', dir=dir) os.close(fd) if contents is not None: make_file(fname, contents) self.fname = os.path.abspath(fname) self.delete_on_exit = delete_on_exit scipy.io.savemat(self.fname, kvs)
avobjects
positive
def train_step(self, data): """One training step Arguments: data {dict of data} -- required keys and values: 'X' {LongTensor [batch_size, history_len, max_x_sent_len]} -- token ids of sentences 'X_floor' {LongTensor [batch_size, history_len]} -- floors of sentences 'Y' {LongTensor [batch_size, max_sent_len]} -- label ids of corresponding tokens Returns: dict of data -- returned keys and values 'loss' {FloatTensor []} -- loss to backword dict of statistics -- returned keys and values 'loss' {float} -- batch loss """ (X, Y) = (data['X'], data['Y']) X_floor = data['X_floor'] Y_in = Y[:, :-1].contiguous() Y_out = Y[:, 1:].contiguous() batch_size = X.size(0) max_y_len = Y_out.size(1) <DeepExtract> (batch_size, history_len, max_sent_len) = X.size() input_lens = (X != self.pad_token_id).sum(-1) dial_lens = (input_lens > 0).long().sum(1) flat_inputs = X.view(batch_size * history_len, max_sent_len) flat_input_lens = input_lens.view(batch_size * history_len) (word_encodings, _, sent_encodings) = self.sent_encoder(flat_inputs, flat_input_lens) word_encodings = word_encodings.view(batch_size, history_len, max_sent_len, -1) sent_encodings = sent_encodings.view(batch_size, history_len, -1) tgt_floors = [] tgt_word_encodings = [] for (dial_idx, dial_len) in enumerate(dial_lens): tgt_floors.append(X_floor[dial_idx, dial_len - 1]) tgt_word_encodings.append(word_encodings[dial_idx, dial_len - 1, :, :]) tgt_floors = torch.stack(tgt_floors, 0) tgt_word_encodings = torch.stack(tgt_word_encodings, 0) src_floors = X_floor.view(-1) tgt_floors = tgt_floors.unsqueeze(1).repeat(1, history_len).view(-1) sent_encodings = sent_encodings.view(batch_size * history_len, -1) sent_encodings = self.floor_encoder(sent_encodings, src_floors=src_floors, tgt_floors=tgt_floors) sent_encodings = sent_encodings.view(batch_size, history_len, -1) (_, _, dial_encodings) = self.dial_encoder(sent_encodings, dial_lens) (word_encodings, sent_encodings, dial_encodings, tgt_word_encodings) = (word_encodings, sent_encodings, dial_encodings, tgt_word_encodings) </DeepExtract> if self.attention_type == 'word': attn_keys = word_encodings.view(batch_size, -1, word_encodings.size(-1)) attn_mask = self._get_attn_mask(X).view(batch_size, -1) elif self.attention_type == 'sent': attn_keys = sent_encodings.view(batch_size, -1, sent_encodings.size(-1)) attn_mask = (X != self.pad_token_id).sum(-1) > 0 decoder_ret_dict = self._decode(dec_inputs=Y_in, word_encodings=tgt_word_encodings, sent_encodings=dial_encodings, attn_ctx=attn_keys, attn_mask=attn_mask) loss = 0 logits = decoder_ret_dict['logits'] label_losses = F.cross_entropy(logits.view(-1, self.label_vocab_size), Y_out.view(-1), ignore_index=self.pad_label_id, reduction='none').view(batch_size, max_y_len) sent_loss = label_losses.sum(1).mean(0) loss += sent_loss ret_data = {'loss': loss} ret_stat = {'loss': loss.item()} return (ret_data, ret_stat)
def train_step(self, data): """One training step Arguments: data {dict of data} -- required keys and values: 'X' {LongTensor [batch_size, history_len, max_x_sent_len]} -- token ids of sentences 'X_floor' {LongTensor [batch_size, history_len]} -- floors of sentences 'Y' {LongTensor [batch_size, max_sent_len]} -- label ids of corresponding tokens Returns: dict of data -- returned keys and values 'loss' {FloatTensor []} -- loss to backword dict of statistics -- returned keys and values 'loss' {float} -- batch loss """ (X, Y) = (data['X'], data['Y']) X_floor = data['X_floor'] Y_in = Y[:, :-1].contiguous() Y_out = Y[:, 1:].contiguous() batch_size = X.size(0) max_y_len = Y_out.size(1) (batch_size, history_len, max_sent_len) = X.size() input_lens = (X != self.pad_token_id).sum(-1) dial_lens = (input_lens > 0).long().sum(1) flat_inputs = X.view(batch_size * history_len, max_sent_len) flat_input_lens = input_lens.view(batch_size * history_len) (word_encodings, _, sent_encodings) = self.sent_encoder(flat_inputs, flat_input_lens) word_encodings = word_encodings.view(batch_size, history_len, max_sent_len, -1) sent_encodings = sent_encodings.view(batch_size, history_len, -1) tgt_floors = [] tgt_word_encodings = [] for (dial_idx, dial_len) in enumerate(dial_lens): tgt_floors.append(X_floor[dial_idx, dial_len - 1]) tgt_word_encodings.append(word_encodings[dial_idx, dial_len - 1, :, :]) tgt_floors = torch.stack(tgt_floors, 0) tgt_word_encodings = torch.stack(tgt_word_encodings, 0) src_floors = X_floor.view(-1) tgt_floors = tgt_floors.unsqueeze(1).repeat(1, history_len).view(-1) sent_encodings = sent_encodings.view(batch_size * history_len, -1) sent_encodings = self.floor_encoder(sent_encodings, src_floors=src_floors, tgt_floors=tgt_floors) sent_encodings = sent_encodings.view(batch_size, history_len, -1) (_, _, dial_encodings) = self.dial_encoder(sent_encodings, dial_lens) (word_encodings, sent_encodings, dial_encodings, tgt_word_encodings) = (word_encodings, sent_encodings, dial_encodings, tgt_word_encodings) if self.attention_type == 'word': attn_keys = word_encodings.view(batch_size, -1, word_encodings.size(-1)) attn_mask = self._get_attn_mask(X).view(batch_size, -1) elif self.attention_type == 'sent': attn_keys = sent_encodings.view(batch_size, -1, sent_encodings.size(-1)) attn_mask = (X != self.pad_token_id).sum(-1) > 0 decoder_ret_dict = self._decode(dec_inputs=Y_in, word_encodings=tgt_word_encodings, sent_encodings=dial_encodings, attn_ctx=attn_keys, attn_mask=attn_mask) loss = 0 logits = decoder_ret_dict['logits'] label_losses = F.cross_entropy(logits.view(-1, self.label_vocab_size), Y_out.view(-1), ignore_index=self.pad_label_id, reduction='none').view(batch_size, max_y_len) sent_loss = label_losses.sum(1).mean(0) loss += sent_loss ret_data = {'loss': loss} ret_stat = {'loss': loss.item()} return (ret_data, ret_stat)
dialog-processing
positive
def metadata_action(args: argparse.Namespace) -> int: try: r = acd_client.get_metadata(args.node, args.assets) <DeepExtract> print(json.dumps(r, indent=4, sort_keys=True)) </DeepExtract> except RequestError as e: print(e) return INVALID_ARG_RETVAL
def metadata_action(args: argparse.Namespace) -> int: try: r = acd_client.get_metadata(args.node, args.assets) print(json.dumps(r, indent=4, sort_keys=True)) except RequestError as e: print(e) return INVALID_ARG_RETVAL
acd_cli
positive
def scalar_jacfunc(vs, obj, obj_scalar, free_variables): if not hasattr(scalar_jacfunc, 'vs'): scalar_jacfunc.vs = vs * 0 + 1e+16 if np.max(np.abs(vs - scalar_jacfunc.vs)) == 0: return scalar_jacfunc.J <DeepExtract> cur = 0 changed = False for (idx, freevar) in enumerate(free_variables): sz = freevar.r.size newvals = vs[cur:cur + sz].copy().reshape(free_variables[idx].shape) if np.max(np.abs(newvals - free_variables[idx]).ravel()) > 0: free_variables[idx][:] = newvals changed = True cur += sz methods_without_callback = ('anneal', 'powell', 'cobyla', 'slsqp') if callback is not None and changed and (method.lower() in methods_without_callback): callback(None) return changed </DeepExtract> if True: result = np.concatenate([np.array(obj_scalar.lop(wrt, np.array([[1]]))).ravel() for wrt in free_variables]) else: jacs = [obj_scalar.dr_wrt(wrt) for wrt in free_variables] for (idx, jac) in enumerate(jacs): if sp.issparse(jac): jacs[idx] = jacs[idx].todense() result = np.concatenate([jac.ravel() for jac in jacs]) scalar_jacfunc.J = result scalar_jacfunc.vs = vs return result.ravel()
def scalar_jacfunc(vs, obj, obj_scalar, free_variables): if not hasattr(scalar_jacfunc, 'vs'): scalar_jacfunc.vs = vs * 0 + 1e+16 if np.max(np.abs(vs - scalar_jacfunc.vs)) == 0: return scalar_jacfunc.J cur = 0 changed = False for (idx, freevar) in enumerate(free_variables): sz = freevar.r.size newvals = vs[cur:cur + sz].copy().reshape(free_variables[idx].shape) if np.max(np.abs(newvals - free_variables[idx]).ravel()) > 0: free_variables[idx][:] = newvals changed = True cur += sz methods_without_callback = ('anneal', 'powell', 'cobyla', 'slsqp') if callback is not None and changed and (method.lower() in methods_without_callback): callback(None) return changed if True: result = np.concatenate([np.array(obj_scalar.lop(wrt, np.array([[1]]))).ravel() for wrt in free_variables]) else: jacs = [obj_scalar.dr_wrt(wrt) for wrt in free_variables] for (idx, jac) in enumerate(jacs): if sp.issparse(jac): jacs[idx] = jacs[idx].todense() result = np.concatenate([jac.ravel() for jac in jacs]) scalar_jacfunc.J = result scalar_jacfunc.vs = vs return result.ravel()
chumpy
positive
def dispatch_admin_message(self, msg): """Dispatches a message originating from an admin to all handlers.""" if msg.command == 'PRIVMSG': <DeepExtract> pass </DeepExtract> if self.is_command(msg): <DeepExtract> cmd_name = msg.params[-1].split(' ')[0] cmd_name = cmd_name.strip(self.get_command_prefix()) cmd_name = cmd_name </DeepExtract> <DeepExtract> handler_name = self.admin_handler_prefix + cmd_name func = getattr(self, handler_name, None) </DeepExtract> if func is not None: func(msg)
def dispatch_admin_message(self, msg): """Dispatches a message originating from an admin to all handlers.""" if msg.command == 'PRIVMSG': pass if self.is_command(msg): cmd_name = msg.params[-1].split(' ')[0] cmd_name = cmd_name.strip(self.get_command_prefix()) cmd_name = cmd_name handler_name = self.admin_handler_prefix + cmd_name func = getattr(self, handler_name, None) if func is not None: func(msg)
botnet
positive
def _init_modules(self): assert cfg.RESNETS.FREEZE_AT in [0, 2, 3, 4, 5] assert cfg.RESNETS.FREEZE_AT <= self.convX for i in range(1, cfg.RESNETS.FREEZE_AT + 1): <DeepExtract> for p in getattr(self, 'res%d' % i).parameters(): p.requires_grad = False </DeepExtract> self.apply(lambda m: freeze_params(m) if isinstance(m, mynn.AffineChannel2d) else None)
def _init_modules(self): assert cfg.RESNETS.FREEZE_AT in [0, 2, 3, 4, 5] assert cfg.RESNETS.FREEZE_AT <= self.convX for i in range(1, cfg.RESNETS.FREEZE_AT + 1): for p in getattr(self, 'res%d' % i).parameters(): p.requires_grad = False self.apply(lambda m: freeze_params(m) if isinstance(m, mynn.AffineChannel2d) else None)
DIoU-pytorch-detectron
positive
def forward_train(self): <DeepExtract> self.d0 = self.net.forward(self.var_ref, self.var_p0, retPerLayer=retPerLayer) </DeepExtract> <DeepExtract> self.d1 = self.net.forward(self.var_ref, self.var_p1, retPerLayer=retPerLayer) </DeepExtract> <DeepExtract> d1_lt_d0 = (self.d1 < self.d0).cpu().data.numpy().flatten() judge_per = self.input_judge.cpu().numpy().flatten() self.acc_r = d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per) </DeepExtract> self.var_judge = Variable(1.0 * self.input_judge).view(self.d0.size()) self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge * 2.0 - 1.0) return self.loss_total
def forward_train(self): self.d0 = self.net.forward(self.var_ref, self.var_p0, retPerLayer=retPerLayer) self.d1 = self.net.forward(self.var_ref, self.var_p1, retPerLayer=retPerLayer) d1_lt_d0 = (self.d1 < self.d0).cpu().data.numpy().flatten() judge_per = self.input_judge.cpu().numpy().flatten() self.acc_r = d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per) self.var_judge = Variable(1.0 * self.input_judge).view(self.d0.size()) self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge * 2.0 - 1.0) return self.loss_total
DASR
positive
def load_by_order(self, path): hdf5_dict = read_hdf5(path) assigned_params = 0 kernel_idx = 0 sigma_idx = 0 mu_idx = 0 gamma_idx = 0 beta_idx = 0 for (k, v) in self.state.model.named_parameters(): if k in hdf5_dict: value = hdf5_dict[k] else: if 'conv.weight' in k: order_key = 'kernel{}'.format(kernel_idx) kernel_idx += 1 elif 'bn.weight' in k: order_key = 'gamma{}'.format(gamma_idx) gamma_idx += 1 elif 'bn.bias' in k: order_key = 'beta{}'.format(beta_idx) beta_idx += 1 else: order_key = None value = None if order_key is None else hdf5_dict[order_key] if value is not None: <DeepExtract> v.data = torch.from_numpy(value).cuda().type(torch.cuda.FloatTensor) </DeepExtract> assigned_params += 1 for (k, v) in self.state.model.named_buffers(): if k in hdf5_dict: value = hdf5_dict[k] else: if 'bn.running_mean' in k: order_key = 'mu{}'.format(mu_idx) mu_idx += 1 elif 'bn.running_var' in k: order_key = 'sigma{}'.format(sigma_idx) sigma_idx += 1 else: order_key = None value = None if order_key is None else hdf5_dict[order_key] if value is not None: <DeepExtract> v.data = torch.from_numpy(value).cuda().type(torch.cuda.FloatTensor) </DeepExtract> assigned_params += 1 msg = 'Assigned {} params '.format(assigned_params) if path is not None: msg += ' from hdf5: {}'.format(path) <DeepExtract> if self.local_rank == 0: print(msg) </DeepExtract>
def load_by_order(self, path): hdf5_dict = read_hdf5(path) assigned_params = 0 kernel_idx = 0 sigma_idx = 0 mu_idx = 0 gamma_idx = 0 beta_idx = 0 for (k, v) in self.state.model.named_parameters(): if k in hdf5_dict: value = hdf5_dict[k] else: if 'conv.weight' in k: order_key = 'kernel{}'.format(kernel_idx) kernel_idx += 1 elif 'bn.weight' in k: order_key = 'gamma{}'.format(gamma_idx) gamma_idx += 1 elif 'bn.bias' in k: order_key = 'beta{}'.format(beta_idx) beta_idx += 1 else: order_key = None value = None if order_key is None else hdf5_dict[order_key] if value is not None: v.data = torch.from_numpy(value).cuda().type(torch.cuda.FloatTensor) assigned_params += 1 for (k, v) in self.state.model.named_buffers(): if k in hdf5_dict: value = hdf5_dict[k] else: if 'bn.running_mean' in k: order_key = 'mu{}'.format(mu_idx) mu_idx += 1 elif 'bn.running_var' in k: order_key = 'sigma{}'.format(sigma_idx) sigma_idx += 1 else: order_key = None value = None if order_key is None else hdf5_dict[order_key] if value is not None: v.data = torch.from_numpy(value).cuda().type(torch.cuda.FloatTensor) assigned_params += 1 msg = 'Assigned {} params '.format(assigned_params) if path is not None: msg += ' from hdf5: {}'.format(path) if self.local_rank == 0: print(msg)
AOFP
positive
def forward(self, output, mask, ind, rotbin, rotres): pred = _tranpose_and_gather_feat(output, ind) <DeepExtract> pred = pred.view(-1, 8) rotbin = rotbin.view(-1, 2) rotres = rotres.view(-1, 2) mask = mask.view(-1, 1) loss_bin1 = compute_bin_loss(pred[:, 0:2], rotbin[:, 0], mask) loss_bin2 = compute_bin_loss(pred[:, 4:6], rotbin[:, 1], mask) loss_res = torch.zeros_like(loss_bin1) if rotbin[:, 0].nonzero().shape[0] > 0: idx1 = rotbin[:, 0].nonzero()[:, 0] valid_output1 = torch.index_select(pred, 0, idx1.long()) valid_target_res1 = torch.index_select(rotres, 0, idx1.long()) loss_sin1 = compute_res_loss(valid_output1[:, 2], torch.sin(valid_target_res1[:, 0])) loss_cos1 = compute_res_loss(valid_output1[:, 3], torch.cos(valid_target_res1[:, 0])) loss_res += loss_sin1 + loss_cos1 if rotbin[:, 1].nonzero().shape[0] > 0: idx2 = rotbin[:, 1].nonzero()[:, 0] valid_output2 = torch.index_select(pred, 0, idx2.long()) valid_target_res2 = torch.index_select(rotres, 0, idx2.long()) loss_sin2 = compute_res_loss(valid_output2[:, 6], torch.sin(valid_target_res2[:, 1])) loss_cos2 = compute_res_loss(valid_output2[:, 7], torch.cos(valid_target_res2[:, 1])) loss_res += loss_sin2 + loss_cos2 loss = loss_bin1 + loss_bin2 + loss_res </DeepExtract> return loss
def forward(self, output, mask, ind, rotbin, rotres): pred = _tranpose_and_gather_feat(output, ind) pred = pred.view(-1, 8) rotbin = rotbin.view(-1, 2) rotres = rotres.view(-1, 2) mask = mask.view(-1, 1) loss_bin1 = compute_bin_loss(pred[:, 0:2], rotbin[:, 0], mask) loss_bin2 = compute_bin_loss(pred[:, 4:6], rotbin[:, 1], mask) loss_res = torch.zeros_like(loss_bin1) if rotbin[:, 0].nonzero().shape[0] > 0: idx1 = rotbin[:, 0].nonzero()[:, 0] valid_output1 = torch.index_select(pred, 0, idx1.long()) valid_target_res1 = torch.index_select(rotres, 0, idx1.long()) loss_sin1 = compute_res_loss(valid_output1[:, 2], torch.sin(valid_target_res1[:, 0])) loss_cos1 = compute_res_loss(valid_output1[:, 3], torch.cos(valid_target_res1[:, 0])) loss_res += loss_sin1 + loss_cos1 if rotbin[:, 1].nonzero().shape[0] > 0: idx2 = rotbin[:, 1].nonzero()[:, 0] valid_output2 = torch.index_select(pred, 0, idx2.long()) valid_target_res2 = torch.index_select(rotres, 0, idx2.long()) loss_sin2 = compute_res_loss(valid_output2[:, 6], torch.sin(valid_target_res2[:, 1])) loss_cos2 = compute_res_loss(valid_output2[:, 7], torch.cos(valid_target_res2[:, 1])) loss_res += loss_sin2 + loss_cos2 loss = loss_bin1 + loss_bin2 + loss_res return loss
centerNet-deep-sort
positive
def test_clean(self): <DeepExtract> self.project.item.allow_overlapping = False self.project.item.save() </DeepExtract> self.spans.clean(self.project.item) self.assertEqual(len(self.spans), 2)
def test_clean(self): self.project.item.allow_overlapping = False self.project.item.save() self.spans.clean(self.project.item) self.assertEqual(len(self.spans), 2)
doccano
positive
def put(self, put_data, resource=None, id=None): url = '%s://%s/%s' % (self._module.params['nitro_protocol'], self._module.params['nsip'], self.api_path) if resource is not None: url = '%s/%s' % (url, resource) if id is not None: url = '%s/%s' % (url, id) data = self._module.jsonify(put_data) (r, info) = fetch_url(self._module, url=url, headers=self._headers, data=data, method='PUT') result = {} <DeepExtract> self.r = r self.info = info if r is not None: result['http_response_body'] = codecs.decode(r.read(), 'utf-8') elif 'body' in info: result['http_response_body'] = codecs.decode(info['body'], 'utf-8') del info['body'] else: result['http_response_body'] = '' result['http_response_data'] = info result['nitro_errorcode'] = None result['nitro_message'] = None result['nitro_severity'] = None if result['http_response_body'] != '': try: data = self._module.from_json(result['http_response_body']) del result['http_response_body'] except ValueError: data = {} result['data'] = data result['nitro_errorcode'] = data.get('errorcode') result['nitro_message'] = data.get('message') result['nitro_severity'] = data.get('severity') </DeepExtract> return result
def put(self, put_data, resource=None, id=None): url = '%s://%s/%s' % (self._module.params['nitro_protocol'], self._module.params['nsip'], self.api_path) if resource is not None: url = '%s/%s' % (url, resource) if id is not None: url = '%s/%s' % (url, id) data = self._module.jsonify(put_data) (r, info) = fetch_url(self._module, url=url, headers=self._headers, data=data, method='PUT') result = {} self.r = r self.info = info if r is not None: result['http_response_body'] = codecs.decode(r.read(), 'utf-8') elif 'body' in info: result['http_response_body'] = codecs.decode(info['body'], 'utf-8') del info['body'] else: result['http_response_body'] = '' result['http_response_data'] = info result['nitro_errorcode'] = None result['nitro_message'] = None result['nitro_severity'] = None if result['http_response_body'] != '': try: data = self._module.from_json(result['http_response_body']) del result['http_response_body'] except ValueError: data = {} result['data'] = data result['nitro_errorcode'] = data.get('errorcode') result['nitro_message'] = data.get('message') result['nitro_severity'] = data.get('severity') return result
citrix-adc-ansible-modules
positive
def forward(self, x): <DeepExtract> kernel_size_effective = self.kernel_size + (self.kernel_size - 1) * (self.dilation - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg padded_inputs = F.pad(x, (pad_beg, pad_end, pad_beg, pad_end)) x_pad = padded_inputs </DeepExtract> if self.use_res_connect: x = x + self.conv(x_pad) else: x = self.conv(x_pad) return x
def forward(self, x): kernel_size_effective = self.kernel_size + (self.kernel_size - 1) * (self.dilation - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg padded_inputs = F.pad(x, (pad_beg, pad_end, pad_beg, pad_end)) x_pad = padded_inputs if self.use_res_connect: x = x + self.conv(x_pad) else: x = self.conv(x_pad) return x
CVPR2020_MANet
positive
def accuracy(predict, label, pre_pro): predict = np.array(predict) label = np.array(label) if len(predict) == 0: return None if pre_pro == 'sm': <DeepExtract> orig_shape = predict.shape if len(predict.shape) > 1: exp_minmax = lambda x: np.exp(predict - np.max(predict)) denom = lambda x: 1.0 / np.sum(predict) predict = np.apply_along_axis(exp_minmax, 1, predict) denominator = np.apply_along_axis(denom, 1, predict) if len(denominator.shape) == 1: denominator = denominator.reshape((denominator.shape[0], 1)) predict = predict * denominator else: x_max = np.max(predict) predict = predict - x_max numerator = np.exp(predict) denominator = 1.0 / np.sum(numerator) predict = numerator.dot(denominator) assert predict.shape == orig_shape predict = predict </DeepExtract> if pre_pro == 'Lsm': predict = np.power(math.e, predict) total = len(predict) true = 0 for i in range(total): result = np.argmax(predict[i]) if result == label[i]: true += 1 return float(true) / float(total)
def accuracy(predict, label, pre_pro): predict = np.array(predict) label = np.array(label) if len(predict) == 0: return None if pre_pro == 'sm': orig_shape = predict.shape if len(predict.shape) > 1: exp_minmax = lambda x: np.exp(predict - np.max(predict)) denom = lambda x: 1.0 / np.sum(predict) predict = np.apply_along_axis(exp_minmax, 1, predict) denominator = np.apply_along_axis(denom, 1, predict) if len(denominator.shape) == 1: denominator = denominator.reshape((denominator.shape[0], 1)) predict = predict * denominator else: x_max = np.max(predict) predict = predict - x_max numerator = np.exp(predict) denominator = 1.0 / np.sum(numerator) predict = numerator.dot(denominator) assert predict.shape == orig_shape predict = predict if pre_pro == 'Lsm': predict = np.power(math.e, predict) total = len(predict) true = 0 for i in range(total): result = np.argmax(predict[i]) if result == label[i]: true += 1 return float(true) / float(total)
Deep-RNN-Framework
positive
def test(self): benji_obj = self.benji_open() store = BenjiStore(benji_obj) addr = ('127.0.0.1', self.SERVER_PORT) read_only = False discard_changes = False self.nbd_server = NbdServer(addr, store, read_only, discard_changes) logger.info('Starting to serve NBD on %s:%s' % (addr[0], addr[1])) <DeepExtract> completed = subprocess.run(args=['sudo', 'modprobe', 'nbd'], stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8', errors='ignore') if check and completed.returncode != 0: self.fail('command {} failed: {}'.format(' '.join(['sudo', 'modprobe', 'nbd']), completed.stdout.replace('\n', '|'))) if success_regexp: if not re.match(success_regexp, completed.stdout, re.I | re.M | re.S): self.fail('command {} failed: {}'.format(' '.join(['sudo', 'modprobe', 'nbd']), completed.stdout.replace('\n', '|'))) </DeepExtract> self.nbd_client_thread = threading.Thread(target=self.nbd_client, daemon=True, args=(self.version_uid,)) self.nbd_client_thread.start() self.nbd_server.serve_forever() self.nbd_client_thread.join() self.assertEqual({self.version_uid[0]}, {version.uid for version in benji_obj.find_versions_with_filter()}) benji_obj.close()
def test(self): benji_obj = self.benji_open() store = BenjiStore(benji_obj) addr = ('127.0.0.1', self.SERVER_PORT) read_only = False discard_changes = False self.nbd_server = NbdServer(addr, store, read_only, discard_changes) logger.info('Starting to serve NBD on %s:%s' % (addr[0], addr[1])) completed = subprocess.run(args=['sudo', 'modprobe', 'nbd'], stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8', errors='ignore') if check and completed.returncode != 0: self.fail('command {} failed: {}'.format(' '.join(['sudo', 'modprobe', 'nbd']), completed.stdout.replace('\n', '|'))) if success_regexp: if not re.match(success_regexp, completed.stdout, re.I | re.M | re.S): self.fail('command {} failed: {}'.format(' '.join(['sudo', 'modprobe', 'nbd']), completed.stdout.replace('\n', '|'))) self.nbd_client_thread = threading.Thread(target=self.nbd_client, daemon=True, args=(self.version_uid,)) self.nbd_client_thread.start() self.nbd_server.serve_forever() self.nbd_client_thread.join() self.assertEqual({self.version_uid[0]}, {version.uid for version in benji_obj.find_versions_with_filter()}) benji_obj.close()
benji
positive
def one_hot(x, num_classes, *, dtype=None, axis=-1): """One-hot encodes the given indicies. Each index in the input ``x`` is encoded as a vector of zeros of length ``num_classes`` with the element at ``index`` set to one:: >>> import jax.numpy as jnp >>> one_hot(jnp.array([0, 1, 2]), 3) Array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=float32) Indicies outside the range [0, num_classes) will be encoded as zeros:: >>> import jax.numpy as jnp >>> one_hot(jnp.array([-1, 3]), 3) Array([[0., 0., 0.], [0., 0., 0.]], dtype=float32) Args: x: A tensor of indices. num_classes: Number of classes in the one-hot dimension. dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). axis: the axis or axes along which the function should be computed. """ num_classes = jax.core.concrete_or_error(int, num_classes, 'The error arose in jax.nn.one_hot argument `num_classes`.') dtype = jax.dtypes.canonicalize_dtype(jnp.float64 if dtype is None else dtype) x = jnp.asarray(x.value if isinstance(x, Array) else x) try: <DeepExtract> axis = operator.index(axis) if not -x.ndim + 1 <= axis < x.ndim + 1: raise ValueError('axis {} is out of bounds for array of dimension {}'.format(axis, x.ndim + 1)) if axis < 0: axis = axis + x.ndim + 1 output_pos_axis = axis </DeepExtract> except TypeError: axis_size = jax.lax.psum(1, axis) if num_classes != axis_size: raise ValueError(f'Expected num_classes to match the size of axis {axis}, but {num_classes} != {axis_size}') from None axis_idx = jax.lax.axis_index(axis) return jnp.asarray(x == axis_idx, dtype=dtype) axis = operator.index(axis) lhs = jax.lax.expand_dims(x, (axis,)) rhs_shape = [1] * x.ndim rhs_shape.insert(output_pos_axis, num_classes) rhs = jax.lax.broadcast_in_dim(jnp.arange(num_classes, dtype=x.dtype), rhs_shape, (output_pos_axis,)) return jnp.asarray(lhs == rhs, dtype=dtype)
def one_hot(x, num_classes, *, dtype=None, axis=-1): """One-hot encodes the given indicies. Each index in the input ``x`` is encoded as a vector of zeros of length ``num_classes`` with the element at ``index`` set to one:: >>> import jax.numpy as jnp >>> one_hot(jnp.array([0, 1, 2]), 3) Array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=float32) Indicies outside the range [0, num_classes) will be encoded as zeros:: >>> import jax.numpy as jnp >>> one_hot(jnp.array([-1, 3]), 3) Array([[0., 0., 0.], [0., 0., 0.]], dtype=float32) Args: x: A tensor of indices. num_classes: Number of classes in the one-hot dimension. dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). axis: the axis or axes along which the function should be computed. """ num_classes = jax.core.concrete_or_error(int, num_classes, 'The error arose in jax.nn.one_hot argument `num_classes`.') dtype = jax.dtypes.canonicalize_dtype(jnp.float64 if dtype is None else dtype) x = jnp.asarray(x.value if isinstance(x, Array) else x) try: axis = operator.index(axis) if not -x.ndim + 1 <= axis < x.ndim + 1: raise ValueError('axis {} is out of bounds for array of dimension {}'.format(axis, x.ndim + 1)) if axis < 0: axis = axis + x.ndim + 1 output_pos_axis = axis except TypeError: axis_size = jax.lax.psum(1, axis) if num_classes != axis_size: raise ValueError(f'Expected num_classes to match the size of axis {axis}, but {num_classes} != {axis_size}') from None axis_idx = jax.lax.axis_index(axis) return jnp.asarray(x == axis_idx, dtype=dtype) axis = operator.index(axis) lhs = jax.lax.expand_dims(x, (axis,)) rhs_shape = [1] * x.ndim rhs_shape.insert(output_pos_axis, num_classes) rhs = jax.lax.broadcast_in_dim(jnp.arange(num_classes, dtype=x.dtype), rhs_shape, (output_pos_axis,)) return jnp.asarray(lhs == rhs, dtype=dtype)
BrainPy
positive
def got_ops_callback(self, ops): for (op, blockheader, block_index, txs) in ops: if op == 'add': <DeepExtract> with self._lock: self.set_last_block_index(block_index) for tx in txs: self._process_confirmed_tx(tx, blockheader, block_index) </DeepExtract> elif op == 'remove': <DeepExtract> with self._lock: self.set_last_block_index(block_index - 1) self.persistence.invalidate_block_index_for_spendables(block_index) </DeepExtract> else: raise Exception('unknown op: %s' % op)
def got_ops_callback(self, ops): for (op, blockheader, block_index, txs) in ops: if op == 'add': with self._lock: self.set_last_block_index(block_index) for tx in txs: self._process_confirmed_tx(tx, blockheader, block_index) elif op == 'remove': with self._lock: self.set_last_block_index(block_index - 1) self.persistence.invalidate_block_index_for_spendables(block_index) else: raise Exception('unknown op: %s' % op)
dashman
positive
def autojoin_cb(data, buffer, args): """Old behaviour: doesn't save empty channel list""" "In fact should also save open buffers with a /part'ed channel" "But I can't believe somebody would want that behaviour" <DeepExtract> items = {} infolist = w.infolist_get('irc_server', '', '') while w.infolist_next(infolist): items[w.infolist_string(infolist, 'name')] = '' w.infolist_free(infolist) for server in items.keys(): keys = [] channels = [] items[server] = '' infolist = w.infolist_get('irc_channel', '', server) while w.infolist_next(infolist): if w.infolist_integer(infolist, 'nicks_count') == 0: continue if w.infolist_integer(infolist, 'type') == 0: channels.append(w.infolist_string(infolist, 'name')) key = w.infolist_string(infolist, 'key') if len(key) > 0: keys.append(key) items[server] = ','.join(channels) if len(keys) > 0: items[server] += ' %s' % ','.join(keys) w.infolist_free(infolist) items = items </DeepExtract> for (server, channels) in items.iteritems(): channels = channels.rstrip(',') if not channels: continue command = '/set irc.server.%s.autojoin %s' % (server, channels) if args == '--run': w.command('', command) else: w.prnt('', command) return w.WEECHAT_RC_OK
def autojoin_cb(data, buffer, args): """Old behaviour: doesn't save empty channel list""" "In fact should also save open buffers with a /part'ed channel" "But I can't believe somebody would want that behaviour" items = {} infolist = w.infolist_get('irc_server', '', '') while w.infolist_next(infolist): items[w.infolist_string(infolist, 'name')] = '' w.infolist_free(infolist) for server in items.keys(): keys = [] channels = [] items[server] = '' infolist = w.infolist_get('irc_channel', '', server) while w.infolist_next(infolist): if w.infolist_integer(infolist, 'nicks_count') == 0: continue if w.infolist_integer(infolist, 'type') == 0: channels.append(w.infolist_string(infolist, 'name')) key = w.infolist_string(infolist, 'key') if len(key) > 0: keys.append(key) items[server] = ','.join(channels) if len(keys) > 0: items[server] += ' %s' % ','.join(keys) w.infolist_free(infolist) items = items for (server, channels) in items.iteritems(): channels = channels.rstrip(',') if not channels: continue command = '/set irc.server.%s.autojoin %s' % (server, channels) if args == '--run': w.command('', command) else: w.prnt('', command) return w.WEECHAT_RC_OK
dotfiles
positive
def simple_test(self, img, img_meta, rescale=True): """Simple test with single image.""" <DeepExtract> assert self.test_cfg.mode in ['slide', 'whole'] ori_shape = img_meta[0]['ori_shape'] assert all((_['ori_shape'] == ori_shape for _ in img_meta)) if self.test_cfg.mode == 'slide': seg_logit = self.slide_inference(img, img_meta, rescale) else: seg_logit = self.whole_inference(img, img_meta, rescale) output = F.softmax(seg_logit, dim=1) flip = img_meta[0]['flip'] if flip: flip_direction = img_meta[0]['flip_direction'] assert flip_direction in ['horizontal', 'vertical'] if flip_direction == 'horizontal': output = output.flip(dims=(3,)) elif flip_direction == 'vertical': output = output.flip(dims=(2,)) seg_logit = output </DeepExtract> seg_pred = seg_logit.argmax(dim=1) if torch.onnx.is_in_onnx_export(): seg_pred = seg_pred.unsqueeze(0) return seg_pred seg_pred = seg_pred.cpu().numpy() seg_pred = list(seg_pred) return seg_pred
def simple_test(self, img, img_meta, rescale=True): """Simple test with single image.""" assert self.test_cfg.mode in ['slide', 'whole'] ori_shape = img_meta[0]['ori_shape'] assert all((_['ori_shape'] == ori_shape for _ in img_meta)) if self.test_cfg.mode == 'slide': seg_logit = self.slide_inference(img, img_meta, rescale) else: seg_logit = self.whole_inference(img, img_meta, rescale) output = F.softmax(seg_logit, dim=1) flip = img_meta[0]['flip'] if flip: flip_direction = img_meta[0]['flip_direction'] assert flip_direction in ['horizontal', 'vertical'] if flip_direction == 'horizontal': output = output.flip(dims=(3,)) elif flip_direction == 'vertical': output = output.flip(dims=(2,)) seg_logit = output seg_pred = seg_logit.argmax(dim=1) if torch.onnx.is_in_onnx_export(): seg_pred = seg_pred.unsqueeze(0) return seg_pred seg_pred = seg_pred.cpu().numpy() seg_pred = list(seg_pred) return seg_pred
BPR
positive
def test_geo_value(self): """test whether geo values are valid for specific geo types""" <DeepExtract> rows = [CovidcastTestRow.make_default_row(geo_type='msa', geo_value=MSA[i - 1], value=i * 1.0, stderr=i * 10.0, sample_size=i * 100.0) for i in [1, 2, 3]] + [CovidcastTestRow.make_default_row(geo_type='fips', geo_value=FIPS[i - 4], value=i * 1.0, stderr=i * 10.0, sample_size=i * 100.0) for i in [4, 5, 6]] self._insert_rows(rows) rows = rows </DeepExtract> expected = [row.as_api_compatibility_row_dict() for row in rows[:3]] def fetch(geo_value): <DeepExtract> params = self.params_from_row(rows[0], endpoint='covidcast', **kwargs) Epidata.BASE_URL = BASE_URL response = Epidata.covidcast(**params) response = response </DeepExtract> return response <DeepExtract> response = self.request_based_on_row(rows[0], geo_value=MSA[0]) r = response </DeepExtract> self.assertEqual(r['message'], 'success') self.assertEqual(r['epidata'], expected[0:1]) <DeepExtract> response = self.request_based_on_row(rows[0], geo_value='11111') r = response </DeepExtract> self.assertEqual(r['message'], 'Invalid geo_value(s) 11111 for the requested geo_type msa') <DeepExtract> response = self.request_based_on_row(rows[0], geo_value=f'{MSA[0]},{MSA[1]}') r = response </DeepExtract> self.assertEqual(r['message'], 'success') self.assertEqual(r['epidata'], expected[0:2]) <DeepExtract> response = self.request_based_on_row(rows[0], geo_value=f'{MSA[0]},{MSA[2]}') r = response </DeepExtract> self.assertEqual(r['message'], 'success') self.assertEqual(r['epidata'], [expected[0], expected[2]]) <DeepExtract> response = self.request_based_on_row(rows[0], geo_value=f'{MSA[0]},11111') r = response </DeepExtract> self.assertEqual(r['message'], 'Invalid geo_value(s) 11111 for the requested geo_type msa') <DeepExtract> response = self.request_based_on_row(rows[0], geo_value='') r = response </DeepExtract> self.assertEqual(r['message'], 'geo_value is empty for the requested geo_type msa!') <DeepExtract> response = self.request_based_on_row(rows[0], geo_value=MSA[3]) r = response </DeepExtract> self.assertEqual(r['message'], 'no results')
def test_geo_value(self): """test whether geo values are valid for specific geo types""" rows = [CovidcastTestRow.make_default_row(geo_type='msa', geo_value=MSA[i - 1], value=i * 1.0, stderr=i * 10.0, sample_size=i * 100.0) for i in [1, 2, 3]] + [CovidcastTestRow.make_default_row(geo_type='fips', geo_value=FIPS[i - 4], value=i * 1.0, stderr=i * 10.0, sample_size=i * 100.0) for i in [4, 5, 6]] self._insert_rows(rows) rows = rows expected = [row.as_api_compatibility_row_dict() for row in rows[:3]] def fetch(geo_value): params = self.params_from_row(rows[0], endpoint='covidcast', **kwargs) Epidata.BASE_URL = BASE_URL response = Epidata.covidcast(**params) response = response return response response = self.request_based_on_row(rows[0], geo_value=MSA[0]) r = response self.assertEqual(r['message'], 'success') self.assertEqual(r['epidata'], expected[0:1]) response = self.request_based_on_row(rows[0], geo_value='11111') r = response self.assertEqual(r['message'], 'Invalid geo_value(s) 11111 for the requested geo_type msa') response = self.request_based_on_row(rows[0], geo_value=f'{MSA[0]},{MSA[1]}') r = response self.assertEqual(r['message'], 'success') self.assertEqual(r['epidata'], expected[0:2]) response = self.request_based_on_row(rows[0], geo_value=f'{MSA[0]},{MSA[2]}') r = response self.assertEqual(r['message'], 'success') self.assertEqual(r['epidata'], [expected[0], expected[2]]) response = self.request_based_on_row(rows[0], geo_value=f'{MSA[0]},11111') r = response self.assertEqual(r['message'], 'Invalid geo_value(s) 11111 for the requested geo_type msa') response = self.request_based_on_row(rows[0], geo_value='') r = response self.assertEqual(r['message'], 'geo_value is empty for the requested geo_type msa!') response = self.request_based_on_row(rows[0], geo_value=MSA[3]) r = response self.assertEqual(r['message'], 'no results')
delphi-epidata
positive
def build_fasttree(aln_file, out_file, clean_up=True, nthreads=1, tree_builder_args=None): """ build tree using fasttree """ log_file = out_file + '.log' <DeepExtract> exe = next(filter(shutil.which, ['FastTreeDblMP', 'FastTreeDbl', 'FastTreeMP', 'fasttreeMP', 'FastTree', 'fasttree']), default) if exe is None: print('Unable to find any of %s in PATH=%s' % (['FastTreeDblMP', 'FastTreeDbl', 'FastTreeMP', 'fasttreeMP', 'FastTree', 'fasttree'], os.environ['PATH'])) print('\nHint: You can install the missing program using conda or homebrew or apt-get.\n') raise Exception fasttree = exe </DeepExtract> extra_env = {'OMP_NUM_THREADS': str(nthreads)} call = [fasttree, tree_builder_args, shquote(aln_file), '1>', shquote(out_file), '2>', shquote(log_file)] cmd = ' '.join(call) print('Building a tree via:\n\t' + cmd + '\n\tPrice et al: FastTree 2 - Approximately Maximum-Likelihood Trees for Large Alignments.' + '\n\tPLoS ONE 5(3): e9490. https://doi.org/10.1371/journal.pone.0009490\n') try: run_shell_command(cmd, raise_errors=True, extra_env=extra_env) T = Phylo.read(out_file, 'newick') except Exception as error: print('ERROR: TREE BUILDING FAILED') print(f'ERROR: {error}') if os.path.isfile(log_file): print('Please see the log file for more details: {}'.format(log_file)) T = None return T
def build_fasttree(aln_file, out_file, clean_up=True, nthreads=1, tree_builder_args=None): """ build tree using fasttree """ log_file = out_file + '.log' exe = next(filter(shutil.which, ['FastTreeDblMP', 'FastTreeDbl', 'FastTreeMP', 'fasttreeMP', 'FastTree', 'fasttree']), default) if exe is None: print('Unable to find any of %s in PATH=%s' % (['FastTreeDblMP', 'FastTreeDbl', 'FastTreeMP', 'fasttreeMP', 'FastTree', 'fasttree'], os.environ['PATH'])) print('\nHint: You can install the missing program using conda or homebrew or apt-get.\n') raise Exception fasttree = exe extra_env = {'OMP_NUM_THREADS': str(nthreads)} call = [fasttree, tree_builder_args, shquote(aln_file), '1>', shquote(out_file), '2>', shquote(log_file)] cmd = ' '.join(call) print('Building a tree via:\n\t' + cmd + '\n\tPrice et al: FastTree 2 - Approximately Maximum-Likelihood Trees for Large Alignments.' + '\n\tPLoS ONE 5(3): e9490. https://doi.org/10.1371/journal.pone.0009490\n') try: run_shell_command(cmd, raise_errors=True, extra_env=extra_env) T = Phylo.read(out_file, 'newick') except Exception as error: print('ERROR: TREE BUILDING FAILED') print(f'ERROR: {error}') if os.path.isfile(log_file): print('Please see the log file for more details: {}'.format(log_file)) T = None return T
augur
positive
def get_query_model(name, *args, random_state=None, **kwargs): """Get an instance of the query strategy. Arguments --------- name: str Name of the query strategy. *args: Arguments for the model. **kwargs: Keyword arguments for the model. Returns ------- asreview.query.base.BaseQueryModel Initialized instance of query strategy. """ <DeepExtract> try: query_class = _model_class_from_entry_point(name, entry_name='asreview.models.query') except ValueError: raise ValueError(f"Error: query name '{name}' is not implemented.") </DeepExtract> try: return query_class(*args, random_state=random_state, **kwargs) except TypeError: return query_class(*args, **kwargs)
def get_query_model(name, *args, random_state=None, **kwargs): """Get an instance of the query strategy. Arguments --------- name: str Name of the query strategy. *args: Arguments for the model. **kwargs: Keyword arguments for the model. Returns ------- asreview.query.base.BaseQueryModel Initialized instance of query strategy. """ try: query_class = _model_class_from_entry_point(name, entry_name='asreview.models.query') except ValueError: raise ValueError(f"Error: query name '{name}' is not implemented.") try: return query_class(*args, random_state=random_state, **kwargs) except TypeError: return query_class(*args, **kwargs)
asreview
positive
def _install_tools(env, tools_conf=None): """ Install tools needed for Galaxy along with tool configuration directories needed by Galaxy. """ if not tools_conf: <DeepExtract> with open(_tools_conf_path(env)) as in_handle: full_data = yaml.safe_load(in_handle) tools_conf = full_data </DeepExtract> if _read_boolean(env, 'galaxy_install_dependencies', False): <DeepExtract> if not env.safe_exists(env.galaxy_tools_dir): env.safe_sudo('mkdir -p %s' % env.galaxy_tools_dir) _chown_galaxy(env, env.galaxy_tools_dir) bin_dir = os.path.join(env.galaxy_tools_dir, 'bin') if not env.safe_exists(bin_dir): env.safe_sudo('mkdir -p %s' % bin_dir) _chown_galaxy(env, bin_dir) line = 'export PATH={0}:$PATH'.format(bin_dir) _add_to_profiles(line) if not env.safe_exists(env.galaxy_jars_dir): env.safe_sudo('mkdir -p %s' % env.galaxy_jars_dir) _chown_galaxy(env, env.galaxy_jars_dir) </DeepExtract> <DeepExtract> applications = tools_conf['applications'] or {} defer_errors = env.get('galaxy_tool_defer_errors', True) exceptions = {} for (name, tool_conf) in applications.iteritems(): if not __check_conditional(tool_conf): continue try: _install_application(name, tool_conf) except BaseException as e: exceptions[name] = e if not defer_errors: break if exceptions: for (name, exception) in exceptions.iteritems(): env.logger.warn(FAILED_INSTALL_MESSAGE % name) first_exception = list(exceptions.values())[0] raise first_exception </DeepExtract> _chown_galaxy(env, env.galaxy_tools_dir) _chown_galaxy(env, env.galaxy_jars_dir) if _read_boolean(env, 'galaxy_install_r_packages', False): _install_r_packages(tools_conf)
def _install_tools(env, tools_conf=None): """ Install tools needed for Galaxy along with tool configuration directories needed by Galaxy. """ if not tools_conf: with open(_tools_conf_path(env)) as in_handle: full_data = yaml.safe_load(in_handle) tools_conf = full_data if _read_boolean(env, 'galaxy_install_dependencies', False): if not env.safe_exists(env.galaxy_tools_dir): env.safe_sudo('mkdir -p %s' % env.galaxy_tools_dir) _chown_galaxy(env, env.galaxy_tools_dir) bin_dir = os.path.join(env.galaxy_tools_dir, 'bin') if not env.safe_exists(bin_dir): env.safe_sudo('mkdir -p %s' % bin_dir) _chown_galaxy(env, bin_dir) line = 'export PATH={0}:$PATH'.format(bin_dir) _add_to_profiles(line) if not env.safe_exists(env.galaxy_jars_dir): env.safe_sudo('mkdir -p %s' % env.galaxy_jars_dir) _chown_galaxy(env, env.galaxy_jars_dir) applications = tools_conf['applications'] or {} defer_errors = env.get('galaxy_tool_defer_errors', True) exceptions = {} for (name, tool_conf) in applications.iteritems(): if not __check_conditional(tool_conf): continue try: _install_application(name, tool_conf) except BaseException as e: exceptions[name] = e if not defer_errors: break if exceptions: for (name, exception) in exceptions.iteritems(): env.logger.warn(FAILED_INSTALL_MESSAGE % name) first_exception = list(exceptions.values())[0] raise first_exception _chown_galaxy(env, env.galaxy_tools_dir) _chown_galaxy(env, env.galaxy_jars_dir) if _read_boolean(env, 'galaxy_install_r_packages', False): _install_r_packages(tools_conf)
cloudbiolinux
positive
def gather_options(self): if not self.initialized: parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) <DeepExtract> parser.add_argument('--dataroot', type=str, default='.', help='path to images (should have subfolders train, test etc)') parser.add_argument('--batch_size', type=int, default=1, help='input batch size') parser.add_argument('--loadSize', type=int, default=512, help='scale images to this size') parser.add_argument('--fineSize', type=int, default=512, help='then crop to this size') parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels') parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels') parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer') parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer') parser.add_argument('--netD', type=str, default='basic', help='selects model to use for netD') parser.add_argument('--netG', type=str, default='unet_256', help='selects model to use for netG') parser.add_argument('--nnG', type=int, default=9, help='specify nblock for resnet_nblocks, ndown for unet for unet_ndown') parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers') parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [aligned | single]') parser.add_argument('--model', type=str, default='apdrawing_gan', help='chooses which model to use. 
[apdrawing_gan | test]') parser.add_argument('--use_local', action='store_true', help='use local part network') parser.add_argument('--comb_op', type=int, default=1, help='use min-pooling(1) or max-pooling(0) for overlapping regions') parser.add_argument('--lm_dir', type=str, default='dataset/landmark/ALL', help='path to facial landmarks') parser.add_argument('--bg_dir', type=str, default='dataset/mask/ALL', help='path to background masks') parser.add_argument('--soft_border', type=int, default=0, help='use mask with soft border') parser.add_argument('--EYE_H', type=int, default=40, help='EYE_H') parser.add_argument('--EYE_W', type=int, default=56, help='EYE_W') parser.add_argument('--NOSE_H', type=int, default=48, help='NOSE_H') parser.add_argument('--NOSE_W', type=int, default=48, help='NOSE_W') parser.add_argument('--MOUTH_H', type=int, default=40, help='MOUTH_H') parser.add_argument('--MOUTH_W', type=int, default=64, help='MOUTH_W') parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA') parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') parser.add_argument('--auxiliary_root', type=str, default='auxiliary', help='auxiliary model folder') parser.add_argument('--norm', type=str, default='batch', help='instance normalization or batch normalization') parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') parser.add_argument('--display_winsize', type=int, default=256, help='display window size') parser.add_argument('--display_id', type=int, default=1, help='window id of the web display') parser.add_argument('--display_server', type=str, default='http://localhost', help='visdom server of the web display') parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")') parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display') parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator') parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, only a subset is loaded.') parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]') parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') self.initialized = True parser = parser </DeepExtract> (opt, _) = parser.parse_known_args() if UseTest: opt.model = 'test' model_name = opt.model model_option_setter = models.get_option_setter(model_name) parser = model_option_setter(parser, self.isTrain) (opt, _) = parser.parse_known_args() dataset_name = opt.dataset_mode dataset_option_setter = data.get_option_setter(dataset_name) parser = dataset_option_setter(parser, self.isTrain) self.parser = parser return parser.parse_args()
def gather_options(self): if not self.initialized: parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--dataroot', type=str, default='.', help='path to images (should have subfolders train, test etc)') parser.add_argument('--batch_size', type=int, default=1, help='input batch size') parser.add_argument('--loadSize', type=int, default=512, help='scale images to this size') parser.add_argument('--fineSize', type=int, default=512, help='then crop to this size') parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels') parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels') parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer') parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer') parser.add_argument('--netD', type=str, default='basic', help='selects model to use for netD') parser.add_argument('--netG', type=str, default='unet_256', help='selects model to use for netG') parser.add_argument('--nnG', type=int, default=9, help='specify nblock for resnet_nblocks, ndown for unet for unet_ndown') parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers') parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. [aligned | single]') parser.add_argument('--model', type=str, default='apdrawing_gan', help='chooses which model to use. 
[apdrawing_gan | test]') parser.add_argument('--use_local', action='store_true', help='use local part network') parser.add_argument('--comb_op', type=int, default=1, help='use min-pooling(1) or max-pooling(0) for overlapping regions') parser.add_argument('--lm_dir', type=str, default='dataset/landmark/ALL', help='path to facial landmarks') parser.add_argument('--bg_dir', type=str, default='dataset/mask/ALL', help='path to background masks') parser.add_argument('--soft_border', type=int, default=0, help='use mask with soft border') parser.add_argument('--EYE_H', type=int, default=40, help='EYE_H') parser.add_argument('--EYE_W', type=int, default=56, help='EYE_W') parser.add_argument('--NOSE_H', type=int, default=48, help='NOSE_H') parser.add_argument('--NOSE_W', type=int, default=48, help='NOSE_W') parser.add_argument('--MOUTH_H', type=int, default=40, help='MOUTH_H') parser.add_argument('--MOUTH_W', type=int, default=64, help='MOUTH_W') parser.add_argument('--which_direction', type=str, default='AtoB', help='AtoB or BtoA') parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') parser.add_argument('--auxiliary_root', type=str, default='auxiliary', help='auxiliary model folder') parser.add_argument('--norm', type=str, default='batch', help='instance normalization or batch normalization') parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') parser.add_argument('--display_winsize', type=int, default=256, help='display window size') parser.add_argument('--display_id', type=int, default=1, help='window id of the web display') parser.add_argument('--display_server', type=str, default='http://localhost', help='visdom server of the web display') parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")') parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display') parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator') parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples allowed per dataset. 
If the dataset directory contains more than max_dataset_size, only a subset is loaded.') parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]') parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]') parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}') self.initialized = True parser = parser (opt, _) = parser.parse_known_args() if UseTest: opt.model = 'test' model_name = opt.model model_option_setter = models.get_option_setter(model_name) parser = model_option_setter(parser, self.isTrain) (opt, _) = parser.parse_known_args() dataset_name = opt.dataset_mode dataset_option_setter = data.get_option_setter(dataset_name) parser = dataset_option_setter(parser, self.isTrain) self.parser = parser return parser.parse_args()
dualFace
positive
def esd_pnms(esd, pnms_thresh):
    scores = []
    dets = []
    for ele in esd:
        score = ele['score']
        quad = ele['ke_quad']
        det = np.array([[quad[0][0], quad[0][1]], [quad[1][0], quad[1][1]], [quad[2][0], quad[2][1]], [quad[3][0], quad[3][1]]])
        scores.append(score)
        dets.append(det)
    scores = np.array(scores)
    dets = np.array(dets)
    <DeepExtract>
    pts = []
    for det in dets:
        pts.append([[det[i][0], det[i][1]] for i in range(len(det))])
    order = scores.argsort()[::-1]
    areas = np.zeros(scores.shape)
    order = scores.argsort()[::-1]
    inter_areas = np.zeros((scores.shape[0], scores.shape[0]))
    for il in range(len(pts)):
        poly = Polygon(pts[il])
        areas[il] = poly.area
        for jl in range(il, len(pts)):
            polyj = Polygon(pts[jl])
            try:
                inS = poly.intersection(polyj)
            except:
                print(poly, polyj)
            inter_areas[il][jl] = inS.area
            inter_areas[jl][il] = inS.area
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        ovr = inter_areas[i][order[1:]] / (areas[i] + areas[order[1:]] - inter_areas[i][order[1:]])
        inds = np.where(ovr <= pnms_thresh)[0]
        order = order[inds + 1]
    keep = keep
    </DeepExtract>
    return keep

def esd_pnms(esd, pnms_thresh):
    scores = []
    dets = []
    for ele in esd:
        score = ele['score']
        quad = ele['ke_quad']
        det = np.array([[quad[0][0], quad[0][1]], [quad[1][0], quad[1][1]], [quad[2][0], quad[2][1]], [quad[3][0], quad[3][1]]])
        scores.append(score)
        dets.append(det)
    scores = np.array(scores)
    dets = np.array(dets)
    pts = []
    for det in dets:
        pts.append([[det[i][0], det[i][1]] for i in range(len(det))])
    order = scores.argsort()[::-1]
    areas = np.zeros(scores.shape)
    order = scores.argsort()[::-1]
    inter_areas = np.zeros((scores.shape[0], scores.shape[0]))
    for il in range(len(pts)):
        poly = Polygon(pts[il])
        areas[il] = poly.area
        for jl in range(il, len(pts)):
            polyj = Polygon(pts[jl])
            try:
                inS = poly.intersection(polyj)
            except:
                print(poly, polyj)
            inter_areas[il][jl] = inS.area
            inter_areas[jl][il] = inS.area
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        ovr = inter_areas[i][order[1:]] / (areas[i] + areas[order[1:]] - inter_areas[i][order[1:]])
        inds = np.where(ovr <= pnms_thresh)[0]
        order = order[inds + 1]
    keep = keep
    return keep
Box_Discretization_Network
positive
def Max(self, k):
    """Computes the CDF of the maximum of k selections from this dist.
    k: int
    returns: new Cdf
    """
    <DeepExtract>
    new = copy.copy(self)
    new.d = copy.copy(self.d)
    new.label = label if label is not None else self.label
    cdf = new
    </DeepExtract>
    cdf.ps **= k
    return cdf

def Max(self, k):
    """Computes the CDF of the maximum of k selections from this dist.
    k: int
    returns: new Cdf
    """
    new = copy.copy(self)
    new.d = copy.copy(self.d)
    new.label = label if label is not None else self.label
    cdf = new
    cdf.ps **= k
    return cdf
data-science-ipython-notebooks
positive
def __init__(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
    data = [] if data is None else data
    files = [] if files is None else files
    headers = {} if headers is None else headers
    params = {} if params is None else params
    hooks = {} if hooks is None else hooks
    self.hooks = default_hooks()
    for (k, v) in list(hooks.items()):
        <DeepExtract>
        if k not in self.hooks:
            raise ValueError('Unsupported event specified, with event name "%s"' % k)
        if isinstance(v, Callable):
            self.hooks[k].append(v)
        elif hasattr(v, '__iter__'):
            self.hooks[k].extend((h for h in v if isinstance(h, Callable)))
        </DeepExtract>
    self.method = method
    self.url = url
    self.headers = headers
    self.files = files
    self.data = data
    self.json = json
    self.params = params
    self.auth = auth
    self.cookies = cookies

def __init__(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
    data = [] if data is None else data
    files = [] if files is None else files
    headers = {} if headers is None else headers
    params = {} if params is None else params
    hooks = {} if hooks is None else hooks
    self.hooks = default_hooks()
    for (k, v) in list(hooks.items()):
        if k not in self.hooks:
            raise ValueError('Unsupported event specified, with event name "%s"' % k)
        if isinstance(v, Callable):
            self.hooks[k].append(v)
        elif hasattr(v, '__iter__'):
            self.hooks[k].extend((h for h in v if isinstance(h, Callable)))
    self.method = method
    self.url = url
    self.headers = headers
    self.files = files
    self.data = data
    self.json = json
    self.params = params
    self.auth = auth
    self.cookies = cookies
alexa-sky-hd
positive
def flatten_sequence(self, sequence, gold_snippets=False):
    if sequence[-1] == vocab.EOS_TOK:
        sequence = sequence[:-1]
    if gold_snippets:
        no_snippets_sequence = self.interaction.expand_snippets(sequence)
    else:
        <DeepExtract>
        if sequence[-1] == vocab.EOS_TOK:
            sequence = sequence[:-1]
        no_snippets_sequence = self.interaction.expand_snippets(sequence)
        no_snippets_sequence = sql_util.fix_parentheses(no_snippets_sequence)
        no_snippets_sequence = no_snippets_sequence
        </DeepExtract>
    no_snippets_sequence = sql_util.fix_parentheses(no_snippets_sequence)
    deanon_sequence = self.interaction.deanonymize(no_snippets_sequence, 'sql')
    return deanon_sequence

def flatten_sequence(self, sequence, gold_snippets=False):
    if sequence[-1] == vocab.EOS_TOK:
        sequence = sequence[:-1]
    if gold_snippets:
        no_snippets_sequence = self.interaction.expand_snippets(sequence)
    else:
        if sequence[-1] == vocab.EOS_TOK:
            sequence = sequence[:-1]
        no_snippets_sequence = self.interaction.expand_snippets(sequence)
        no_snippets_sequence = sql_util.fix_parentheses(no_snippets_sequence)
        no_snippets_sequence = no_snippets_sequence
    no_snippets_sequence = sql_util.fix_parentheses(no_snippets_sequence)
    deanon_sequence = self.interaction.deanonymize(no_snippets_sequence, 'sql')
    return deanon_sequence
editsql
positive
def get_children(parent, tag_name):
    if parent is None:
        return []
    <DeepExtract>
    if parent is None:
        parent = self.root
    ret = parent.findall('.//' + self.ns + tag_name)
    </DeepExtract>
    if not ret:
        <DeepExtract>
        if parent is None:
            parent = self.root
        ret_list = parent.findall('.//' + self.ns + tag_name + '-REF')
        </DeepExtract>
        ret = [self.get_short_name_path(item.text) for item in ret_list]
        if len(ret) > 0:
            raise 'use follow_all_ref!'
    return ret

def get_children(parent, tag_name):
    if parent is None:
        return []
    if parent is None:
        parent = self.root
    ret = parent.findall('.//' + self.ns + tag_name)
    if not ret:
        if parent is None:
            parent = self.root
        ret_list = parent.findall('.//' + self.ns + tag_name + '-REF')
        ret = [self.get_short_name_path(item.text) for item in ret_list]
        if len(ret) > 0:
            raise 'use follow_all_ref!'
    return ret
canmatrix
positive
@ddt.data(*CourseSamples.course_ids)
def test_any_activity(self, course_id):
    <DeepExtract>
    raise NotImplementedError
    </DeepExtract>
    <DeepExtract>
    response = self.authenticated_get('/api/v0/courses/{}/recent_activity?activity_type={}'.format(course_id, 'ANY'))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.data, self.get_activity_record(course_id=course_id, activity_type='ANY', count=300))
    </DeepExtract>
    <DeepExtract>
    response = self.authenticated_get('/api/v0/courses/{}/recent_activity?activity_type={}'.format(course_id, 'any'))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.data, self.get_activity_record(course_id=course_id, activity_type='any', count=300))
    </DeepExtract>

@ddt.data(*CourseSamples.course_ids)
def test_any_activity(self, course_id):
    raise NotImplementedError
    response = self.authenticated_get('/api/v0/courses/{}/recent_activity?activity_type={}'.format(course_id, 'ANY'))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.data, self.get_activity_record(course_id=course_id, activity_type='ANY', count=300))
    response = self.authenticated_get('/api/v0/courses/{}/recent_activity?activity_type={}'.format(course_id, 'any'))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.data, self.get_activity_record(course_id=course_id, activity_type='any', count=300))
    </DeepExtract>
edx-analytics-data-api
positive
def regularization(self, train_targets, train_features, coef=None, featselect_featvar=False): """Generate the omgea2 and coef value's. Parameters ---------- train_targets : array Dependent data used for training. train_features : array Independent data used for training. coef : int List of indices in the feature database. """ reg_data = {'result': None} if coef is None: <DeepExtract> omega2_min = float('inf') omega2_list = [] epe_list = [] if self.W2 is None or self.Vh is None: (V, self.W2, self.Vh) = np.linalg.svd(np.dot(train_features.T, train_features), full_matrices=True) if self.cv is 'loocv': (U, W, Vh) = np.linalg.svd(train_features, full_matrices=False) (whigh, wlow) = (np.log(self.W2[0] * 2.0), np.log(self.W2[-1] * 0.5)) basesearchwidth = whigh - wlow omega2_range = [1e-06 * np.exp(wlow)] for pp in np.linspace(wlow, whigh, self.wsteps): omega2_range.append(np.exp(pp)) omega2_range.append(1000000.0 * np.exp(whigh)) for s in range(self.rsteps): if self.cv is 'bootstrap': BS_res = self._bootstrap_master(train_features, train_targets, p, omega2_range, self.Ns) (_, _, epe_list_i, _) = BS_res if self.cv is 'loocv': epe_list_i = self._LOOCV_l(train_features, train_targets, p, omega2_range, U, W) omega2_list += omega2_range epe_list += epe_list_i.tolist() epe_ind = np.argmin(epe_list) omega2_min = omega2_list[epe_ind] if s is 0 and epe_ind is 0 or epe_ind is len(omega2_list) - 1: b = omega2_min logmin_epe = np.log(omega2_min) basesearchwidth = 2 * basesearchwidth / (self.wsteps - 1) wlow = logmin_epe - basesearchwidth * 0.5 whigh = logmin_epe + basesearchwidth * 0.5 omega2_range = [] for pp in np.linspace(wlow, whigh, self.wsteps): omega2_range.append(np.exp(pp)) b = omega2_min </DeepExtract> coef = self.RR(train_features, train_targets, omega2=b, featselect_featvar=featselect_featvar)[0] reg_data['result'] = (coef, b) return reg_data
def regularization(self, train_targets, train_features, coef=None, featselect_featvar=False): """Generate the omgea2 and coef value's. Parameters ---------- train_targets : array Dependent data used for training. train_features : array Independent data used for training. coef : int List of indices in the feature database. """ reg_data = {'result': None} if coef is None: omega2_min = float('inf') omega2_list = [] epe_list = [] if self.W2 is None or self.Vh is None: (V, self.W2, self.Vh) = np.linalg.svd(np.dot(train_features.T, train_features), full_matrices=True) if self.cv is 'loocv': (U, W, Vh) = np.linalg.svd(train_features, full_matrices=False) (whigh, wlow) = (np.log(self.W2[0] * 2.0), np.log(self.W2[-1] * 0.5)) basesearchwidth = whigh - wlow omega2_range = [1e-06 * np.exp(wlow)] for pp in np.linspace(wlow, whigh, self.wsteps): omega2_range.append(np.exp(pp)) omega2_range.append(1000000.0 * np.exp(whigh)) for s in range(self.rsteps): if self.cv is 'bootstrap': BS_res = self._bootstrap_master(train_features, train_targets, p, omega2_range, self.Ns) (_, _, epe_list_i, _) = BS_res if self.cv is 'loocv': epe_list_i = self._LOOCV_l(train_features, train_targets, p, omega2_range, U, W) omega2_list += omega2_range epe_list += epe_list_i.tolist() epe_ind = np.argmin(epe_list) omega2_min = omega2_list[epe_ind] if s is 0 and epe_ind is 0 or epe_ind is len(omega2_list) - 1: b = omega2_min logmin_epe = np.log(omega2_min) basesearchwidth = 2 * basesearchwidth / (self.wsteps - 1) wlow = logmin_epe - basesearchwidth * 0.5 whigh = logmin_epe + basesearchwidth * 0.5 omega2_range = [] for pp in np.linspace(wlow, whigh, self.wsteps): omega2_range.append(np.exp(pp)) b = omega2_min coef = self.RR(train_features, train_targets, omega2=b, featselect_featvar=featselect_featvar)[0] reg_data['result'] = (coef, b) return reg_data
CatLearn
positive
def main(): parser = argparse.ArgumentParser(description='PyTorch Object Detection Training') parser.add_argument('--config-file', default='', metavar='FILE', help='path to config file', type=str) parser.add_argument('--local_rank', type=int, default=0) parser.add_argument('--skip-test', dest='skip_test', help='Do not test the final model', action='store_true') parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER) args = parser.parse_args() num_gpus = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 args.distributed = num_gpus > 1 if args.distributed: torch.cuda.set_device(args.local_rank) torch.distributed.init_process_group(backend='nccl', init_method='env://') synchronize() cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() output_dir = cfg.OUTPUT_DIR if output_dir: mkdir(output_dir) logger = setup_logger('maskrcnn_benchmark', output_dir, get_rank()) logger.info('Using {} GPUs'.format(num_gpus)) logger.info(args) logger.info('Collecting env info (might take some time)') logger.info('\n' + collect_env_info()) logger.info('Loaded configuration file {}'.format(args.config_file)) with open(args.config_file, 'r') as cf: config_str = '\n' + cf.read() logger.info(config_str) logger.info('Running with config:\n{}'.format(cfg)) <DeepExtract> model = build_detection_model(cfg) device = torch.device(cfg.MODEL.DEVICE) model.to(device) optimizers = make_optimizer(cfg, model) schedulers = make_lr_scheduler(cfg, optimizers) if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, broadcast_buffers=False) arguments = {} arguments['iteration'] = 0 output_dir = cfg.OUTPUT_DIR save_to_disk = get_rank() == 0 checkpointer = DetectronCheckpointer(cfg, model, optimizers, schedulers, output_dir, save_to_disk) extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT) arguments.update(extra_checkpoint_data) data_loader = make_data_loader(cfg, is_train=True, is_distributed=args.distributed, start_iter=arguments['iteration']) checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD do_train(model, data_loader, optimizers, schedulers, checkpointer, device, checkpoint_period, arguments) model = model </DeepExtract> if not args.skip_test: <DeepExtract> if args.distributed: model = model.module torch.cuda.empty_cache() iou_types = ('bbox',) if cfg.MODEL.MASK_ON: iou_types = iou_types + ('segm',) if cfg.MODEL.KEYPOINT_ON: iou_types = iou_types + ('keypoints',) output_folders = [None] * len(cfg.DATASETS.TEST) dataset_names = cfg.DATASETS.TEST if cfg.OUTPUT_DIR: for (idx, dataset_name) in enumerate(dataset_names): output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference', dataset_name) mkdir(output_folder) output_folders[idx] = output_folder data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=args.distributed) for (output_folder, dataset_name, data_loader_val) in zip(output_folders, dataset_names, data_loaders_val): inference(model, data_loader_val, dataset_name=dataset_name, iou_types=iou_types, box_only=cfg.MODEL.RPN_ONLY, device=cfg.MODEL.DEVICE, expected_results=cfg.TEST.EXPECTED_RESULTS, expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL, output_folder=output_folder) synchronize() </DeepExtract>
def main(): parser = argparse.ArgumentParser(description='PyTorch Object Detection Training') parser.add_argument('--config-file', default='', metavar='FILE', help='path to config file', type=str) parser.add_argument('--local_rank', type=int, default=0) parser.add_argument('--skip-test', dest='skip_test', help='Do not test the final model', action='store_true') parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER) args = parser.parse_args() num_gpus = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 args.distributed = num_gpus > 1 if args.distributed: torch.cuda.set_device(args.local_rank) torch.distributed.init_process_group(backend='nccl', init_method='env://') synchronize() cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() output_dir = cfg.OUTPUT_DIR if output_dir: mkdir(output_dir) logger = setup_logger('maskrcnn_benchmark', output_dir, get_rank()) logger.info('Using {} GPUs'.format(num_gpus)) logger.info(args) logger.info('Collecting env info (might take some time)') logger.info('\n' + collect_env_info()) logger.info('Loaded configuration file {}'.format(args.config_file)) with open(args.config_file, 'r') as cf: config_str = '\n' + cf.read() logger.info(config_str) logger.info('Running with config:\n{}'.format(cfg)) model = build_detection_model(cfg) device = torch.device(cfg.MODEL.DEVICE) model.to(device) optimizers = make_optimizer(cfg, model) schedulers = make_lr_scheduler(cfg, optimizers) if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, broadcast_buffers=False) arguments = {} arguments['iteration'] = 0 output_dir = cfg.OUTPUT_DIR save_to_disk = get_rank() == 0 checkpointer = DetectronCheckpointer(cfg, model, optimizers, schedulers, output_dir, save_to_disk) extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT) arguments.update(extra_checkpoint_data) data_loader = make_data_loader(cfg, is_train=True, is_distributed=args.distributed, start_iter=arguments['iteration']) checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD do_train(model, data_loader, optimizers, schedulers, checkpointer, device, checkpoint_period, arguments) model = model if not args.skip_test: if args.distributed: model = model.module torch.cuda.empty_cache() iou_types = ('bbox',) if cfg.MODEL.MASK_ON: iou_types = iou_types + ('segm',) if cfg.MODEL.KEYPOINT_ON: iou_types = iou_types + ('keypoints',) output_folders = [None] * len(cfg.DATASETS.TEST) dataset_names = cfg.DATASETS.TEST if cfg.OUTPUT_DIR: for (idx, dataset_name) in enumerate(dataset_names): output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference', dataset_name) mkdir(output_folder) output_folders[idx] = output_folder data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=args.distributed) for (output_folder, dataset_name, data_loader_val) in zip(output_folders, dataset_names, data_loaders_val): inference(model, data_loader_val, dataset_name=dataset_name, iou_types=iou_types, box_only=cfg.MODEL.RPN_ONLY, device=cfg.MODEL.DEVICE, expected_results=cfg.TEST.EXPECTED_RESULTS, expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL, output_folder=output_folder) synchronize() </DeepExtract>
AE-WTN
positive
def start_proxy_process(self):
    <DeepExtract>
    out = [_PROXY_EXE, '-address', self.address, '-tcp-address', self.tcp_address, '-api-url', self.gateway_url + '/api/v1/routes', '-log-level', self.log_level]
    if is_child_process:
        out.append('-is-child-process')
    if bool(self.tls_cert) != bool(self.tls_key):
        raise ValueError('Must set both tls_cert and tls_key')
    if self.tls_cert:
        out.extend(['-tls-cert', self.tls_cert, '-tls-key', self.tls_key])
    command = out
    </DeepExtract>
    <DeepExtract>
    env = os.environ.copy()
    env['DASK_GATEWAY_PROXY_TOKEN'] = self.api_token
    env = env
    </DeepExtract>
    self.log.info('Starting the Dask gateway proxy...')
    proc = subprocess.Popen(command, env=env, stdin=subprocess.PIPE, stdout=None, stderr=None, start_new_session=True)
    self.proxy_process = proc
    self.log.info('Dask gateway proxy started')
    self.log.info('- %s routes listening at %s://%s', 'HTTPS' if self.tls_cert else 'HTTP', 'https' if self.tls_cert else 'http', self.address)
    self.log.info('- Scheduler routes listening at gateway://%s', self.tcp_address)

def start_proxy_process(self):
    out = [_PROXY_EXE, '-address', self.address, '-tcp-address', self.tcp_address, '-api-url', self.gateway_url + '/api/v1/routes', '-log-level', self.log_level]
    if is_child_process:
        out.append('-is-child-process')
    if bool(self.tls_cert) != bool(self.tls_key):
        raise ValueError('Must set both tls_cert and tls_key')
    if self.tls_cert:
        out.extend(['-tls-cert', self.tls_cert, '-tls-key', self.tls_key])
    command = out
    env = os.environ.copy()
    env['DASK_GATEWAY_PROXY_TOKEN'] = self.api_token
    env = env
    self.log.info('Starting the Dask gateway proxy...')
    proc = subprocess.Popen(command, env=env, stdin=subprocess.PIPE, stdout=None, stderr=None, start_new_session=True)
    self.proxy_process = proc
    self.log.info('Dask gateway proxy started')
    self.log.info('- %s routes listening at %s://%s', 'HTTPS' if self.tls_cert else 'HTTP', 'https' if self.tls_cert else 'http', self.address)
    self.log.info('- Scheduler routes listening at gateway://%s', self.tcp_address)
dask-gateway
positive
@model.methodwrap(va=SVa, pid=SPid)
def munmap(self, va, pid):
    <DeepExtract>
    if str(pid).startswith('a.'):
        simsym.assume(pid == False)
    </DeepExtract>
    del self.getproc(pid).va_map[va]
    return {'r': 0}

@model.methodwrap(va=SVa, pid=SPid)
def munmap(self, va, pid):
    if str(pid).startswith('a.'):
        simsym.assume(pid == False)
    del self.getproc(pid).va_map[va]
    return {'r': 0}
commuter
positive
def testBatchGradientDescentNormalizedBacktrackF7PL0(self): epsilon = 12 attack = attacks.batch_gradient_descent.BatchGradientDescent() attack.max_iterations = 10 attack.base_lr = 100 attack.momentum = 0 attack.c = 0 attack.lr_factor = 1.5 attack.normalized = True attack.backtrack = True attack.initialization = attacks.initializations.L0UniformNormInitialization(epsilon) attack.projection = attacks.projections.SequentialProjections([attacks.projections.L0Projection(epsilon), attacks.projections.BoxProjection()]) attack.norm = attacks.norms.L0Norm() <DeepExtract> for (b, (images, labels)) in enumerate(self.adversarialloader): break images = common.torch.as_variable(images, self.cuda).permute(0, 3, 1, 2) labels = common.torch.as_variable(labels, self.cuda) success_rate = 0 for t in range(5): attacks.objectives.UntargetedF7PObjective().set(labels) (perturbations, errors) = attack.run(self.model, images, attacks.objectives.UntargetedF7PObjective()) perturbations = numpy.array([numpy.transpose(perturbations, (0, 2, 3, 1))]) success_rate += self.successRate(numpy.transpose(images.cpu().numpy(), (0, 2, 3, 1)), perturbations, labels.cpu().numpy()) success_rate /= 5 success_rate = success_rate </DeepExtract> self.assertGreaterEqual(success_rate, 0.95)
def testBatchGradientDescentNormalizedBacktrackF7PL0(self): epsilon = 12 attack = attacks.batch_gradient_descent.BatchGradientDescent() attack.max_iterations = 10 attack.base_lr = 100 attack.momentum = 0 attack.c = 0 attack.lr_factor = 1.5 attack.normalized = True attack.backtrack = True attack.initialization = attacks.initializations.L0UniformNormInitialization(epsilon) attack.projection = attacks.projections.SequentialProjections([attacks.projections.L0Projection(epsilon), attacks.projections.BoxProjection()]) attack.norm = attacks.norms.L0Norm() for (b, (images, labels)) in enumerate(self.adversarialloader): break images = common.torch.as_variable(images, self.cuda).permute(0, 3, 1, 2) labels = common.torch.as_variable(labels, self.cuda) success_rate = 0 for t in range(5): attacks.objectives.UntargetedF7PObjective().set(labels) (perturbations, errors) = attack.run(self.model, images, attacks.objectives.UntargetedF7PObjective()) perturbations = numpy.array([numpy.transpose(perturbations, (0, 2, 3, 1))]) success_rate += self.successRate(numpy.transpose(images.cpu().numpy(), (0, 2, 3, 1)), perturbations, labels.cpu().numpy()) success_rate /= 5 success_rate = success_rate self.assertGreaterEqual(success_rate, 0.95)
confidence-calibrated-adversarial-training
positive
def __init__(self, path, conf): self.filename = path self.tzinfo = conf.get('tzinfo', None) self.defaultcopywildcard = conf.get('copy_wildcard', '_[0-9]*.*') with io.open(path, 'r', encoding='utf-8', errors='replace') as fp: peak = lchop(fp.read(512), BOM_UTF8) fp.seek(0) if peak.startswith('---\n'): <DeepExtract> head = [] i = 0 while True: line = fp.readline() i += 1 if i == 1 and (not line.startswith('---')): raise AcrylamidException('no meta information in %r found' % fp.name) elif i > 1 and (not line.startswith('---')): head.append(line) elif i > 1 and line.startswith('---') or not line: break if yaml: try: (i, meta) = (i, yaml.load(''.join(head))) except yaml.YAMLError as e: raise AcrylamidException('YAMLError: %s' % str(e)) else: props = {} for (j, line) in enumerate(head): if line[0] == '#' or not line.strip(): continue try: (key, value) = [x.strip() for x in line.split(':', 1)] except ValueError: raise AcrylamidException('%s:%i ValueError: %s\n%s' % (fp.name, j, line.strip('\n'), 'Either your YAML is malformed or our naïve parser is to dumb \nto read it. Revalidate your YAML or install PyYAML parser with \n> easy_install -U pyyaml')) props[key] = distinguish(value) if 'title' not in props: raise AcrylamidException('No title given in %r' % fp.name) (i, meta) = (i, props) </DeepExtract> elif isrest(peak): <DeepExtract> import docutils from docutils.core import publish_doctree title = fp.readline().strip('\n') dash = fp.readline().strip('\n') if not title or not dash: raise AcrylamidException('No title given in %r' % fp.name) if len(dash) < len(title) or dash.count(dash[0]) < len(dash): raise AcrylamidException('title line does not match second line %r' % fp.name) i = 2 meta = [] while True: line = fp.readline() i += 1 if not line.strip() and i == 3: continue elif not line.strip(): break else: meta.append(line) document = publish_doctree(''.join(meta)) meta = dict(title=title) for docinfo in document.traverse(docutils.nodes.docinfo): for element in docinfo.children: if element.tagname == 'field': (name_elem, body_elem) = element.children name = name_elem.astext() value = body_elem.astext() else: name = element.tagname value = element.astext() name = name.lower() if '\n\n' in value: value = value.split('\n\n') elif '\n' in value: value = value.replace('\n', ' ') meta[name] = distinguish(value.split('\n\n') if '\n\n' in value else value) (i, meta) = (i, meta) </DeepExtract> elif peak.startswith('% '): <DeepExtract> meta_pan_re = re.compile('^[ ]{0,3}%+\\s*(?P<value>.*)') meta_pan_more_re = re.compile('^\\s*(?P<value>.*)') meta_pan_authsplit = re.compile(';+\\s*') (i, j) = (0, 0) (meta, key) = ({}, None) poss_keys = ['title', 'author', 'date'] while True: line = fp.readline() i += 1 if line.strip() == '': break if j + 1 > len(poss_keys): raise AcrylamidException('%r has too many items in the Pandoc title block.' 
% fp.name) m1 = meta_pan_re.match(line) if m1: key = poss_keys[j] j += 1 valstrip = m1.group('value').strip() if not valstrip: continue value = distinguish(m1.group('value').strip()) if key == 'author': value = value.strip(';') value = meta_pan_authsplit.split(value) meta.setdefault(key, []).append(value) else: m2 = meta_pan_more_re.match(line) if m2 and key: value = m2.group('value').strip() if key == 'author': value = value.strip(';') value = meta_pan_authsplit.split(value) meta[key].append(value) else: break if 'title' not in meta: raise AcrylamidException('No title given in %r' % fp.name) if len(meta['title']) > 1: meta['title'] = ' '.join(meta['title']) if 'author' in meta: meta['author'] = sum(meta['author'], []) else: log.warn('%s does not have an Author in the Pandoc title block.' % fp.name) for (key, values) in iteritems(meta): if len(values) == 1: meta[key] = values[0] (i, meta) = (i, meta) </DeepExtract> else: <DeepExtract> meta_re = re.compile('^[ ]{0,3}(?P<key>[A-Za-z0-9._-]+):\\s*(?P<value>.*)') meta_more_re = re.compile('^[ ]{4,}(?P<value>.*)') i = 0 (meta, key) = ({}, None) while True: line = fp.readline() i += 1 if line.strip() == '': break m1 = meta_re.match(line) if m1: key = m1.group('key').lower().strip() value = distinguish(m1.group('value').strip()) meta.setdefault(key, []).append(value) else: m2 = meta_more_re.match(line) if m2 and key: meta[key].append(m2.group('value').strip()) else: break if not meta: raise AcrylamidException('no meta information in %r found' % fp.name) for (key, values) in iteritems(meta): if len(values) == 1: meta[key] = values[0] (i, meta) = (i, meta) </DeepExtract> meta['title'] = str(meta['title']) meta['category'] = lchop(dirname(path) + '/', conf['content_dir']).split('/') jekyll = '(?:(.+?)/)?(\\d{4}-\\d{2}-\\d{2})-(.+)' m = re.match('^' + conf['content_dir'] + jekyll + '$', splitext(path)[0]) if m: meta.setdefault('date', m.group(2)) meta.setdefault('slug', m.group(3)) if m.group(1) is not None: meta['category'] = m.group(1).split('/') self.offset = i Reader.__init__(self, conf, meta) (path, ext) = os.path.splitext(path) self.path = lchop(path, conf['content_dir']) self.extension = ext[1:]
def __init__(self, path, conf): self.filename = path self.tzinfo = conf.get('tzinfo', None) self.defaultcopywildcard = conf.get('copy_wildcard', '_[0-9]*.*') with io.open(path, 'r', encoding='utf-8', errors='replace') as fp: peak = lchop(fp.read(512), BOM_UTF8) fp.seek(0) if peak.startswith('---\n'): head = [] i = 0 while True: line = fp.readline() i += 1 if i == 1 and (not line.startswith('---')): raise AcrylamidException('no meta information in %r found' % fp.name) elif i > 1 and (not line.startswith('---')): head.append(line) elif i > 1 and line.startswith('---') or not line: break if yaml: try: (i, meta) = (i, yaml.load(''.join(head))) except yaml.YAMLError as e: raise AcrylamidException('YAMLError: %s' % str(e)) else: props = {} for (j, line) in enumerate(head): if line[0] == '#' or not line.strip(): continue try: (key, value) = [x.strip() for x in line.split(':', 1)] except ValueError: raise AcrylamidException('%s:%i ValueError: %s\n%s' % (fp.name, j, line.strip('\n'), 'Either your YAML is malformed or our naïve parser is to dumb \nto read it. Revalidate your YAML or install PyYAML parser with \n> easy_install -U pyyaml')) props[key] = distinguish(value) if 'title' not in props: raise AcrylamidException('No title given in %r' % fp.name) (i, meta) = (i, props) elif isrest(peak): import docutils from docutils.core import publish_doctree title = fp.readline().strip('\n') dash = fp.readline().strip('\n') if not title or not dash: raise AcrylamidException('No title given in %r' % fp.name) if len(dash) < len(title) or dash.count(dash[0]) < len(dash): raise AcrylamidException('title line does not match second line %r' % fp.name) i = 2 meta = [] while True: line = fp.readline() i += 1 if not line.strip() and i == 3: continue elif not line.strip(): break else: meta.append(line) document = publish_doctree(''.join(meta)) meta = dict(title=title) for docinfo in document.traverse(docutils.nodes.docinfo): for element in docinfo.children: if element.tagname == 'field': (name_elem, body_elem) = element.children name = name_elem.astext() value = body_elem.astext() else: name = element.tagname value = element.astext() name = name.lower() if '\n\n' in value: value = value.split('\n\n') elif '\n' in value: value = value.replace('\n', ' ') meta[name] = distinguish(value.split('\n\n') if '\n\n' in value else value) (i, meta) = (i, meta) elif peak.startswith('% '): meta_pan_re = re.compile('^[ ]{0,3}%+\\s*(?P<value>.*)') meta_pan_more_re = re.compile('^\\s*(?P<value>.*)') meta_pan_authsplit = re.compile(';+\\s*') (i, j) = (0, 0) (meta, key) = ({}, None) poss_keys = ['title', 'author', 'date'] while True: line = fp.readline() i += 1 if line.strip() == '': break if j + 1 > len(poss_keys): raise AcrylamidException('%r has too many items in the Pandoc title block.' 
% fp.name) m1 = meta_pan_re.match(line) if m1: key = poss_keys[j] j += 1 valstrip = m1.group('value').strip() if not valstrip: continue value = distinguish(m1.group('value').strip()) if key == 'author': value = value.strip(';') value = meta_pan_authsplit.split(value) meta.setdefault(key, []).append(value) else: m2 = meta_pan_more_re.match(line) if m2 and key: value = m2.group('value').strip() if key == 'author': value = value.strip(';') value = meta_pan_authsplit.split(value) meta[key].append(value) else: break if 'title' not in meta: raise AcrylamidException('No title given in %r' % fp.name) if len(meta['title']) > 1: meta['title'] = ' '.join(meta['title']) if 'author' in meta: meta['author'] = sum(meta['author'], []) else: log.warn('%s does not have an Author in the Pandoc title block.' % fp.name) for (key, values) in iteritems(meta): if len(values) == 1: meta[key] = values[0] (i, meta) = (i, meta) else: meta_re = re.compile('^[ ]{0,3}(?P<key>[A-Za-z0-9._-]+):\\s*(?P<value>.*)') meta_more_re = re.compile('^[ ]{4,}(?P<value>.*)') i = 0 (meta, key) = ({}, None) while True: line = fp.readline() i += 1 if line.strip() == '': break m1 = meta_re.match(line) if m1: key = m1.group('key').lower().strip() value = distinguish(m1.group('value').strip()) meta.setdefault(key, []).append(value) else: m2 = meta_more_re.match(line) if m2 and key: meta[key].append(m2.group('value').strip()) else: break if not meta: raise AcrylamidException('no meta information in %r found' % fp.name) for (key, values) in iteritems(meta): if len(values) == 1: meta[key] = values[0] (i, meta) = (i, meta) meta['title'] = str(meta['title']) meta['category'] = lchop(dirname(path) + '/', conf['content_dir']).split('/') jekyll = '(?:(.+?)/)?(\\d{4}-\\d{2}-\\d{2})-(.+)' m = re.match('^' + conf['content_dir'] + jekyll + '$', splitext(path)[0]) if m: meta.setdefault('date', m.group(2)) meta.setdefault('slug', m.group(3)) if m.group(1) is not None: meta['category'] = m.group(1).split('/') self.offset = i Reader.__init__(self, conf, meta) (path, ext) = os.path.splitext(path) self.path = lchop(path, conf['content_dir']) self.extension = ext[1:]
acrylamid
positive
def __call__(self, batch, output, attns, normalization=1.0, shard_size=0, trunc_start=0, trunc_size=None): """Compute the forward loss, possibly in shards in which case this method also runs the backward pass and returns ``None`` as the loss value. Also supports truncated BPTT for long sequences by taking a range in the decoder output sequence to back propagate in. Range is from `(trunc_start, trunc_start + trunc_size)`. Note sharding is an exact efficiency trick to relieve memory required for the generation buffers. Truncation is an approximate efficiency trick to relieve the memory required in the RNN buffers. Args: batch (batch) : batch of labeled examples output (:obj:`FloatTensor`) : output of decoder model `[tgt_len x batch x hidden]` attns (dict) : dictionary of attention distributions `[tgt_len x batch x src_len]` normalization: Optional normalization factor. shard_size (int) : maximum number of examples in a shard trunc_start (int) : starting position of truncation window trunc_size (int) : length of truncation window Returns: A tuple with the loss and a :obj:`onmt.utils.Statistics` instance. """ if trunc_size is None: trunc_size = batch.tgt.size(0) - trunc_start trunc_range = (trunc_start, trunc_start + trunc_size) <DeepExtract> shard_state = NotImplementedError </DeepExtract> if shard_size == 0: <DeepExtract> (loss, stats) = NotImplementedError </DeepExtract> return (loss / float(normalization), stats) batch_stats = onmt.utils.Statistics() for shard in shards(shard_state, shard_size): <DeepExtract> (loss, stats) = NotImplementedError </DeepExtract> loss.div(float(normalization)).backward() batch_stats.update(stats) return (None, batch_stats)
def __call__(self, batch, output, attns, normalization=1.0, shard_size=0, trunc_start=0, trunc_size=None): """Compute the forward loss, possibly in shards in which case this method also runs the backward pass and returns ``None`` as the loss value. Also supports truncated BPTT for long sequences by taking a range in the decoder output sequence to back propagate in. Range is from `(trunc_start, trunc_start + trunc_size)`. Note sharding is an exact efficiency trick to relieve memory required for the generation buffers. Truncation is an approximate efficiency trick to relieve the memory required in the RNN buffers. Args: batch (batch) : batch of labeled examples output (:obj:`FloatTensor`) : output of decoder model `[tgt_len x batch x hidden]` attns (dict) : dictionary of attention distributions `[tgt_len x batch x src_len]` normalization: Optional normalization factor. shard_size (int) : maximum number of examples in a shard trunc_start (int) : starting position of truncation window trunc_size (int) : length of truncation window Returns: A tuple with the loss and a :obj:`onmt.utils.Statistics` instance. """ if trunc_size is None: trunc_size = batch.tgt.size(0) - trunc_start trunc_range = (trunc_start, trunc_start + trunc_size) shard_state = NotImplementedError if shard_size == 0: (loss, stats) = NotImplementedError return (loss / float(normalization), stats) batch_stats = onmt.utils.Statistics() for shard in shards(shard_state, shard_size): (loss, stats) = NotImplementedError loss.div(float(normalization)).backward() batch_stats.update(stats) return (None, batch_stats)
DDAMS
positive
def _get_connection_spec(self): if self._connection_addr is None: <DeepExtract> pidfile = os.path.join(self._data_dir, 'postmaster.pid') try: with open(pidfile, 'rt') as f: piddata = f.read() except FileNotFoundError: self._connection_addr = None lines = piddata.splitlines() if len(lines) < 6: self._connection_addr = None pmpid = int(lines[0]) if self._daemon_pid and pmpid != self._daemon_pid: self._connection_addr = None portnum = lines[3] sockdir = lines[4] hostaddr = lines[5] if sockdir: if sockdir[0] != '/': sockdir = os.path.normpath(os.path.join(self._data_dir, sockdir)) host_str = sockdir else: host_str = hostaddr if host_str == '*': host_str = 'localhost' elif host_str == '0.0.0.0': host_str = '127.0.0.1' elif host_str == '::': host_str = '::1' self._connection_addr = {'host': host_str, 'port': portnum} </DeepExtract> if self._connection_addr is not None: if self._connection_spec_override: args = self._connection_addr.copy() args.update(self._connection_spec_override) return args else: return self._connection_addr
def _get_connection_spec(self): if self._connection_addr is None: pidfile = os.path.join(self._data_dir, 'postmaster.pid') try: with open(pidfile, 'rt') as f: piddata = f.read() except FileNotFoundError: self._connection_addr = None lines = piddata.splitlines() if len(lines) < 6: self._connection_addr = None pmpid = int(lines[0]) if self._daemon_pid and pmpid != self._daemon_pid: self._connection_addr = None portnum = lines[3] sockdir = lines[4] hostaddr = lines[5] if sockdir: if sockdir[0] != '/': sockdir = os.path.normpath(os.path.join(self._data_dir, sockdir)) host_str = sockdir else: host_str = hostaddr if host_str == '*': host_str = 'localhost' elif host_str == '0.0.0.0': host_str = '127.0.0.1' elif host_str == '::': host_str = '::1' self._connection_addr = {'host': host_str, 'port': portnum} if self._connection_addr is not None: if self._connection_spec_override: args = self._connection_addr.copy() args.update(self._connection_spec_override) return args else: return self._connection_addr
asyncpg
positive
def createFolders(uid):
    """Create the folder structure and copy code files"""
    <DeepExtract>
    src = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'html')
    </DeepExtract>
    <DeepExtract>
    safeFolder = self.model.outputFolder
    if self.isWindows() == True:
        safeFolder = self.model.outputFolder.encode('ascii', 'ignore')
    dest = os.path.join(safeFolder, uid)
    </DeepExtract>
    try:
        if os.path.isdir(dest):
            self.log.info('delete previous folder ' + dest)
            shutil.rmtree(dest)
        shutil.copytree(src, dest, ignore=self.excludeFiles)
    except OSError as e:
        self.__logger.error(e.args[1])

def createFolders(uid):
    """Create the folder structure and copy code files"""
    src = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'html')
    safeFolder = self.model.outputFolder
    if self.isWindows() == True:
        safeFolder = self.model.outputFolder.encode('ascii', 'ignore')
    dest = os.path.join(safeFolder, uid)
    try:
        if os.path.isdir(dest):
            self.log.info('delete previous folder ' + dest)
            shutil.rmtree(dest)
        shutil.copytree(src, dest, ignore=self.excludeFiles)
    except OSError as e:
        self.__logger.error(e.args[1])
d3MapRenderer
positive
def test_exists_non_existent(self):
    <DeepExtract>
    filename = ''.join([random.choice(string.ascii_uppercase + string.digits) for x in range(length)]).lower()
    </DeepExtract>
    assert not self._storage.exists(filename)

def test_exists_non_existent(self):
    filename = ''.join([random.choice(string.ascii_uppercase + string.digits) for x in range(length)]).lower()
    assert not self._storage.exists(filename)
docker-registry
positive
def _get_description(ioc: Element) -> Optional[str]:
    <DeepExtract>
    tag = _tag(_NS_OPENIOC, 'description')
    </DeepExtract>
    description = ioc.find(tag)
    if description is None:
        return None
    return description.text

def _get_description(ioc: Element) -> Optional[str]:
    tag = _tag(_NS_OPENIOC, 'description')
    description = ioc.find(tag)
    if description is None:
        return None
    return description.text
connectors
positive
def test_eval_files(self):
    run_predict(predict_args(data=FileDataParams(images=sorted(glob_all([os.path.join(this_dir, 'data', 'uw3_50lines', 'test', '*.png')])))))
    r = run_eval(eval_args(gt_data=FileDataParams(texts=sorted(glob_all([os.path.join(this_dir, 'data', 'uw3_50lines', 'test', '*.gt.txt')])))))
    self.assertLess(r['avg_ler'], 0.0009, msg='Current best model yields about 0.09% CER')
    <DeepExtract>
    args = EvalArgs(gt=FileDataParams(texts=sorted(glob_all([os.path.join(this_dir, 'data', 'uw3_50lines', 'test', '*.gt.txt')]))), pred=pred_data, checkpoint=checkpoint)
    </DeepExtract>
    with tempfile.TemporaryDirectory() as d:
        args.xlsx_output = os.path.join(d, 'output.xlsx')
        run_eval(args)

def test_eval_files(self):
    run_predict(predict_args(data=FileDataParams(images=sorted(glob_all([os.path.join(this_dir, 'data', 'uw3_50lines', 'test', '*.png')])))))
    r = run_eval(eval_args(gt_data=FileDataParams(texts=sorted(glob_all([os.path.join(this_dir, 'data', 'uw3_50lines', 'test', '*.gt.txt')])))))
    self.assertLess(r['avg_ler'], 0.0009, msg='Current best model yields about 0.09% CER')
    args = EvalArgs(gt=FileDataParams(texts=sorted(glob_all([os.path.join(this_dir, 'data', 'uw3_50lines', 'test', '*.gt.txt')]))), pred=pred_data, checkpoint=checkpoint)
    with tempfile.TemporaryDirectory() as d:
        args.xlsx_output = os.path.join(d, 'output.xlsx')
        run_eval(args)
calamari
positive
def _init_sem_data_gen(graph: nx.DiGraph, schema: Dict, n_samples: int, default_type: str, distributions: Dict[str, str], seed: int):
    np.random.seed(seed)
    if not nx.algorithms.is_directed_acyclic_graph(graph):
        raise ValueError('Provided graph is not a DAG.')
    <DeepExtract>
    default_distributions = {'continuous': 'gaussian', 'binary': 'logit', 'categorical': 'logit', 'weight': 'uniform', 'intercept': 'uniform', 'count': 0.05}
    if distributions is None:
        distributions = default_distributions
    default_distributions.update(distributions)
    distributions = default_distributions
    </DeepExtract>
    validated_schema = validate_schema(nodes=graph.nodes(), schema=schema, default_type=default_type)
    var_fte_mapper = VariableFeatureMapper(validated_schema)
    n_columns = var_fte_mapper.n_features
    x_mat = np.empty([n_samples, n_columns])
    return (distributions, var_fte_mapper, x_mat)

def _init_sem_data_gen(graph: nx.DiGraph, schema: Dict, n_samples: int, default_type: str, distributions: Dict[str, str], seed: int):
    np.random.seed(seed)
    if not nx.algorithms.is_directed_acyclic_graph(graph):
        raise ValueError('Provided graph is not a DAG.')
    default_distributions = {'continuous': 'gaussian', 'binary': 'logit', 'categorical': 'logit', 'weight': 'uniform', 'intercept': 'uniform', 'count': 0.05}
    if distributions is None:
        distributions = default_distributions
    default_distributions.update(distributions)
    distributions = default_distributions
    validated_schema = validate_schema(nodes=graph.nodes(), schema=schema, default_type=default_type)
    var_fte_mapper = VariableFeatureMapper(validated_schema)
    n_columns = var_fte_mapper.n_features
    x_mat = np.empty([n_samples, n_columns])
    return (distributions, var_fte_mapper, x_mat)
causalnex
positive
def test_bbox_head_loss(): """ Tests bbox head loss when truth is empty and non-empty """ self = BBoxHead(in_channels=8, roi_feat_size=3) num_imgs = 1 feat = torch.rand(1, 1, 3, 3) proposal_list = [torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]])] target_cfg = mmcv.Config({'pos_weight': 1}) def _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels): """ Create sample results that can be passed to BBoxHead.get_target """ assign_config = {'type': 'MaxIoUAssigner', 'pos_iou_thr': 0.5, 'neg_iou_thr': 0.5, 'min_pos_iou': 0.5, 'ignore_iof_thr': -1} sampler_config = {'type': 'RandomSampler', 'num': 512, 'pos_fraction': 0.25, 'neg_pos_ub': -1, 'add_gt_as_proposals': True} bbox_assigner = build_assigner(assign_config) bbox_sampler = build_sampler(sampler_config) gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = bbox_sampler.sample(assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=feat) sampling_results.append(sampling_result) return sampling_results gt_bboxes = [torch.empty((0, 4))] gt_labels = [torch.LongTensor([])] <DeepExtract> assign_config = {'type': 'MaxIoUAssigner', 'pos_iou_thr': 0.5, 'neg_iou_thr': 0.5, 'min_pos_iou': 0.5, 'ignore_iof_thr': -1} sampler_config = {'type': 'RandomSampler', 'num': 512, 'pos_fraction': 0.25, 'neg_pos_ub': -1, 'add_gt_as_proposals': True} bbox_assigner = build_assigner(assign_config) bbox_sampler = build_sampler(sampler_config) gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = bbox_sampler.sample(assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=feat) sampling_results.append(sampling_result) sampling_results = sampling_results </DeepExtract> bbox_targets = self.get_target(sampling_results, gt_bboxes, gt_labels, target_cfg) (labels, label_weights, bbox_targets, bbox_weights) = bbox_targets num_sampled = sum((len(res.bboxes) for res in sampling_results)) dummy_feats = torch.rand(num_sampled, 8 * 3 * 3) (cls_scores, bbox_preds) = self.forward(dummy_feats) losses = self.loss(cls_scores, bbox_preds, labels, label_weights, bbox_targets, bbox_weights) assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero' assert losses.get('loss_bbox', 0) == 0, 'empty gt loss should be zero' gt_bboxes = [torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])] gt_labels = [torch.LongTensor([2])] <DeepExtract> assign_config = {'type': 'MaxIoUAssigner', 'pos_iou_thr': 0.5, 'neg_iou_thr': 0.5, 'min_pos_iou': 0.5, 'ignore_iof_thr': -1} sampler_config = {'type': 'RandomSampler', 'num': 512, 'pos_fraction': 0.25, 'neg_pos_ub': -1, 'add_gt_as_proposals': True} bbox_assigner = build_assigner(assign_config) bbox_sampler = build_sampler(sampler_config) gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = bbox_sampler.sample(assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=feat) sampling_results.append(sampling_result) sampling_results = sampling_results </DeepExtract> bbox_targets = self.get_target(sampling_results, gt_bboxes, gt_labels, target_cfg) (labels, label_weights, bbox_targets, bbox_weights) = bbox_targets 
num_sampled = sum((len(res.bboxes) for res in sampling_results)) dummy_feats = torch.rand(num_sampled, 8 * 3 * 3) (cls_scores, bbox_preds) = self.forward(dummy_feats) losses = self.loss(cls_scores, bbox_preds, labels, label_weights, bbox_targets, bbox_weights) assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero' assert losses.get('loss_bbox', 0) > 0, 'box-loss should be non-zero'
def test_bbox_head_loss(): """ Tests bbox head loss when truth is empty and non-empty """ self = BBoxHead(in_channels=8, roi_feat_size=3) num_imgs = 1 feat = torch.rand(1, 1, 3, 3) proposal_list = [torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]])] target_cfg = mmcv.Config({'pos_weight': 1}) def _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels): """ Create sample results that can be passed to BBoxHead.get_target """ assign_config = {'type': 'MaxIoUAssigner', 'pos_iou_thr': 0.5, 'neg_iou_thr': 0.5, 'min_pos_iou': 0.5, 'ignore_iof_thr': -1} sampler_config = {'type': 'RandomSampler', 'num': 512, 'pos_fraction': 0.25, 'neg_pos_ub': -1, 'add_gt_as_proposals': True} bbox_assigner = build_assigner(assign_config) bbox_sampler = build_sampler(sampler_config) gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = bbox_sampler.sample(assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=feat) sampling_results.append(sampling_result) return sampling_results gt_bboxes = [torch.empty((0, 4))] gt_labels = [torch.LongTensor([])] assign_config = {'type': 'MaxIoUAssigner', 'pos_iou_thr': 0.5, 'neg_iou_thr': 0.5, 'min_pos_iou': 0.5, 'ignore_iof_thr': -1} sampler_config = {'type': 'RandomSampler', 'num': 512, 'pos_fraction': 0.25, 'neg_pos_ub': -1, 'add_gt_as_proposals': True} bbox_assigner = build_assigner(assign_config) bbox_sampler = build_sampler(sampler_config) gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = bbox_sampler.sample(assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=feat) sampling_results.append(sampling_result) sampling_results = sampling_results bbox_targets = self.get_target(sampling_results, gt_bboxes, gt_labels, target_cfg) (labels, label_weights, bbox_targets, bbox_weights) = bbox_targets num_sampled = sum((len(res.bboxes) for res in sampling_results)) dummy_feats = torch.rand(num_sampled, 8 * 3 * 3) (cls_scores, bbox_preds) = self.forward(dummy_feats) losses = self.loss(cls_scores, bbox_preds, labels, label_weights, bbox_targets, bbox_weights) assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero' assert losses.get('loss_bbox', 0) == 0, 'empty gt loss should be zero' gt_bboxes = [torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])] gt_labels = [torch.LongTensor([2])] assign_config = {'type': 'MaxIoUAssigner', 'pos_iou_thr': 0.5, 'neg_iou_thr': 0.5, 'min_pos_iou': 0.5, 'ignore_iof_thr': -1} sampler_config = {'type': 'RandomSampler', 'num': 512, 'pos_fraction': 0.25, 'neg_pos_ub': -1, 'add_gt_as_proposals': True} bbox_assigner = build_assigner(assign_config) bbox_sampler = build_sampler(sampler_config) gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = bbox_sampler.sample(assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=feat) sampling_results.append(sampling_result) sampling_results = sampling_results bbox_targets = self.get_target(sampling_results, gt_bboxes, gt_labels, target_cfg) (labels, label_weights, bbox_targets, bbox_weights) = bbox_targets num_sampled = sum((len(res.bboxes) for res in sampling_results)) 
dummy_feats = torch.rand(num_sampled, 8 * 3 * 3) (cls_scores, bbox_preds) = self.forward(dummy_feats) losses = self.loss(cls_scores, bbox_preds, labels, label_weights, bbox_targets, bbox_weights) assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero' assert losses.get('loss_bbox', 0) > 0, 'box-loss should be non-zero'
D2Det
positive
def decode_seg_map_sequence(label_masks):
    if label_masks.ndim == 2:
        label_masks = label_masks[None, :, :]
    rgb_masks = []
    for label_mask in label_masks:
        <DeepExtract>
        n_classes = 21
        label_colours = get_pascal_labels()
        r = label_mask.copy()
        g = label_mask.copy()
        b = label_mask.copy()
        for ll in range(0, n_classes):
            r[label_mask == ll] = label_colours[ll, 0]
            g[label_mask == ll] = label_colours[ll, 1]
            b[label_mask == ll] = label_colours[ll, 2]
        rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
        rgb[:, :, 0] = r / 255.0
        rgb[:, :, 1] = g / 255.0
        rgb[:, :, 2] = b / 255.0
        if plot:
            plt.imshow(rgb)
            plt.show()
        else:
            rgb_mask = rgb
        </DeepExtract>
        rgb_masks.append(rgb_mask)
    rgb_masks = torch.from_numpy(np.array(rgb_masks).transpose([0, 3, 1, 2]))
    return rgb_masks

def decode_seg_map_sequence(label_masks):
    if label_masks.ndim == 2:
        label_masks = label_masks[None, :, :]
    rgb_masks = []
    for label_mask in label_masks:
        n_classes = 21
        label_colours = get_pascal_labels()
        r = label_mask.copy()
        g = label_mask.copy()
        b = label_mask.copy()
        for ll in range(0, n_classes):
            r[label_mask == ll] = label_colours[ll, 0]
            g[label_mask == ll] = label_colours[ll, 1]
            b[label_mask == ll] = label_colours[ll, 2]
        rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
        rgb[:, :, 0] = r / 255.0
        rgb[:, :, 1] = g / 255.0
        rgb[:, :, 2] = b / 255.0
        if plot:
            plt.imshow(rgb)
            plt.show()
        else:
            rgb_mask = rgb
        rgb_masks.append(rgb_mask)
    rgb_masks = torch.from_numpy(np.array(rgb_masks).transpose([0, 3, 1, 2]))
    return rgb_masks
DRS
positive
def compute_pvalues(iteration_result, num_motifs, force): """Compute motif scores. The result is a dictionary from cluster -> (feature_id, pvalue) containing a sparse gene-to-pvalue mapping for each cluster In order to influence the sequences that go into meme, the user can specify a list of sequence filter functions that have the signature (seqs, feature_ids, distance) -> seqs These filters are applied in the order they appear in the list. """ global SEQUENCE_FILTERS, ORGANISM, MEMBERSHIP cluster_pvalues = {} min_cluster_rows_allowed = self.config_params['memb.min_cluster_rows_allowed'] max_cluster_rows_allowed = self.config_params['memb.max_cluster_rows_allowed'] use_multiprocessing = self.config_params[scoring.KEY_MULTIPROCESSING] start_time = util.current_millis() SEQUENCE_FILTERS = self.__sequence_filters ORGANISM = self.organism MEMBERSHIP = self.membership cluster_seqs_params = [(cluster, self.seqtype) for cluster in xrange(1, self.num_clusters() + 1)] if use_multiprocessing: with util.get_mp_pool(self.config_params) as pool: seqs_list = pool.map(cluster_seqs, cluster_seqs_params) else: seqs_list = [cluster_seqs(p) for p in cluster_seqs_params] SEQUENCE_FILTERS = None ORGANISM = None MEMBERSHIP = None logging.debug('prepared sequences in %d ms.', util.current_millis() - start_time) start_time = util.current_millis() params = {} for cluster in xrange(1, self.num_clusters() + 1): if self.__last_motif_infos is not None: previous_motif_infos = self.__last_motif_infos.get(cluster, None) else: previous_motif_infos = None (seqs, feature_ids) = seqs_list[cluster - 1] params[cluster] = ComputeScoreParams(iteration_result['iteration'], cluster, feature_ids, seqs, self.used_seqs, self.meme_runner(), min_cluster_rows_allowed, max_cluster_rows_allowed, num_motifs, previous_motif_infos, self.config_params['output_dir'], self.config_params['num_iterations'], self.config_params['debug']) logging.debug('prepared MEME parameters in %d ms.', util.current_millis() - start_time) for cluster in xrange(1, self.num_clusters() + 1): if not cluster in iteration_result: iteration_result[cluster] = {} if not force and self.__last_results is not None: oldlen = len(params) params = {cluster: params[cluster] for cluster in xrange(1, self.num_clusters() + 1) if params[cluster].feature_ids != self.__last_results[cluster][0]} newlen = len(params) if oldlen - newlen > 0: logging.debug('%d clusters did not change !!!', oldlen - newlen) self.__last_motif_infos = {} if self.__last_results is None: self.__last_results = {} if use_multiprocessing: with util.get_mp_pool(self.config_params) as pool: results = pool.map(compute_cluster_score, params.values()) results = {r[0]: r[1:] for r in results} for cluster in xrange(1, self.num_clusters() + 1): if cluster in results: (pvalues, run_result) = results[cluster] self.__last_results[cluster] = (params[cluster].feature_ids, pvalues, run_result) else: (feature_ids, pvalues, run_result) = self.__last_results[cluster] cluster_pvalues[cluster] = pvalues if run_result: self.__last_motif_infos[cluster] = run_result.motif_infos <DeepExtract> result = [] if run_result is not None: motif_annotations = {} for gene in run_result.annotations: for annotation in run_result.annotations[gene]: motif_num = annotation[2] key = abs(motif_num) reverse = motif_num < 0 if key not in motif_annotations: motif_annotations[key] = [] motif_annotations[key].append({'gene': gene, 'position': annotation[1], 'pvalue': annotation[0], 'reverse': reverse}) for motif_info in run_result.motif_infos: motif_num = 
motif_info.motif_num motif_annot = [] if motif_num in motif_annotations: motif_annot = motif_annotations[motif_num] result.append({'motif_num': motif_num, 'pssm': motif_info.pssm, 'evalue': motif_info.evalue, 'annotations': motif_annot, 'sites': motif_info.sites}) iteration_result[cluster]['motif-info'] = result </DeepExtract> iteration_result[cluster]['pvalues'] = pvalues else: for cluster in xrange(1, self.num_clusters() + 1): if cluster in params: <DeepExtract> pvalues = {} run_result = None nseqs = len(params[cluster].seqs) logging.info('running meme/mast on cluster %d, # sequences: %d', params[cluster].cluster, nseqs) if nseqs >= params[cluster].min_cluster_rows and nseqs <= params[cluster].max_cluster_rows: run_result = params[cluster].meme_runner(params[cluster]) pvalues = {feature_id: pvalue for (feature_id, pvalue, evalue) in run_result.pe_values} else: logging.debug('# seqs (= %d) outside of defined limits, skipping cluster %d', len(params[cluster].seqs), params[cluster].cluster) (_, pvalues, run_result) = (params[cluster].cluster, pvalues, run_result) </DeepExtract> self.__last_results[cluster] = (params[cluster].feature_ids, pvalues, run_result) else: (_, pvalues, run_result) = self.__last_results[cluster] cluster_pvalues[cluster] = pvalues if run_result: self.__last_motif_infos[cluster] = run_result.motif_infos <DeepExtract> result = [] if run_result is not None: motif_annotations = {} for gene in run_result.annotations: for annotation in run_result.annotations[gene]: motif_num = annotation[2] key = abs(motif_num) reverse = motif_num < 0 if key not in motif_annotations: motif_annotations[key] = [] motif_annotations[key].append({'gene': gene, 'position': annotation[1], 'pvalue': annotation[0], 'reverse': reverse}) for motif_info in run_result.motif_infos: motif_num = motif_info.motif_num motif_annot = [] if motif_num in motif_annotations: motif_annot = motif_annotations[motif_num] result.append({'motif_num': motif_num, 'pssm': motif_info.pssm, 'evalue': motif_info.evalue, 'annotations': motif_annot, 'sites': motif_info.sites}) iteration_result[cluster]['motif-info'] = result </DeepExtract> iteration_result[cluster]['pvalues'] = pvalues return cluster_pvalues
def compute_pvalues(iteration_result, num_motifs, force): """Compute motif scores. The result is a dictionary from cluster -> (feature_id, pvalue) containing a sparse gene-to-pvalue mapping for each cluster In order to influence the sequences that go into meme, the user can specify a list of sequence filter functions that have the signature (seqs, feature_ids, distance) -> seqs These filters are applied in the order they appear in the list. """ global SEQUENCE_FILTERS, ORGANISM, MEMBERSHIP cluster_pvalues = {} min_cluster_rows_allowed = self.config_params['memb.min_cluster_rows_allowed'] max_cluster_rows_allowed = self.config_params['memb.max_cluster_rows_allowed'] use_multiprocessing = self.config_params[scoring.KEY_MULTIPROCESSING] start_time = util.current_millis() SEQUENCE_FILTERS = self.__sequence_filters ORGANISM = self.organism MEMBERSHIP = self.membership cluster_seqs_params = [(cluster, self.seqtype) for cluster in xrange(1, self.num_clusters() + 1)] if use_multiprocessing: with util.get_mp_pool(self.config_params) as pool: seqs_list = pool.map(cluster_seqs, cluster_seqs_params) else: seqs_list = [cluster_seqs(p) for p in cluster_seqs_params] SEQUENCE_FILTERS = None ORGANISM = None MEMBERSHIP = None logging.debug('prepared sequences in %d ms.', util.current_millis() - start_time) start_time = util.current_millis() params = {} for cluster in xrange(1, self.num_clusters() + 1): if self.__last_motif_infos is not None: previous_motif_infos = self.__last_motif_infos.get(cluster, None) else: previous_motif_infos = None (seqs, feature_ids) = seqs_list[cluster - 1] params[cluster] = ComputeScoreParams(iteration_result['iteration'], cluster, feature_ids, seqs, self.used_seqs, self.meme_runner(), min_cluster_rows_allowed, max_cluster_rows_allowed, num_motifs, previous_motif_infos, self.config_params['output_dir'], self.config_params['num_iterations'], self.config_params['debug']) logging.debug('prepared MEME parameters in %d ms.', util.current_millis() - start_time) for cluster in xrange(1, self.num_clusters() + 1): if not cluster in iteration_result: iteration_result[cluster] = {} if not force and self.__last_results is not None: oldlen = len(params) params = {cluster: params[cluster] for cluster in xrange(1, self.num_clusters() + 1) if params[cluster].feature_ids != self.__last_results[cluster][0]} newlen = len(params) if oldlen - newlen > 0: logging.debug('%d clusters did not change !!!', oldlen - newlen) self.__last_motif_infos = {} if self.__last_results is None: self.__last_results = {} if use_multiprocessing: with util.get_mp_pool(self.config_params) as pool: results = pool.map(compute_cluster_score, params.values()) results = {r[0]: r[1:] for r in results} for cluster in xrange(1, self.num_clusters() + 1): if cluster in results: (pvalues, run_result) = results[cluster] self.__last_results[cluster] = (params[cluster].feature_ids, pvalues, run_result) else: (feature_ids, pvalues, run_result) = self.__last_results[cluster] cluster_pvalues[cluster] = pvalues if run_result: self.__last_motif_infos[cluster] = run_result.motif_infos result = [] if run_result is not None: motif_annotations = {} for gene in run_result.annotations: for annotation in run_result.annotations[gene]: motif_num = annotation[2] key = abs(motif_num) reverse = motif_num < 0 if key not in motif_annotations: motif_annotations[key] = [] motif_annotations[key].append({'gene': gene, 'position': annotation[1], 'pvalue': annotation[0], 'reverse': reverse}) for motif_info in run_result.motif_infos: motif_num = motif_info.motif_num motif_annot = [] if motif_num in motif_annotations: motif_annot = motif_annotations[motif_num] result.append({'motif_num': motif_num, 'pssm': motif_info.pssm, 'evalue': motif_info.evalue, 'annotations': motif_annot, 'sites': motif_info.sites}) iteration_result[cluster]['motif-info'] = result iteration_result[cluster]['pvalues'] = pvalues else: for cluster in xrange(1, self.num_clusters() + 1): if cluster in params: pvalues = {} run_result = None nseqs = len(params[cluster].seqs) logging.info('running meme/mast on cluster %d, # sequences: %d', params[cluster].cluster, nseqs) if nseqs >= params[cluster].min_cluster_rows and nseqs <= params[cluster].max_cluster_rows: run_result = params[cluster].meme_runner(params[cluster]) pvalues = {feature_id: pvalue for (feature_id, pvalue, evalue) in run_result.pe_values} else: logging.debug('# seqs (= %d) outside of defined limits, skipping cluster %d', len(params[cluster].seqs), params[cluster].cluster) (_, pvalues, run_result) = (params[cluster].cluster, pvalues, run_result) self.__last_results[cluster] = (params[cluster].feature_ids, pvalues, run_result) else: (_, pvalues, run_result) = self.__last_results[cluster] cluster_pvalues[cluster] = pvalues if run_result: self.__last_motif_infos[cluster] = run_result.motif_infos result = [] if run_result is not None: motif_annotations = {} for gene in run_result.annotations: for annotation in run_result.annotations[gene]: motif_num = annotation[2] key = abs(motif_num) reverse = motif_num < 0 if key not in motif_annotations: motif_annotations[key] = [] motif_annotations[key].append({'gene': gene, 'position': annotation[1], 'pvalue': annotation[0], 'reverse': reverse}) for motif_info in run_result.motif_infos: motif_num = motif_info.motif_num motif_annot = [] if motif_num in motif_annotations: motif_annot = motif_annotations[motif_num] result.append({'motif_num': motif_num, 'pssm': motif_info.pssm, 'evalue': motif_info.evalue, 'annotations': motif_annot, 'sites': motif_info.sites}) iteration_result[cluster]['motif-info'] = result iteration_result[cluster]['pvalues'] = pvalues return cluster_pvalues
cmonkey2
positive
def train(self, inputs: List[Vector]) -> None: assignments = [random.randrange(self.k) for _ in inputs] with tqdm.tqdm(itertools.count()) as t: for _ in t: <DeepExtract> clusters = [[] for i in range(self.k)] for (input, assignment) in zip(inputs, assignments): clusters[assignment].append(input) self.means = [vector_mean(cluster) if cluster else random.choice(inputs) for cluster in clusters] </DeepExtract> new_assignments = [self.classify(input) for input in inputs] <DeepExtract> assert len(assignments) == len(new_assignments) num_changed = len([x1 for (x1, x2) in zip(assignments, new_assignments) if x1 != x2]) </DeepExtract> if num_changed == 0: return assignments = new_assignments <DeepExtract> clusters = [[] for i in range(self.k)] for (input, assignment) in zip(inputs, assignments): clusters[assignment].append(input) self.means = [vector_mean(cluster) if cluster else random.choice(inputs) for cluster in clusters] </DeepExtract> t.set_description(f'changed: {num_changed} / {len(inputs)}')
def train(self, inputs: List[Vector]) -> None: assignments = [random.randrange(self.k) for _ in inputs] with tqdm.tqdm(itertools.count()) as t: for _ in t: clusters = [[] for i in range(self.k)] for (input, assignment) in zip(inputs, assignments): clusters[assignment].append(input) self.means = [vector_mean(cluster) if cluster else random.choice(inputs) for cluster in clusters] new_assignments = [self.classify(input) for input in inputs] assert len(assignments) == len(new_assignments) num_changed = len([x1 for (x1, x2) in zip(assignments, new_assignments) if x1 != x2]) if num_changed == 0: return assignments = new_assignments clusters = [[] for i in range(self.k)] for (input, assignment) in zip(inputs, assignments): clusters[assignment].append(input) self.means = [vector_mean(cluster) if cluster else random.choice(inputs) for cluster in clusters] t.set_description(f'changed: {num_changed} / {len(inputs)}')
data-science-from-scratch
positive
def forward(self, *inputs, **kwargs): if not self.device_ids: return self.module(*inputs, **kwargs) <DeepExtract> (inputs, kwargs) = scatter_kwargs(inputs, kwargs, self.device_ids, dim=self.dim, chunk_sizes=self.chunk_sizes) </DeepExtract> if len(self.device_ids) == 1: return self.module(*inputs[0], **kwargs[0]) <DeepExtract> replicas = replicate(self.module, self.device_ids[:len(inputs)]) </DeepExtract> <DeepExtract> outputs = parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)]) </DeepExtract> return self.gather(outputs, self.output_device)
def forward(self, *inputs, **kwargs): if not self.device_ids: return self.module(*inputs, **kwargs) (inputs, kwargs) = scatter_kwargs(inputs, kwargs, self.device_ids, dim=self.dim, chunk_sizes=self.chunk_sizes) if len(self.device_ids) == 1: return self.module(*inputs[0], **kwargs[0]) replicas = replicate(self.module, self.device_ids[:len(inputs)]) outputs = parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)]) return self.gather(outputs, self.output_device)
CenterNet-CondInst
positive
def test_ignore_url(): from aws_xray_sdk.ext.httplib import add_ignored path = '/status/200' url = 'https://{}{}'.format(BASE_URL, path) add_ignored(urls=[path]) <DeepExtract> parts = urlparse(url) (host, _, port) = parts.netloc.partition(':') if port == '': port = None if True: conn = httplib.HTTPSConnection(parts.netloc, port) else: conn = httplib.HTTPConnection(parts.netloc, port) path = '{}?{}'.format(parts.path, parts.query) if parts.query else parts.path conn.request(method, path) resp = conn.getresponse() </DeepExtract> assert len(xray_recorder.current_segment().subsegments) == 0
def test_ignore_url(): from aws_xray_sdk.ext.httplib import add_ignored path = '/status/200' url = 'https://{}{}'.format(BASE_URL, path) add_ignored(urls=[path]) parts = urlparse(url) (host, _, port) = parts.netloc.partition(':') if port == '': port = None if True: conn = httplib.HTTPSConnection(parts.netloc, port) else: conn = httplib.HTTPConnection(parts.netloc, port) path = '{}?{}'.format(parts.path, parts.query) if parts.query else parts.path conn.request(method, path) resp = conn.getresponse() assert len(xray_recorder.current_segment().subsegments) == 0
aws-xray-sdk-python
positive
@property def ecus(self): if not self._ecus: <DeepExtract> ecus = [] ecu_names = [] for matrixName in self: for ecu in self[matrixName].ecus: if ecu.name not in ecu_names: ecu_names.append(ecu.name) ecus.append(ecu) self._ecus = ecus return ecus </DeepExtract> return self._ecus
@property def ecus(self): if not self._ecus: ecus = [] ecu_names = [] for matrixName in self: for ecu in self[matrixName].ecus: if ecu.name not in ecu_names: ecu_names.append(ecu.name) ecus.append(ecu) self._ecus = ecus return ecus return self._ecus
canmatrix
positive
def reshape_input(input_tensor, *args): <DeepExtract> if name is None: name = input_tensor.name if 3 is not None: assert_rank(input_tensor, 3, name) shape = input_tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: self.input_shape = shape dyn_shape = tf.shape(input_tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] self.input_shape = shape </DeepExtract> self.batch_size = self.input_shape[0] self.seq_length = self.input_shape[1] self.input_width = self.input_shape[2] if self.input_width != hidden_size: raise ValueError('The width of the input tensor (%d) != hidden size (%d)' % (input_width, hidden_size)) with tf.variable_scope(self._scope, reuse=tf.AUTO_REUSE, auxiliary_name_scope=False): with tf.name_scope(self._scope.original_name_scope): <DeepExtract> ndims = input_tensor.shape.ndims if ndims < 2: raise ValueError('Input tensor must have at least rank 2. Shape = %s' % input_tensor.shape) if ndims == 2: reshaped = input_tensor width = input_tensor.shape[-1] output_tensor = tf.reshape(input_tensor, [-1, width]) reshaped = output_tensor </DeepExtract> return reshaped
def reshape_input(input_tensor, *args): if name is None: name = input_tensor.name if 3 is not None: assert_rank(input_tensor, 3, name) shape = input_tensor.shape.as_list() non_static_indexes = [] for (index, dim) in enumerate(shape): if dim is None: non_static_indexes.append(index) if not non_static_indexes: self.input_shape = shape dyn_shape = tf.shape(input_tensor) for index in non_static_indexes: shape[index] = dyn_shape[index] self.input_shape = shape self.batch_size = self.input_shape[0] self.seq_length = self.input_shape[1] self.input_width = self.input_shape[2] if self.input_width != hidden_size: raise ValueError('The width of the input tensor (%d) != hidden size (%d)' % (input_width, hidden_size)) with tf.variable_scope(self._scope, reuse=tf.AUTO_REUSE, auxiliary_name_scope=False): with tf.name_scope(self._scope.original_name_scope): ndims = input_tensor.shape.ndims if ndims < 2: raise ValueError('Input tensor must have at least rank 2. Shape = %s' % input_tensor.shape) if ndims == 2: reshaped = input_tensor width = input_tensor.shape[-1] output_tensor = tf.reshape(input_tensor, [-1, width]) reshaped = output_tensor return reshaped
DAPPLE
positive
def matmul(a: Array, b: Array, transpose_a=False, transpose_b=False): """ Matrix multiplication with a possible transpose of the input. Parameters ---------- a : ds-array First matrix. b : ds-array Second matrix. transpose_a : bool Transpose of the first matrix before multiplication. transpose_b : any Transpose of the second matrix before multiplication. Returns ------- out : ds-array The output array. Raises ------ NotImplementedError If _top_left shape does not match _reg_shape. This case will be implemented in the future. ValueError If any of the block sizes does not match. Examples -------- >>> import dislib as ds >>> >>> >>> if __name__ == "__main__": >>> x = ds.random_array((8, 4), block_size=(2, 2)) >>> y = ds.random_array((5, 8), block_size=(2, 2)) >>> result = ds.matmul(x, y, transpose_a=True, transpose_b=True) >>> print(result.collect()) """ if a._reg_shape != a._top_left_shape: raise NotImplementedError('a._reg_shape != a._top_left_shape') if b._reg_shape != b._top_left_shape: raise NotImplementedError('b._reg_shape != b._top_left_shape') checks = [(False, False, a._reg_shape[1], b._reg_shape[0]), (True, False, a._reg_shape[0], b._reg_shape[0]), (False, True, a._reg_shape[1], b._reg_shape[1]), (True, True, a._reg_shape[0], b._reg_shape[1])] for (ta, tb, size1, size2) in checks: if ta == transpose_a and tb == transpose_b and (size1 != size2): raise ValueError(f'incorrect block sizes for the requested multiplication ({size1} != {size2})') a_blocks = _transpose_blocks(a._blocks) if transpose_a else a._blocks b_blocks = _transpose_blocks(b._blocks) if transpose_b else b._blocks n_blocks = (len(a_blocks), len(b_blocks[0])) blocks = Array._get_out_blocks(n_blocks) for i in range(n_blocks[0]): for j in range(n_blocks[1]): hblock = a_blocks[i] vblock = [b_blocks[k][j] for k in range(len(b_blocks))] <DeepExtract> blocks = deque() if dislib.__gpu_available__: matmul_func = _matmul_gpu add_func = _add_gpu else: matmul_func = _matmul_with_transpose add_func = _add_cpu for (blocki, blockj) in zip(hblock, vblock): blocks.append(matmul_func(blocki, blockj, transpose_a, transpose_b)) while len(blocks) > 1: block1 = blocks.popleft() block2 = blocks.popleft() blocks.append(add_func(block1, block2)) compss_delete_object(block1) compss_delete_object(block2) blocks[i][j] = blocks[0] </DeepExtract> new_block_size = (a._reg_shape[1] if transpose_a else a._reg_shape[0], b._reg_shape[0] if transpose_b else b._reg_shape[1]) new_shape = (a._shape[1] if transpose_a else a._shape[0], b._shape[0] if transpose_b else b._shape[1]) return Array(blocks=blocks, top_left_shape=new_block_size, reg_shape=new_block_size, shape=new_shape, sparse=a._sparse)
def matmul(a: Array, b: Array, transpose_a=False, transpose_b=False): """ Matrix multiplication with a possible transpose of the input. Parameters ---------- a : ds-array First matrix. b : ds-array Second matrix. transpose_a : bool Transpose of the first matrix before multiplication. transpose_b : any Transpose of the second matrix before multiplication. Returns ------- out : ds-array The output array. Raises ------ NotImplementedError If _top_left shape does not match _reg_shape. This case will be implemented in the future. ValueError If any of the block sizes does not match. Examples -------- >>> import dislib as ds >>> >>> >>> if __name__ == "__main__": >>> x = ds.random_array((8, 4), block_size=(2, 2)) >>> y = ds.random_array((5, 8), block_size=(2, 2)) >>> result = ds.matmul(x, y, transpose_a=True, transpose_b=True) >>> print(result.collect()) """ if a._reg_shape != a._top_left_shape: raise NotImplementedError('a._reg_shape != a._top_left_shape') if b._reg_shape != b._top_left_shape: raise NotImplementedError('b._reg_shape != b._top_left_shape') checks = [(False, False, a._reg_shape[1], b._reg_shape[0]), (True, False, a._reg_shape[0], b._reg_shape[0]), (False, True, a._reg_shape[1], b._reg_shape[1]), (True, True, a._reg_shape[0], b._reg_shape[1])] for (ta, tb, size1, size2) in checks: if ta == transpose_a and tb == transpose_b and (size1 != size2): raise ValueError(f'incorrect block sizes for the requested multiplication ({size1} != {size2})') a_blocks = _transpose_blocks(a._blocks) if transpose_a else a._blocks b_blocks = _transpose_blocks(b._blocks) if transpose_b else b._blocks n_blocks = (len(a_blocks), len(b_blocks[0])) blocks = Array._get_out_blocks(n_blocks) for i in range(n_blocks[0]): for j in range(n_blocks[1]): hblock = a_blocks[i] vblock = [b_blocks[k][j] for k in range(len(b_blocks))] blocks = deque() if dislib.__gpu_available__: matmul_func = _matmul_gpu add_func = _add_gpu else: matmul_func = _matmul_with_transpose add_func = _add_cpu for (blocki, blockj) in zip(hblock, vblock): blocks.append(matmul_func(blocki, blockj, transpose_a, transpose_b)) while len(blocks) > 1: block1 = blocks.popleft() block2 = blocks.popleft() blocks.append(add_func(block1, block2)) compss_delete_object(block1) compss_delete_object(block2) blocks[i][j] = blocks[0] new_block_size = (a._reg_shape[1] if transpose_a else a._reg_shape[0], b._reg_shape[0] if transpose_b else b._reg_shape[1]) new_shape = (a._shape[1] if transpose_a else a._shape[0], b._shape[0] if transpose_b else b._shape[1]) return Array(blocks=blocks, top_left_shape=new_block_size, reg_shape=new_block_size, shape=new_shape, sparse=a._sparse)
dislib
positive
@parse_debug def parse_constant_declarators_rest(self): <DeepExtract> array_dimension = self.parse_array_dimension() self.accept('=') initializer = self.parse_variable_initializer() (array_dimension, initializer) = (array_dimension, initializer) </DeepExtract> declarators = [tree.VariableDeclarator(dimensions=array_dimension, initializer=initializer)] while self.try_accept(','): <DeepExtract> name = self.parse_identifier() (additional_dimension, initializer) = self.parse_constant_declarator_rest() declarator = tree.VariableDeclarator(name=name, dimensions=additional_dimension, initializer=initializer) </DeepExtract> declarators.append(declarator) return tree.ConstantDeclaration(declarators=declarators)
@parse_debug def parse_constant_declarators_rest(self): array_dimension = self.parse_array_dimension() self.accept('=') initializer = self.parse_variable_initializer() (array_dimension, initializer) = (array_dimension, initializer) declarators = [tree.VariableDeclarator(dimensions=array_dimension, initializer=initializer)] while self.try_accept(','): name = self.parse_identifier() (additional_dimension, initializer) = self.parse_constant_declarator_rest() declarator = tree.VariableDeclarator(name=name, dimensions=additional_dimension, initializer=initializer) declarators.append(declarator) return tree.ConstantDeclaration(declarators=declarators)
code-transformer
positive
def main(): check_suite = CheckSuite() check_suite.load_all_available_checkers() parser = argparse.ArgumentParser() parser.add_argument('--test', '-t', '--test=', '-t=', default=[], action='append', help='Select the Checks you want to perform. Defaults to \'acdd\' if unspecified. Versions of standards can be specified via `-t <test_standard>:<version>`. If `<version>` is omitted, or is "latest", the latest version of the test standard is used.') parser.add_argument('--criteria', '-c', help='Define the criteria for the checks. Either Strict, Normal, or Lenient. Defaults to Normal.', nargs='?', default='normal', choices=['lenient', 'normal', 'strict']) parser.add_argument('--verbose', '-v', help='Increase output. May be specified up to three times.', action='count', default=0) parser.add_argument('--describe-checks', '-D', help='Describes checks for checkers specified using `-t`. If `-t` is not specified, lists checks from all available checkers.', action='store_true') include_exclude = parser.add_mutually_exclusive_group() include_exclude.add_argument('--skip-checks', '-s', help=dedent('\n Specifies tests to skip. Can take the form\n of either `<check_name>` or\n `<check_name>:<skip_level>`. The first\n form skips any checks matching the name.\n In the second form <skip_level> may be\n specified as "A", "M", or "L". "A" skips\n all checks and is equivalent to calling\n the first form. "M" will only show high\n priority output from the given check and\n will skip medium and low. "L" will show\n both high and medium priority issues, while\n skipping low priority issues. Cannot be\n used with `-i`/`--include-checks` option.\n '), action='append') include_exclude.add_argument('--include-checks', '-i', help=dedent('\n Specifies checks to include. Can only take the form\n of `<check_name>`. Cannot be specified along with\n `-s`/`skip_checks`.\n '), action='append') parser.add_argument('-f', '--format', default=[], action='append', help="Output format(s). Options are 'text', 'html', 'json', 'json_new'. The difference between the 'json' and the 'json_new' formats is that the 'json' format has the check as the top level key, whereas the 'json_new' format has the dataset name(s) as the main key in the output follow by any checks as subkeys. Also, 'json' format can be only be run against one input file, whereas 'json_new' can be run against multiple files.", choices=['text', 'html', 'json', 'json_new']) parser.add_argument('-o', '--output', default=[], action='append', help="Output filename(s). If '-' is supplied, output to stdout. Can either be one or many files. If one file is supplied, but the checker is run against many files, all the output from the checks goes to that file (does not presently work with 'json' format). If more than one output file is supplied, the number of input datasets supplied must match the number of output files.") parser.add_argument('-O', '--option', default=[], action='append', help=dedent("\n Additional options to be passed to the\n checkers. 
Multiple options can be specified\n via multiple invocations of this switch.\n Options should be prefixed with a the\n checker name followed by the option, e.g.\n '<checker>:<option_name>'\n\n Available options:\n 'cf:enable_appendix_a_checks' - Allow check\n results against CF Appendix A for attribute\n location and data types.\n ")) parser.add_argument('-V', '--version', action='store_true', help='Display the IOOS Compliance Checker version information.') parser.add_argument('dataset_location', nargs='*', help="Defines the location of the dataset to be checked. The location can be a local netCDF file, a remote OPeNDAP endpoint, a remote netCDF file which returns content-type header of 'application/x-netcdf', or an ERDDAP TableDAP endpoint. Note that the ERDDAP TableDAP endpoint will currently attempt to fetch the entire TableDAP dataset.") parser.add_argument('-l', '--list-tests', action='store_true', help='List the available tests') parser.add_argument('-d', '--download-standard-names', help='Specify a version of the cf standard name table to download as packaged version. Either specify a version number (e.g. "72") to fetch a specific version or "latest" to get the latest CF standard name table.') check_suite.add_plugin_args(parser) args = parser.parse_args() check_suite.load_generated_checkers(args) if args.version: print('IOOS compliance checker version %s' % __version__) sys.exit(0) options_dict = parse_options(args.option) if args.option else defaultdict(set) if args.describe_checks: error_stat = 0 if args.test: checker_names = set(args.test) else: checker_names = [c for c in check_suite.checkers if ':' in c and (not c.endswith(':latest'))] for checker_name in sorted(checker_names): if checker_name not in check_suite.checkers: print("Cannot find checker '{}' with which to describe checks".format(checker_name), file=sys.stderr) error_stat = 1 else: <DeepExtract> print('{0}\n {1} \n{0}'.format('=' * (len(checker_name) + 2), checker_name)) </DeepExtract> check_suite._print_checker(check_suite.checkers[checker_name]) sys.exit(error_stat) if args.list_tests: print('IOOS compliance checker available checker suites:') check_suite._print_suites(args.verbose) return 0 if args.download_standard_names: download_cf_standard_name_table(args.download_standard_names) if len(args.dataset_location) == 0: parser.print_help() sys.exit(1) if not args.output: args.output = '-' output_len = len(args.output) if not (output_len == 1 or output_len == len(args.dataset_location)): print('The number of output files must either be one or the same as the number of datasets', file=sys.stderr) sys.exit(2) return_values = [] had_errors = [] if output_len == 1: if args.format != 'json': print('Running Compliance Checker on the datasets from: {}'.format(args.dataset_location), file=sys.stderr) (return_value, errors) = ComplianceChecker.run_checker(args.dataset_location, args.test or ['acdd'], args.verbose, args.criteria, args.skip_checks, args.include_checks, args.output[0], args.format or ['text'], options=options_dict) return_values.append(return_value) had_errors.append(errors) else: for (output, dataset) in zip(args.output, args.dataset_location): if args.format != 'json': print('Running Compliance Checker on the dataset from: {}'.format(dataset), file=sys.stderr) (return_value, errors) = ComplianceChecker.run_checker([dataset], args.test or ['acdd'], args.verbose, args.criteria, args.skip_checks, args.include_checks, output, args.format or ['text'], options=options_dict) return_values.append(return_value) 
had_errors.append(errors) if any(had_errors): sys.exit(2) if all(return_values): sys.exit(0) sys.exit(1)
def main(): check_suite = CheckSuite() check_suite.load_all_available_checkers() parser = argparse.ArgumentParser() parser.add_argument('--test', '-t', '--test=', '-t=', default=[], action='append', help='Select the Checks you want to perform. Defaults to \'acdd\' if unspecified. Versions of standards can be specified via `-t <test_standard>:<version>`. If `<version>` is omitted, or is "latest", the latest version of the test standard is used.') parser.add_argument('--criteria', '-c', help='Define the criteria for the checks. Either Strict, Normal, or Lenient. Defaults to Normal.', nargs='?', default='normal', choices=['lenient', 'normal', 'strict']) parser.add_argument('--verbose', '-v', help='Increase output. May be specified up to three times.', action='count', default=0) parser.add_argument('--describe-checks', '-D', help='Describes checks for checkers specified using `-t`. If `-t` is not specified, lists checks from all available checkers.', action='store_true') include_exclude = parser.add_mutually_exclusive_group() include_exclude.add_argument('--skip-checks', '-s', help=dedent('\n Specifies tests to skip. Can take the form\n of either `<check_name>` or\n `<check_name>:<skip_level>`. The first\n form skips any checks matching the name.\n In the second form <skip_level> may be\n specified as "A", "M", or "L". "A" skips\n all checks and is equivalent to calling\n the first form. "M" will only show high\n priority output from the given check and\n will skip medium and low. "L" will show\n both high and medium priority issues, while\n skipping low priority issues. Cannot be\n used with `-i`/`--include-checks` option.\n '), action='append') include_exclude.add_argument('--include-checks', '-i', help=dedent('\n Specifies checks to include. Can only take the form\n of `<check_name>`. Cannot be specified along with\n `-s`/`skip_checks`.\n '), action='append') parser.add_argument('-f', '--format', default=[], action='append', help="Output format(s). Options are 'text', 'html', 'json', 'json_new'. The difference between the 'json' and the 'json_new' formats is that the 'json' format has the check as the top level key, whereas the 'json_new' format has the dataset name(s) as the main key in the output follow by any checks as subkeys. Also, 'json' format can be only be run against one input file, whereas 'json_new' can be run against multiple files.", choices=['text', 'html', 'json', 'json_new']) parser.add_argument('-o', '--output', default=[], action='append', help="Output filename(s). If '-' is supplied, output to stdout. Can either be one or many files. If one file is supplied, but the checker is run against many files, all the output from the checks goes to that file (does not presently work with 'json' format). If more than one output file is supplied, the number of input datasets supplied must match the number of output files.") parser.add_argument('-O', '--option', default=[], action='append', help=dedent("\n Additional options to be passed to the\n checkers. 
Multiple options can be specified\n via multiple invocations of this switch.\n Options should be prefixed with a the\n checker name followed by the option, e.g.\n '<checker>:<option_name>'\n\n Available options:\n 'cf:enable_appendix_a_checks' - Allow check\n results against CF Appendix A for attribute\n location and data types.\n ")) parser.add_argument('-V', '--version', action='store_true', help='Display the IOOS Compliance Checker version information.') parser.add_argument('dataset_location', nargs='*', help="Defines the location of the dataset to be checked. The location can be a local netCDF file, a remote OPeNDAP endpoint, a remote netCDF file which returns content-type header of 'application/x-netcdf', or an ERDDAP TableDAP endpoint. Note that the ERDDAP TableDAP endpoint will currently attempt to fetch the entire TableDAP dataset.") parser.add_argument('-l', '--list-tests', action='store_true', help='List the available tests') parser.add_argument('-d', '--download-standard-names', help='Specify a version of the cf standard name table to download as packaged version. Either specify a version number (e.g. "72") to fetch a specific version or "latest" to get the latest CF standard name table.') check_suite.add_plugin_args(parser) args = parser.parse_args() check_suite.load_generated_checkers(args) if args.version: print('IOOS compliance checker version %s' % __version__) sys.exit(0) options_dict = parse_options(args.option) if args.option else defaultdict(set) if args.describe_checks: error_stat = 0 if args.test: checker_names = set(args.test) else: checker_names = [c for c in check_suite.checkers if ':' in c and (not c.endswith(':latest'))] for checker_name in sorted(checker_names): if checker_name not in check_suite.checkers: print("Cannot find checker '{}' with which to describe checks".format(checker_name), file=sys.stderr) error_stat = 1 else: print('{0}\n {1} \n{0}'.format('=' * (len(checker_name) + 2), checker_name)) check_suite._print_checker(check_suite.checkers[checker_name]) sys.exit(error_stat) if args.list_tests: print('IOOS compliance checker available checker suites:') check_suite._print_suites(args.verbose) return 0 if args.download_standard_names: download_cf_standard_name_table(args.download_standard_names) if len(args.dataset_location) == 0: parser.print_help() sys.exit(1) if not args.output: args.output = '-' output_len = len(args.output) if not (output_len == 1 or output_len == len(args.dataset_location)): print('The number of output files must either be one or the same as the number of datasets', file=sys.stderr) sys.exit(2) return_values = [] had_errors = [] if output_len == 1: if args.format != 'json': print('Running Compliance Checker on the datasets from: {}'.format(args.dataset_location), file=sys.stderr) (return_value, errors) = ComplianceChecker.run_checker(args.dataset_location, args.test or ['acdd'], args.verbose, args.criteria, args.skip_checks, args.include_checks, args.output[0], args.format or ['text'], options=options_dict) return_values.append(return_value) had_errors.append(errors) else: for (output, dataset) in zip(args.output, args.dataset_location): if args.format != 'json': print('Running Compliance Checker on the dataset from: {}'.format(dataset), file=sys.stderr) (return_value, errors) = ComplianceChecker.run_checker([dataset], args.test or ['acdd'], args.verbose, args.criteria, args.skip_checks, args.include_checks, output, args.format or ['text'], options=options_dict) return_values.append(return_value) had_errors.append(errors) if 
any(had_errors): sys.exit(2) if all(return_values): sys.exit(0) sys.exit(1)
compliance-checker
positive
def mousePressEvent(self, event): if event.button() != Qt.LeftButton and event.button() != Qt.RightButton: return if not self.isMouseEventInBlock(event): self.scroll_base_x = event.x() self.scroll_base_y = event.y() self.scroll_mode = True self.viewport().grabMouse() return <DeepExtract> xofs = self.horizontalScrollBar().value() yofs = self.verticalScrollBar().value() x = event.x() + xofs - self.renderXOfs y = event.y() + yofs - self.renderYOfs for block in self.blocks.values(): blockx = x - (block.x + 2 * self.charWidth) blocky = y - (block.y + 2 * self.charWidth) if blockx < 0 or blockx > block.width - 4 * self.charWidth: continue if blocky < 0 or blocky > block.height - 4 * self.charWidth: continue col = int(blockx / self.charWidth) row = int(blocky / self.charHeight) cur_row = 0 for line in block.block.header_text.tokens: if cur_row == row: for token in line: if col >= token[0] and col < token[0] + token[1]: token = token cur_row += 1 for instr in block.block.instrs: for line in instr.text.tokens: if cur_row == row: for token in line: if col >= token[0] and col < token[0] + token[1]: token = token cur_row += 1 token = None </DeepExtract> if token: self.highlight_token = token[2:] else: self.highlight_token = None <DeepExtract> xofs = self.horizontalScrollBar().value() yofs = self.verticalScrollBar().value() x = event.x() + xofs - self.renderXOfs y = event.y() + yofs - self.renderYOfs for block in self.blocks.values(): blockx = x - (block.x + 2 * self.charWidth) blocky = y - (block.y + 2 * self.charWidth) if blockx < 0 or blockx > block.width - 4 * self.charWidth: continue if blocky < 0 or blocky > block.height - 4 * self.charWidth: continue row = int(blocky / self.charHeight) cur_row = len(block.block.header_text.lines) if row < cur_row: instr = block.block.entry for instr in block.block.instrs: if row < cur_row + len(instr.text.lines): instr = instr.addr cur_row += len(instr.text.lines) instr = None </DeepExtract> if instr != None: self.cur_instr = instr else: self.cur_instr = None self.viewport().update() if instr != None and event.button() == Qt.RightButton: <DeepExtract> popup = QMenu() view_in_hex = popup.addAction('View in &hex editor') view_in_hex.triggered.connect(lambda : self.view_in_hex_editor(instr)) view_in_hex.setShortcut(QKeySequence(Qt.Key_H)) popup.addAction('Copy address', self.copy_address) enter_name_action = popup.addAction('Re&name symbol', self.enter_name) enter_name_action.setShortcut(QKeySequence(Qt.Key_N)) undefine_name_action = popup.addAction('&Undefine symbol', self.undefine_name) undefine_name_action.setShortcut(QKeySequence(Qt.Key_U)) show_address_action = popup.addAction('Show &address', self.show_address) show_address_action.setCheckable(True) show_address_action.setChecked('address' in self.analysis.options) popup.addSeparator() patch = popup.addMenu('&Patch') patch.addAction('Convert to NOP').triggered.connect(lambda : self.nop_out(instr)) instr = self.find_instr(instr) if instr: if instr.is_patch_branch_allowed(): patch.addAction('Never branch').triggered.connect(lambda : self.nop_out(instr)) patch.addAction('Always branch').triggered.connect(lambda : self.always_branch(instr)) patch.addAction('Invert branch').triggered.connect(lambda : self.invert_branch(instr)) if instr.is_patch_to_zero_return_allowed(): patch.addAction('Skip and return zero').triggered.connect(lambda : self.skip_and_return_zero(instr)) if instr.is_patch_to_fixed_return_value_allowed(): patch.addAction('Skip and return value...').triggered.connect(lambda : self.skip_and_return_value(instr)) popup.exec_(QCursor.pos()) </DeepExtract>
def mousePressEvent(self, event): if event.button() != Qt.LeftButton and event.button() != Qt.RightButton: return if not self.isMouseEventInBlock(event): self.scroll_base_x = event.x() self.scroll_base_y = event.y() self.scroll_mode = True self.viewport().grabMouse() return xofs = self.horizontalScrollBar().value() yofs = self.verticalScrollBar().value() x = event.x() + xofs - self.renderXOfs y = event.y() + yofs - self.renderYOfs for block in self.blocks.values(): blockx = x - (block.x + 2 * self.charWidth) blocky = y - (block.y + 2 * self.charWidth) if blockx < 0 or blockx > block.width - 4 * self.charWidth: continue if blocky < 0 or blocky > block.height - 4 * self.charWidth: continue col = int(blockx / self.charWidth) row = int(blocky / self.charHeight) cur_row = 0 for line in block.block.header_text.tokens: if cur_row == row: for token in line: if col >= token[0] and col < token[0] + token[1]: token = token cur_row += 1 for instr in block.block.instrs: for line in instr.text.tokens: if cur_row == row: for token in line: if col >= token[0] and col < token[0] + token[1]: token = token cur_row += 1 token = None if token: self.highlight_token = token[2:] else: self.highlight_token = None xofs = self.horizontalScrollBar().value() yofs = self.verticalScrollBar().value() x = event.x() + xofs - self.renderXOfs y = event.y() + yofs - self.renderYOfs for block in self.blocks.values(): blockx = x - (block.x + 2 * self.charWidth) blocky = y - (block.y + 2 * self.charWidth) if blockx < 0 or blockx > block.width - 4 * self.charWidth: continue if blocky < 0 or blocky > block.height - 4 * self.charWidth: continue row = int(blocky / self.charHeight) cur_row = len(block.block.header_text.lines) if row < cur_row: instr = block.block.entry for instr in block.block.instrs: if row < cur_row + len(instr.text.lines): instr = instr.addr cur_row += len(instr.text.lines) instr = None if instr != None: self.cur_instr = instr else: self.cur_instr = None self.viewport().update() if instr != None and event.button() == Qt.RightButton: popup = QMenu() view_in_hex = popup.addAction('View in &hex editor') view_in_hex.triggered.connect(lambda : self.view_in_hex_editor(instr)) view_in_hex.setShortcut(QKeySequence(Qt.Key_H)) popup.addAction('Copy address', self.copy_address) enter_name_action = popup.addAction('Re&name symbol', self.enter_name) enter_name_action.setShortcut(QKeySequence(Qt.Key_N)) undefine_name_action = popup.addAction('&Undefine symbol', self.undefine_name) undefine_name_action.setShortcut(QKeySequence(Qt.Key_U)) show_address_action = popup.addAction('Show &address', self.show_address) show_address_action.setCheckable(True) show_address_action.setChecked('address' in self.analysis.options) popup.addSeparator() patch = popup.addMenu('&Patch') patch.addAction('Convert to NOP').triggered.connect(lambda : self.nop_out(instr)) instr = self.find_instr(instr) if instr: if instr.is_patch_branch_allowed(): patch.addAction('Never branch').triggered.connect(lambda : self.nop_out(instr)) patch.addAction('Always branch').triggered.connect(lambda : self.always_branch(instr)) patch.addAction('Invert branch').triggered.connect(lambda : self.invert_branch(instr)) if instr.is_patch_to_zero_return_allowed(): patch.addAction('Skip and return zero').triggered.connect(lambda : self.skip_and_return_zero(instr)) if instr.is_patch_to_fixed_return_value_allowed(): patch.addAction('Skip and return value...').triggered.connect(lambda : self.skip_and_return_value(instr)) popup.exec_(QCursor.pos())
deprecated-binaryninja-python
positive
def test_can_be_used_to_implement_auth_example(): roles = ['UNKNOWN', 'USER', 'REVIEWER', 'ADMIN'] class User: def __init__(self, token: str): self.token_index = roles.index(token) def has_role(self, role: str): role_index = roles.index(role) return self.token_index >= role_index >= 0 def _get_user(token: str): return User(token) class AuthDirective(SchemaDirectiveVisitor): def visit_object(self, object_: GraphQLObjectType): <DeepExtract> if hasattr(object_, '_auth_fields_wrapped'): return setattr(object_, '_auth_fields_wrapped', True) def _resolver(_, info, *, f=None, o=None): required_role = getattr(f, '_required_auth_role', None) or getattr(o, '_required_auth_role', None) if not required_role: return original_resolver(_, info) context = info.context user = _get_user(context['headers']['authToken']) if not user.has_role(required_role): raise ValueError('not authorized') return original_resolver(_, info) for (_, field) in object_.fields.items(): original_resolver = field.resolve or default_field_resolver field.resolve = partial(_resolver, f=field, o=object_) </DeepExtract> setattr(object_, '_required_auth_role', self.args['requires']) def visit_field_definition(self, field: GraphQLField, object_type: Union[GraphQLObjectType, GraphQLInterfaceType]) -> GraphQLField: <DeepExtract> if hasattr(object_type, '_auth_fields_wrapped'): return setattr(object_type, '_auth_fields_wrapped', True) def _resolver(_, info, *, f=None, o=None): required_role = getattr(f, '_required_auth_role', None) or getattr(o, '_required_auth_role', None) if not required_role: return original_resolver(_, info) context = info.context user = _get_user(context['headers']['authToken']) if not user.has_role(required_role): raise ValueError('not authorized') return original_resolver(_, info) for (_, field) in object_type.fields.items(): original_resolver = field.resolve or default_field_resolver field.resolve = partial(_resolver, f=field, o=object_type) </DeepExtract> setattr(field, '_required_auth_role', self.args['requires']) def ensure_fields_wrapped(self, object_type: GraphQLObjectType): if hasattr(object_type, '_auth_fields_wrapped'): return setattr(object_type, '_auth_fields_wrapped', True) def _resolver(_, info, *, f=None, o=None): required_role = getattr(f, '_required_auth_role', None) or getattr(o, '_required_auth_role', None) if not required_role: return original_resolver(_, info) context = info.context <DeepExtract> user = User(context['headers']['authToken']) </DeepExtract> if not user.has_role(required_role): raise ValueError('not authorized') return original_resolver(_, info) for (_, field) in object_type.fields.items(): original_resolver = field.resolve or default_field_resolver field.resolve = partial(_resolver, f=field, o=object_type) type_defs = '\n directive @auth(\n requires: Role = ADMIN,\n ) on OBJECT | FIELD_DEFINITION\n\n enum Role {\n ADMIN\n REVIEWER\n USER\n UNKNOWN\n }\n\n type User @auth(requires: USER) {\n name: String\n banned: Boolean @auth(requires: ADMIN)\n canPost: Boolean @auth(requires: REVIEWER)\n }\n\n type Query {\n users: [User]\n }\n ' query = QueryType() @query.field('users') def _users_resolver(_, __): return [{'banned': True, 'canPost': False, 'name': 'Ben'}] schema = make_executable_schema(type_defs, [query], directives={'auth': AuthDirective}) def exec_with_role(role: str): return graphql_sync(schema, '\n query {\n users {\n name\n banned\n canPost\n }\n }\n ', context_value={'headers': {'authToken': role}}) def _check_results(result, *, data=None, errors=None): if errors and 
result.errors: assert len(errors) == len(result.errors) for e in result.errors: assert e.message == 'not authorized' assert e.path[-1] in errors assert result.data == data <DeepExtract> if ('name', 'banned', 'canPost') and exec_with_role('UNKNOWN').errors: assert len(('name', 'banned', 'canPost')) == len(exec_with_role('UNKNOWN').errors) for e in exec_with_role('UNKNOWN').errors: assert e.message == 'not authorized' assert e.path[-1] in ('name', 'banned', 'canPost') assert exec_with_role('UNKNOWN').data == {'users': [{'name': None, 'banned': None, 'canPost': None}]} </DeepExtract> <DeepExtract> if ('banned', 'canPost') and exec_with_role('USER').errors: assert len(('banned', 'canPost')) == len(exec_with_role('USER').errors) for e in exec_with_role('USER').errors: assert e.message == 'not authorized' assert e.path[-1] in ('banned', 'canPost') assert exec_with_role('USER').data == {'users': [{'name': 'Ben', 'banned': None, 'canPost': None}]} </DeepExtract> <DeepExtract> if ('banned',) and exec_with_role('REVIEWER').errors: assert len(('banned',)) == len(exec_with_role('REVIEWER').errors) for e in exec_with_role('REVIEWER').errors: assert e.message == 'not authorized' assert e.path[-1] in ('banned',) assert exec_with_role('REVIEWER').data == {'users': [{'name': 'Ben', 'banned': None, 'canPost': False}]} </DeepExtract> <DeepExtract> if errors and exec_with_role('ADMIN').errors: assert len(errors) == len(exec_with_role('ADMIN').errors) for e in exec_with_role('ADMIN').errors: assert e.message == 'not authorized' assert e.path[-1] in errors assert exec_with_role('ADMIN').data == {'users': [{'name': 'Ben', 'banned': True, 'canPost': False}]} </DeepExtract>
def test_can_be_used_to_implement_auth_example(): roles = ['UNKNOWN', 'USER', 'REVIEWER', 'ADMIN'] class User: def __init__(self, token: str): self.token_index = roles.index(token) def has_role(self, role: str): role_index = roles.index(role) return self.token_index >= role_index >= 0 def _get_user(token: str): return User(token) class AuthDirective(SchemaDirectiveVisitor): def visit_object(self, object_: GraphQLObjectType): if hasattr(object_, '_auth_fields_wrapped'): return setattr(object_, '_auth_fields_wrapped', True) def _resolver(_, info, *, f=None, o=None): required_role = getattr(f, '_required_auth_role', None) or getattr(o, '_required_auth_role', None) if not required_role: return original_resolver(_, info) context = info.context user = _get_user(context['headers']['authToken']) if not user.has_role(required_role): raise ValueError('not authorized') return original_resolver(_, info) for (_, field) in object_.fields.items(): original_resolver = field.resolve or default_field_resolver field.resolve = partial(_resolver, f=field, o=object_) setattr(object_, '_required_auth_role', self.args['requires']) def visit_field_definition(self, field: GraphQLField, object_type: Union[GraphQLObjectType, GraphQLInterfaceType]) -> GraphQLField: if hasattr(object_type, '_auth_fields_wrapped'): return setattr(object_type, '_auth_fields_wrapped', True) def _resolver(_, info, *, f=None, o=None): required_role = getattr(f, '_required_auth_role', None) or getattr(o, '_required_auth_role', None) if not required_role: return original_resolver(_, info) context = info.context user = _get_user(context['headers']['authToken']) if not user.has_role(required_role): raise ValueError('not authorized') return original_resolver(_, info) for (_, field) in object_type.fields.items(): original_resolver = field.resolve or default_field_resolver field.resolve = partial(_resolver, f=field, o=object_type) setattr(field, '_required_auth_role', self.args['requires']) def ensure_fields_wrapped(self, object_type: GraphQLObjectType): if hasattr(object_type, '_auth_fields_wrapped'): return setattr(object_type, '_auth_fields_wrapped', True) def _resolver(_, info, *, f=None, o=None): required_role = getattr(f, '_required_auth_role', None) or getattr(o, '_required_auth_role', None) if not required_role: return original_resolver(_, info) context = info.context user = User(context['headers']['authToken']) if not user.has_role(required_role): raise ValueError('not authorized') return original_resolver(_, info) for (_, field) in object_type.fields.items(): original_resolver = field.resolve or default_field_resolver field.resolve = partial(_resolver, f=field, o=object_type) type_defs = '\n directive @auth(\n requires: Role = ADMIN,\n ) on OBJECT | FIELD_DEFINITION\n\n enum Role {\n ADMIN\n REVIEWER\n USER\n UNKNOWN\n }\n\n type User @auth(requires: USER) {\n name: String\n banned: Boolean @auth(requires: ADMIN)\n canPost: Boolean @auth(requires: REVIEWER)\n }\n\n type Query {\n users: [User]\n }\n ' query = QueryType() @query.field('users') def _users_resolver(_, __): return [{'banned': True, 'canPost': False, 'name': 'Ben'}] schema = make_executable_schema(type_defs, [query], directives={'auth': AuthDirective}) def exec_with_role(role: str): return graphql_sync(schema, '\n query {\n users {\n name\n banned\n canPost\n }\n }\n ', context_value={'headers': {'authToken': role}}) def _check_results(result, *, data=None, errors=None): if errors and result.errors: assert len(errors) == len(result.errors) for e in result.errors: assert 
e.message == 'not authorized' assert e.path[-1] in errors assert result.data == data if ('name', 'banned', 'canPost') and exec_with_role('UNKNOWN').errors: assert len(('name', 'banned', 'canPost')) == len(exec_with_role('UNKNOWN').errors) for e in exec_with_role('UNKNOWN').errors: assert e.message == 'not authorized' assert e.path[-1] in ('name', 'banned', 'canPost') assert exec_with_role('UNKNOWN').data == {'users': [{'name': None, 'banned': None, 'canPost': None}]} if ('banned', 'canPost') and exec_with_role('USER').errors: assert len(('banned', 'canPost')) == len(exec_with_role('USER').errors) for e in exec_with_role('USER').errors: assert e.message == 'not authorized' assert e.path[-1] in ('banned', 'canPost') assert exec_with_role('USER').data == {'users': [{'name': 'Ben', 'banned': None, 'canPost': None}]} if ('banned',) and exec_with_role('REVIEWER').errors: assert len(('banned',)) == len(exec_with_role('REVIEWER').errors) for e in exec_with_role('REVIEWER').errors: assert e.message == 'not authorized' assert e.path[-1] in ('banned',) assert exec_with_role('REVIEWER').data == {'users': [{'name': 'Ben', 'banned': None, 'canPost': False}]} if errors and exec_with_role('ADMIN').errors: assert len(errors) == len(exec_with_role('ADMIN').errors) for e in exec_with_role('ADMIN').errors: assert e.message == 'not authorized' assert e.path[-1] in errors assert exec_with_role('ADMIN').data == {'users': [{'name': 'Ben', 'banned': True, 'canPost': False}]}
ariadne
positive
def sample(self, N): """Sample N realizations. Returns N-by-M (ndim) sample matrix. Example ------- >>> plt.scatter(*(UniRV(C=randcov(2)).sample(10**4).T)) # doctest: +SKIP """ if self.C == 0: D = np.zeros((N, self.M)) else: <DeepExtract> raise NotImplementedError('Must be implemented in subclass') </DeepExtract> return self.mu + D
def sample(self, N): """Sample N realizations. Returns N-by-M (ndim) sample matrix. Example ------- >>> plt.scatter(*(UniRV(C=randcov(2)).sample(10**4).T)) # doctest: +SKIP """ if self.C == 0: D = np.zeros((N, self.M)) else: raise NotImplementedError('Must be implemented in subclass') return self.mu + D
DAPPER
positive
def test_terraform_get_node(create_terraform, create_temp_dir): from processor.connector.snapshot_custom import get_node data = {'type': 'terraform', 'snapshotId': '1', 'path': 'a/b/c'} terr_data = ['name="azrcterrafstr02"', 'locatio="neastus2"', 'resourceGroup="core-terraf-auto-rg"', 'containerName="states"'] terr_data_dict = {'name': 'azrcterrafstr02', 'locatio': 'neastus2', 'resourceGroup': 'core-terraf-auto-rg', 'containerName': 'states'} snapshot = {'source': 'terraform', 'type': 'custom'} <DeepExtract> connector = {'companyName': 'abcd', 'gitProvider': 'https://ebizframework.visualstudio.com/whitekite/_git/whitekite', 'repoCloneAddress': '/tmp/m', 'branchName': 'master', 'username': 'abcd'} </DeepExtract> ret = get_node('/tmp', data, snapshot, 'master', connector) assert True == isinstance(ret, dict) assert {} == ret['json'] newpath = create_temp_dir() os.makedirs('%s/%s' % (newpath, data['path'])) fname = create_terraform('%s/%s' % (newpath, data['path']), '\n'.join(terr_data)) data['path'] = '%s/%s' % (data['path'], fname) data['type'] = 'terraform' ret = get_node(newpath, data, snapshot, 'master', connector) assert True == isinstance(ret, dict) assert ret['json'] == terr_data_dict data['type'] = 'terraform1' ret = get_node(newpath, data, snapshot, 'master', connector) assert True == isinstance(ret, dict) assert ret['json'] == {}
def test_terraform_get_node(create_terraform, create_temp_dir): from processor.connector.snapshot_custom import get_node data = {'type': 'terraform', 'snapshotId': '1', 'path': 'a/b/c'} terr_data = ['name="azrcterrafstr02"', 'locatio="neastus2"', 'resourceGroup="core-terraf-auto-rg"', 'containerName="states"'] terr_data_dict = {'name': 'azrcterrafstr02', 'locatio': 'neastus2', 'resourceGroup': 'core-terraf-auto-rg', 'containerName': 'states'} snapshot = {'source': 'terraform', 'type': 'custom'} connector = {'companyName': 'abcd', 'gitProvider': 'https://ebizframework.visualstudio.com/whitekite/_git/whitekite', 'repoCloneAddress': '/tmp/m', 'branchName': 'master', 'username': 'abcd'} ret = get_node('/tmp', data, snapshot, 'master', connector) assert True == isinstance(ret, dict) assert {} == ret['json'] newpath = create_temp_dir() os.makedirs('%s/%s' % (newpath, data['path'])) fname = create_terraform('%s/%s' % (newpath, data['path']), '\n'.join(terr_data)) data['path'] = '%s/%s' % (data['path'], fname) data['type'] = 'terraform' ret = get_node(newpath, data, snapshot, 'master', connector) assert True == isinstance(ret, dict) assert ret['json'] == terr_data_dict data['type'] = 'terraform1' ret = get_node(newpath, data, snapshot, 'master', connector) assert True == isinstance(ret, dict) assert ret['json'] == {}
cloud-validation-framework
positive
def refreshOrgList(): global ORG_LIST print('INFO: Starting org list refresh at %s...' % datetime.datetime.now()) flag_firstorg = True <DeepExtract> merakirequestthrottler() try: r = requests.get('https://api.meraki.com/api/v0/organizations', headers={'X-Cisco-Meraki-API-Key': ARG_APIKEY, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT)) except: print('ERROR 01: Unable to contact Meraki cloud') orglist = None if r.status_code != requests.codes.ok: orglist = None rjson = r.json() orglist = [] listlen = -1 if ARG_ORGNAME.lower() == '/all': for org in rjson: orglist.append(c_Organization()) listlen += 1 orglist[listlen].id = org['id'] orglist[listlen].name = org['name'] else: for org in rjson: if org['name'] == ARG_ORGNAME: orglist.append(c_Organization()) listlen += 1 orglist[listlen].id = org['id'] orglist[listlen].name = org['name'] orglist = orglist </DeepExtract> if not orglist is None: for org in orglist: print('INFO: Processing org "%s"' % org.name) org.shard = 'api.meraki.com' <DeepExtract> orgshard = 'api.meraki.com' </DeepExtract> if not orgshard is None: org.shard = orgshard <DeepExtract> merakirequestthrottler() try: r = requests.get('https://%s/api/v0/organizations/%s/networks' % (org.shard, org.id), headers={'X-Cisco-Meraki-API-Key': ARG_APIKEY, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT)) except: print('ERROR 07: Unable to contact Meraki cloud') netlist = None if r.status_code != requests.codes.ok: netlist = None netlist = r.json() </DeepExtract> <DeepExtract> merakirequestthrottler() try: r = requests.get('https://%s/api/v0/organizations/%s/inventory' % (org.shard, org.id), headers={'X-Cisco-Meraki-API-Key': ARG_APIKEY, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT)) except: print('ERROR 06: Unable to contact Meraki cloud') devlist = None if r.status_code != requests.codes.ok: devlist = None devlist = r.json() </DeepExtract> if not devlist is None and (not netlist is None): db = sqlite3.connect(':memory:') dbcursor = db.cursor() dbcursor.execute('CREATE TABLE devices (serial text, networkId text)') db.commit() for device in devlist: if not device['networkId'] is None: if device['model'].startswith('MR'): dbcursor.execute('INSERT INTO devices VALUES (?,?)', (device['serial'], device['networkId'])) db.commit() flag_firstnet = True for net in netlist: if net['type'] != 'systems manager': dbcursor.execute('SELECT serial FROM devices WHERE networkId = ?', (net['id'],)) devicesofnet = dbcursor.fetchall() if len(devicesofnet) > 0: if flag_firstnet: if flag_firstorg: ORG_LIST = [] lastorg = -1 flag_firstorg = False ORG_LIST.append(org) lastorg += 1 lastnet = -1 ORG_LIST[lastorg].nets = [] flag_firstnet = False ORG_LIST[lastorg].nets.append(c_Net()) lastnet += 1 ORG_LIST[lastorg].nets[lastnet].id = net['id'] ORG_LIST[lastorg].nets[lastnet].name = net['name'] ORG_LIST[lastorg].nets[lastnet].shard = org.shard ORG_LIST[lastorg].nets[lastnet].devices = [] for device in devicesofnet: ORG_LIST[lastorg].nets[lastnet].devices.append(device[0]) db.close() LAST_ORGLIST_REFRESH = datetime.datetime.now() print('INFO: Refresh complete at %s' % LAST_ORGLIST_REFRESH) return None
def refreshOrgList(): global ORG_LIST print('INFO: Starting org list refresh at %s...' % datetime.datetime.now()) flag_firstorg = True merakirequestthrottler() try: r = requests.get('https://api.meraki.com/api/v0/organizations', headers={'X-Cisco-Meraki-API-Key': ARG_APIKEY, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT)) except: print('ERROR 01: Unable to contact Meraki cloud') orglist = None if r.status_code != requests.codes.ok: orglist = None rjson = r.json() orglist = [] listlen = -1 if ARG_ORGNAME.lower() == '/all': for org in rjson: orglist.append(c_Organization()) listlen += 1 orglist[listlen].id = org['id'] orglist[listlen].name = org['name'] else: for org in rjson: if org['name'] == ARG_ORGNAME: orglist.append(c_Organization()) listlen += 1 orglist[listlen].id = org['id'] orglist[listlen].name = org['name'] orglist = orglist if not orglist is None: for org in orglist: print('INFO: Processing org "%s"' % org.name) org.shard = 'api.meraki.com' orgshard = 'api.meraki.com' if not orgshard is None: org.shard = orgshard merakirequestthrottler() try: r = requests.get('https://%s/api/v0/organizations/%s/networks' % (org.shard, org.id), headers={'X-Cisco-Meraki-API-Key': ARG_APIKEY, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT)) except: print('ERROR 07: Unable to contact Meraki cloud') netlist = None if r.status_code != requests.codes.ok: netlist = None netlist = r.json() merakirequestthrottler() try: r = requests.get('https://%s/api/v0/organizations/%s/inventory' % (org.shard, org.id), headers={'X-Cisco-Meraki-API-Key': ARG_APIKEY, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT)) except: print('ERROR 06: Unable to contact Meraki cloud') devlist = None if r.status_code != requests.codes.ok: devlist = None devlist = r.json() if not devlist is None and (not netlist is None): db = sqlite3.connect(':memory:') dbcursor = db.cursor() dbcursor.execute('CREATE TABLE devices (serial text, networkId text)') db.commit() for device in devlist: if not device['networkId'] is None: if device['model'].startswith('MR'): dbcursor.execute('INSERT INTO devices VALUES (?,?)', (device['serial'], device['networkId'])) db.commit() flag_firstnet = True for net in netlist: if net['type'] != 'systems manager': dbcursor.execute('SELECT serial FROM devices WHERE networkId = ?', (net['id'],)) devicesofnet = dbcursor.fetchall() if len(devicesofnet) > 0: if flag_firstnet: if flag_firstorg: ORG_LIST = [] lastorg = -1 flag_firstorg = False ORG_LIST.append(org) lastorg += 1 lastnet = -1 ORG_LIST[lastorg].nets = [] flag_firstnet = False ORG_LIST[lastorg].nets.append(c_Net()) lastnet += 1 ORG_LIST[lastorg].nets[lastnet].id = net['id'] ORG_LIST[lastorg].nets[lastnet].name = net['name'] ORG_LIST[lastorg].nets[lastnet].shard = org.shard ORG_LIST[lastorg].nets[lastnet].devices = [] for device in devicesofnet: ORG_LIST[lastorg].nets[lastnet].devices.append(device[0]) db.close() LAST_ORGLIST_REFRESH = datetime.datetime.now() print('INFO: Refresh complete at %s' % LAST_ORGLIST_REFRESH) return None
automation-scripts
positive
def get_matrix(self): for entry in (self.tp, self.fp, self.tn, self.fn): if entry is None: <DeepExtract> if self.test is None or self.reference is None: raise ValueError("'test' and 'reference' must both be set to compute confusion matrix.") assert_shape(self.test, self.reference) self.tp = int(((self.test != 0) * (self.reference != 0)).sum()) self.fp = int(((self.test != 0) * (self.reference == 0)).sum()) self.tn = int(((self.test == 0) * (self.reference == 0)).sum()) self.fn = int(((self.test == 0) * (self.reference != 0)).sum()) self.size = int(np.prod(self.reference.shape, dtype=np.int64)) self.test_empty = not np.any(self.test) self.test_full = np.all(self.test) self.reference_empty = not np.any(self.reference) self.reference_full = np.all(self.reference) </DeepExtract> break return (self.tp, self.fp, self.tn, self.fn)
def get_matrix(self): for entry in (self.tp, self.fp, self.tn, self.fn): if entry is None: if self.test is None or self.reference is None: raise ValueError("'test' and 'reference' must both be set to compute confusion matrix.") assert_shape(self.test, self.reference) self.tp = int(((self.test != 0) * (self.reference != 0)).sum()) self.fp = int(((self.test != 0) * (self.reference == 0)).sum()) self.tn = int(((self.test == 0) * (self.reference == 0)).sum()) self.fn = int(((self.test == 0) * (self.reference != 0)).sum()) self.size = int(np.prod(self.reference.shape, dtype=np.int64)) self.test_empty = not np.any(self.test) self.test_full = np.all(self.test) self.reference_empty = not np.any(self.reference) self.reference_full = np.all(self.reference) break return (self.tp, self.fp, self.tn, self.fn)
CoTr
positive
def preprocess(self, data: Dict[str, torch.Tensor]) -> torch.Tensor: if 'batch' in data: batch = data['batch'] else: batch = data['pos'].new_zeros(data['pos'].shape[0], dtype=torch.long) if 'edge_src' in data and 'edge_dst' in data: edge_src = data['edge_src'] edge_dst = data['edge_dst'] else: <DeepExtract> r = torch.cdist(data['pos'], data['pos']) index = ((r < self.max_radius) & (r > 0)).nonzero().T index = index[:, batch[index[0]] == batch[index[1]]] edge_index = index </DeepExtract> edge_src = edge_index[0] edge_dst = edge_index[1] edge_vec = data['pos'][edge_src] - data['pos'][edge_dst] if 'x' in data: node_input = data['x'] else: node_input = data['node_input'] node_attr = data['node_attr'] edge_attr = data['edge_attr'] return (batch, node_input, node_attr, edge_attr, edge_src, edge_dst, edge_vec)
def preprocess(self, data: Dict[str, torch.Tensor]) -> torch.Tensor: if 'batch' in data: batch = data['batch'] else: batch = data['pos'].new_zeros(data['pos'].shape[0], dtype=torch.long) if 'edge_src' in data and 'edge_dst' in data: edge_src = data['edge_src'] edge_dst = data['edge_dst'] else: r = torch.cdist(data['pos'], data['pos']) index = ((r < self.max_radius) & (r > 0)).nonzero().T index = index[:, batch[index[0]] == batch[index[1]]] edge_index = index edge_src = edge_index[0] edge_dst = edge_index[1] edge_vec = data['pos'][edge_src] - data['pos'][edge_dst] if 'x' in data: node_input = data['x'] else: node_input = data['node_input'] node_attr = data['node_attr'] edge_attr = data['edge_attr'] return (batch, node_input, node_attr, edge_attr, edge_src, edge_dst, edge_vec)
e3nn
positive
@patch('decorators.s3') @patch('decorators.uuid4', MagicMock(side_effect=['a'])) def test_it_overrides_default_bucket_and_prefix(mock_s3): with patch.dict(os.environ, {'StateBucket': 'bucket'}): @s3_state_store(offload_keys=['Dict'], should_load=False, prefix='custom/', bucket='otherbucket') def my_func(event, *_): return event <DeepExtract> res = {'Dict': {'test': 'data'}} </DeepExtract> assert {'Dict': 's3://otherbucket/custom/a'} == res assert ('otherbucket', 'custom/a') == mock_s3.Object.call_args_list[0][0] assert {'Body': '{"test": "data"}'} == mock_s3.Object().put.call_args_list[0][1]
@patch('decorators.s3') @patch('decorators.uuid4', MagicMock(side_effect=['a'])) def test_it_overrides_default_bucket_and_prefix(mock_s3): with patch.dict(os.environ, {'StateBucket': 'bucket'}): @s3_state_store(offload_keys=['Dict'], should_load=False, prefix='custom/', bucket='otherbucket') def my_func(event, *_): return event res = {'Dict': {'test': 'data'}} assert {'Dict': 's3://otherbucket/custom/a'} == res assert ('otherbucket', 'custom/a') == mock_s3.Object.call_args_list[0][0] assert {'Body': '{"test": "data"}'} == mock_s3.Object().put.call_args_list[0][1]
amazon-s3-find-and-forget
positive
def visit_Forall(self, expression: Forall) -> Union[Constant, Or, Symbol]: <DeepExtract> if self._top_level: expression.expression = expression.expression.propagate_constants() expression.expression = SubstituteCalls().visit(expression.expression) expression.expression = expression.expression.propagate_constants() expression.expression = LiftIfThenElse().visit(expression.expression) expression.expression = expression.expression.propagate_constants() expression.expression = RemoveIfThenElse().visit(expression.expression) expression.expression = expression.expression.propagate_constants() if not isinstance(expression.expression, ArithmeticExpression) or isinstance(expression.expression, Symbol): expression.expression = Or(And(expression.expression)) self._infer_expression_details = lambda expression=expression.expression: DetailsInference().visit(expression.expression) expression.expression = super().visit(expression.expression) expr = expression.expression </DeepExtract> assert isinstance(expr, (Constant, Or, Symbol)) return expr
def visit_Forall(self, expression: Forall) -> Union[Constant, Or, Symbol]: if self._top_level: expression.expression = expression.expression.propagate_constants() expression.expression = SubstituteCalls().visit(expression.expression) expression.expression = expression.expression.propagate_constants() expression.expression = LiftIfThenElse().visit(expression.expression) expression.expression = expression.expression.propagate_constants() expression.expression = RemoveIfThenElse().visit(expression.expression) expression.expression = expression.expression.propagate_constants() if not isinstance(expression.expression, ArithmeticExpression) or isinstance(expression.expression, Symbol): expression.expression = Or(And(expression.expression)) self._infer_expression_details = lambda expression=expression.expression: DetailsInference().visit(expression.expression) expression.expression = super().visit(expression.expression) expr = expression.expression assert isinstance(expr, (Constant, Or, Symbol)) return expr
DNNV
positive
def evse_phase(self, station_id: str) -> float: """ Returns the phase angle of the EVSE. Args: station_id (str): The ID of the station for which the allowable rates should be returned. Returns: float: phase angle of the EVSE. [degrees] """ <DeepExtract> if 'infrastructure_info' in self.data: infrastructure = self.data['infrastructure_info'] else: raise NotImplementedError(f"No data provided for {'infrastructure_info'}.") </DeepExtract> i = infrastructure['station_ids'].index(station_id) return infrastructure['phases'][i]
def evse_phase(self, station_id: str) -> float: """ Returns the phase angle of the EVSE. Args: station_id (str): The ID of the station for which the allowable rates should be returned. Returns: float: phase angle of the EVSE. [degrees] """ if 'infrastructure_info' in self.data: infrastructure = self.data['infrastructure_info'] else: raise NotImplementedError(f"No data provided for {'infrastructure_info'}.") i = infrastructure['station_ids'].index(station_id) return infrastructure['phases'][i]
acnportal
positive
@pytest.mark.skipif('ethereum_optimized.london.state_db' not in sys.modules, reason="missing dependency (use `pip install 'ethereum[optimized]'`)") def test_storage_key() -> None: def actions(impl: Any) -> Any: obj = impl.State() impl.set_account(obj, ADDRESS_FOO, EMPTY_ACCOUNT) impl.set_storage(obj, ADDRESS_FOO, STORAGE_FOO, U256(42)) impl.state_root(obj) return obj <DeepExtract> obj = state.State() state.set_account(obj, ADDRESS_FOO, EMPTY_ACCOUNT) state.set_storage(obj, ADDRESS_FOO, STORAGE_FOO, U256(42)) state.state_root(obj) state_normal = obj </DeepExtract> <DeepExtract> obj = state_db.State() state_db.set_account(obj, ADDRESS_FOO, EMPTY_ACCOUNT) state_db.set_storage(obj, ADDRESS_FOO, STORAGE_FOO, U256(42)) state_db.state_root(obj) state_optimized = obj </DeepExtract> assert state.get_storage(state_normal, ADDRESS_FOO, STORAGE_FOO) == state_db.get_storage(state_optimized, ADDRESS_FOO, STORAGE_FOO) assert state.state_root(state_normal) == state_db.state_root(state_optimized)
@pytest.mark.skipif('ethereum_optimized.london.state_db' not in sys.modules, reason="missing dependency (use `pip install 'ethereum[optimized]'`)") def test_storage_key() -> None: def actions(impl: Any) -> Any: obj = impl.State() impl.set_account(obj, ADDRESS_FOO, EMPTY_ACCOUNT) impl.set_storage(obj, ADDRESS_FOO, STORAGE_FOO, U256(42)) impl.state_root(obj) return obj obj = state.State() state.set_account(obj, ADDRESS_FOO, EMPTY_ACCOUNT) state.set_storage(obj, ADDRESS_FOO, STORAGE_FOO, U256(42)) state.state_root(obj) state_normal = obj obj = state_db.State() state_db.set_account(obj, ADDRESS_FOO, EMPTY_ACCOUNT) state_db.set_storage(obj, ADDRESS_FOO, STORAGE_FOO, U256(42)) state_db.state_root(obj) state_optimized = obj assert state.get_storage(state_normal, ADDRESS_FOO, STORAGE_FOO) == state_db.get_storage(state_optimized, ADDRESS_FOO, STORAGE_FOO) assert state.state_root(state_normal) == state_db.state_root(state_optimized)
eth1.0-specs
positive
def _read_config_categories(self): """Read and parse log configurations""" self._log_configs = {'Default': []} log_path = os.path.join(cfclient.config_path, 'log') for cathegory in os.listdir(log_path): cathegory_path = os.path.join(log_path, cathegory) try: if os.path.isdir(cathegory_path): self._log_configs[cathegory] = [] for conf in os.listdir(cathegory_path): if conf.endswith('.json'): conf_path = os.path.join(cathegory_path, conf) <DeepExtract> with open(conf_path) as f: data = json.load(f) infoNode = data['logconfig']['logblock'] logConf = LogConfig(infoNode['name'], int(infoNode['period'])) for v in data['logconfig']['logblock']['variables']: if v['type'] == 'TOC': logConf.add_variable(str(v['name']), v['fetch_as']) else: logConf.add_variable('Mem', v['fetch_as'], v['stored_as'], int(v['address'], 16)) log_conf = logConf </DeepExtract> self._log_configs[cathegory].append(log_conf) elif cathegory_path.endswith('.json'): <DeepExtract> with open(cathegory_path) as f: data = json.load(f) infoNode = data['logconfig']['logblock'] logConf = LogConfig(infoNode['name'], int(infoNode['period'])) for v in data['logconfig']['logblock']['variables']: if v['type'] == 'TOC': logConf.add_variable(str(v['name']), v['fetch_as']) else: logConf.add_variable('Mem', v['fetch_as'], v['stored_as'], int(v['address'], 16)) log_conf = logConf </DeepExtract> self._log_configs['Default'].append(log_conf) except Exception as e: logger.warning('Failed to open log config %s', e)
def _read_config_categories(self): """Read and parse log configurations""" self._log_configs = {'Default': []} log_path = os.path.join(cfclient.config_path, 'log') for cathegory in os.listdir(log_path): cathegory_path = os.path.join(log_path, cathegory) try: if os.path.isdir(cathegory_path): self._log_configs[cathegory] = [] for conf in os.listdir(cathegory_path): if conf.endswith('.json'): conf_path = os.path.join(cathegory_path, conf) with open(conf_path) as f: data = json.load(f) infoNode = data['logconfig']['logblock'] logConf = LogConfig(infoNode['name'], int(infoNode['period'])) for v in data['logconfig']['logblock']['variables']: if v['type'] == 'TOC': logConf.add_variable(str(v['name']), v['fetch_as']) else: logConf.add_variable('Mem', v['fetch_as'], v['stored_as'], int(v['address'], 16)) log_conf = logConf self._log_configs[cathegory].append(log_conf) elif cathegory_path.endswith('.json'): with open(cathegory_path) as f: data = json.load(f) infoNode = data['logconfig']['logblock'] logConf = LogConfig(infoNode['name'], int(infoNode['period'])) for v in data['logconfig']['logblock']['variables']: if v['type'] == 'TOC': logConf.add_variable(str(v['name']), v['fetch_as']) else: logConf.add_variable('Mem', v['fetch_as'], v['stored_as'], int(v['address'], 16)) log_conf = logConf self._log_configs['Default'].append(log_conf) except Exception as e: logger.warning('Failed to open log config %s', e)
crazyflie-clients-python
positive
def PlotCdf(self, label=None): """Draws a Cdf with vertical lines at the observed test stat. """ def VertLine(x): """Draws a vertical line at x.""" thinkplot.Plot([x, x], [0, 1], color='0.8') <DeepExtract> thinkplot.Plot([self.actual, self.actual], [0, 1], color='0.8') </DeepExtract> thinkplot.Cdf(self.test_cdf, label=label)
def PlotCdf(self, label=None): """Draws a Cdf with vertical lines at the observed test stat. """ def VertLine(x): """Draws a vertical line at x.""" thinkplot.Plot([x, x], [0, 1], color='0.8') thinkplot.Plot([self.actual, self.actual], [0, 1], color='0.8') thinkplot.Cdf(self.test_cdf, label=label)
bayesianGameofThrones
positive
@filter_hook def get_field_attrs(db_field, **kwargs): if db_field.name in self.style_fields: <DeepExtract> if self.style_fields[db_field.name] in ('radio', 'radio-inline') and (db_field.choices or isinstance(db_field, models.ForeignKey)): attrs = {'widget': widgets.AdminRadioSelect(attrs={'inline': 'inline' if self.style_fields[db_field.name] == 'radio-inline' else ''})} if db_field.choices: attrs['choices'] = db_field.get_choices(include_blank=db_field.blank, blank_choice=[('', _('Null'))]) attrs = attrs if self.style_fields[db_field.name] in ('checkbox', 'checkbox-inline') and isinstance(db_field, models.ManyToManyField): attrs = {'widget': widgets.AdminCheckboxSelect(attrs={'inline': self.style_fields[db_field.name] == 'checkbox-inline'}), 'help_text': None} </DeepExtract> if attrs: return attrs if hasattr(db_field, 'rel') and db_field.rel: related_modeladmin = self.admin_site._registry.get(db_field.rel.to) if related_modeladmin and hasattr(related_modeladmin, 'relfield_style'): <DeepExtract> if related_modeladmin.relfield_style in ('radio', 'radio-inline') and (db_field.choices or isinstance(db_field, models.ForeignKey)): attrs = {'widget': widgets.AdminRadioSelect(attrs={'inline': 'inline' if related_modeladmin.relfield_style == 'radio-inline' else ''})} if db_field.choices: attrs['choices'] = db_field.get_choices(include_blank=db_field.blank, blank_choice=[('', _('Null'))]) attrs = attrs if related_modeladmin.relfield_style in ('checkbox', 'checkbox-inline') and isinstance(db_field, models.ManyToManyField): attrs = {'widget': widgets.AdminCheckboxSelect(attrs={'inline': related_modeladmin.relfield_style == 'checkbox-inline'}), 'help_text': None} </DeepExtract> if attrs: return attrs if db_field.choices: return {'widget': widgets.AdminSelectWidget} for klass in db_field.__class__.mro(): if klass in self.formfield_overrides: return self.formfield_overrides[klass].copy() return {}
@filter_hook def get_field_attrs(db_field, **kwargs): if db_field.name in self.style_fields: if self.style_fields[db_field.name] in ('radio', 'radio-inline') and (db_field.choices or isinstance(db_field, models.ForeignKey)): attrs = {'widget': widgets.AdminRadioSelect(attrs={'inline': 'inline' if self.style_fields[db_field.name] == 'radio-inline' else ''})} if db_field.choices: attrs['choices'] = db_field.get_choices(include_blank=db_field.blank, blank_choice=[('', _('Null'))]) attrs = attrs if self.style_fields[db_field.name] in ('checkbox', 'checkbox-inline') and isinstance(db_field, models.ManyToManyField): attrs = {'widget': widgets.AdminCheckboxSelect(attrs={'inline': self.style_fields[db_field.name] == 'checkbox-inline'}), 'help_text': None} if attrs: return attrs if hasattr(db_field, 'rel') and db_field.rel: related_modeladmin = self.admin_site._registry.get(db_field.rel.to) if related_modeladmin and hasattr(related_modeladmin, 'relfield_style'): if related_modeladmin.relfield_style in ('radio', 'radio-inline') and (db_field.choices or isinstance(db_field, models.ForeignKey)): attrs = {'widget': widgets.AdminRadioSelect(attrs={'inline': 'inline' if related_modeladmin.relfield_style == 'radio-inline' else ''})} if db_field.choices: attrs['choices'] = db_field.get_choices(include_blank=db_field.blank, blank_choice=[('', _('Null'))]) attrs = attrs if related_modeladmin.relfield_style in ('checkbox', 'checkbox-inline') and isinstance(db_field, models.ManyToManyField): attrs = {'widget': widgets.AdminCheckboxSelect(attrs={'inline': related_modeladmin.relfield_style == 'checkbox-inline'}), 'help_text': None} if attrs: return attrs if db_field.choices: return {'widget': widgets.AdminSelectWidget} for klass in db_field.__class__.mro(): if klass in self.formfield_overrides: return self.formfield_overrides[klass].copy() return {}
Django_Blog
positive
def __init__(self, initial_amount=1000000.0, max_stock=100.0, cost_pct=0.001, gamma=0.99, beg_idx=0, end_idx=1113): self.df_pwd = './elegantrl/envs/China_A_shares.pandas.dataframe' self.npz_pwd = './elegantrl/envs/China_A_shares.numpy.npz' <DeepExtract> tech_id_list = ['macd', 'boll_ub', 'boll_lb', 'rsi_30', 'cci_30', 'dx_30', 'close_30_sma', 'close_60_sma'] if tech_id_list is None else tech_id_list if os.path.exists(self.npz_pwd): ary_dict = np.load(self.npz_pwd, allow_pickle=True) close_ary = ary_dict['close_ary'] tech_ary = ary_dict['tech_ary'] elif os.path.exists(self.df_pwd): df = pd.read_pickle(self.df_pwd) tech_ary = [] close_ary = [] df_len = len(df.index.unique()) for day in range(df_len): item = df.loc[day] tech_items = [item[tech].values.tolist() for tech in tech_id_list] tech_items_flatten = sum(tech_items, []) tech_ary.append(tech_items_flatten) close_ary.append(item.close) close_ary = np.array(close_ary) tech_ary = np.array(tech_ary) np.savez_compressed(self.npz_pwd, close_ary=close_ary, tech_ary=tech_ary) else: error_str = f'| StockTradingEnv need {self.df_pwd} or {self.npz_pwd}\n download the following files and save in `.`\n https://github.com/Yonv1943/Python/blob/master/scow/China_A_shares.numpy.npz\n https://github.com/Yonv1943/Python/blob/master/scow/China_A_shares.pandas.dataframe' raise FileNotFoundError(error_str) (self.close_ary, self.tech_ary) = (close_ary, tech_ary) </DeepExtract> self.close_ary = self.close_ary[beg_idx:end_idx] self.tech_ary = self.tech_ary[beg_idx:end_idx] self.max_stock = max_stock self.cost_pct = cost_pct self.reward_scale = 2 ** (-12) self.initial_amount = initial_amount self.gamma = gamma self.day = None self.rewards = None self.total_asset = None self.cumulative_returns = 0 self.if_random_reset = True self.amount = None self.shares = None self.shares_num = self.close_ary.shape[1] amount_dim = 1 self.env_name = 'StockTradingEnv-v2' self.state_dim = self.shares_num + self.close_ary.shape[1] + self.tech_ary.shape[1] + amount_dim self.action_dim = self.shares_num self.if_discrete = False self.max_step = self.close_ary.shape[0] - 1 self.target_return = +np.inf
def __init__(self, initial_amount=1000000.0, max_stock=100.0, cost_pct=0.001, gamma=0.99, beg_idx=0, end_idx=1113): self.df_pwd = './elegantrl/envs/China_A_shares.pandas.dataframe' self.npz_pwd = './elegantrl/envs/China_A_shares.numpy.npz' tech_id_list = ['macd', 'boll_ub', 'boll_lb', 'rsi_30', 'cci_30', 'dx_30', 'close_30_sma', 'close_60_sma'] if tech_id_list is None else tech_id_list if os.path.exists(self.npz_pwd): ary_dict = np.load(self.npz_pwd, allow_pickle=True) close_ary = ary_dict['close_ary'] tech_ary = ary_dict['tech_ary'] elif os.path.exists(self.df_pwd): df = pd.read_pickle(self.df_pwd) tech_ary = [] close_ary = [] df_len = len(df.index.unique()) for day in range(df_len): item = df.loc[day] tech_items = [item[tech].values.tolist() for tech in tech_id_list] tech_items_flatten = sum(tech_items, []) tech_ary.append(tech_items_flatten) close_ary.append(item.close) close_ary = np.array(close_ary) tech_ary = np.array(tech_ary) np.savez_compressed(self.npz_pwd, close_ary=close_ary, tech_ary=tech_ary) else: error_str = f'| StockTradingEnv need {self.df_pwd} or {self.npz_pwd}\n download the following files and save in `.`\n https://github.com/Yonv1943/Python/blob/master/scow/China_A_shares.numpy.npz\n https://github.com/Yonv1943/Python/blob/master/scow/China_A_shares.pandas.dataframe' raise FileNotFoundError(error_str) (self.close_ary, self.tech_ary) = (close_ary, tech_ary) self.close_ary = self.close_ary[beg_idx:end_idx] self.tech_ary = self.tech_ary[beg_idx:end_idx] self.max_stock = max_stock self.cost_pct = cost_pct self.reward_scale = 2 ** (-12) self.initial_amount = initial_amount self.gamma = gamma self.day = None self.rewards = None self.total_asset = None self.cumulative_returns = 0 self.if_random_reset = True self.amount = None self.shares = None self.shares_num = self.close_ary.shape[1] amount_dim = 1 self.env_name = 'StockTradingEnv-v2' self.state_dim = self.shares_num + self.close_ary.shape[1] + self.tech_ary.shape[1] + amount_dim self.action_dim = self.shares_num self.if_discrete = False self.max_step = self.close_ary.shape[0] - 1 self.target_return = +np.inf
ElegantRL
positive
def __init__(self, content, depot_name=None): super(DepotFileInfo, self).__init__() <DeepExtract> object.__setattr__(self, '_frozen', False) </DeepExtract> if isinstance(content, dict): object.__setattr__(self, 'original_content', None) self.update(content) else: object.__setattr__(self, 'original_content', content) if depot_name is None: depot_name = DepotManager.get_default() depot_name = DepotManager.resolve_alias(depot_name) if not depot_name: raise ValueError('Storage has not been found in DEPOT') self['depot_name'] = depot_name self['files'] = [] <DeepExtract> return </DeepExtract> <DeepExtract> object.__setattr__(self, '_frozen', True) </DeepExtract>
def __init__(self, content, depot_name=None):
    super(DepotFileInfo, self).__init__()
    object.__setattr__(self, '_frozen', False)
    if isinstance(content, dict):
        object.__setattr__(self, 'original_content', None)
        self.update(content)
    else:
        object.__setattr__(self, 'original_content', content)
        if depot_name is None:
            depot_name = DepotManager.get_default()
        depot_name = DepotManager.resolve_alias(depot_name)
        if not depot_name:
            raise ValueError('Storage has not been found in DEPOT')
        self['depot_name'] = depot_name
        self['files'] = []
    return
    object.__setattr__(self, '_frozen', True)
depot
positive
def _infer(self, mix: th.Tensor, mode: str) -> Union[th.Tensor, List[th.Tensor]]: """ Return time signals or frequency TF masks """ (stft, _) = self.enh_transform.encode(mix, None) feats = self.enh_transform(stft) <DeepExtract> x = self.proj(feats) x = self.conv(x) masks = self.non_linear(self.mask(x)) masks = th.chunk(masks, self.num_spks, 1) </DeepExtract> if mode == 'time': bss_stft = [tf_masking(stft, m) for m in masks] bss = self.enh_transform.decode(bss_stft) else: bss = masks return bss[0] if self.num_spks == 1 else bss
def _infer(self, mix: th.Tensor, mode: str) -> Union[th.Tensor, List[th.Tensor]]: """ Return time signals or frequency TF masks """ (stft, _) = self.enh_transform.encode(mix, None) feats = self.enh_transform(stft) x = self.proj(feats) x = self.conv(x) masks = self.non_linear(self.mask(x)) masks = th.chunk(masks, self.num_spks, 1) if mode == 'time': bss_stft = [tf_masking(stft, m) for m in masks] bss = self.enh_transform.decode(bss_stft) else: bss = masks return bss[0] if self.num_spks == 1 else bss
aps
positive
def is_not_inf(self): """Asserts that val is real number and is *not* ``Inf`` (infinity). Examples: Usage:: assert_that(0).is_not_inf() assert_that(123.4).is_not_inf() assert_that(float('nan')).is_not_inf() Returns: AssertionBuilder: returns this instance to chain to the next assertion Raises: AssertionError: if val **is** Inf """ <DeepExtract> if isinstance(self.val, numbers.Number) is False: raise TypeError('val is not numeric') </DeepExtract> <DeepExtract> if isinstance(self.val, numbers.Real) is False: raise TypeError('val is not real number') </DeepExtract> if math.isinf(self.val): return self.error('Expected not <Inf>, but was.') return self
def is_not_inf(self): """Asserts that val is real number and is *not* ``Inf`` (infinity). Examples: Usage:: assert_that(0).is_not_inf() assert_that(123.4).is_not_inf() assert_that(float('nan')).is_not_inf() Returns: AssertionBuilder: returns this instance to chain to the next assertion Raises: AssertionError: if val **is** Inf """ if isinstance(self.val, numbers.Number) is False: raise TypeError('val is not numeric') if isinstance(self.val, numbers.Real) is False: raise TypeError('val is not real number') if math.isinf(self.val): return self.error('Expected not <Inf>, but was.') return self
assertpy
positive
def setup_method(self, method): <DeepExtract> (fd, fqfn) = tempfile.mkstemp(prefix='TestSlicer7x7In_') fp = os.fdopen(fd, 'wt') fp.write('0-0,0-1,0-2,0-3,0-4,0-5,0-6\n') fp.write('1-0,1-1,1-2,1-3,1-4,1-5,1-6\n') fp.write('2-0,2-1,2-2,2-3,2-4,2-5,2-6\n') fp.write('3-0,3-1,3-2,3-3,3-4,3-5,3-6\n') fp.write('4-0,4-1,4-2,4-3,4-4,4-5,4-6\n') fp.write('5-0,5-1,5-2,5-3,5-4,5-5,5-6\n') fp.write('6-0,6-1,6-2,6-3,6-4,6-5,6-6\n') fp.close() self.std_7x7_fqfn = fqfn </DeepExtract> (dummy, self.out_fqfn) = tempfile.mkstemp(prefix='TestSlice7x7Out_')
def setup_method(self, method): (fd, fqfn) = tempfile.mkstemp(prefix='TestSlicer7x7In_') fp = os.fdopen(fd, 'wt') fp.write('0-0,0-1,0-2,0-3,0-4,0-5,0-6\n') fp.write('1-0,1-1,1-2,1-3,1-4,1-5,1-6\n') fp.write('2-0,2-1,2-2,2-3,2-4,2-5,2-6\n') fp.write('3-0,3-1,3-2,3-3,3-4,3-5,3-6\n') fp.write('4-0,4-1,4-2,4-3,4-4,4-5,4-6\n') fp.write('5-0,5-1,5-2,5-3,5-4,5-5,5-6\n') fp.write('6-0,6-1,6-2,6-3,6-4,6-5,6-6\n') fp.close() self.std_7x7_fqfn = fqfn (dummy, self.out_fqfn) = tempfile.mkstemp(prefix='TestSlice7x7Out_')
DataGristle
positive
def make(self, *, initializer=default_initializer) -> Graph: graph = Graph() <DeepExtract> raise NotImplementedError() </DeepExtract> return graph
def make(self, *, initializer=default_initializer) -> Graph: graph = Graph() raise NotImplementedError() return graph
autogoal
positive
def gen_test_df() -> pd.DataFrame: rand = np.random.RandomState(0) nrows = 30 data = {} data[0] = gen_random_dataframe(nrows=nrows, ncols=10, random_state=rand).reset_index(drop=True) data[1] = gen_random_dataframe(nrows=nrows, ncols=10, na_ratio=0.1, random_state=rand).reset_index(drop=True) data[2] = pd.Series([np.nan] * nrows, name='const_na') data[3] = pd.Series(['s'] * nrows, name='const_str') data[4] = pd.Series([0] * nrows, name='const_zero') data[5] = pd.Series([-1] * nrows, name='const_neg') data[6] = pd.Series([1] * nrows, name='const_pos') data[7] = pd.Series([0, 1, np.nan] * (nrows // 3), name='small_distinct_miss') data[8] = gen_random_series(size=nrows, dtype='string', random_state=rand).rename('str_no_miss') data[9] = gen_random_series(size=nrows, dtype='string', na_ratio=0.1, random_state=rand).rename('str_miss') data[10] = gen_random_series(size=nrows, dtype='float', random_state=rand).rename('num_no_miss') data[11] = gen_random_series(size=nrows, dtype='float', na_ratio=0.1, random_state=rand).rename('num_miss') data[12] = pd.Series(['a', 'b'] * (nrows // 2), name='category_no_miss', dtype='category') data[13] = pd.Series(['a', np.nan] * (nrows // 2), name='category_miss', dtype='category') df = pd.concat(data.values(), axis=1) <DeepExtract> gen_func: Mapping[str, Callable[..., pd.Series]] = {'int': _gen_random_int_series, 'float': _gen_random_float_series, 'boolean': _gen_random_bool_series, 'datetime': _gen_random_datatime_series, 'string': _gen_random_string_series} if dtype not in gen_func and dtype != 'object': raise NotImplementedError(f'dtype {dtype} generator is not implemented.') rand = _resolve_random_state(2) population_list = [] for curr_type in gen_func: if dtype in [curr_type, 'object']: if curr_type != 'string': rand_series = gen_func[curr_type](df.index.shape[0], random_state=rand) else: rand_series = gen_func[curr_type](df.index.shape[0], max_len=100, random_state=rand) population_list.append(rand_series) object_population = pd.concat(population_list, ignore_index=True) object_series = pd.Series(rand.choice(object_population, size=df.index.shape[0])) na_pos = object_series.sample(frac=0.1, random_state=rand).index if not na_pos.empty: object_series[na_pos] = np.nan df.index = object_series </DeepExtract> return df
def gen_test_df() -> pd.DataFrame: rand = np.random.RandomState(0) nrows = 30 data = {} data[0] = gen_random_dataframe(nrows=nrows, ncols=10, random_state=rand).reset_index(drop=True) data[1] = gen_random_dataframe(nrows=nrows, ncols=10, na_ratio=0.1, random_state=rand).reset_index(drop=True) data[2] = pd.Series([np.nan] * nrows, name='const_na') data[3] = pd.Series(['s'] * nrows, name='const_str') data[4] = pd.Series([0] * nrows, name='const_zero') data[5] = pd.Series([-1] * nrows, name='const_neg') data[6] = pd.Series([1] * nrows, name='const_pos') data[7] = pd.Series([0, 1, np.nan] * (nrows // 3), name='small_distinct_miss') data[8] = gen_random_series(size=nrows, dtype='string', random_state=rand).rename('str_no_miss') data[9] = gen_random_series(size=nrows, dtype='string', na_ratio=0.1, random_state=rand).rename('str_miss') data[10] = gen_random_series(size=nrows, dtype='float', random_state=rand).rename('num_no_miss') data[11] = gen_random_series(size=nrows, dtype='float', na_ratio=0.1, random_state=rand).rename('num_miss') data[12] = pd.Series(['a', 'b'] * (nrows // 2), name='category_no_miss', dtype='category') data[13] = pd.Series(['a', np.nan] * (nrows // 2), name='category_miss', dtype='category') df = pd.concat(data.values(), axis=1) gen_func: Mapping[str, Callable[..., pd.Series]] = {'int': _gen_random_int_series, 'float': _gen_random_float_series, 'boolean': _gen_random_bool_series, 'datetime': _gen_random_datatime_series, 'string': _gen_random_string_series} if dtype not in gen_func and dtype != 'object': raise NotImplementedError(f'dtype {dtype} generator is not implemented.') rand = _resolve_random_state(2) population_list = [] for curr_type in gen_func: if dtype in [curr_type, 'object']: if curr_type != 'string': rand_series = gen_func[curr_type](df.index.shape[0], random_state=rand) else: rand_series = gen_func[curr_type](df.index.shape[0], max_len=100, random_state=rand) population_list.append(rand_series) object_population = pd.concat(population_list, ignore_index=True) object_series = pd.Series(rand.choice(object_population, size=df.index.shape[0])) na_pos = object_series.sample(frac=0.1, random_state=rand).index if not na_pos.empty: object_series[na_pos] = np.nan df.index = object_series return df
dataprep
positive
def axes_limits_set(data): """ Set the axes limits """ xmax = self.calcs.iterations - 1 if self.calcs.iterations > 1 else 1 if data: <DeepExtract> (ymin, ymax) = (list(), list()) for item in data: dataset = list(filter(lambda x: x is not None, item)) if not dataset: continue ymin.append(min(dataset) * 1000) ymax.append(max(dataset) * 1000) ymin = floor(min(ymin)) / 1000 ymax = ceil(max(ymax)) / 1000 (ymin, ymax) = (ymin, ymax) </DeepExtract> self.ax1.set_ylim(ymin, ymax) self.ax1.set_xlim(0, xmax) else: <DeepExtract> self.ax1.set_ylim(0.0, 100.0) self.ax1.set_xlim(0, 1) </DeepExtract>
def axes_limits_set(data):
    """ Set the axes limits """
    xmax = self.calcs.iterations - 1 if self.calcs.iterations > 1 else 1
    if data:
        (ymin, ymax) = (list(), list())
        for item in data:
            dataset = list(filter(lambda x: x is not None, item))
            if not dataset:
                continue
            ymin.append(min(dataset) * 1000)
            ymax.append(max(dataset) * 1000)
        ymin = floor(min(ymin)) / 1000
        ymax = ceil(max(ymax)) / 1000
        (ymin, ymax) = (ymin, ymax)
        self.ax1.set_ylim(ymin, ymax)
        self.ax1.set_xlim(0, xmax)
    else:
        self.ax1.set_ylim(0.0, 100.0)
        self.ax1.set_xlim(0, 1)
DeepFakeTutorial
positive