before: stringlengths 0-955k
after: stringlengths 0-877k
repo: stringlengths 1-74
type: stringclasses (1 value)
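Each row below pairs a function that still contains <DeepExtract> blocks (before) with the same function after those blocks are inlined (after), along with the source repo and a type label. As a minimal sketch, assuming the dump is published as a Hugging Face dataset, rows with this schema could be read as follows; the dataset path is a placeholder, not the real name:

```python
# Hypothetical loader for rows with the schema above (before / after / repo / type).
# The path "someuser/deepextract-pairs" is a placeholder, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("someuser/deepextract-pairs", split="train")

row = ds[0]
print(row["repo"], row["type"])               # e.g. "dro-sfm", "positive"
print(len(row["before"]), len(row["after"]))  # cell lengths, consistent with the stats above
```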
def pose_from_oxts_packet(raw_data, scale): """ Helper method to compute a SE(3) pose matrix from an OXTS packet Parameters ---------- raw_data : dict Oxts data to read from scale : float Oxts scale Returns ------- R : np.array [3,3] Rotation matrix t : np.array [3] Translation vector """ packet = OxtsPacket(*raw_data) er = 6378137.0 tx = scale * packet.lon * np.pi * er / 180.0 ty = scale * er * np.log(np.tan((90.0 + packet.lat) * np.pi / 360.0)) tz = packet.alt t = np.array([tx, ty, tz]) <DeepExtract> c = np.cos(packet.roll) s = np.sin(packet.roll) Rx = np.array([[1, 0, 0], [0, c, -s], [0, s, c]]) </DeepExtract> <DeepExtract> c = np.cos(packet.pitch) s = np.sin(packet.pitch) Ry = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]]) </DeepExtract> <DeepExtract> c = np.cos(packet.yaw) s = np.sin(packet.yaw) Rz = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]]) </DeepExtract> R = Rz.dot(Ry.dot(Rx)) return (R, t)
def pose_from_oxts_packet(raw_data, scale): """ Helper method to compute a SE(3) pose matrix from an OXTS packet Parameters ---------- raw_data : dict Oxts data to read from scale : float Oxts scale Returns ------- R : np.array [3,3] Rotation matrix t : np.array [3] Translation vector """ packet = OxtsPacket(*raw_data) er = 6378137.0 tx = scale * packet.lon * np.pi * er / 180.0 ty = scale * er * np.log(np.tan((90.0 + packet.lat) * np.pi / 360.0)) tz = packet.alt t = np.array([tx, ty, tz]) c = np.cos(packet.roll) s = np.sin(packet.roll) Rx = np.array([[1, 0, 0], [0, c, -s], [0, s, c]]) c = np.cos(packet.pitch) s = np.sin(packet.pitch) Ry = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]]) c = np.cos(packet.yaw) s = np.sin(packet.yaw) Rz = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]]) R = Rz.dot(Ry.dot(Rx)) return (R, t)
dro-sfm
positive
def findMaximumXOR(self, nums: List[int]) -> int: try: maxbit = floor(log2(max(nums))) + 1 except: maxbit = 0 head = TrieNode() for n in nums: <DeepExtract> if not head: head = None curr = head for i in range(maxbit - 1, -1, -1): bit = n >> i & 1 if bit: if not curr.right: curr.right = TrieNode() curr = curr.right else: if not curr.left: curr.left = TrieNode() curr = curr.left head = head </DeepExtract> maxXor = -float('inf') for n in nums: currXor = 0 curr = head for i in range(maxbit - 1, -1, -1): bit = n >> i & 1 if bit: if curr.left: currXor += 2 ** i curr = curr.left else: curr = curr.right elif curr.right: currXor += 2 ** i curr = curr.right else: curr = curr.left maxXor = max(maxXor, currXor) return maxXor
def findMaximumXOR(self, nums: List[int]) -> int: try: maxbit = floor(log2(max(nums))) + 1 except: maxbit = 0 head = TrieNode() for n in nums: if not head: head = None curr = head for i in range(maxbit - 1, -1, -1): bit = n >> i & 1 if bit: if not curr.right: curr.right = TrieNode() curr = curr.right else: if not curr.left: curr.left = TrieNode() curr = curr.left head = head maxXor = -float('inf') for n in nums: currXor = 0 curr = head for i in range(maxbit - 1, -1, -1): bit = n >> i & 1 if bit: if curr.left: currXor += 2 ** i curr = curr.left else: curr = curr.right elif curr.right: currXor += 2 ** i curr = curr.right else: curr = curr.left maxXor = max(maxXor, currXor) return maxXor
Competitive_Programming
positive
def fmin(loss_fn, space, max_evals, trials, init_random_evals=30, explore_prob=0.2): """Find the minimum of function through hyper parameter optimization. Arguments --------- loss_fn : ``function(*args) -> float`` Function that takes in a dictionary and returns a real value. This is the function to be minimized. space : dictionary Custom dictionary specifying the range and distribution of the hyperparamters. E.g. ``space = {'x': {'dist':scipy.stats.uniform(0,1), 'lo':0, 'hi':1}}`` for a 1-dimensional space with variable x in range [0,1] max_evals : int Maximum number of evaluations of loss_fn allowed trials : list Holds the output of the optimization trials. Need not be empty to begin with, new trials are appended at the end. init_random_evals : Optional[int], default 30 Number of random trials to initialize the optimization. explore_prob : Optional[float], default 0.2 Controls the exploration-vs-exploitation ratio. Value should be in [0,1]. By default, 20% of trails are random samples. Returns ------- trial entry (dictionary of hyperparameters) Best hyperparameter setting found. E.g. {'x': 5.6, 'loss' : 0.5} where x is the best hyparameter value found and loss is the value of the function for the best hyperparameter value(s). Raises ------ ValueError If the distribution specified in space does not support a ``rvs()`` method to generate random numbers, a ValueError is raised. """ for s in space: if not hasattr(space[s]['dist'], 'rvs'): raise ValueError('Unknown distribution type for variable') if 'lo' not in space[s]: space[s]['lo'] = -np.inf if 'hi' not in space[s]: space[s]['hi'] = np.inf if len(trials) > init_random_evals: init_random_evals = 0 for t in range(max_evals): sdict = {} if t >= init_random_evals and np.random.random() > explore_prob: use_random_sampling = False else: use_random_sampling = True yarray = np.array([tr['loss'] for tr in trials]) for s in space: sarray = np.array([tr[s] for tr in trials]) if use_random_sampling: sdict[s] = space[s]['dist'].rvs() else: <DeepExtract> z = np.array(list(zip(sarray, yarray)), dtype=np.dtype([('x', float), ('y', float)])) z = np.sort(z, order='y') n = yarray.shape[0] g = int(np.round(np.ceil(0.15 * n))) ldata = z[0:g] gdata = z[g:n] lymin = ldata['y'].min() lymax = ldata['y'].max() weights = (lymax - ldata['y']) / (lymax - lymin) lx = gmm_1d_distribution(ldata['x'], min_limit=space[s]['lo'], max_limit=space[s]['hi'], weights=weights) gx = gmm_1d_distribution(gdata['x'], min_limit=space[s]['lo'], max_limit=space[s]['hi']) samples = lx.get_samples(n=1000) ei = lx(samples) / gx(samples) h = (sarray.max() - sarray.min()) / (10 * sarray.size) s = 0 while np.abs(sarray - samples[ei.argmax()]).min() < h: ei[ei.argmax()] = 0 s = s + 1 if s == samples.size: break xnext = samples[ei.argmax()] sdict[s] = xnext </DeepExtract> logger.debug('Explore' if use_random_sampling else 'Exploit') logger.info('Next point ', t, ' = ', sdict) y = loss_fn(sdict) sdict['loss'] = y trials.append(sdict) yarray = np.array([tr['loss'] for tr in trials]) yargmin = yarray.argmin() logger.info('Best point so far = ', trials[yargmin]) return trials[yargmin]
def fmin(loss_fn, space, max_evals, trials, init_random_evals=30, explore_prob=0.2): """Find the minimum of function through hyper parameter optimization. Arguments --------- loss_fn : ``function(*args) -> float`` Function that takes in a dictionary and returns a real value. This is the function to be minimized. space : dictionary Custom dictionary specifying the range and distribution of the hyperparamters. E.g. ``space = {'x': {'dist':scipy.stats.uniform(0,1), 'lo':0, 'hi':1}}`` for a 1-dimensional space with variable x in range [0,1] max_evals : int Maximum number of evaluations of loss_fn allowed trials : list Holds the output of the optimization trials. Need not be empty to begin with, new trials are appended at the end. init_random_evals : Optional[int], default 30 Number of random trials to initialize the optimization. explore_prob : Optional[float], default 0.2 Controls the exploration-vs-exploitation ratio. Value should be in [0,1]. By default, 20% of trails are random samples. Returns ------- trial entry (dictionary of hyperparameters) Best hyperparameter setting found. E.g. {'x': 5.6, 'loss' : 0.5} where x is the best hyparameter value found and loss is the value of the function for the best hyperparameter value(s). Raises ------ ValueError If the distribution specified in space does not support a ``rvs()`` method to generate random numbers, a ValueError is raised. """ for s in space: if not hasattr(space[s]['dist'], 'rvs'): raise ValueError('Unknown distribution type for variable') if 'lo' not in space[s]: space[s]['lo'] = -np.inf if 'hi' not in space[s]: space[s]['hi'] = np.inf if len(trials) > init_random_evals: init_random_evals = 0 for t in range(max_evals): sdict = {} if t >= init_random_evals and np.random.random() > explore_prob: use_random_sampling = False else: use_random_sampling = True yarray = np.array([tr['loss'] for tr in trials]) for s in space: sarray = np.array([tr[s] for tr in trials]) if use_random_sampling: sdict[s] = space[s]['dist'].rvs() else: z = np.array(list(zip(sarray, yarray)), dtype=np.dtype([('x', float), ('y', float)])) z = np.sort(z, order='y') n = yarray.shape[0] g = int(np.round(np.ceil(0.15 * n))) ldata = z[0:g] gdata = z[g:n] lymin = ldata['y'].min() lymax = ldata['y'].max() weights = (lymax - ldata['y']) / (lymax - lymin) lx = gmm_1d_distribution(ldata['x'], min_limit=space[s]['lo'], max_limit=space[s]['hi'], weights=weights) gx = gmm_1d_distribution(gdata['x'], min_limit=space[s]['lo'], max_limit=space[s]['hi']) samples = lx.get_samples(n=1000) ei = lx(samples) / gx(samples) h = (sarray.max() - sarray.min()) / (10 * sarray.size) s = 0 while np.abs(sarray - samples[ei.argmax()]).min() < h: ei[ei.argmax()] = 0 s = s + 1 if s == samples.size: break xnext = samples[ei.argmax()] sdict[s] = xnext logger.debug('Explore' if use_random_sampling else 'Exploit') logger.info('Next point ', t, ' = ', sdict) y = loss_fn(sdict) sdict['loss'] = y trials.append(sdict) yarray = np.array([tr['loss'] for tr in trials]) yargmin = yarray.argmin() logger.info('Best point so far = ', trials[yargmin]) return trials[yargmin]
brainiak
positive
def test_remove_constraints_capital_letters(self): """ #23065 - Constraint names must be quoted if they contain capital letters. """ def get_field(*args, field_class=IntegerField, **kwargs): kwargs['db_column'] = 'CamelCase' field = field_class(*args, **kwargs) field.set_attributes_from_name('CamelCase') return field model = Author <DeepExtract> kwargs['db_column'] = 'CamelCase' field = field_class(*args, **kwargs) field.set_attributes_from_name('CamelCase') field = field </DeepExtract> table = model._meta.db_table column = field.column identifier_converter = connection.introspection.identifier_converter with connection.schema_editor() as editor: editor.create_model(model) editor.add_field(model, field) constraint_name = 'CamelCaseIndex' expected_constraint_name = identifier_converter(constraint_name) editor.execute(editor.sql_create_index % {'table': editor.quote_name(table), 'name': editor.quote_name(constraint_name), 'using': '', 'columns': editor.quote_name(column), 'extra': '', 'condition': '', 'include': ''}) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(db_index=True), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) constraint_name = 'CamelCaseUniqConstraint' expected_constraint_name = identifier_converter(constraint_name) editor.execute(editor._create_unique_sql(model, [field.column], constraint_name)) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(unique=True), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) if editor.sql_create_fk: constraint_name = 'CamelCaseFKConstraint' expected_constraint_name = identifier_converter(constraint_name) editor.execute(editor.sql_create_fk % {'table': editor.quote_name(table), 'name': editor.quote_name(constraint_name), 'column': editor.quote_name(column), 'to_table': editor.quote_name(table), 'to_column': editor.quote_name(model._meta.auto_field.column), 'deferrable': connection.ops.deferrable_sql()}) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(Author, CASCADE, field_class=ForeignKey), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table))
def test_remove_constraints_capital_letters(self): """ #23065 - Constraint names must be quoted if they contain capital letters. """ def get_field(*args, field_class=IntegerField, **kwargs): kwargs['db_column'] = 'CamelCase' field = field_class(*args, **kwargs) field.set_attributes_from_name('CamelCase') return field model = Author kwargs['db_column'] = 'CamelCase' field = field_class(*args, **kwargs) field.set_attributes_from_name('CamelCase') field = field table = model._meta.db_table column = field.column identifier_converter = connection.introspection.identifier_converter with connection.schema_editor() as editor: editor.create_model(model) editor.add_field(model, field) constraint_name = 'CamelCaseIndex' expected_constraint_name = identifier_converter(constraint_name) editor.execute(editor.sql_create_index % {'table': editor.quote_name(table), 'name': editor.quote_name(constraint_name), 'using': '', 'columns': editor.quote_name(column), 'extra': '', 'condition': '', 'include': ''}) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(db_index=True), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) constraint_name = 'CamelCaseUniqConstraint' expected_constraint_name = identifier_converter(constraint_name) editor.execute(editor._create_unique_sql(model, [field.column], constraint_name)) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(unique=True), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) if editor.sql_create_fk: constraint_name = 'CamelCaseFKConstraint' expected_constraint_name = identifier_converter(constraint_name) editor.execute(editor.sql_create_fk % {'table': editor.quote_name(table), 'name': editor.quote_name(constraint_name), 'column': editor.quote_name(column), 'to_table': editor.quote_name(table), 'to_column': editor.quote_name(model._meta.auto_field.column), 'deferrable': connection.ops.deferrable_sql()}) self.assertIn(expected_constraint_name, self.get_constraints(model._meta.db_table)) editor.alter_field(model, get_field(Author, CASCADE, field_class=ForeignKey), field, strict=True) self.assertNotIn(expected_constraint_name, self.get_constraints(model._meta.db_table))
django-firebird
positive
def test_empty_ranges(self): pkRange = routing_range._Range('', 'FF', True, False) <DeepExtract> overlapping_partition_key_ranges = self.smart_routing_map_provider.get_overlapping_ranges('sample collection id', [pkRange]) </DeepExtract> self.assertEqual(len(overlapping_partition_key_ranges), len(self.partition_key_ranges)) self.assertEqual(overlapping_partition_key_ranges, self.partition_key_ranges) <DeepExtract> overlapping_partition_key_ranges = self.smart_routing_map_provider.get_overlapping_ranges('sample collection id', []) </DeepExtract> self.assertEqual(len(overlapping_partition_key_ranges), 0) empty_start_range = routing_range._Range('', '', False, True) empty_end_range = routing_range._Range('FF', 'FF', False, True) empty_range = routing_range._Range('AA', 'AA', False, True) <DeepExtract> for queryRanges in queryRangesList: self.validate_overlapping_ranges_results(queryRanges, []) </DeepExtract>
def test_empty_ranges(self): pkRange = routing_range._Range('', 'FF', True, False) overlapping_partition_key_ranges = self.smart_routing_map_provider.get_overlapping_ranges('sample collection id', [pkRange]) self.assertEqual(len(overlapping_partition_key_ranges), len(self.partition_key_ranges)) self.assertEqual(overlapping_partition_key_ranges, self.partition_key_ranges) overlapping_partition_key_ranges = self.smart_routing_map_provider.get_overlapping_ranges('sample collection id', []) self.assertEqual(len(overlapping_partition_key_ranges), 0) empty_start_range = routing_range._Range('', '', False, True) empty_end_range = routing_range._Range('FF', 'FF', False, True) empty_range = routing_range._Range('AA', 'AA', False, True) for queryRanges in queryRangesList: self.validate_overlapping_ranges_results(queryRanges, [])
azure-cosmos-python
positive
def check_python_requirements(return_detail=True, print_warning=True): """ Check installation status and requirements of dependant libraries. """ try: <DeepExtract> path = os.path.abspath(PATH[0]) REQUIREMENTS_PATH = os.path.join(path, 'requirements.txt') with open(REQUIREMENTS_PATH) as f: requirements = f.read().splitlines() REQUIREMENTS = requirements </DeepExtract> except: if print_warning: print('Could not check requirements.') return None status = pd.DataFrame({'package_name': _get_package_names(REQUIREMENTS), 'installed_version': _get_installed_version(REQUIREMENTS), 'required_version': _get_required_version(REQUIREMENTS)}) status['requirement_satisfied'] = [_is_version_OK(ref, que) for (ref, que) in zip(status['installed_version'], status['required_version'])] if print_warning: n_not_ok = (status['requirement_satisfied'] == False).sum() if n_not_ok == 1: print(f'{n_not_ok} package does not meet CellOracle requirement.') elif n_not_ok >= 2: print(f'{n_not_ok} packages do not meet CellOracle requirement.') for (i, (name, installed, required, isok)) in status.iterrows(): if isok is False: print(f' Your {name} version is {installed}. Please install {REQUIREMENTS[i]}') if return_detail: return status
def check_python_requirements(return_detail=True, print_warning=True): """ Check installation status and requirements of dependant libraries. """ try: path = os.path.abspath(PATH[0]) REQUIREMENTS_PATH = os.path.join(path, 'requirements.txt') with open(REQUIREMENTS_PATH) as f: requirements = f.read().splitlines() REQUIREMENTS = requirements except: if print_warning: print('Could not check requirements.') return None status = pd.DataFrame({'package_name': _get_package_names(REQUIREMENTS), 'installed_version': _get_installed_version(REQUIREMENTS), 'required_version': _get_required_version(REQUIREMENTS)}) status['requirement_satisfied'] = [_is_version_OK(ref, que) for (ref, que) in zip(status['installed_version'], status['required_version'])] if print_warning: n_not_ok = (status['requirement_satisfied'] == False).sum() if n_not_ok == 1: print(f'{n_not_ok} package does not meet CellOracle requirement.') elif n_not_ok >= 2: print(f'{n_not_ok} packages do not meet CellOracle requirement.') for (i, (name, installed, required, isok)) in status.iterrows(): if isok is False: print(f' Your {name} version is {installed}. Please install {REQUIREMENTS[i]}') if return_detail: return status
CellOracle
positive
def run(self): self.window.show_all() videowindow = self.drawingarea.get_property('window') if sys.platform == 'win32': ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object] drawingarea_gpointer = ctypes.pythonapi.PyCapsule_GetPointer(videowindow.__gpointer__, None) gdkdll = ctypes.CDLL('libgdk-3-0.dll') self.xid = gdkdll.gdk_win32_window_get_handle(drawingarea_gpointer) self.vlc.set_hwnd(self.xid) else: self.xid = videowindow.get_xid() self.vlc.set_xwindow(self.xid) self.vlc.event_manager().event_attach(vlc.EventType.MediaPlayerEndReached, self.on_eos, 1) self.vlc.event_manager().event_attach(vlc.EventType.MediaPlayerEncounteredError, self.on_error, 1) self.instance.set_user_agent('http', self.user_agent) <DeepExtract> media = self.instance.media_new(self.get_queued_or_random()) if ('http://' in self.get_queued_or_random() or 'https://' in self.get_queued_or_random()) and self.file_save_dir and (not os.path.isfile(self.file_save_dir + '/' + os.path.basename(self.get_queued_or_random()))): media.add_option(':sout=#duplicate{dst=display,dst=std{access=file,dst="' + self.file_save_dir + '/' + os.path.basename(self.get_queued_or_random()) + '"}}') self.vlc.set_media(media) self.window.set_title('Endless Sosuch | ' + os.path.basename(self.get_queued_or_random())) self.uri = self.get_queued_or_random() </DeepExtract> <DeepExtract> if not self.is_paused: return self.vlc.play() self.logger.info('Playing {}'.format(self.uri)) self.is_paused = False </DeepExtract> Gtk.main()
def run(self): self.window.show_all() videowindow = self.drawingarea.get_property('window') if sys.platform == 'win32': ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object] drawingarea_gpointer = ctypes.pythonapi.PyCapsule_GetPointer(videowindow.__gpointer__, None) gdkdll = ctypes.CDLL('libgdk-3-0.dll') self.xid = gdkdll.gdk_win32_window_get_handle(drawingarea_gpointer) self.vlc.set_hwnd(self.xid) else: self.xid = videowindow.get_xid() self.vlc.set_xwindow(self.xid) self.vlc.event_manager().event_attach(vlc.EventType.MediaPlayerEndReached, self.on_eos, 1) self.vlc.event_manager().event_attach(vlc.EventType.MediaPlayerEncounteredError, self.on_error, 1) self.instance.set_user_agent('http', self.user_agent) media = self.instance.media_new(self.get_queued_or_random()) if ('http://' in self.get_queued_or_random() or 'https://' in self.get_queued_or_random()) and self.file_save_dir and (not os.path.isfile(self.file_save_dir + '/' + os.path.basename(self.get_queued_or_random()))): media.add_option(':sout=#duplicate{dst=display,dst=std{access=file,dst="' + self.file_save_dir + '/' + os.path.basename(self.get_queued_or_random()) + '"}}') self.vlc.set_media(media) self.window.set_title('Endless Sosuch | ' + os.path.basename(self.get_queued_or_random())) self.uri = self.get_queued_or_random() if not self.is_paused: return self.vlc.play() self.logger.info('Playing {}'.format(self.uri)) self.is_paused = False Gtk.main()
endless-sosuch
positive
def start(self): <DeepExtract> logger.debug('loading auth provider {}...'.format(AUTH_PROVIDER)) for (loader, modname, is_pkg) in pkgutil.iter_modules(cif.auth.__path__, 'cif.auth.'): if modname == 'cif.auth.{}'.format(self.auth_type): self.auth = loader.find_module(modname).load_module(modname) self.auth = self.auth.Plugin(**kwargs) logger.debug('plugin loaded: {}'.format(modname)) </DeepExtract> context = zmq.Context() auth_s = context.socket(zmq.ROUTER) auth_s.SNDTIMEO = SNDTIMEO logger.debug('connecting to sockets...') auth_s.connect(self.auth_address) logger.debug('connected. starting loop') poller = zmq.Poller() poller.register(auth_s, zmq.POLLIN) while not self.exit.is_set(): try: s = dict(poller.poll(1000)) except SystemExit or KeyboardInterrupt: break except Exception as e: logger.error(e) break if auth_s in s: (id, client_id, token, mtype, data) = Msg().recv(auth_s) start = time.time() try: token = self.auth.handle_token_search(token) <DeepExtract> if not token: raise AuthError('Auth: invalid token provided to API') elif len(token) > 1: raise AuthError('multiple token matches during auth. possible wildcard attempt?') else: token = token[0] if mtype.startswith('tokens') or mtype.endswith('delete'): if not token.get('admin'): raise AuthError('Auth: admin function attempted but supplied token had no admin permission') elif mtype.endswith('create') or mtype.endswith('write'): if not token.get('write'): raise AuthError('Auth: write function attempted but supplied token had no write permission') elif mtype == 'indicators_search': if not token.get('read'): raise AuthError('Auth: read function attempt but supplied token had no read permission') if mtype == 'indicators_create': data = json.loads(data) if isinstance(data, dict): data = [data] for i in data: if i.get('group', 'everyone') not in token['groups']: raise AuthError('Auth: indicator function attempt {} but supplied indicator group {} did not match user token groups {}'.format(mtype, i.get('group', 'everyone'), token['groups'])) token = token </DeepExtract> except AuthError as e: logger.error(e) token = [] data = AUTH_ERR except ValueError as e: logger.error(e) token = [] data = AUTH_ERR token = json.dumps(token) logger.debug('sending auth info back to cif-router: %f' % (time.time() - start)) Msg(id=id, client_id=client_id, token=token, mtype=mtype, data=data).send(auth_s) logger.info('shutting down auth...')
def start(self): logger.debug('loading auth provider {}...'.format(AUTH_PROVIDER)) for (loader, modname, is_pkg) in pkgutil.iter_modules(cif.auth.__path__, 'cif.auth.'): if modname == 'cif.auth.{}'.format(self.auth_type): self.auth = loader.find_module(modname).load_module(modname) self.auth = self.auth.Plugin(**kwargs) logger.debug('plugin loaded: {}'.format(modname)) context = zmq.Context() auth_s = context.socket(zmq.ROUTER) auth_s.SNDTIMEO = SNDTIMEO logger.debug('connecting to sockets...') auth_s.connect(self.auth_address) logger.debug('connected. starting loop') poller = zmq.Poller() poller.register(auth_s, zmq.POLLIN) while not self.exit.is_set(): try: s = dict(poller.poll(1000)) except SystemExit or KeyboardInterrupt: break except Exception as e: logger.error(e) break if auth_s in s: (id, client_id, token, mtype, data) = Msg().recv(auth_s) start = time.time() try: token = self.auth.handle_token_search(token) if not token: raise AuthError('Auth: invalid token provided to API') elif len(token) > 1: raise AuthError('multiple token matches during auth. possible wildcard attempt?') else: token = token[0] if mtype.startswith('tokens') or mtype.endswith('delete'): if not token.get('admin'): raise AuthError('Auth: admin function attempted but supplied token had no admin permission') elif mtype.endswith('create') or mtype.endswith('write'): if not token.get('write'): raise AuthError('Auth: write function attempted but supplied token had no write permission') elif mtype == 'indicators_search': if not token.get('read'): raise AuthError('Auth: read function attempt but supplied token had no read permission') if mtype == 'indicators_create': data = json.loads(data) if isinstance(data, dict): data = [data] for i in data: if i.get('group', 'everyone') not in token['groups']: raise AuthError('Auth: indicator function attempt {} but supplied indicator group {} did not match user token groups {}'.format(mtype, i.get('group', 'everyone'), token['groups'])) token = token except AuthError as e: logger.error(e) token = [] data = AUTH_ERR except ValueError as e: logger.error(e) token = [] data = AUTH_ERR token = json.dumps(token) logger.debug('sending auth info back to cif-router: %f' % (time.time() - start)) Msg(id=id, client_id=client_id, token=token, mtype=mtype, data=data).send(auth_s) logger.info('shutting down auth...')
bearded-avenger
positive
def get_replies_to(self, msg): """ returns all replies to the given message contained in this thread. :param msg: parent message to look up :type msg: :class:`~alot.db.message.Message` :returns: list of :class:`~alot.db.message.Message` or `None` """ mid = msg.get_message_id() <DeepExtract> if not self._messages: with self._dbman._with_notmuch_thread(self._id) as thread: def accumulate(acc, msg): M = Message(self._dbman, msg, thread=self) acc[M] = [] for m in msg.replies(): acc[M].append(accumulate(acc, m)) msg_hash = M self._messages = {} for m in thread.toplevel(): self._toplevel_messages.append(accumulate(self._messages, m)) msg_hash = self._messages </DeepExtract> for m in msg_hash.keys(): if m.get_message_id() == mid: return msg_hash[m] return None
def get_replies_to(self, msg): """ returns all replies to the given message contained in this thread. :param msg: parent message to look up :type msg: :class:`~alot.db.message.Message` :returns: list of :class:`~alot.db.message.Message` or `None` """ mid = msg.get_message_id() if not self._messages: with self._dbman._with_notmuch_thread(self._id) as thread: def accumulate(acc, msg): M = Message(self._dbman, msg, thread=self) acc[M] = [] for m in msg.replies(): acc[M].append(accumulate(acc, m)) msg_hash = M self._messages = {} for m in thread.toplevel(): self._toplevel_messages.append(accumulate(self._messages, m)) msg_hash = self._messages for m in msg_hash.keys(): if m.get_message_id() == mid: return msg_hash[m] return None
alot
positive
def testiter(self): <DeepExtract> raise NotImplementedError </DeepExtract> keys = {'one', 'two', 'three', '1', '2', '3'} for k in keys: M[k] = 100 self.assertEqual(set(M), keys)
def testiter(self): raise NotImplementedError keys = {'one', 'two', 'three', '1', '2', '3'} for k in keys: M[k] = 100 self.assertEqual(set(M), keys)
datastructures
positive
@mock.patch('{}.'.format(__PREPARE_PACKAGE) + 'datacatalog_tag_factory.' + 'DataCatalogTagFactory.make_tag_for_table_container_metadata') @mock.patch('{}.datacatalog_tag_factory.'.format(__PREPARE_PACKAGE) + 'DataCatalogTagFactory.make_tag_for_table_metadata') @mock.patch('{}.datacatalog_tag_factory.'.format(__PREPARE_PACKAGE) + 'DataCatalogTagFactory.make_tags_for_columns_metadata') def test_with_tag_templates_should_be_converted_to_dc_entries_with_tags(self, make_tags_for_columns_metadata, make_tag_for_table_metadata, make_tag_for_table_container_metadata, entry_path): entry_path.return_value = AssembledEntryFactoryTestCase.__MOCKED_ENTRY_PATH entry_factory = self.__assembled_entry_factory_with_template schema_metadata = utils.Utils.convert_json_to_object(self.__MODULE_PATH, 'metadata.json') prepared_entries = entry_factory.make_entries(schema_metadata) schemas = schema_metadata['schemas'] expected_created_entries_len = sum([len(schema_item['tables']) for schema_item in schemas], len(schemas)) created_entries_len = sum([len(tables) for (schema, tables) in prepared_entries], len(prepared_entries)) self.assertEqual(expected_created_entries_len, created_entries_len) <DeepExtract> for (user_defined_schema, user_defined_tables) in prepared_entries: schema_entry = user_defined_schema.entry self.__assert_required(user_defined_schema.entry_id, user_defined_schema.entry) self.assertEqual('schema', schema_entry.user_specified_type) self.assertEqual('oracle', schema_entry.user_specified_system) self.assertEqual(AssembledEntryFactoryTestCase.__MOCKED_ENTRY_PATH, schema_entry.name) self.assertIn(AssembledEntryFactoryTestCase.__METADATA_SERVER_HOST, schema_entry.linked_resource) for user_defined_table in user_defined_tables: table_entry = user_defined_table.entry self.__assert_required(user_defined_table.entry_id, table_entry) self.assertIsNotNone(table_entry.source_system_timestamps.create_time) self.assertIsNotNone(table_entry.source_system_timestamps.update_time) self.assertEqual('table', table_entry.user_specified_type) self.assertEqual('oracle', table_entry.user_specified_system) self.assertEqual(AssembledEntryFactoryTestCase.__MOCKED_ENTRY_PATH, table_entry.name) self.assertIn(AssembledEntryFactoryTestCase.__METADATA_SERVER_HOST, table_entry.linked_resource) self.assertGreater(len(table_entry.schema.columns), 0) for column in table_entry.schema.columns: self.assertIsNotNone(column.type) self.assertIsNotNone(column.column) </DeepExtract> (schema, tables) = prepared_entries[0] self.assertEqual(1, len(schema.tags)) for table in tables: self.assertEqual(1, len(table.tags)) self.assertEqual(14, make_tag_for_table_metadata.call_count) self.assertEqual(14, make_tags_for_columns_metadata.call_count) self.assertEqual(4, make_tag_for_table_container_metadata.call_count)
@mock.patch('{}.'.format(__PREPARE_PACKAGE) + 'datacatalog_tag_factory.' + 'DataCatalogTagFactory.make_tag_for_table_container_metadata') @mock.patch('{}.datacatalog_tag_factory.'.format(__PREPARE_PACKAGE) + 'DataCatalogTagFactory.make_tag_for_table_metadata') @mock.patch('{}.datacatalog_tag_factory.'.format(__PREPARE_PACKAGE) + 'DataCatalogTagFactory.make_tags_for_columns_metadata') def test_with_tag_templates_should_be_converted_to_dc_entries_with_tags(self, make_tags_for_columns_metadata, make_tag_for_table_metadata, make_tag_for_table_container_metadata, entry_path): entry_path.return_value = AssembledEntryFactoryTestCase.__MOCKED_ENTRY_PATH entry_factory = self.__assembled_entry_factory_with_template schema_metadata = utils.Utils.convert_json_to_object(self.__MODULE_PATH, 'metadata.json') prepared_entries = entry_factory.make_entries(schema_metadata) schemas = schema_metadata['schemas'] expected_created_entries_len = sum([len(schema_item['tables']) for schema_item in schemas], len(schemas)) created_entries_len = sum([len(tables) for (schema, tables) in prepared_entries], len(prepared_entries)) self.assertEqual(expected_created_entries_len, created_entries_len) for (user_defined_schema, user_defined_tables) in prepared_entries: schema_entry = user_defined_schema.entry self.__assert_required(user_defined_schema.entry_id, user_defined_schema.entry) self.assertEqual('schema', schema_entry.user_specified_type) self.assertEqual('oracle', schema_entry.user_specified_system) self.assertEqual(AssembledEntryFactoryTestCase.__MOCKED_ENTRY_PATH, schema_entry.name) self.assertIn(AssembledEntryFactoryTestCase.__METADATA_SERVER_HOST, schema_entry.linked_resource) for user_defined_table in user_defined_tables: table_entry = user_defined_table.entry self.__assert_required(user_defined_table.entry_id, table_entry) self.assertIsNotNone(table_entry.source_system_timestamps.create_time) self.assertIsNotNone(table_entry.source_system_timestamps.update_time) self.assertEqual('table', table_entry.user_specified_type) self.assertEqual('oracle', table_entry.user_specified_system) self.assertEqual(AssembledEntryFactoryTestCase.__MOCKED_ENTRY_PATH, table_entry.name) self.assertIn(AssembledEntryFactoryTestCase.__METADATA_SERVER_HOST, table_entry.linked_resource) self.assertGreater(len(table_entry.schema.columns), 0) for column in table_entry.schema.columns: self.assertIsNotNone(column.type) self.assertIsNotNone(column.column) (schema, tables) = prepared_entries[0] self.assertEqual(1, len(schema.tags)) for table in tables: self.assertEqual(1, len(table.tags)) self.assertEqual(14, make_tag_for_table_metadata.call_count) self.assertEqual(14, make_tags_for_columns_metadata.call_count) self.assertEqual(4, make_tag_for_table_container_metadata.call_count)
datacatalog-connectors-rdbms
positive
@registry.register_check('kms') def kms_key_exposed_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: """[KMS.2] KMS keys should not have public access""" <DeepExtract> response = cache.get('list_aliases') if response: response = response cache['list_aliases'] = kms.list_aliases() response = cache['list_aliases'] </DeepExtract> iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat() for alias in response['Aliases']: if 'TargetKeyId' in alias: aliasArn = alias['AliasArn'] keyid = alias['TargetKeyId'] try: policyString = kms.get_key_policy(KeyId=keyid, PolicyName='default') fail = False policy_json = policyString['Policy'] policy = json.loads(policy_json) for sid in policy['Statement']: if sid['Principal'] == '*': access = '*' else: access = sid['Principal'].get('AWS', None) if access != '*' or (access == '*' and 'Condition' in sid): continue else: fail = True break if not fail: finding = {'SchemaVersion': '2018-10-08', 'Id': aliasArn + '/kms-key-exposed-check', 'ProductArn': f'arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default', 'GeneratorId': aliasArn, 'AwsAccountId': awsAccountId, 'Types': ['Software and Configuration Checks/AWS Security Best Practices', 'Effects/Data Exposure', 'Sensitive Data Identifications'], 'FirstObservedAt': iso8601Time, 'CreatedAt': iso8601Time, 'UpdatedAt': iso8601Time, 'Severity': {'Label': 'INFORMATIONAL'}, 'Confidence': 75, 'Title': '[KMS.2] KMS keys should not have public access', 'Description': 'KMS key ' + keyid + ' does not have public access or limited by a Condition. Refer to the remediation instructions to review kms access policy', 'Remediation': {'Recommendation': {'Text': 'For more information on AWS KMS key policies refer to Using key policies in AWS KMS section of the AWS KMS Developer Guide.', 'Url': 'https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html'}}, 'ProductFields': {'Product Name': 'ElectricEye'}, 'Resources': [{'Type': 'AwsKmsAlias', 'Id': aliasArn, 'Partition': awsPartition, 'Region': awsRegion}], 'Compliance': {'Status': 'PASSED', 'RelatedRequirements': ['NIST CSF PR.AC-1', 'NIST SP 800-53 AC-1', 'NIST SP 800-53 AC-2', 'NIST SP 800-53 IA-1', 'NIST SP 800-53 IA-2', 'NIST SP 800-53 IA-3', 'NIST SP 800-53 IA-4', 'NIST SP 800-53 IA-5', 'NIST SP 800-53 IA-6', 'NIST SP 800-53 IA-7', 'NIST SP 800-53 IA-8', 'NIST SP 800-53 IA-9', 'NIST SP 800-53 IA-10', 'NIST SP 800-53 IA-11', 'AICPA TSC CC6.1', 'AICPA TSC CC6.2', 'ISO 27001:2013 A.9.2.1', 'ISO 27001:2013 A.9.2.2', 'ISO 27001:2013 A.9.2.3', 'ISO 27001:2013 A.9.2.4', 'ISO 27001:2013 A.9.2.6', 'ISO 27001:2013 A.9.3.1', 'ISO 27001:2013 A.9.4.2', 'ISO 27001:2013 A.9.4.3']}, 'Workflow': {'Status': 'RESOLVED'}, 'RecordState': 'ARCHIVED'} yield finding else: finding = {'SchemaVersion': '2018-10-08', 'Id': aliasArn + '/kms-key-exposed-check', 'ProductArn': f'arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default', 'GeneratorId': aliasArn, 'AwsAccountId': awsAccountId, 'Types': ['Software and Configuration Checks/AWS Security Best Practices', 'Effects/Data Exposure', 'Sensitive Data Identifications'], 'FirstObservedAt': iso8601Time, 'CreatedAt': iso8601Time, 'UpdatedAt': iso8601Time, 'Severity': {'Label': 'CRITICAL'}, 'Confidence': 99, 'Title': '[KMS.2] KMS keys should not have public access', 'Description': 'KMS key ' + keyid + ' has public access. 
Refer to the remediation instructions to review kms access policy', 'Remediation': {'Recommendation': {'Text': 'For more information on AWS KMS key policies refer to Using key policies in AWS KMS section of the AWS KMS Developer Guide.', 'Url': 'https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html'}}, 'ProductFields': {'Product Name': 'ElectricEye'}, 'Resources': [{'Type': 'AwsKmsAlias', 'Id': aliasArn, 'Partition': awsPartition, 'Region': awsRegion}], 'Compliance': {'Status': 'FAILED', 'RelatedRequirements': ['NIST CSF PR.AC-1', 'NIST SP 800-53 AC-1', 'NIST SP 800-53 AC-2', 'NIST SP 800-53 IA-1', 'NIST SP 800-53 IA-2', 'NIST SP 800-53 IA-3', 'NIST SP 800-53 IA-4', 'NIST SP 800-53 IA-5', 'NIST SP 800-53 IA-6', 'NIST SP 800-53 IA-7', 'NIST SP 800-53 IA-8', 'NIST SP 800-53 IA-9', 'NIST SP 800-53 IA-10', 'NIST SP 800-53 IA-11', 'AICPA TSC CC6.1', 'AICPA TSC CC6.2', 'ISO 27001:2013 A.9.2.1', 'ISO 27001:2013 A.9.2.2', 'ISO 27001:2013 A.9.2.3', 'ISO 27001:2013 A.9.2.4', 'ISO 27001:2013 A.9.2.6', 'ISO 27001:2013 A.9.3.1', 'ISO 27001:2013 A.9.4.2', 'ISO 27001:2013 A.9.4.3']}, 'Workflow': {'Status': 'NEW'}, 'RecordState': 'ACTIVE'} yield finding except botocore.exceptions.ClientError as error: if error.response['Error']['Code'] == 'AccessDeniedException': continue else: print(f'We found another error! {error}')
@registry.register_check('kms') def kms_key_exposed_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: """[KMS.2] KMS keys should not have public access""" response = cache.get('list_aliases') if response: response = response cache['list_aliases'] = kms.list_aliases() response = cache['list_aliases'] iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat() for alias in response['Aliases']: if 'TargetKeyId' in alias: aliasArn = alias['AliasArn'] keyid = alias['TargetKeyId'] try: policyString = kms.get_key_policy(KeyId=keyid, PolicyName='default') fail = False policy_json = policyString['Policy'] policy = json.loads(policy_json) for sid in policy['Statement']: if sid['Principal'] == '*': access = '*' else: access = sid['Principal'].get('AWS', None) if access != '*' or (access == '*' and 'Condition' in sid): continue else: fail = True break if not fail: finding = {'SchemaVersion': '2018-10-08', 'Id': aliasArn + '/kms-key-exposed-check', 'ProductArn': f'arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default', 'GeneratorId': aliasArn, 'AwsAccountId': awsAccountId, 'Types': ['Software and Configuration Checks/AWS Security Best Practices', 'Effects/Data Exposure', 'Sensitive Data Identifications'], 'FirstObservedAt': iso8601Time, 'CreatedAt': iso8601Time, 'UpdatedAt': iso8601Time, 'Severity': {'Label': 'INFORMATIONAL'}, 'Confidence': 75, 'Title': '[KMS.2] KMS keys should not have public access', 'Description': 'KMS key ' + keyid + ' does not have public access or limited by a Condition. Refer to the remediation instructions to review kms access policy', 'Remediation': {'Recommendation': {'Text': 'For more information on AWS KMS key policies refer to Using key policies in AWS KMS section of the AWS KMS Developer Guide.', 'Url': 'https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html'}}, 'ProductFields': {'Product Name': 'ElectricEye'}, 'Resources': [{'Type': 'AwsKmsAlias', 'Id': aliasArn, 'Partition': awsPartition, 'Region': awsRegion}], 'Compliance': {'Status': 'PASSED', 'RelatedRequirements': ['NIST CSF PR.AC-1', 'NIST SP 800-53 AC-1', 'NIST SP 800-53 AC-2', 'NIST SP 800-53 IA-1', 'NIST SP 800-53 IA-2', 'NIST SP 800-53 IA-3', 'NIST SP 800-53 IA-4', 'NIST SP 800-53 IA-5', 'NIST SP 800-53 IA-6', 'NIST SP 800-53 IA-7', 'NIST SP 800-53 IA-8', 'NIST SP 800-53 IA-9', 'NIST SP 800-53 IA-10', 'NIST SP 800-53 IA-11', 'AICPA TSC CC6.1', 'AICPA TSC CC6.2', 'ISO 27001:2013 A.9.2.1', 'ISO 27001:2013 A.9.2.2', 'ISO 27001:2013 A.9.2.3', 'ISO 27001:2013 A.9.2.4', 'ISO 27001:2013 A.9.2.6', 'ISO 27001:2013 A.9.3.1', 'ISO 27001:2013 A.9.4.2', 'ISO 27001:2013 A.9.4.3']}, 'Workflow': {'Status': 'RESOLVED'}, 'RecordState': 'ARCHIVED'} yield finding else: finding = {'SchemaVersion': '2018-10-08', 'Id': aliasArn + '/kms-key-exposed-check', 'ProductArn': f'arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default', 'GeneratorId': aliasArn, 'AwsAccountId': awsAccountId, 'Types': ['Software and Configuration Checks/AWS Security Best Practices', 'Effects/Data Exposure', 'Sensitive Data Identifications'], 'FirstObservedAt': iso8601Time, 'CreatedAt': iso8601Time, 'UpdatedAt': iso8601Time, 'Severity': {'Label': 'CRITICAL'}, 'Confidence': 99, 'Title': '[KMS.2] KMS keys should not have public access', 'Description': 'KMS key ' + keyid + ' has public access. 
Refer to the remediation instructions to review kms access policy', 'Remediation': {'Recommendation': {'Text': 'For more information on AWS KMS key policies refer to Using key policies in AWS KMS section of the AWS KMS Developer Guide.', 'Url': 'https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html'}}, 'ProductFields': {'Product Name': 'ElectricEye'}, 'Resources': [{'Type': 'AwsKmsAlias', 'Id': aliasArn, 'Partition': awsPartition, 'Region': awsRegion}], 'Compliance': {'Status': 'FAILED', 'RelatedRequirements': ['NIST CSF PR.AC-1', 'NIST SP 800-53 AC-1', 'NIST SP 800-53 AC-2', 'NIST SP 800-53 IA-1', 'NIST SP 800-53 IA-2', 'NIST SP 800-53 IA-3', 'NIST SP 800-53 IA-4', 'NIST SP 800-53 IA-5', 'NIST SP 800-53 IA-6', 'NIST SP 800-53 IA-7', 'NIST SP 800-53 IA-8', 'NIST SP 800-53 IA-9', 'NIST SP 800-53 IA-10', 'NIST SP 800-53 IA-11', 'AICPA TSC CC6.1', 'AICPA TSC CC6.2', 'ISO 27001:2013 A.9.2.1', 'ISO 27001:2013 A.9.2.2', 'ISO 27001:2013 A.9.2.3', 'ISO 27001:2013 A.9.2.4', 'ISO 27001:2013 A.9.2.6', 'ISO 27001:2013 A.9.3.1', 'ISO 27001:2013 A.9.4.2', 'ISO 27001:2013 A.9.4.3']}, 'Workflow': {'Status': 'NEW'}, 'RecordState': 'ACTIVE'} yield finding except botocore.exceptions.ClientError as error: if error.response['Error']['Code'] == 'AccessDeniedException': continue else: print(f'We found another error! {error}')
ElectricEye
positive
def write(self, path: str=None) -> None: if path is None: <DeepExtract> home = os.path.expanduser('~') path = f'{home}/.{CONTEXT_FILE_NAME}' </DeepExtract> return super().write(path)
def write(self, path: str=None) -> None: if path is None: home = os.path.expanduser('~') path = f'{home}/.{CONTEXT_FILE_NAME}' return super().write(path)
cowait
positive
def build(self): self.items = self.model.objects.listable_objects() self.set_items_for_entity() <DeepExtract> if any((kind in self.item_collections for kind in ('open', 'archived'))): if 'open' in self.item_collections: self.open = self.items.filter(date__gte=self.now) if 'archived' in self.item_collections: self.archived = self.items.filter(date__lt=self.now) self.items = getattr(self, self.item_collections[0]) </DeepExtract> self.filter_on_search_terms() self.itemfilter = self.filter_set(self.items, self.request.GET)
def build(self): self.items = self.model.objects.listable_objects() self.set_items_for_entity() if any((kind in self.item_collections for kind in ('open', 'archived'))): if 'open' in self.item_collections: self.open = self.items.filter(date__gte=self.now) if 'archived' in self.item_collections: self.archived = self.items.filter(date__lt=self.now) self.items = getattr(self, self.item_collections[0]) self.filter_on_search_terms() self.itemfilter = self.filter_set(self.items, self.request.GET)
Arkestra
positive
def test_closing_namespaced_tag(self): markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>' <DeepExtract> builder = kwargs.pop('builder', self.default_builder) soup = BeautifulSoup(markup, builder=builder, **kwargs) </DeepExtract> self.assertEqual(unicode(soup.p), markup)
def test_closing_namespaced_tag(self): markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>' builder = kwargs.pop('builder', self.default_builder) soup = BeautifulSoup(markup, builder=builder, **kwargs) self.assertEqual(unicode(soup.p), markup)
CnkiSpider
positive
def __init__(self, C_in, C_out, expansion, stride): assert stride in [1, 2] self.res_connect = stride == 1 and C_in == C_out <DeepExtract> ret = int(C_in * expansion) if 8 > 0 and C_in * expansion % 8 != 0: ret = int((_py2_round(C_in * expansion / 8) or 8) * 8) C_mid = ret </DeepExtract> ops = [Conv2d(C_in, C_mid, 1, 1, 0, bias=False), BatchNorm2d(C_mid), nn.ReLU(inplace=True), Shift(C_mid, 5, stride, 2), Conv2d(C_mid, C_out, 1, 1, 0, bias=False), BatchNorm2d(C_out)] super(ShiftBlock5x5, self).__init__(*ops)
def __init__(self, C_in, C_out, expansion, stride): assert stride in [1, 2] self.res_connect = stride == 1 and C_in == C_out ret = int(C_in * expansion) if 8 > 0 and C_in * expansion % 8 != 0: ret = int((_py2_round(C_in * expansion / 8) or 8) * 8) C_mid = ret ops = [Conv2d(C_in, C_mid, 1, 1, 0, bias=False), BatchNorm2d(C_mid), nn.ReLU(inplace=True), Shift(C_mid, 5, stride, 2), Conv2d(C_mid, C_out, 1, 1, 0, bias=False), BatchNorm2d(C_out)] super(ShiftBlock5x5, self).__init__(*ops)
bezier_curve_text_spotting
positive
def test_match_device_no_hostnames(mocker, capsys): gethostbyname_ex_mock = mocker.patch('socket.gethostbyname_ex', return_value=(None, [], None)) <DeepExtract> try: config = ConfigParser.SafeConfigParser() except AttributeError: config = ConfigParser() config.add_section(mount_efs.CONFIG_SECTION) config.add_section(mount_efs.CLOUDWATCH_LOG_SECTION) config.set(mount_efs.CONFIG_SECTION, 'dns_name_format', dns_name_format) config.set(mount_efs.CONFIG_SECTION, 'dns_name_suffix', dns_name_suffix) config.set(mount_efs.CLOUDWATCH_LOG_SECTION, 'enabled', cloudwatch_enabled) if has_fallback_to_mount_target_ip_address_item: config.set(mount_efs.CONFIG_SECTION, mount_efs.FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM, str(fallback_to_mount_target_ip_address)) config = config </DeepExtract> with pytest.raises(SystemExit) as ex: mount_efs.match_device(config, 'custom-cname.example.com', DEFAULT_NFS_OPTIONS) assert 0 != ex.value.code (out, err) = capsys.readouterr() assert 'did not resolve to an EFS mount target' in err utils.assert_called(gethostbyname_ex_mock)
def test_match_device_no_hostnames(mocker, capsys): gethostbyname_ex_mock = mocker.patch('socket.gethostbyname_ex', return_value=(None, [], None)) try: config = ConfigParser.SafeConfigParser() except AttributeError: config = ConfigParser() config.add_section(mount_efs.CONFIG_SECTION) config.add_section(mount_efs.CLOUDWATCH_LOG_SECTION) config.set(mount_efs.CONFIG_SECTION, 'dns_name_format', dns_name_format) config.set(mount_efs.CONFIG_SECTION, 'dns_name_suffix', dns_name_suffix) config.set(mount_efs.CLOUDWATCH_LOG_SECTION, 'enabled', cloudwatch_enabled) if has_fallback_to_mount_target_ip_address_item: config.set(mount_efs.CONFIG_SECTION, mount_efs.FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM, str(fallback_to_mount_target_ip_address)) config = config with pytest.raises(SystemExit) as ex: mount_efs.match_device(config, 'custom-cname.example.com', DEFAULT_NFS_OPTIONS) assert 0 != ex.value.code (out, err) = capsys.readouterr() assert 'did not resolve to an EFS mount target' in err utils.assert_called(gethostbyname_ex_mock)
efs-utils
positive
def reset(self, **kwargs): self.env.reset(**kwargs) <DeepExtract> (obs, _, done, _) = self.env.step(1) if done: self.env.reset() (obs, _, done, _) = self.env.step(2) if done: obs = self.env.reset() done = False (obs, _) = (obs, done) </DeepExtract> self.lives = self.env.unwrapped.ale.lives() return obs
def reset(self, **kwargs): self.env.reset(**kwargs) (obs, _, done, _) = self.env.step(1) if done: self.env.reset() (obs, _, done, _) = self.env.step(2) if done: obs = self.env.reset() done = False (obs, _) = (obs, done) self.lives = self.env.unwrapped.ale.lives() return obs
autonomous-learning-library
positive
def register_implicit_nodes(*channels, **named_channels): <DeepExtract> for channel in channels: if isinstance(channel, Channel): _check(type(self), channel) elif isinstance(channel, (list, tuple)): for ch in channel: _check(type(self), ch) elif isinstance(channel, dict): for ch in channel.values(): _check(type(self), ch) else: raise ValueError(f'Do not support {type(channel)}.') for channel in named_channels.values(): if not isinstance(channel, Channel): raise ValueError(f'Do not support {type(channel)}. ') _check(type(self), channel) </DeepExtract> super(CondNeuGroup, self).register_implicit_nodes(*channels, **named_channels)
def register_implicit_nodes(*channels, **named_channels): for channel in channels: if isinstance(channel, Channel): _check(type(self), channel) elif isinstance(channel, (list, tuple)): for ch in channel: _check(type(self), ch) elif isinstance(channel, dict): for ch in channel.values(): _check(type(self), ch) else: raise ValueError(f'Do not support {type(channel)}.') for channel in named_channels.values(): if not isinstance(channel, Channel): raise ValueError(f'Do not support {type(channel)}. ') _check(type(self), channel) super(CondNeuGroup, self).register_implicit_nodes(*channels, **named_channels)
BrainPy
positive
def test_not_found_empty_name_on_server(self): <DeepExtract> async def create(): port = find_unused_port() server = await aiozmq.rpc.serve_rpc(MyHandler(self.loop), bind='tcp://127.0.0.1:{}'.format(port), loop=self.loop if use_loop else None, log_exceptions=log_exceptions, exclude_log_exceptions=exclude_log_exceptions) client = await aiozmq.rpc.connect_rpc(connect='tcp://127.0.0.1:{}'.format(port), loop=self.loop if use_loop else None, error_table=error_table, timeout=timeout) (client, server) = (client, server) (self.client, self.server) = self.loop.run_until_complete(create()) (client, server) = (self.client, self.server) </DeepExtract> async def communicate(): with self.assertRaises(aiozmq.rpc.NotFoundError) as exc: await client._proto.call('', (), {}) self.assertEqual(('',), exc.exception.args) self.loop.run_until_complete(communicate())
def test_not_found_empty_name_on_server(self): async def create(): port = find_unused_port() server = await aiozmq.rpc.serve_rpc(MyHandler(self.loop), bind='tcp://127.0.0.1:{}'.format(port), loop=self.loop if use_loop else None, log_exceptions=log_exceptions, exclude_log_exceptions=exclude_log_exceptions) client = await aiozmq.rpc.connect_rpc(connect='tcp://127.0.0.1:{}'.format(port), loop=self.loop if use_loop else None, error_table=error_table, timeout=timeout) (client, server) = (client, server) (self.client, self.server) = self.loop.run_until_complete(create()) (client, server) = (self.client, self.server) async def communicate(): with self.assertRaises(aiozmq.rpc.NotFoundError) as exc: await client._proto.call('', (), {}) self.assertEqual(('',), exc.exception.args) self.loop.run_until_complete(communicate())
aiozmq
positive
def closeEvent(self, event): if self._is_first_close: self._is_first_close = False self._close_timer.start() <DeepExtract> self._pos_ani.setDirection(QtCore.QAbstractAnimation.Backward) self._pos_ani.start() self._opacity_ani.setDirection(QtCore.QAbstractAnimation.Backward) self._opacity_ani.start() </DeepExtract> event.ignore() else: event.accept()
def closeEvent(self, event): if self._is_first_close: self._is_first_close = False self._close_timer.start() self._pos_ani.setDirection(QtCore.QAbstractAnimation.Backward) self._pos_ani.start() self._opacity_ani.setDirection(QtCore.QAbstractAnimation.Backward) self._opacity_ani.start() event.ignore() else: event.accept()
dayu_widgets
positive
def query(self, idx, l, r, a, b): if self.flag[idx] == True: self.st[idx] = self.lazy[idx] self.flag[idx] = False if l != r: self.lazy[self.left(idx)] = self.lazy[idx] self.lazy[self.right(idx)] = self.lazy[idx] self.flag[self.left(idx)] = True self.flag[self.right(idx)] = True if r < a or l > b: return -math.inf if l >= a and r <= b: return self.st[idx] mid = (l + r) // 2 <DeepExtract> if self.flag[self.left(idx)] == True: self.st[self.left(idx)] = self.lazy[self.left(idx)] self.flag[self.left(idx)] = False if l != mid: self.lazy[self.left(self.left(idx))] = self.lazy[self.left(idx)] self.lazy[self.right(self.left(idx))] = self.lazy[self.left(idx)] self.flag[self.left(self.left(idx))] = True self.flag[self.right(self.left(idx))] = True if mid < a or l > b: q1 = -math.inf if l >= a and mid <= b: q1 = self.st[self.left(idx)] mid = (l + mid) // 2 q1 = self.query(self.left(self.left(idx)), l, mid, a, b) q2 = self.query(self.right(self.left(idx)), mid + 1, mid, a, b) q1 = max(q1, q2) </DeepExtract> <DeepExtract> if self.flag[self.right(idx)] == True: self.st[self.right(idx)] = self.lazy[self.right(idx)] self.flag[self.right(idx)] = False if mid + 1 != r: self.lazy[self.left(self.right(idx))] = self.lazy[self.right(idx)] self.lazy[self.right(self.right(idx))] = self.lazy[self.right(idx)] self.flag[self.left(self.right(idx))] = True self.flag[self.right(self.right(idx))] = True if r < a or mid + 1 > b: q2 = -math.inf if mid + 1 >= a and r <= b: q2 = self.st[self.right(idx)] mid = (mid + 1 + r) // 2 q1 = self.query(self.left(self.right(idx)), mid + 1, mid, a, b) q2 = self.query(self.right(self.right(idx)), mid + 1, r, a, b) q2 = max(q1, q2) </DeepExtract> return max(q1, q2)
def query(self, idx, l, r, a, b): if self.flag[idx] == True: self.st[idx] = self.lazy[idx] self.flag[idx] = False if l != r: self.lazy[self.left(idx)] = self.lazy[idx] self.lazy[self.right(idx)] = self.lazy[idx] self.flag[self.left(idx)] = True self.flag[self.right(idx)] = True if r < a or l > b: return -math.inf if l >= a and r <= b: return self.st[idx] mid = (l + r) // 2 if self.flag[self.left(idx)] == True: self.st[self.left(idx)] = self.lazy[self.left(idx)] self.flag[self.left(idx)] = False if l != mid: self.lazy[self.left(self.left(idx))] = self.lazy[self.left(idx)] self.lazy[self.right(self.left(idx))] = self.lazy[self.left(idx)] self.flag[self.left(self.left(idx))] = True self.flag[self.right(self.left(idx))] = True if mid < a or l > b: q1 = -math.inf if l >= a and mid <= b: q1 = self.st[self.left(idx)] mid = (l + mid) // 2 q1 = self.query(self.left(self.left(idx)), l, mid, a, b) q2 = self.query(self.right(self.left(idx)), mid + 1, mid, a, b) q1 = max(q1, q2) if self.flag[self.right(idx)] == True: self.st[self.right(idx)] = self.lazy[self.right(idx)] self.flag[self.right(idx)] = False if mid + 1 != r: self.lazy[self.left(self.right(idx))] = self.lazy[self.right(idx)] self.lazy[self.right(self.right(idx))] = self.lazy[self.right(idx)] self.flag[self.left(self.right(idx))] = True self.flag[self.right(self.right(idx))] = True if r < a or mid + 1 > b: q2 = -math.inf if mid + 1 >= a and r <= b: q2 = self.st[self.right(idx)] mid = (mid + 1 + r) // 2 q1 = self.query(self.left(self.right(idx)), mid + 1, mid, a, b) q2 = self.query(self.right(self.right(idx)), mid + 1, r, a, b) q2 = max(q1, q2) return max(q1, q2)
-
positive
def main(_): ps_hosts = FLAGS.ps_hosts.split(',') worker_hosts = FLAGS.worker_hosts.split(',') cluster = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts}) server = tf.train.Server(cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_id) if FLAGS.job_name == 'ps': server.join() elif FLAGS.job_name == 'worker': with tf.device(tf.train.replica_device_setter(worker_device='/job:worker/task:%d' % FLAGS.task_id, cluster=cluster)): hps = resnet_model.HParams(batch_size=FLAGS.batch_size, num_classes=NUM_LABELS, min_lrn_rate=0.0001, lrn_rate=0.1, num_residual_units=5, use_bottleneck=True, weight_decay_rate=0.0002, relu_leakiness=0.1, optimizer='mom') mode = 'train' <DeepExtract> images = np.random.rand(hps.batch_size, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS) - 0.5 images = tf.cast(images, tf.float32) labels = np.random.randint(0, 9, size=hps.batch_size) one_hot = np.zeros((labels.size, NUM_LABELS)) one_hot[np.arange(labels.size), labels] = 1 one_hot = tf.cast(one_hot, tf.float32) (images, labels) = (images, one_hot) </DeepExtract> model = resnet_model.ResNet(hps, images, labels, mode) model.build_graph() cross_entropy = model.cost global_step = tf.Variable(0) gradient_descent_opt = tf.train.GradientDescentOptimizer(LEARNING_RATE) num_workers = len(worker_hosts) sync_rep_opt = tf.train.SyncReplicasOptimizer(gradient_descent_opt, replicas_to_aggregate=num_workers, replica_id=FLAGS.task_id, total_num_replicas=num_workers) train_op = sync_rep_opt.minimize(cross_entropy, global_step=global_step) init_token_op = sync_rep_opt.get_init_tokens_op() chief_queue_runner = sync_rep_opt.get_chief_queue_runner() init_op = tf.initialize_all_variables() is_chief = FLAGS.task_id == 0 sv = tf.train.Supervisor(is_chief=FLAGS.task_id == 0, init_op=init_op, global_step=global_step) with sv.managed_session(server.target) as sess: if is_chief: sv.start_queue_runners(sess, [chief_queue_runner]) sess.run(init_token_op) num_steps_burn_in = 10 total_duration = 0 total_duration_squared = 0 step = 0 lrn_rate = 0.1 while step <= 2000: start_time = time.time() (_, step) = sess.run([train_op, global_step], feed_dict={model.lrn_rate: lrn_rate}) duration = time.time() - start_time examples_per_sec = hps.batch_size / float(duration) format_str = 'Worker %d: %s: step %d, loss = NA(%.4f examples/sec; %.3f sec/batch)' if step > num_steps_burn_in: print(format_str % (FLAGS.task_id, datetime.now(), step, examples_per_sec, duration)) sys.stdout.flush() else: print('Not considering burn-in step %d (%.4f samples/sec; %.3f sec/batch)' % (step, examples_per_sec, duration)) sys.stdout.flush() sv.stop()
def main(_): ps_hosts = FLAGS.ps_hosts.split(',') worker_hosts = FLAGS.worker_hosts.split(',') cluster = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts}) server = tf.train.Server(cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_id) if FLAGS.job_name == 'ps': server.join() elif FLAGS.job_name == 'worker': with tf.device(tf.train.replica_device_setter(worker_device='/job:worker/task:%d' % FLAGS.task_id, cluster=cluster)): hps = resnet_model.HParams(batch_size=FLAGS.batch_size, num_classes=NUM_LABELS, min_lrn_rate=0.0001, lrn_rate=0.1, num_residual_units=5, use_bottleneck=True, weight_decay_rate=0.0002, relu_leakiness=0.1, optimizer='mom') mode = 'train' images = np.random.rand(hps.batch_size, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS) - 0.5 images = tf.cast(images, tf.float32) labels = np.random.randint(0, 9, size=hps.batch_size) one_hot = np.zeros((labels.size, NUM_LABELS)) one_hot[np.arange(labels.size), labels] = 1 one_hot = tf.cast(one_hot, tf.float32) (images, labels) = (images, one_hot) model = resnet_model.ResNet(hps, images, labels, mode) model.build_graph() cross_entropy = model.cost global_step = tf.Variable(0) gradient_descent_opt = tf.train.GradientDescentOptimizer(LEARNING_RATE) num_workers = len(worker_hosts) sync_rep_opt = tf.train.SyncReplicasOptimizer(gradient_descent_opt, replicas_to_aggregate=num_workers, replica_id=FLAGS.task_id, total_num_replicas=num_workers) train_op = sync_rep_opt.minimize(cross_entropy, global_step=global_step) init_token_op = sync_rep_opt.get_init_tokens_op() chief_queue_runner = sync_rep_opt.get_chief_queue_runner() init_op = tf.initialize_all_variables() is_chief = FLAGS.task_id == 0 sv = tf.train.Supervisor(is_chief=FLAGS.task_id == 0, init_op=init_op, global_step=global_step) with sv.managed_session(server.target) as sess: if is_chief: sv.start_queue_runners(sess, [chief_queue_runner]) sess.run(init_token_op) num_steps_burn_in = 10 total_duration = 0 total_duration_squared = 0 step = 0 lrn_rate = 0.1 while step <= 2000: start_time = time.time() (_, step) = sess.run([train_op, global_step], feed_dict={model.lrn_rate: lrn_rate}) duration = time.time() - start_time examples_per_sec = hps.batch_size / float(duration) format_str = 'Worker %d: %s: step %d, loss = NA(%.4f examples/sec; %.3f sec/batch)' if step > num_steps_burn_in: print(format_str % (FLAGS.task_id, datetime.now(), step, examples_per_sec, duration)) sys.stdout.flush() else: print('Not considering burn-in step %d (%.4f samples/sec; %.3f sec/batch)' % (step, examples_per_sec, duration)) sys.stdout.flush() sv.stop()
deeplearning-benchmark
positive
def _stateful_validation(self): self.clone = self.validation_data.get(CheckKeys.CLONE) if not self.clone: self.clone_location = self.validation_data.get(CheckKeys.CLONE_LOCATION) else: <DeepExtract> self.state.set_git_env() self.clone_location = self.state.git_update(self.validation_data.get(FieldKeys.GIT_URL), self.validation_data.get(FieldKeys.GIT_BRANCH)) self.state.unset_git_env() </DeepExtract> <DeepExtract> pass </DeepExtract>
def _stateful_validation(self): self.clone = self.validation_data.get(CheckKeys.CLONE) if not self.clone: self.clone_location = self.validation_data.get(CheckKeys.CLONE_LOCATION) else: self.state.set_git_env() self.clone_location = self.state.git_update(self.validation_data.get(FieldKeys.GIT_URL), self.validation_data.get(FieldKeys.GIT_BRANCH)) self.state.unset_git_env() pass
container-pipeline-service
positive
def test_integration(self): number_of_data_points = 4 create_csv_file(self.a_data_container_path, number_of_data_points=number_of_data_points) csv_dataset_container = CSVDatasetContainer(self.a_data_container_path, column_names=default_csv_column_name) <DeepExtract> expected = number_of_data_points self.assertEqual(expected, len(csv_dataset_container)) </DeepExtract> number_of_data_points = 5 create_csv_file(self.a_data_container_path, number_of_data_points=number_of_data_points) csv_dataset_container = CSVDatasetContainer(self.a_data_container_path, column_names=default_csv_column_name) <DeepExtract> expected = number_of_data_points self.assertEqual(expected, len(csv_dataset_container)) </DeepExtract>
def test_integration(self): number_of_data_points = 4 create_csv_file(self.a_data_container_path, number_of_data_points=number_of_data_points) csv_dataset_container = CSVDatasetContainer(self.a_data_container_path, column_names=default_csv_column_name) expected = number_of_data_points self.assertEqual(expected, len(csv_dataset_container)) number_of_data_points = 5 create_csv_file(self.a_data_container_path, number_of_data_points=number_of_data_points) csv_dataset_container = CSVDatasetContainer(self.a_data_container_path, column_names=default_csv_column_name) expected = number_of_data_points self.assertEqual(expected, len(csv_dataset_container))
deepparse
positive
def push_setattr(name, value): if self.param_store is None: <DeepExtract> if '.' not in name: return setattr(self, name, value) (name, subname) = name.split('.', 1) return _setattr_recursive(getattr(self, name), subname, value) </DeepExtract> elif isinstance(self.param_store, self.__ray.actor.ActorHandle): self.__ray.get(self.param_store.push_setattr.remote(name, value)) else: self.param_store.push_setattr(name, value)
def push_setattr(name, value): if self.param_store is None: if '.' not in name: return setattr(self, name, value) (name, subname) = name.split('.', 1) return _setattr_recursive(getattr(self, name), subname, value) elif isinstance(self.param_store, self.__ray.actor.ActorHandle): self.__ray.get(self.param_store.push_setattr.remote(name, value)) else: self.param_store.push_setattr(name, value)
coax
positive
def __init__(self, typedef): <DeepExtract> if subclassof(typedef, Type): typedef = typedef() self.inner_typedef = typedef </DeepExtract> super().__init__()
def __init__(self, typedef): if subclassof(typedef, Type): typedef = typedef() self.inner_typedef = typedef super().__init__()
bloop
positive
def reset_camera(self): <DeepExtract> if (5, 0, 0) is not None: self.opts['center'].setX((5, 0, 0)[0]) self.opts['center'].setY((5, 0, 0)[1]) self.opts['center'].setZ((5, 0, 0)[2]) if 20 is not None: self.opts['distance'] = 20 if 30 is not None: self.opts['elevation'] = 30 if -180 is not None: self.opts['azimuth'] = -180 self.update() </DeepExtract> self.update()
def reset_camera(self): if (5, 0, 0) is not None: self.opts['center'].setX((5, 0, 0)[0]) self.opts['center'].setY((5, 0, 0)[1]) self.opts['center'].setZ((5, 0, 0)[2]) if 20 is not None: self.opts['distance'] = 20 if 30 is not None: self.opts['elevation'] = 30 if -180 is not None: self.opts['azimuth'] = -180 self.update() self.update()
CLOCs
positive
def get_absolute_path(file_path): <DeepExtract> is_relative = False BLENDER_RELATIVE_PATH_PREFIX = '//' prefix_length = len(BLENDER_RELATIVE_PATH_PREFIX) if file_path.startswith(BLENDER_RELATIVE_PATH_PREFIX): file_path = file_path[prefix_length:] is_relative = True [is_relative, file_path] = (is_relative, file_path) </DeepExtract> if is_relative: blend_file_path = os.path.dirname(bpy.data.filepath) file_path = '{}/{}'.format(blend_file_path, file_path) return os.path.abspath(file_path)
def get_absolute_path(file_path): is_relative = False BLENDER_RELATIVE_PATH_PREFIX = '//' prefix_length = len(BLENDER_RELATIVE_PATH_PREFIX) if file_path.startswith(BLENDER_RELATIVE_PATH_PREFIX): file_path = file_path[prefix_length:] is_relative = True [is_relative, file_path] = (is_relative, file_path) if is_relative: blend_file_path = os.path.dirname(bpy.data.filepath) file_path = '{}/{}'.format(blend_file_path, file_path) return os.path.abspath(file_path)
BCRYExporter
positive
def websocket(self, rule, **options): def decorator(f): <DeepExtract> endpoint = options.get('endpoint') if endpoint: assert '.' not in endpoint, 'Blueprint endpoints should not contain dots' if f and hasattr(f, '__name__'): assert '.' not in f.__name__, 'Blueprint view function name should not contain dots' self.record(lambda s: s.add_app_url_rule(rule, f, **options)) </DeepExtract> return f return decorator
def websocket(self, rule, **options): def decorator(f): endpoint = options.get('endpoint') if endpoint: assert '.' not in endpoint, 'Blueprint endpoints should not contain dots' if f and hasattr(f, '__name__'): assert '.' not in f.__name__, 'Blueprint view function name should not contain dots' self.record(lambda s: s.add_app_url_rule(rule, f, **options)) return f return decorator
alita
positive
def _resource_apply_dense(self, grad, var, apply_state=None): (var_device, var_dtype) = (var.device, var.dtype.base_dtype) coefficients = (apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype) <DeepExtract> lr_idx = -1 for (i, pattern_lr) in enumerate(self.pattern_lrs): for pattern in pattern_lr['patterns']: if re.search(pattern, var.name): lr_idx = i break if lr_idx != -1: break if lr_idx == -1: lr = coefficients['lr_t'] else: lr = coefficients[f'lr-{lr_idx}_t'] lr = lr </DeepExtract> m = self.get_slot(var, 'm') v = self.get_slot(var, 'v') if not self.amsgrad: return training_ops.resource_apply_adam(var.handle, m.handle, v.handle, coefficients['beta_1_power'], coefficients['beta_2_power'], lr, coefficients['beta_1_t'], coefficients['beta_2_t'], coefficients['epsilon'], grad, use_locking=self._use_locking) else: vhat = self.get_slot(var, 'vhat') return training_ops.resource_apply_adam_with_amsgrad(var.handle, m.handle, v.handle, vhat.handle, coefficients['beta_1_power'], coefficients['beta_2_power'], lr, coefficients['beta_1_t'], coefficients['beta_2_t'], coefficients['epsilon'], grad, use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var, apply_state=None): (var_device, var_dtype) = (var.device, var.dtype.base_dtype) coefficients = (apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype) lr_idx = -1 for (i, pattern_lr) in enumerate(self.pattern_lrs): for pattern in pattern_lr['patterns']: if re.search(pattern, var.name): lr_idx = i break if lr_idx != -1: break if lr_idx == -1: lr = coefficients['lr_t'] else: lr = coefficients[f'lr-{lr_idx}_t'] lr = lr m = self.get_slot(var, 'm') v = self.get_slot(var, 'v') if not self.amsgrad: return training_ops.resource_apply_adam(var.handle, m.handle, v.handle, coefficients['beta_1_power'], coefficients['beta_2_power'], lr, coefficients['beta_1_t'], coefficients['beta_2_t'], coefficients['epsilon'], grad, use_locking=self._use_locking) else: vhat = self.get_slot(var, 'vhat') return training_ops.resource_apply_adam_with_amsgrad(var.handle, m.handle, v.handle, vhat.handle, coefficients['beta_1_power'], coefficients['beta_2_power'], lr, coefficients['beta_1_t'], coefficients['beta_2_t'], coefficients['epsilon'], grad, use_locking=self._use_locking)
capreolus
positive
def fcos_losses(labels, reg_targets, logits_pred, reg_pred, ctrness_pred, focal_loss_alpha, focal_loss_gamma, iou_loss): num_classes = logits_pred.size(1) labels = labels.flatten() pos_inds = torch.nonzero(labels != num_classes).squeeze(1) num_pos_local = pos_inds.numel() num_gpus = get_world_size() total_num_pos = reduce_sum(pos_inds.new_tensor([num_pos_local])).item() num_pos_avg = max(total_num_pos / num_gpus, 1.0) class_target = torch.zeros_like(logits_pred) class_target[pos_inds, labels[pos_inds]] = 1 class_loss = sigmoid_focal_loss_jit(logits_pred, class_target, alpha=focal_loss_alpha, gamma=focal_loss_gamma, reduction='sum') / num_pos_avg reg_pred = reg_pred[pos_inds] reg_targets = reg_targets[pos_inds] ctrness_pred = ctrness_pred[pos_inds] <DeepExtract> if len(reg_targets) == 0: ctrness_targets = reg_targets.new_zeros(len(reg_targets)) left_right = reg_targets[:, [0, 2]] top_bottom = reg_targets[:, [1, 3]] ctrness = left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0] * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) ctrness_targets = torch.sqrt(ctrness) </DeepExtract> ctrness_targets_sum = ctrness_targets.sum() ctrness_norm = max(reduce_sum(ctrness_targets_sum).item() / num_gpus, 1e-06) reg_loss = iou_loss(reg_pred, reg_targets, ctrness_targets) / ctrness_norm ctrness_loss = F.binary_cross_entropy_with_logits(ctrness_pred, ctrness_targets, reduction='sum') / num_pos_avg losses = {'loss_fcos_cls': class_loss, 'loss_fcos_loc': reg_loss, 'loss_fcos_ctr': ctrness_loss} return (losses, {})
def fcos_losses(labels, reg_targets, logits_pred, reg_pred, ctrness_pred, focal_loss_alpha, focal_loss_gamma, iou_loss): num_classes = logits_pred.size(1) labels = labels.flatten() pos_inds = torch.nonzero(labels != num_classes).squeeze(1) num_pos_local = pos_inds.numel() num_gpus = get_world_size() total_num_pos = reduce_sum(pos_inds.new_tensor([num_pos_local])).item() num_pos_avg = max(total_num_pos / num_gpus, 1.0) class_target = torch.zeros_like(logits_pred) class_target[pos_inds, labels[pos_inds]] = 1 class_loss = sigmoid_focal_loss_jit(logits_pred, class_target, alpha=focal_loss_alpha, gamma=focal_loss_gamma, reduction='sum') / num_pos_avg reg_pred = reg_pred[pos_inds] reg_targets = reg_targets[pos_inds] ctrness_pred = ctrness_pred[pos_inds] if len(reg_targets) == 0: ctrness_targets = reg_targets.new_zeros(len(reg_targets)) left_right = reg_targets[:, [0, 2]] top_bottom = reg_targets[:, [1, 3]] ctrness = left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0] * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) ctrness_targets = torch.sqrt(ctrness) ctrness_targets_sum = ctrness_targets.sum() ctrness_norm = max(reduce_sum(ctrness_targets_sum).item() / num_gpus, 1e-06) reg_loss = iou_loss(reg_pred, reg_targets, ctrness_targets) / ctrness_norm ctrness_loss = F.binary_cross_entropy_with_logits(ctrness_pred, ctrness_targets, reduction='sum') / num_pos_avg losses = {'loss_fcos_cls': class_loss, 'loss_fcos_loc': reg_loss, 'loss_fcos_ctr': ctrness_loss} return (losses, {})
dance
positive
def _get_default_ui(self) -> UI: from cleo.ui.progress_bar import ProgressBar <DeepExtract> if input is None: input = ArgvInput() input.set_stream(sys.stdin) if output is None: output = StreamOutput(sys.stdout) if error_output is None: error_output = StreamOutput(sys.stderr) io = IO(input, output, error_output) </DeepExtract> return UI([ProgressBar(io)])
def _get_default_ui(self) -> UI: from cleo.ui.progress_bar import ProgressBar if input is None: input = ArgvInput() input.set_stream(sys.stdin) if output is None: output = StreamOutput(sys.stdout) if error_output is None: error_output = StreamOutput(sys.stderr) io = IO(input, output, error_output) return UI([ProgressBar(io)])
cleo
positive
def test_load_dataset(self, get_sentence_field): <DeepExtract> def _get_sentence_field(): sentence_field = SentenceDefault('space', GeneralVocab(), convert_to_lower_letter=True) sentence_field = _get_sentence_field </DeepExtract> sentence_field_content = super().base_test_load_dataset(sentence_field, DummyDataset.get_sentence_iterator()) assert sentence_field_content._original_data == DummyDataset.sentences
def test_load_dataset(self, get_sentence_field): def _get_sentence_field(): sentence_field = SentenceDefault('space', GeneralVocab(), convert_to_lower_letter=True) sentence_field = _get_sentence_field sentence_field_content = super().base_test_load_dataset(sentence_field, DummyDataset.get_sentence_iterator()) assert sentence_field_content._original_data == DummyDataset.sentences
cotk
positive
def _check_send_batch(self, result=None): """Check if we have enough messages/bytes to send Since this can be called from the callback chain, we pass through our first (non-self) arg """ if self.batch_every_n and self.batch_every_n <= self._waitingMsgCount or (self.batch_every_b and self.batch_every_b <= self._waitingByteCount): <DeepExtract> if not self._batch_reqs or self._batch_send_d: return (requests, self._batch_reqs) = (self._batch_reqs, []) self._waitingByteCount = 0 self._waitingMsgCount = 0 d_list = [] for req in requests: d_list.append(self._next_partition(req.topic, req.key)) d = self._batch_send_d = Deferred() d.addCallback(lambda r: DeferredList(d_list, consumeErrors=True)) d.addCallback(self._send_requests, requests) d.addBoth(self._complete_batch_send) d.addBoth(self._check_send_batch) d.callback(None) </DeepExtract> return result
def _check_send_batch(self, result=None): """Check if we have enough messages/bytes to send Since this can be called from the callback chain, we pass through our first (non-self) arg """ if self.batch_every_n and self.batch_every_n <= self._waitingMsgCount or (self.batch_every_b and self.batch_every_b <= self._waitingByteCount): if not self._batch_reqs or self._batch_send_d: return (requests, self._batch_reqs) = (self._batch_reqs, []) self._waitingByteCount = 0 self._waitingMsgCount = 0 d_list = [] for req in requests: d_list.append(self._next_partition(req.topic, req.key)) d = self._batch_send_d = Deferred() d.addCallback(lambda r: DeferredList(d_list, consumeErrors=True)) d.addCallback(self._send_requests, requests) d.addBoth(self._complete_batch_send) d.addBoth(self._check_send_batch) d.callback(None) return result
afkak
positive
def model_fn(model, data): if cfg.RPN.ENABLED: (pts_rect, pts_features, pts_input) = (data['pts_rect'], data['pts_features'], data['pts_input']) gt_boxes3d = data['gt_boxes3d'] if not cfg.RPN.FIXED: (rpn_cls_label, rpn_reg_label) = (data['rpn_cls_label'], data['rpn_reg_label']) rpn_cls_label = torch.from_numpy(rpn_cls_label).cuda(non_blocking=True).long() rpn_reg_label = torch.from_numpy(rpn_reg_label).cuda(non_blocking=True).float() inputs = torch.from_numpy(pts_input).cuda(non_blocking=True).float() gt_boxes3d = torch.from_numpy(gt_boxes3d).cuda(non_blocking=True).float() input_data = {'pts_input': inputs, 'gt_boxes3d': gt_boxes3d} else: input_data = {} for (key, val) in data.items(): if key != 'sample_id': input_data[key] = torch.from_numpy(val).contiguous().cuda(non_blocking=True).float() if not cfg.RCNN.ROI_SAMPLE_JIT: pts_input = torch.cat((input_data['pts_input'], input_data['pts_features']), dim=-1) input_data['pts_input'] = pts_input ret_dict = model(input_data) tb_dict = {} disp_dict = {} loss = 0 if cfg.RPN.ENABLED and (not cfg.RPN.FIXED): (rpn_cls, rpn_reg) = (ret_dict['rpn_cls'], ret_dict['rpn_reg']) <DeepExtract> if isinstance(model, nn.DataParallel): rpn_cls_loss_func = model.module.rpn.rpn_cls_loss_func else: rpn_cls_loss_func = model.rpn.rpn_cls_loss_func rpn_cls_label_flat = rpn_cls_label.view(-1) rpn_cls_flat = rpn_cls.view(-1) fg_mask = rpn_cls_label_flat > 0 if cfg.RPN.LOSS_CLS == 'DiceLoss': rpn_loss_cls = rpn_cls_loss_func(rpn_cls, rpn_cls_label_flat) elif cfg.RPN.LOSS_CLS == 'SigmoidFocalLoss': rpn_cls_target = (rpn_cls_label_flat > 0).float() pos = (rpn_cls_label_flat > 0).float() neg = (rpn_cls_label_flat == 0).float() cls_weights = pos + neg pos_normalizer = pos.sum() cls_weights = cls_weights / torch.clamp(pos_normalizer, min=1.0) rpn_loss_cls = rpn_cls_loss_func(rpn_cls_flat, rpn_cls_target, cls_weights) rpn_loss_cls_pos = (rpn_loss_cls * pos).sum() rpn_loss_cls_neg = (rpn_loss_cls * neg).sum() rpn_loss_cls = rpn_loss_cls.sum() tb_dict['rpn_loss_cls_pos'] = rpn_loss_cls_pos.item() tb_dict['rpn_loss_cls_neg'] = rpn_loss_cls_neg.item() elif cfg.RPN.LOSS_CLS == 'BinaryCrossEntropy': weight = rpn_cls_flat.new(rpn_cls_flat.shape[0]).fill_(1.0) weight[fg_mask] = cfg.RPN.FG_WEIGHT rpn_cls_label_target = (rpn_cls_label_flat > 0).float() batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rpn_cls_flat), rpn_cls_label_target, weight=weight, reduction='none') cls_valid_mask = (rpn_cls_label_flat >= 0).float() rpn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0) else: raise NotImplementedError point_num = rpn_reg.size(0) * rpn_reg.size(1) fg_sum = fg_mask.long().sum().item() if fg_sum != 0: (loss_loc, loss_angle, loss_size, reg_loss_dict) = loss_utils.get_reg_loss(rpn_reg.view(point_num, -1)[fg_mask], rpn_reg_label.view(point_num, 7)[fg_mask], loc_scope=cfg.RPN.LOC_SCOPE, loc_bin_size=cfg.RPN.LOC_BIN_SIZE, num_head_bin=cfg.RPN.NUM_HEAD_BIN, anchor_size=MEAN_SIZE, get_xz_fine=cfg.RPN.LOC_XZ_FINE, get_y_by_bin=False, get_ry_fine=False) loss_size = 3 * loss_size rpn_loss_reg = loss_loc + loss_angle + loss_size else: loss_loc = loss_angle = loss_size = rpn_loss_reg = rpn_loss_cls * 0 rpn_loss = rpn_loss_cls * cfg.RPN.LOSS_WEIGHT[0] + rpn_loss_reg * cfg.RPN.LOSS_WEIGHT[1] tb_dict.update({'rpn_loss_cls': rpn_loss_cls.item(), 'rpn_loss_reg': rpn_loss_reg.item(), 'rpn_loss': rpn_loss.item(), 'rpn_fg_sum': fg_sum, 'rpn_loss_loc': loss_loc.item(), 'rpn_loss_angle': loss_angle.item(), 'rpn_loss_size': loss_size.item()}) rpn_loss = 
rpn_loss </DeepExtract> loss += rpn_loss disp_dict['rpn_loss'] = rpn_loss.item() if cfg.RCNN.ENABLED: <DeepExtract> (rcnn_cls, rcnn_reg) = (ret_dict['rcnn_cls'], ret_dict['rcnn_reg']) cls_label = ret_dict['cls_label'].float() reg_valid_mask = ret_dict['reg_valid_mask'] roi_boxes3d = ret_dict['roi_boxes3d'] roi_size = roi_boxes3d[:, 3:6] gt_boxes3d_ct = ret_dict['gt_of_rois'] pts_input = ret_dict['pts_input'] if isinstance(model, nn.DataParallel): cls_loss_func = model.module.rcnn_net.cls_loss_func else: cls_loss_func = model.rcnn_net.cls_loss_func cls_label_flat = cls_label.view(-1) if cfg.RCNN.LOSS_CLS == 'SigmoidFocalLoss': rcnn_cls_flat = rcnn_cls.view(-1) cls_target = (cls_label_flat > 0).float() pos = (cls_label_flat > 0).float() neg = (cls_label_flat == 0).float() cls_weights = pos + neg pos_normalizer = pos.sum() cls_weights = cls_weights / torch.clamp(pos_normalizer, min=1.0) rcnn_loss_cls = cls_loss_func(rcnn_cls_flat, cls_target, cls_weights) rcnn_loss_cls_pos = (rcnn_loss_cls * pos).sum() rcnn_loss_cls_neg = (rcnn_loss_cls * neg).sum() rcnn_loss_cls = rcnn_loss_cls.sum() tb_dict['rpn_loss_cls_pos'] = rcnn_loss_cls_pos.item() tb_dict['rpn_loss_cls_neg'] = rcnn_loss_cls_neg.item() elif cfg.RCNN.LOSS_CLS == 'BinaryCrossEntropy': rcnn_cls_flat = rcnn_cls.view(-1) batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat), cls_label, reduction='none') cls_valid_mask = (cls_label_flat >= 0).float() rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0) elif cfg.TRAIN.LOSS_CLS == 'CrossEntropy': rcnn_cls_reshape = rcnn_cls.view(rcnn_cls.shape[0], -1) cls_target = cls_label_flat.long() cls_valid_mask = (cls_label_flat >= 0).float() batch_loss_cls = cls_loss_func(rcnn_cls_reshape, cls_target) normalizer = torch.clamp(cls_valid_mask.sum(), min=1.0) rcnn_loss_cls = (batch_loss_cls.mean(dim=1) * cls_valid_mask).sum() / normalizer else: raise NotImplementedError batch_size = pts_input.shape[0] fg_mask = reg_valid_mask > 0 fg_sum = fg_mask.long().sum().item() if fg_sum != 0: all_anchor_size = roi_size anchor_size = all_anchor_size[fg_mask] if cfg.RCNN.SIZE_RES_ON_ROI else MEAN_SIZE (loss_loc, loss_angle, loss_size, reg_loss_dict) = loss_utils.get_reg_loss(rcnn_reg.view(batch_size, -1)[fg_mask], gt_boxes3d_ct.view(batch_size, 7)[fg_mask], loc_scope=cfg.RCNN.LOC_SCOPE, loc_bin_size=cfg.RCNN.LOC_BIN_SIZE, num_head_bin=cfg.RCNN.NUM_HEAD_BIN, anchor_size=anchor_size, get_xz_fine=True, get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN, loc_y_scope=cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE, get_ry_fine=True) loss_size = 3 * loss_size rcnn_loss_reg = loss_loc + loss_angle + loss_size tb_dict.update(reg_loss_dict) else: loss_loc = loss_angle = loss_size = rcnn_loss_reg = rcnn_loss_cls * 0 rcnn_loss = rcnn_loss_cls + rcnn_loss_reg tb_dict['rcnn_loss_cls'] = rcnn_loss_cls.item() tb_dict['rcnn_loss_reg'] = rcnn_loss_reg.item() tb_dict['rcnn_loss'] = rcnn_loss.item() tb_dict['rcnn_loss_loc'] = loss_loc.item() tb_dict['rcnn_loss_angle'] = loss_angle.item() tb_dict['rcnn_loss_size'] = loss_size.item() tb_dict['rcnn_cls_fg'] = (cls_label > 0).sum().item() tb_dict['rcnn_cls_bg'] = (cls_label == 0).sum().item() tb_dict['rcnn_reg_fg'] = reg_valid_mask.sum().item() rcnn_loss = rcnn_loss </DeepExtract> disp_dict['reg_fg_sum'] = tb_dict['rcnn_reg_fg'] loss += rcnn_loss disp_dict['loss'] = loss.item() return ModelReturn(loss, tb_dict, disp_dict)
def model_fn(model, data): if cfg.RPN.ENABLED: (pts_rect, pts_features, pts_input) = (data['pts_rect'], data['pts_features'], data['pts_input']) gt_boxes3d = data['gt_boxes3d'] if not cfg.RPN.FIXED: (rpn_cls_label, rpn_reg_label) = (data['rpn_cls_label'], data['rpn_reg_label']) rpn_cls_label = torch.from_numpy(rpn_cls_label).cuda(non_blocking=True).long() rpn_reg_label = torch.from_numpy(rpn_reg_label).cuda(non_blocking=True).float() inputs = torch.from_numpy(pts_input).cuda(non_blocking=True).float() gt_boxes3d = torch.from_numpy(gt_boxes3d).cuda(non_blocking=True).float() input_data = {'pts_input': inputs, 'gt_boxes3d': gt_boxes3d} else: input_data = {} for (key, val) in data.items(): if key != 'sample_id': input_data[key] = torch.from_numpy(val).contiguous().cuda(non_blocking=True).float() if not cfg.RCNN.ROI_SAMPLE_JIT: pts_input = torch.cat((input_data['pts_input'], input_data['pts_features']), dim=-1) input_data['pts_input'] = pts_input ret_dict = model(input_data) tb_dict = {} disp_dict = {} loss = 0 if cfg.RPN.ENABLED and (not cfg.RPN.FIXED): (rpn_cls, rpn_reg) = (ret_dict['rpn_cls'], ret_dict['rpn_reg']) if isinstance(model, nn.DataParallel): rpn_cls_loss_func = model.module.rpn.rpn_cls_loss_func else: rpn_cls_loss_func = model.rpn.rpn_cls_loss_func rpn_cls_label_flat = rpn_cls_label.view(-1) rpn_cls_flat = rpn_cls.view(-1) fg_mask = rpn_cls_label_flat > 0 if cfg.RPN.LOSS_CLS == 'DiceLoss': rpn_loss_cls = rpn_cls_loss_func(rpn_cls, rpn_cls_label_flat) elif cfg.RPN.LOSS_CLS == 'SigmoidFocalLoss': rpn_cls_target = (rpn_cls_label_flat > 0).float() pos = (rpn_cls_label_flat > 0).float() neg = (rpn_cls_label_flat == 0).float() cls_weights = pos + neg pos_normalizer = pos.sum() cls_weights = cls_weights / torch.clamp(pos_normalizer, min=1.0) rpn_loss_cls = rpn_cls_loss_func(rpn_cls_flat, rpn_cls_target, cls_weights) rpn_loss_cls_pos = (rpn_loss_cls * pos).sum() rpn_loss_cls_neg = (rpn_loss_cls * neg).sum() rpn_loss_cls = rpn_loss_cls.sum() tb_dict['rpn_loss_cls_pos'] = rpn_loss_cls_pos.item() tb_dict['rpn_loss_cls_neg'] = rpn_loss_cls_neg.item() elif cfg.RPN.LOSS_CLS == 'BinaryCrossEntropy': weight = rpn_cls_flat.new(rpn_cls_flat.shape[0]).fill_(1.0) weight[fg_mask] = cfg.RPN.FG_WEIGHT rpn_cls_label_target = (rpn_cls_label_flat > 0).float() batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rpn_cls_flat), rpn_cls_label_target, weight=weight, reduction='none') cls_valid_mask = (rpn_cls_label_flat >= 0).float() rpn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0) else: raise NotImplementedError point_num = rpn_reg.size(0) * rpn_reg.size(1) fg_sum = fg_mask.long().sum().item() if fg_sum != 0: (loss_loc, loss_angle, loss_size, reg_loss_dict) = loss_utils.get_reg_loss(rpn_reg.view(point_num, -1)[fg_mask], rpn_reg_label.view(point_num, 7)[fg_mask], loc_scope=cfg.RPN.LOC_SCOPE, loc_bin_size=cfg.RPN.LOC_BIN_SIZE, num_head_bin=cfg.RPN.NUM_HEAD_BIN, anchor_size=MEAN_SIZE, get_xz_fine=cfg.RPN.LOC_XZ_FINE, get_y_by_bin=False, get_ry_fine=False) loss_size = 3 * loss_size rpn_loss_reg = loss_loc + loss_angle + loss_size else: loss_loc = loss_angle = loss_size = rpn_loss_reg = rpn_loss_cls * 0 rpn_loss = rpn_loss_cls * cfg.RPN.LOSS_WEIGHT[0] + rpn_loss_reg * cfg.RPN.LOSS_WEIGHT[1] tb_dict.update({'rpn_loss_cls': rpn_loss_cls.item(), 'rpn_loss_reg': rpn_loss_reg.item(), 'rpn_loss': rpn_loss.item(), 'rpn_fg_sum': fg_sum, 'rpn_loss_loc': loss_loc.item(), 'rpn_loss_angle': loss_angle.item(), 'rpn_loss_size': loss_size.item()}) rpn_loss = rpn_loss loss += 
rpn_loss disp_dict['rpn_loss'] = rpn_loss.item() if cfg.RCNN.ENABLED: (rcnn_cls, rcnn_reg) = (ret_dict['rcnn_cls'], ret_dict['rcnn_reg']) cls_label = ret_dict['cls_label'].float() reg_valid_mask = ret_dict['reg_valid_mask'] roi_boxes3d = ret_dict['roi_boxes3d'] roi_size = roi_boxes3d[:, 3:6] gt_boxes3d_ct = ret_dict['gt_of_rois'] pts_input = ret_dict['pts_input'] if isinstance(model, nn.DataParallel): cls_loss_func = model.module.rcnn_net.cls_loss_func else: cls_loss_func = model.rcnn_net.cls_loss_func cls_label_flat = cls_label.view(-1) if cfg.RCNN.LOSS_CLS == 'SigmoidFocalLoss': rcnn_cls_flat = rcnn_cls.view(-1) cls_target = (cls_label_flat > 0).float() pos = (cls_label_flat > 0).float() neg = (cls_label_flat == 0).float() cls_weights = pos + neg pos_normalizer = pos.sum() cls_weights = cls_weights / torch.clamp(pos_normalizer, min=1.0) rcnn_loss_cls = cls_loss_func(rcnn_cls_flat, cls_target, cls_weights) rcnn_loss_cls_pos = (rcnn_loss_cls * pos).sum() rcnn_loss_cls_neg = (rcnn_loss_cls * neg).sum() rcnn_loss_cls = rcnn_loss_cls.sum() tb_dict['rpn_loss_cls_pos'] = rcnn_loss_cls_pos.item() tb_dict['rpn_loss_cls_neg'] = rcnn_loss_cls_neg.item() elif cfg.RCNN.LOSS_CLS == 'BinaryCrossEntropy': rcnn_cls_flat = rcnn_cls.view(-1) batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat), cls_label, reduction='none') cls_valid_mask = (cls_label_flat >= 0).float() rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0) elif cfg.TRAIN.LOSS_CLS == 'CrossEntropy': rcnn_cls_reshape = rcnn_cls.view(rcnn_cls.shape[0], -1) cls_target = cls_label_flat.long() cls_valid_mask = (cls_label_flat >= 0).float() batch_loss_cls = cls_loss_func(rcnn_cls_reshape, cls_target) normalizer = torch.clamp(cls_valid_mask.sum(), min=1.0) rcnn_loss_cls = (batch_loss_cls.mean(dim=1) * cls_valid_mask).sum() / normalizer else: raise NotImplementedError batch_size = pts_input.shape[0] fg_mask = reg_valid_mask > 0 fg_sum = fg_mask.long().sum().item() if fg_sum != 0: all_anchor_size = roi_size anchor_size = all_anchor_size[fg_mask] if cfg.RCNN.SIZE_RES_ON_ROI else MEAN_SIZE (loss_loc, loss_angle, loss_size, reg_loss_dict) = loss_utils.get_reg_loss(rcnn_reg.view(batch_size, -1)[fg_mask], gt_boxes3d_ct.view(batch_size, 7)[fg_mask], loc_scope=cfg.RCNN.LOC_SCOPE, loc_bin_size=cfg.RCNN.LOC_BIN_SIZE, num_head_bin=cfg.RCNN.NUM_HEAD_BIN, anchor_size=anchor_size, get_xz_fine=True, get_y_by_bin=cfg.RCNN.LOC_Y_BY_BIN, loc_y_scope=cfg.RCNN.LOC_Y_SCOPE, loc_y_bin_size=cfg.RCNN.LOC_Y_BIN_SIZE, get_ry_fine=True) loss_size = 3 * loss_size rcnn_loss_reg = loss_loc + loss_angle + loss_size tb_dict.update(reg_loss_dict) else: loss_loc = loss_angle = loss_size = rcnn_loss_reg = rcnn_loss_cls * 0 rcnn_loss = rcnn_loss_cls + rcnn_loss_reg tb_dict['rcnn_loss_cls'] = rcnn_loss_cls.item() tb_dict['rcnn_loss_reg'] = rcnn_loss_reg.item() tb_dict['rcnn_loss'] = rcnn_loss.item() tb_dict['rcnn_loss_loc'] = loss_loc.item() tb_dict['rcnn_loss_angle'] = loss_angle.item() tb_dict['rcnn_loss_size'] = loss_size.item() tb_dict['rcnn_cls_fg'] = (cls_label > 0).sum().item() tb_dict['rcnn_cls_bg'] = (cls_label == 0).sum().item() tb_dict['rcnn_reg_fg'] = reg_valid_mask.sum().item() rcnn_loss = rcnn_loss disp_dict['reg_fg_sum'] = tb_dict['rcnn_reg_fg'] loss += rcnn_loss disp_dict['loss'] = loss.item() return ModelReturn(loss, tb_dict, disp_dict)
3D_adapt_auto_driving
positive
def fetch_balance(self, public_key): <DeepExtract> utxos = [tx_out for tx_out in self.utxo_set.values() if tx_out.public_key == public_key] </DeepExtract> return sum([tx_out.amount for tx_out in utxos])
def fetch_balance(self, public_key): utxos = [tx_out for tx_out in self.utxo_set.values() if tx_out.public_key == public_key] return sum([tx_out.amount for tx_out in utxos])
digital-cash
positive
def __init__(self, *args, **kwargs): self._shared_size = mp.Value(ctypes.c_long, 0) ObsDictRelabelingBuffer.__init__(self, *args, **kwargs) self._mp_array_info = {} self._shared_obs_info = {} self._shared_next_obs_info = {} for (obs_key, obs_arr) in self._obs.items(): ctype = ctypes.c_double if obs_arr.dtype == np.uint8: ctype = ctypes.c_uint8 self._shared_obs_info[obs_key] = (mp.Array(ctype, obs_arr.size), obs_arr.dtype, obs_arr.shape) self._shared_next_obs_info[obs_key] = (mp.Array(ctype, obs_arr.size), obs_arr.dtype, obs_arr.shape) <DeepExtract> self._obs[obs_key] = np.frombuffer(*self._shared_obs_info[obs_key].get_obj(), dtype=np_dtype).reshape(shape) </DeepExtract> <DeepExtract> self._next_obs[obs_key] = np.frombuffer(*self._shared_next_obs_info[obs_key].get_obj(), dtype=np_dtype).reshape(shape) </DeepExtract> <DeepExtract> assert hasattr(self, '_actions'), '_actions' arr = getattr(self, '_actions') ctype = ctypes.c_double if arr.dtype == np.uint8: ctype = ctypes.c_uint8 self._mp_array_info['_actions'] = (mp.Array(ctype, arr.size), arr.dtype, arr.shape) setattr(self, '_actions', to_np(*self._mp_array_info['_actions'])) </DeepExtract> <DeepExtract> assert hasattr(self, '_terminals'), '_terminals' arr = getattr(self, '_terminals') ctype = ctypes.c_double if arr.dtype == np.uint8: ctype = ctypes.c_uint8 self._mp_array_info['_terminals'] = (mp.Array(ctype, arr.size), arr.dtype, arr.shape) setattr(self, '_terminals', to_np(*self._mp_array_info['_terminals'])) </DeepExtract>
def __init__(self, *args, **kwargs): self._shared_size = mp.Value(ctypes.c_long, 0) ObsDictRelabelingBuffer.__init__(self, *args, **kwargs) self._mp_array_info = {} self._shared_obs_info = {} self._shared_next_obs_info = {} for (obs_key, obs_arr) in self._obs.items(): ctype = ctypes.c_double if obs_arr.dtype == np.uint8: ctype = ctypes.c_uint8 self._shared_obs_info[obs_key] = (mp.Array(ctype, obs_arr.size), obs_arr.dtype, obs_arr.shape) self._shared_next_obs_info[obs_key] = (mp.Array(ctype, obs_arr.size), obs_arr.dtype, obs_arr.shape) self._obs[obs_key] = np.frombuffer(*self._shared_obs_info[obs_key].get_obj(), dtype=np_dtype).reshape(shape) self._next_obs[obs_key] = np.frombuffer(*self._shared_next_obs_info[obs_key].get_obj(), dtype=np_dtype).reshape(shape) assert hasattr(self, '_actions'), '_actions' arr = getattr(self, '_actions') ctype = ctypes.c_double if arr.dtype == np.uint8: ctype = ctypes.c_uint8 self._mp_array_info['_actions'] = (mp.Array(ctype, arr.size), arr.dtype, arr.shape) setattr(self, '_actions', to_np(*self._mp_array_info['_actions'])) assert hasattr(self, '_terminals'), '_terminals' arr = getattr(self, '_terminals') ctype = ctypes.c_double if arr.dtype == np.uint8: ctype = ctypes.c_uint8 self._mp_array_info['_terminals'] = (mp.Array(ctype, arr.size), arr.dtype, arr.shape) setattr(self, '_terminals', to_np(*self._mp_array_info['_terminals']))
DoorGym
positive
def tree_search(self, show_progress: bool=False) -> float: """ Perform the actual tree search :param show_progress: if True, shows a progress bar :return: the time past in seconds """ if not self.tree: <DeepExtract> if not self.target_mol: raise ValueError('No target molecule set') try: self.target_mol.sanitize() except MoleculeException: raise ValueError('Target molecule unsanitizable') self.stock.reset_exclusion_list() if self.config.exclude_target_from_stock and self.target_mol in self.stock: self.stock.exclude(self.target_mol) self._logger.debug('Excluding the target compound from the stock') self._setup_search_tree() self.analysis = None self.routes = RouteCollection([]) </DeepExtract> assert self.tree is not None self.search_stats = {'returned_first': False, 'iterations': 0} time0 = time.time() i = 1 self._logger.debug('Starting search') time_past = time.time() - time0 if show_progress: pbar = tqdm(total=self.config.iteration_limit, leave=False) while time_past < self.config.time_limit and i <= self.config.iteration_limit: if show_progress: pbar.update(1) self.search_stats['iterations'] += 1 try: is_solved = self.tree.one_iteration() except StopIteration: break if is_solved and 'first_solution_time' not in self.search_stats: self.search_stats['first_solution_time'] = time.time() - time0 self.search_stats['first_solution_iteration'] = i if self.config.return_first and is_solved: self._logger.debug('Found first solved route') self.search_stats['returned_first'] = True break i = i + 1 time_past = time.time() - time0 if show_progress: pbar.close() time_past = time.time() - time0 self._logger.debug('Search completed') self.search_stats['time'] = time_past return time_past
def tree_search(self, show_progress: bool=False) -> float: """ Perform the actual tree search :param show_progress: if True, shows a progress bar :return: the time past in seconds """ if not self.tree: if not self.target_mol: raise ValueError('No target molecule set') try: self.target_mol.sanitize() except MoleculeException: raise ValueError('Target molecule unsanitizable') self.stock.reset_exclusion_list() if self.config.exclude_target_from_stock and self.target_mol in self.stock: self.stock.exclude(self.target_mol) self._logger.debug('Excluding the target compound from the stock') self._setup_search_tree() self.analysis = None self.routes = RouteCollection([]) assert self.tree is not None self.search_stats = {'returned_first': False, 'iterations': 0} time0 = time.time() i = 1 self._logger.debug('Starting search') time_past = time.time() - time0 if show_progress: pbar = tqdm(total=self.config.iteration_limit, leave=False) while time_past < self.config.time_limit and i <= self.config.iteration_limit: if show_progress: pbar.update(1) self.search_stats['iterations'] += 1 try: is_solved = self.tree.one_iteration() except StopIteration: break if is_solved and 'first_solution_time' not in self.search_stats: self.search_stats['first_solution_time'] = time.time() - time0 self.search_stats['first_solution_iteration'] = i if self.config.return_first and is_solved: self._logger.debug('Found first solved route') self.search_stats['returned_first'] = True break i = i + 1 time_past = time.time() - time0 if show_progress: pbar.close() time_past = time.time() - time0 self._logger.debug('Search completed') self.search_stats['time'] = time_past return time_past
aizynthfinder
positive
@registry.register_check('ecr') def ecr_latest_image_vuln_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: """[ECR.4] The latest image in an ECR Repository should not have any vulnerabilities""" <DeepExtract> response = cache.get('describe_repositories') if response: response = response cache['describe_repositories'] = ecr.describe_repositories(maxResults=1000) response = cache['describe_repositories'] </DeepExtract> myRepos = response['repositories'] for repo in myRepos: repoArn = str(repo['repositoryArn']) repoName = str(repo['repositoryName']) scanningConfig = str(repo['imageScanningConfiguration']['scanOnPush']) if scanningConfig == 'True': try: response = ecr.describe_images(repositoryName=repoName, filter={'tagStatus': 'TAGGED'}, maxResults=1000) for images in response['imageDetails']: imageDigest = str(images['imageDigest']) imageTag = str(images['imageTags'][0]) imageVulnCheck = str(images['imageScanFindingsSummary']['findingSeverityCounts']) iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat() if imageVulnCheck != '{}': vulnDeepLink = 'https://console.aws.amazon.com/ecr/repositories/' + repoName + '/image/' + imageDigest + '/scan-results?region=' + awsRegion finding = {'SchemaVersion': '2018-10-08', 'Id': repoName + '/' + imageDigest + '/ecr-latest-image-vuln-check', 'ProductArn': f'arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default', 'GeneratorId': imageDigest, 'AwsAccountId': awsAccountId, 'Types': ['Software and Configuration Checks/Vulnerabilities/CVE', 'Software and Configuration Checks/AWS Security Best Practices'], 'FirstObservedAt': iso8601Time, 'CreatedAt': iso8601Time, 'UpdatedAt': iso8601Time, 'Severity': {'Label': 'MEDIUM'}, 'Confidence': 99, 'Title': '[ECR.4] The latest image in an ECR Repository should not have any vulnerabilities', 'Description': 'The latest image in the ECR repository ' + repoName + ' has the following vulnerabilities reported: ' + imageVulnCheck + '. 
Refer to the SourceUrl or Remediation.Recommendation.Url to review the specific vulnerabilities and remediation information from ECR.', 'Remediation': {'Recommendation': {'Text': 'Click here to navigate to the ECR Vulnerability console for this image', 'Url': vulnDeepLink}}, 'SourceUrl': vulnDeepLink, 'ProductFields': {'Product Name': 'ElectricEye'}, 'Resources': [{'Type': 'Container', 'Id': repoName + ':' + imageTag, 'Partition': awsPartition, 'Region': awsRegion, 'Details': {'Container': {'Name': repoName + ':' + imageTag, 'ImageId': imageDigest}, 'Other': {'RepositoryName': repoName, 'RepositoryArn': repoArn}}}], 'Compliance': {'Status': 'FAILED', 'RelatedRequirements': ['NIST CSF DE.CM-8', 'NIST SP 800-53 RA-5', 'AICPA TSC CC7.1', 'ISO 27001:2013 A.12.6.1']}, 'Workflow': {'Status': 'NEW'}, 'RecordState': 'ACTIVE'} yield finding else: finding = {'SchemaVersion': '2018-10-08', 'Id': repoName + '/' + imageDigest + '/ecr-latest-image-vuln-check', 'ProductArn': f'arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default', 'GeneratorId': imageDigest, 'AwsAccountId': awsAccountId, 'Types': ['Software and Configuration Checks/Vulnerabilities/CVE', 'Software and Configuration Checks/AWS Security Best Practices'], 'FirstObservedAt': iso8601Time, 'CreatedAt': iso8601Time, 'UpdatedAt': iso8601Time, 'Severity': {'Label': 'INFORMATIONAL'}, 'Confidence': 99, 'Title': '[ECR.4] The latest image in an ECR Repository should not have any vulnerabilities', 'Description': 'The latest image in the ECR repository ' + repoName + ' does not have any vulnerabilities reported.', 'ProductFields': {'Product Name': 'ElectricEye'}, 'Resources': [{'Type': 'Container', 'Id': repoName + ':' + imageTag, 'Partition': awsPartition, 'Region': awsRegion, 'Details': {'Container': {'Name': repoName + ':' + imageTag, 'ImageId': imageDigest}, 'Other': {'RepositoryName': repoName, 'RepositoryArn': repoArn}}}], 'Compliance': {'Status': 'PASSED', 'RelatedRequirements': ['NIST CSF DE.CM-8', 'NIST SP 800-53 RA-5', 'AICPA TSC CC7.1', 'ISO 27001:2013 A.12.6.1']}, 'Workflow': {'Status': 'RESOLVED'}, 'RecordState': 'ARCHIVED'} yield finding except Exception as e: print(e) else: pass
@registry.register_check('ecr') def ecr_latest_image_vuln_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: """[ECR.4] The latest image in an ECR Repository should not have any vulnerabilities""" response = cache.get('describe_repositories') if response: response = response cache['describe_repositories'] = ecr.describe_repositories(maxResults=1000) response = cache['describe_repositories'] myRepos = response['repositories'] for repo in myRepos: repoArn = str(repo['repositoryArn']) repoName = str(repo['repositoryName']) scanningConfig = str(repo['imageScanningConfiguration']['scanOnPush']) if scanningConfig == 'True': try: response = ecr.describe_images(repositoryName=repoName, filter={'tagStatus': 'TAGGED'}, maxResults=1000) for images in response['imageDetails']: imageDigest = str(images['imageDigest']) imageTag = str(images['imageTags'][0]) imageVulnCheck = str(images['imageScanFindingsSummary']['findingSeverityCounts']) iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat() if imageVulnCheck != '{}': vulnDeepLink = 'https://console.aws.amazon.com/ecr/repositories/' + repoName + '/image/' + imageDigest + '/scan-results?region=' + awsRegion finding = {'SchemaVersion': '2018-10-08', 'Id': repoName + '/' + imageDigest + '/ecr-latest-image-vuln-check', 'ProductArn': f'arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default', 'GeneratorId': imageDigest, 'AwsAccountId': awsAccountId, 'Types': ['Software and Configuration Checks/Vulnerabilities/CVE', 'Software and Configuration Checks/AWS Security Best Practices'], 'FirstObservedAt': iso8601Time, 'CreatedAt': iso8601Time, 'UpdatedAt': iso8601Time, 'Severity': {'Label': 'MEDIUM'}, 'Confidence': 99, 'Title': '[ECR.4] The latest image in an ECR Repository should not have any vulnerabilities', 'Description': 'The latest image in the ECR repository ' + repoName + ' has the following vulnerabilities reported: ' + imageVulnCheck + '. 
Refer to the SourceUrl or Remediation.Recommendation.Url to review the specific vulnerabilities and remediation information from ECR.', 'Remediation': {'Recommendation': {'Text': 'Click here to navigate to the ECR Vulnerability console for this image', 'Url': vulnDeepLink}}, 'SourceUrl': vulnDeepLink, 'ProductFields': {'Product Name': 'ElectricEye'}, 'Resources': [{'Type': 'Container', 'Id': repoName + ':' + imageTag, 'Partition': awsPartition, 'Region': awsRegion, 'Details': {'Container': {'Name': repoName + ':' + imageTag, 'ImageId': imageDigest}, 'Other': {'RepositoryName': repoName, 'RepositoryArn': repoArn}}}], 'Compliance': {'Status': 'FAILED', 'RelatedRequirements': ['NIST CSF DE.CM-8', 'NIST SP 800-53 RA-5', 'AICPA TSC CC7.1', 'ISO 27001:2013 A.12.6.1']}, 'Workflow': {'Status': 'NEW'}, 'RecordState': 'ACTIVE'} yield finding else: finding = {'SchemaVersion': '2018-10-08', 'Id': repoName + '/' + imageDigest + '/ecr-latest-image-vuln-check', 'ProductArn': f'arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default', 'GeneratorId': imageDigest, 'AwsAccountId': awsAccountId, 'Types': ['Software and Configuration Checks/Vulnerabilities/CVE', 'Software and Configuration Checks/AWS Security Best Practices'], 'FirstObservedAt': iso8601Time, 'CreatedAt': iso8601Time, 'UpdatedAt': iso8601Time, 'Severity': {'Label': 'INFORMATIONAL'}, 'Confidence': 99, 'Title': '[ECR.4] The latest image in an ECR Repository should not have any vulnerabilities', 'Description': 'The latest image in the ECR repository ' + repoName + ' does not have any vulnerabilities reported.', 'ProductFields': {'Product Name': 'ElectricEye'}, 'Resources': [{'Type': 'Container', 'Id': repoName + ':' + imageTag, 'Partition': awsPartition, 'Region': awsRegion, 'Details': {'Container': {'Name': repoName + ':' + imageTag, 'ImageId': imageDigest}, 'Other': {'RepositoryName': repoName, 'RepositoryArn': repoArn}}}], 'Compliance': {'Status': 'PASSED', 'RelatedRequirements': ['NIST CSF DE.CM-8', 'NIST SP 800-53 RA-5', 'AICPA TSC CC7.1', 'ISO 27001:2013 A.12.6.1']}, 'Workflow': {'Status': 'RESOLVED'}, 'RecordState': 'ARCHIVED'} yield finding except Exception as e: print(e) else: pass
ElectricEye
positive
@classmethod def _decode_typed_data(cls, data: Datum, *, python_type: Union[type, Tuple[type, ...]], context: str='data') -> Any: if data is None: return None data_type = data.type if data_type == 'json': result = json.loads(data.value) elif data_type == 'string': result = data.value elif data_type == 'int': result = data.value elif data_type == 'double': result = data.value elif data_type == 'collection_bytes': result = data.value elif data_type == 'collection_string': result = data.value elif data_type == 'collection_sint64': result = data.value elif data_type is None: return None else: raise ValueError(f'unsupported type of {context}: {data_type}') if not isinstance(result, python_type): if isinstance(python_type, (tuple, list, dict)): raise ValueError(f"unexpected value type in {context}: {type(result).__name__}, expected one of: {', '.join((t.__name__ for t in python_type))}") else: try: <DeepExtract> result = type(self.python_value) </DeepExtract> except (TypeError, ValueError) as e: raise ValueError(f'cannot convert value of {context} into {python_type.__name__}: {e}') from None return result
@classmethod def _decode_typed_data(cls, data: Datum, *, python_type: Union[type, Tuple[type, ...]], context: str='data') -> Any: if data is None: return None data_type = data.type if data_type == 'json': result = json.loads(data.value) elif data_type == 'string': result = data.value elif data_type == 'int': result = data.value elif data_type == 'double': result = data.value elif data_type == 'collection_bytes': result = data.value elif data_type == 'collection_string': result = data.value elif data_type == 'collection_sint64': result = data.value elif data_type is None: return None else: raise ValueError(f'unsupported type of {context}: {data_type}') if not isinstance(result, python_type): if isinstance(python_type, (tuple, list, dict)): raise ValueError(f"unexpected value type in {context}: {type(result).__name__}, expected one of: {', '.join((t.__name__ for t in python_type))}") else: try: result = type(self.python_value) except (TypeError, ValueError) as e: raise ValueError(f'cannot convert value of {context} into {python_type.__name__}: {e}') from None return result
azure-functions-python-worker
positive
def main(args, parser): ctglens = dict() with open(args.fai, 'r') as fai: for l in fai: s = l.rstrip().split() ctglens[s[0]] = s[1] winlist = defaultdict(list) ctgoffset = dict() lastbp = 0 for c in ctglens: ctgoffset[c] = lastbp + 100 for i in range(0, ctglens[c], args.binsize): winlist[c].append(window(c, i, i + args.binsize)) lastbp += ctglens[c] with pysam.AlignmentFile(args.bam, 'rb') as bamfile: for (c, w) in winlist.items(): for (i, win) in enumerate(w): count = 0 for s in bamfile.fetch(c, win.start, win.end): if s.is_secondary: continue count += 1 <DeepExtract> winlist[c].count[haplotype] = count winlist = winlist </DeepExtract> hapset = set() with open(args.human, 'r') as human: human.readline() for l in human: s = l.rstrip().split() for (i, win) in enumerate(winlist[s[2]]): if int(s[3]) < win.end and int(s[3]) >= win.start: <DeepExtract> winlist[s[2]].count[s[4]] = int(s[6]) winlist = winlist </DeepExtract> print(f'Updating window: {s[2]} {win.start} {win.end} to {s[6]} for Hap {s[4]}') hapset.add(s[4]) raw = defaultdict(list) bars = list() for (c, w) in winlist.items(): bars.append([ctgoffset[c], ctglens[c]]) for win in winlist: for h in hapset: raw['contig'].append(c) raw['start'].append(win.start + ctgoffset[c]) raw['end'].append(win.end + ctgoffset[c]) raw['hap'].append(h) raw['count'].append(win.getCount(h)) df = pandas.DataFrame(raw) df.to_csv(args.output + '.wins', sep='\t', header=True) fig = plt.figure(figsize=(6, 8)) ax = df[['start', 'hap', 'count']].plot.area(x='start', y='count', colormap='viridis') ax.add_collection(BrokenBarHCollection(bars, [-1, 1], facecolors=plt.get_cmap('tab20'))) ax.axis('tight') plt.savefig(args.output + '.pdf')
def main(args, parser): ctglens = dict() with open(args.fai, 'r') as fai: for l in fai: s = l.rstrip().split() ctglens[s[0]] = s[1] winlist = defaultdict(list) ctgoffset = dict() lastbp = 0 for c in ctglens: ctgoffset[c] = lastbp + 100 for i in range(0, ctglens[c], args.binsize): winlist[c].append(window(c, i, i + args.binsize)) lastbp += ctglens[c] with pysam.AlignmentFile(args.bam, 'rb') as bamfile: for (c, w) in winlist.items(): for (i, win) in enumerate(w): count = 0 for s in bamfile.fetch(c, win.start, win.end): if s.is_secondary: continue count += 1 winlist[c].count[haplotype] = count winlist = winlist hapset = set() with open(args.human, 'r') as human: human.readline() for l in human: s = l.rstrip().split() for (i, win) in enumerate(winlist[s[2]]): if int(s[3]) < win.end and int(s[3]) >= win.start: winlist[s[2]].count[s[4]] = int(s[6]) winlist = winlist print(f'Updating window: {s[2]} {win.start} {win.end} to {s[6]} for Hap {s[4]}') hapset.add(s[4]) raw = defaultdict(list) bars = list() for (c, w) in winlist.items(): bars.append([ctgoffset[c], ctglens[c]]) for win in winlist: for h in hapset: raw['contig'].append(c) raw['start'].append(win.start + ctgoffset[c]) raw['end'].append(win.end + ctgoffset[c]) raw['hap'].append(h) raw['count'].append(win.getCount(h)) df = pandas.DataFrame(raw) df.to_csv(args.output + '.wins', sep='\t', header=True) fig = plt.figure(figsize=(6, 8)) ax = df[['start', 'hap', 'count']].plot.area(x='start', y='count', colormap='viridis') ax.add_collection(BrokenBarHCollection(bars, [-1, 1], facecolors=plt.get_cmap('tab20'))) ax.axis('tight') plt.savefig(args.output + '.pdf')
cDNA_Cupcake
positive
def _union_lcs(evaluated_sentences, reference_sentence): """ Returns LCS_u(r_i, C) which is the LCS score of the union longest common subsequence between reference sentence ri and candidate summary C. For example if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8 and c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1 is "w1 w2" and the longest common subsequence of r_i and c2 is "w1 w3 w5". The union longest common subsequence of r_i, c1, and c2 is "w1 w2 w3 w5" and LCS_u(r_i, C) = 4/5. Args: evaluated_sentences: The sentences that have been picked by the summarizer reference_sentence: One of the sentences in the reference summaries Returns: float: LCS_u(r_i, C) ValueError: Raises exception if a param has len <= 0 """ if len(evaluated_sentences) <= 0: raise ValueError('Collections must contain at least 1 sentence.') lcs_union = set() <DeepExtract> reference_words = list(itertools.chain(*[_.split(' ') for _ in [reference_sentence]])) </DeepExtract> combined_lcs_length = 0 for eval_s in evaluated_sentences: <DeepExtract> evaluated_words = list(itertools.chain(*[_.split(' ') for _ in [eval_s]])) </DeepExtract> lcs = set(_recon_lcs(reference_words, evaluated_words)) combined_lcs_length += len(lcs) lcs_union = lcs_union.union(lcs) union_lcs_count = len(lcs_union) union_lcs_value = union_lcs_count / combined_lcs_length return union_lcs_value
def _union_lcs(evaluated_sentences, reference_sentence): """ Returns LCS_u(r_i, C) which is the LCS score of the union longest common subsequence between reference sentence ri and candidate summary C. For example if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8 and c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1 is "w1 w2" and the longest common subsequence of r_i and c2 is "w1 w3 w5". The union longest common subsequence of r_i, c1, and c2 is "w1 w2 w3 w5" and LCS_u(r_i, C) = 4/5. Args: evaluated_sentences: The sentences that have been picked by the summarizer reference_sentence: One of the sentences in the reference summaries Returns: float: LCS_u(r_i, C) ValueError: Raises exception if a param has len <= 0 """ if len(evaluated_sentences) <= 0: raise ValueError('Collections must contain at least 1 sentence.') lcs_union = set() reference_words = list(itertools.chain(*[_.split(' ') for _ in [reference_sentence]])) combined_lcs_length = 0 for eval_s in evaluated_sentences: evaluated_words = list(itertools.chain(*[_.split(' ') for _ in [eval_s]])) lcs = set(_recon_lcs(reference_words, evaluated_words)) combined_lcs_length += len(lcs) lcs_union = lcs_union.union(lcs) union_lcs_count = len(lcs_union) union_lcs_value = union_lcs_count / combined_lcs_length return union_lcs_value
DAPPLE
positive
@pytest.mark.parametrize('address', invalid_email_addresses, ids=itertools.count()) def test_rcpt_invalid_address(self, client, address): <DeepExtract> (code, mesg) = client.ehlo(domain) assert code == 250 return mesg </DeepExtract> resp = client.docmd('MAIL FROM: <anne@example.com>') assert resp == S.S250_OK resp = client.docmd(f'RCPT TO: {address}') assert resp == S.S553_MALFORMED
@pytest.mark.parametrize('address', invalid_email_addresses, ids=itertools.count()) def test_rcpt_invalid_address(self, client, address): (code, mesg) = client.ehlo(domain) assert code == 250 return mesg resp = client.docmd('MAIL FROM: <anne@example.com>') assert resp == S.S250_OK resp = client.docmd(f'RCPT TO: {address}') assert resp == S.S553_MALFORMED
aiosmtpd
positive
def load_py2string(self): <DeepExtract> length = self._read_int4() as_bytes = self.stream.read(length) as_bytes = as_bytes </DeepExtract> if self.py2str_as_py3str: s = as_bytes.decode('latin-1') else: s = as_bytes self.stack.append(s)
def load_py2string(self): length = self._read_int4() as_bytes = self.stream.read(length) as_bytes = as_bytes if self.py2str_as_py3str: s = as_bytes.decode('latin-1') else: s = as_bytes self.stack.append(s)
execnet
positive
def csvexport(sliceno, filename, labelsonfirstline): d = datasets.source[0] if not options.labels: options.labels = sorted(d.columns) if options.chain_source: if jobs.previous: prev_source = jobs.previous.params.datasets.source assert len(datasets.source) == len(prev_source) else: prev_source = [None] * len(datasets.source) lst = [] for (src, stop) in zip(datasets.source, prev_source): lst.extend(src.chain(stop_ds=stop)) datasets.source = lst if options.filename.lower().endswith('.gz') or '.gz.' in options.filename.lower(): open_func = partial(gzip.open, compresslevel=options.compression) else: open_func = open if PY2: open_func = partial(open_func, mode='wb') else: open_func = partial(open_func, mode='xt', encoding='utf-8') if options.none_as: if isinstance(options.none_as, dict): bad_none = set(options.none_as) - set(options.labels) assert not bad_none, 'Unknown labels in none_as: %r' % (bad_none,) else: assert isinstance(options.none_as, str), 'What did you pass as none_as?' def resolve_none(label, col): d = options.none_as or {} if col.type in ('json', 'pickle'): if isinstance(options.none_as, str): return options.none_as return d.get(label) elif col.none_support: if isinstance(options.none_as, str): return options.none_as return d.get(label, 'None') q = options.quote_fields qq = q + q sep = options.separator def quote_always(v): return q + v.replace(q, qq) + q if q in '"\'': def quote_if_needed(v): if v and (v[0] in '"\'' or v[-1] in '"\'' or sep in v): return q + v.replace(q, qq) + q else: return v else: def quote_if_needed(v): if v.startswith(q) or v.endswith(q) or sep in v: return q + v.replace(q, qq) + q else: return v if not q: quote_func = str elif options.lazy_quotes and sep: quote_func = quote_if_needed else: quote_func = quote_always def needs_quoting(typ): if not q: return False if not options.lazy_quotes: return True if typ in ('int32', 'int64', 'bits32', 'bits64'): possible = '0123456789-' elif typ in ('float32', 'float64', 'number'): possible = '0123456789-+einfa.' else: possible = False if possible: q_s = set(q) sep_s = set(sep) possible_s = set(possible) if q_s - possible_s and sep_s - possible_s: return False return True def column_iterator(d, label, first): col = d.columns[label] f = format.get(col.type, str) it = d.iterate(sliceno, label, status_reporting=first) <DeepExtract> d = options.none_as or {} if col.type in ('json', 'pickle'): if isinstance(options.none_as, str): none_as = options.none_as none_as = d.get(label) elif col.none_support: if isinstance(options.none_as, str): none_as = options.none_as none_as = d.get(label, 'None') </DeepExtract> if none_as is not None: none_as = quote_func(none_as) if needs_quoting(col.type): if f: it = (none_as if v is None else quote_func(f(v)) for v in it) else: it = (none_as if v is None else quote_func(v) for v in it) elif f: it = (none_as if v is None else f(v) for v in it) else: it = (none_as if v is None else v for v in it) elif f: if needs_quoting(col.type): it = (quote_func(f(v)) for v in it) else: it = imap(f, it) elif needs_quoting(col.type): it = imap(quote_func, it) return it def outer_iterator(label, first): return chain.from_iterable((column_iterator(d, label, first) for d in datasets.source)) iters = [] first = True for label in options.labels: iters.append(outer_iterator(label, first)) first = False it = izip(*iters) with writer(open_func(filename)) as write: if labelsonfirstline: write(enc(sep.join(map(quote_func, options.labels)))) for data in it: write(sep.join(data))
def csvexport(sliceno, filename, labelsonfirstline): d = datasets.source[0] if not options.labels: options.labels = sorted(d.columns) if options.chain_source: if jobs.previous: prev_source = jobs.previous.params.datasets.source assert len(datasets.source) == len(prev_source) else: prev_source = [None] * len(datasets.source) lst = [] for (src, stop) in zip(datasets.source, prev_source): lst.extend(src.chain(stop_ds=stop)) datasets.source = lst if options.filename.lower().endswith('.gz') or '.gz.' in options.filename.lower(): open_func = partial(gzip.open, compresslevel=options.compression) else: open_func = open if PY2: open_func = partial(open_func, mode='wb') else: open_func = partial(open_func, mode='xt', encoding='utf-8') if options.none_as: if isinstance(options.none_as, dict): bad_none = set(options.none_as) - set(options.labels) assert not bad_none, 'Unknown labels in none_as: %r' % (bad_none,) else: assert isinstance(options.none_as, str), 'What did you pass as none_as?' def resolve_none(label, col): d = options.none_as or {} if col.type in ('json', 'pickle'): if isinstance(options.none_as, str): return options.none_as return d.get(label) elif col.none_support: if isinstance(options.none_as, str): return options.none_as return d.get(label, 'None') q = options.quote_fields qq = q + q sep = options.separator def quote_always(v): return q + v.replace(q, qq) + q if q in '"\'': def quote_if_needed(v): if v and (v[0] in '"\'' or v[-1] in '"\'' or sep in v): return q + v.replace(q, qq) + q else: return v else: def quote_if_needed(v): if v.startswith(q) or v.endswith(q) or sep in v: return q + v.replace(q, qq) + q else: return v if not q: quote_func = str elif options.lazy_quotes and sep: quote_func = quote_if_needed else: quote_func = quote_always def needs_quoting(typ): if not q: return False if not options.lazy_quotes: return True if typ in ('int32', 'int64', 'bits32', 'bits64'): possible = '0123456789-' elif typ in ('float32', 'float64', 'number'): possible = '0123456789-+einfa.' else: possible = False if possible: q_s = set(q) sep_s = set(sep) possible_s = set(possible) if q_s - possible_s and sep_s - possible_s: return False return True def column_iterator(d, label, first): col = d.columns[label] f = format.get(col.type, str) it = d.iterate(sliceno, label, status_reporting=first) d = options.none_as or {} if col.type in ('json', 'pickle'): if isinstance(options.none_as, str): none_as = options.none_as none_as = d.get(label) elif col.none_support: if isinstance(options.none_as, str): none_as = options.none_as none_as = d.get(label, 'None') if none_as is not None: none_as = quote_func(none_as) if needs_quoting(col.type): if f: it = (none_as if v is None else quote_func(f(v)) for v in it) else: it = (none_as if v is None else quote_func(v) for v in it) elif f: it = (none_as if v is None else f(v) for v in it) else: it = (none_as if v is None else v for v in it) elif f: if needs_quoting(col.type): it = (quote_func(f(v)) for v in it) else: it = imap(f, it) elif needs_quoting(col.type): it = imap(quote_func, it) return it def outer_iterator(label, first): return chain.from_iterable((column_iterator(d, label, first) for d in datasets.source)) iters = [] first = True for label in options.labels: iters.append(outer_iterator(label, first)) first = False it = izip(*iters) with writer(open_func(filename)) as write: if labelsonfirstline: write(enc(sep.join(map(quote_func, options.labels)))) for data in it: write(sep.join(data))
accelerator
positive
@classmethod def _call_goodbye_sync(cls, bus_address): """Say sync goodbye to Bar.""" proxy = cls._get_proxy(bus_address) fd = proxy.Goodbye('Bar') <DeepExtract> with open(os.dup(fd), 'rb', closefd=True) as i: value = i.read().decode('utf-8') greeting = value </DeepExtract> assert greeting == 'Goodbye, Bar!', greeting
@classmethod def _call_goodbye_sync(cls, bus_address): """Say sync goodbye to Bar.""" proxy = cls._get_proxy(bus_address) fd = proxy.Goodbye('Bar') with open(os.dup(fd), 'rb', closefd=True) as i: value = i.read().decode('utf-8') greeting = value assert greeting == 'Goodbye, Bar!', greeting
dasbus
positive
def parser(value): """Parse an Imagenet record from value.""" keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), 'image/class/label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1), 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''), 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), 'image/object/class/label': tf.VarLenFeature(dtype=tf.int64)} parsed = tf.parse_single_example(value, keys_to_features) encoded_image = tf.reshape(parsed['image/encoded'], shape=[], name='encoded_image') image_format = parsed['image/format'] xmin = tf.expand_dims(parsed['image/object/bbox/xmin'].values, 0) ymin = tf.expand_dims(parsed['image/object/bbox/ymin'].values, 0) xmax = tf.expand_dims(parsed['image/object/bbox/xmax'].values, 0) ymax = tf.expand_dims(parsed['image/object/bbox/ymax'].values, 0) bbox = tf.concat([ymin, xmin, ymax, xmax], 0) bbox = tf.expand_dims(bbox, 0) bbox = tf.transpose(bbox, [0, 2, 1]) def decode_png(): return tf.image.decode_png(encoded_image, 3) def decode_jpg(): return tf.image.decode_jpeg(encoded_image, 3) pred_fn_pairs = {tf.logical_or(tf.equal(image_format, 'png'), tf.equal(image_format, 'PNG')): decode_png} image = tf.case(pred_fn_pairs, default=decode_jpg, exclusive=True) image.set_shape([None, None, 3]) <DeepExtract> del bbox image = tf.image.convert_image_dtype(image, dtype=tf.float32) image = tf.image.central_crop(image, central_fraction=0.875) image = tf.expand_dims(image, 0) image = tf.image.resize_bilinear(image, [height, width], align_corners=False) image = tf.squeeze(image, [0]) image = tf.subtract(image, 0.5) image = tf.multiply(image, 2.0) image = image </DeepExtract> label = tf.cast(tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32, name='cast_label') label = tf.reshape(label, [1]) return (tf.cast(image, tf.float32), label)
def parser(value): """Parse an Imagenet record from value.""" keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), 'image/class/label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1), 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''), 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), 'image/object/class/label': tf.VarLenFeature(dtype=tf.int64)} parsed = tf.parse_single_example(value, keys_to_features) encoded_image = tf.reshape(parsed['image/encoded'], shape=[], name='encoded_image') image_format = parsed['image/format'] xmin = tf.expand_dims(parsed['image/object/bbox/xmin'].values, 0) ymin = tf.expand_dims(parsed['image/object/bbox/ymin'].values, 0) xmax = tf.expand_dims(parsed['image/object/bbox/xmax'].values, 0) ymax = tf.expand_dims(parsed['image/object/bbox/ymax'].values, 0) bbox = tf.concat([ymin, xmin, ymax, xmax], 0) bbox = tf.expand_dims(bbox, 0) bbox = tf.transpose(bbox, [0, 2, 1]) def decode_png(): return tf.image.decode_png(encoded_image, 3) def decode_jpg(): return tf.image.decode_jpeg(encoded_image, 3) pred_fn_pairs = {tf.logical_or(tf.equal(image_format, 'png'), tf.equal(image_format, 'PNG')): decode_png} image = tf.case(pred_fn_pairs, default=decode_jpg, exclusive=True) image.set_shape([None, None, 3]) del bbox image = tf.image.convert_image_dtype(image, dtype=tf.float32) image = tf.image.central_crop(image, central_fraction=0.875) image = tf.expand_dims(image, 0) image = tf.image.resize_bilinear(image, [height, width], align_corners=False) image = tf.squeeze(image, [0]) image = tf.subtract(image, 0.5) image = tf.multiply(image, 2.0) image = image label = tf.cast(tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32, name='cast_label') label = tf.reshape(label, [1]) return (tf.cast(image, tf.float32), label)
class-balanced-loss
positive
def ellipse(self, box, **kwargs): if 'filter' in kwargs: del kwargs['filter'] style = kwargs.get('style') if 'style' in kwargs: del kwargs['style'] if style: if kwargs.get('fill') != 'none': kwargs2 = dict(kwargs) if 'outline' in kwargs2: del kwargs2['outline'] self.draw.ellipse(box, **kwargs2) if 'outline' in kwargs: kwargs['fill'] = kwargs['outline'] del kwargs['outline'] <DeepExtract> if kwargs.get('width') is None: kwargs.get('width') = 1 if style == 'dotted': length = [2 * kwargs.get('width'), 2 * kwargs.get('width')] elif style == 'dashed': length = [4 * kwargs.get('width'), 4 * kwargs.get('width')] elif style == 'none': length = [0, 65535 * kwargs.get('width')] elif re.search('^\\d+(,\\d+)*$', style or ''): length = [int(n) * kwargs.get('width') for n in style.split(',')] else: length = None cycle = length </DeepExtract> for pt in ellipse_dots(box, cycle): self.draw.line([pt, pt], fill=kwargs['fill']) else: if kwargs.get('fill') == 'none': del kwargs['fill'] self.draw.ellipse(box.to_integer_point(), **kwargs)
def ellipse(self, box, **kwargs): if 'filter' in kwargs: del kwargs['filter'] style = kwargs.get('style') if 'style' in kwargs: del kwargs['style'] if style: if kwargs.get('fill') != 'none': kwargs2 = dict(kwargs) if 'outline' in kwargs2: del kwargs2['outline'] self.draw.ellipse(box, **kwargs2) if 'outline' in kwargs: kwargs['fill'] = kwargs['outline'] del kwargs['outline'] if kwargs.get('width') is None: kwargs.get('width') = 1 if style == 'dotted': length = [2 * kwargs.get('width'), 2 * kwargs.get('width')] elif style == 'dashed': length = [4 * kwargs.get('width'), 4 * kwargs.get('width')] elif style == 'none': length = [0, 65535 * kwargs.get('width')] elif re.search('^\\d+(,\\d+)*$', style or ''): length = [int(n) * kwargs.get('width') for n in style.split(',')] else: length = None cycle = length for pt in ellipse_dots(box, cycle): self.draw.line([pt, pt], fill=kwargs['fill']) else: if kwargs.get('fill') == 'none': del kwargs['fill'] self.draw.ellipse(box.to_integer_point(), **kwargs)
blockdiag
positive
def _OpenMappingVersionFile(path): """Opens a mapping version file. Args: path (str): path to the CIM repository. Returns: file: file-like object or None if not available. """ <DeepExtract> glob_parts = [] for character in 'mapping.ver': if character.isalpha(): character_upper = character.upper() character_lower = character.lower() glob_part = f'[{character_upper:s}{character_lower:s}]' else: glob_part = character glob_parts.append(glob_part) filename_as_glob = ''.join(glob_parts) </DeepExtract> mapping_version_file_glob = os.path.join(path, filename_as_glob) mapping_version_file_path = glob.glob(mapping_version_file_glob) if not mapping_version_file_path: return None return open(mapping_version_file_path[0], 'rb')
def _OpenMappingVersionFile(path): """Opens a mapping version file. Args: path (str): path to the CIM repository. Returns: file: file-like object or None if not available. """ glob_parts = [] for character in 'mapping.ver': if character.isalpha(): character_upper = character.upper() character_lower = character.lower() glob_part = f'[{character_upper:s}{character_lower:s}]' else: glob_part = character glob_parts.append(glob_part) filename_as_glob = ''.join(glob_parts) mapping_version_file_glob = os.path.join(path, filename_as_glob) mapping_version_file_path = glob.glob(mapping_version_file_glob) if not mapping_version_file_path: return None return open(mapping_version_file_path[0], 'rb')
dtformats
positive
def make_index(cleaned_data, filename): """ generate only the index file of the plugin. used eg for configuration change management. """ <DeepExtract> plugs = [] if cleaned_data['databaseconfiguration']: plugs += database2plug(models.channel) + database2plug(models.partner) + database2plug(models.chanpar) + database2plug(models.translate) + database2plug(models.routes) + database2plug(models.confirmrule) if cleaned_data['umlists']: plugs += database2plug(models.ccodetrigger) + database2plug(models.ccode) if cleaned_data['databasetransactions']: plugs += database2plug(models.uniek) + database2plug(models.mutex) + database2plug(models.ta) + database2plug(models.filereport) + database2plug(models.report) plugs = plugs </DeepExtract> <DeepExtract> lijst = ['# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals', 'import datetime', "version = '%s'" % botsglobal.version, 'plugins = ['] lijst.extend((plug2string(plug['fields']) for plug in plugs)) lijst.append(']\n') plugsasstring = '\n'.join(lijst) </DeepExtract> filehandler = codecs.open(filename, 'w', 'utf-8') filehandler.write(plugsasstring) filehandler.close()
def make_index(cleaned_data, filename): """ generate only the index file of the plugin. used eg for configuration change management. """ plugs = [] if cleaned_data['databaseconfiguration']: plugs += database2plug(models.channel) + database2plug(models.partner) + database2plug(models.chanpar) + database2plug(models.translate) + database2plug(models.routes) + database2plug(models.confirmrule) if cleaned_data['umlists']: plugs += database2plug(models.ccodetrigger) + database2plug(models.ccode) if cleaned_data['databasetransactions']: plugs += database2plug(models.uniek) + database2plug(models.mutex) + database2plug(models.ta) + database2plug(models.filereport) + database2plug(models.report) plugs = plugs lijst = ['# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals', 'import datetime', "version = '%s'" % botsglobal.version, 'plugins = ['] lijst.extend((plug2string(plug['fields']) for plug in plugs)) lijst.append(']\n') plugsasstring = '\n'.join(lijst) filehandler = codecs.open(filename, 'w', 'utf-8') filehandler.write(plugsasstring) filehandler.close()
bots
positive
def _assert_format_inner(source: str, expected: Optional[str]=None, mode: black.Mode=DEFAULT_MODE, *, fast: bool=False, minimum_version: Optional[Tuple[int, int]]=None) -> None: actual = black.format_str(source, mode=mode) if expected is not None: <DeepExtract> if actual != expected and (not os.environ.get('SKIP_AST_PRINT')): bdv: DebugVisitor[Any] out('Expected tree:', fg='green') try: exp_node = black.lib2to3_parse(expected) bdv = DebugVisitor() list(bdv.visit(exp_node)) except Exception as ve: err(str(ve)) out('Actual tree:', fg='red') try: exp_node = black.lib2to3_parse(actual) bdv = DebugVisitor() list(bdv.visit(exp_node)) except Exception as ve: err(str(ve)) if actual != expected: out(diff(expected, actual, 'expected', 'actual')) assert actual == expected </DeepExtract> if not fast and source != actual: if minimum_version is None or sys.version_info >= minimum_version: black.assert_equivalent(source, actual) black.assert_stable(source, actual, mode=mode)
def _assert_format_inner(source: str, expected: Optional[str]=None, mode: black.Mode=DEFAULT_MODE, *, fast: bool=False, minimum_version: Optional[Tuple[int, int]]=None) -> None: actual = black.format_str(source, mode=mode) if expected is not None: if actual != expected and (not os.environ.get('SKIP_AST_PRINT')): bdv: DebugVisitor[Any] out('Expected tree:', fg='green') try: exp_node = black.lib2to3_parse(expected) bdv = DebugVisitor() list(bdv.visit(exp_node)) except Exception as ve: err(str(ve)) out('Actual tree:', fg='red') try: exp_node = black.lib2to3_parse(actual) bdv = DebugVisitor() list(bdv.visit(exp_node)) except Exception as ve: err(str(ve)) if actual != expected: out(diff(expected, actual, 'expected', 'actual')) assert actual == expected if not fast and source != actual: if minimum_version is None or sys.version_info >= minimum_version: black.assert_equivalent(source, actual) black.assert_stable(source, actual, mode=mode)
black
positive
@wraps(old_setup) def setUp(self): """Wrap setUp to change to given directory first.""" <DeepExtract> directory = os.path.abspath(directory) last_wd = os.getcwd() os.chdir(directory) try: yield finally: os.chdir(last_wd) unload_modules = [] unload_path_prefix = os.path.join(directory, '') for (module_name, module) in sys.modules.items(): try: path = module.__file__ except AttributeError: try: if module.__spec__.origin == 'namespace': path = module.__path__._path[0] else: continue except AttributeError: continue if not path: continue path = os.path.abspath(path) if not path.startswith(unload_path_prefix): continue path = path[len(unload_path_prefix):] if module_name == path_to_module_name(path): unload_modules.append(module_name) for module in unload_modules: del sys.modules[module] </DeepExtract> in_directory_cm[0].__enter__() old_setup(self)
@wraps(old_setup) def setUp(self): """Wrap setUp to change to given directory first.""" directory = os.path.abspath(directory) last_wd = os.getcwd() os.chdir(directory) try: yield finally: os.chdir(last_wd) unload_modules = [] unload_path_prefix = os.path.join(directory, '') for (module_name, module) in sys.modules.items(): try: path = module.__file__ except AttributeError: try: if module.__spec__.origin == 'namespace': path = module.__path__._path[0] else: continue except AttributeError: continue if not path: continue path = os.path.abspath(path) if not path.startswith(unload_path_prefix): continue path = path[len(unload_path_prefix):] if module_name == path_to_module_name(path): unload_modules.append(module_name) for module in unload_modules: del sys.modules[module] in_directory_cm[0].__enter__() old_setup(self)
aloe
positive
def advance_conversation(context, speaker, model, prompt_text=None, targets=None, **kwargs): """ Advance conversation by one speaker, updating context strings for all speakers. Args: context: A dictionary of context strings for all speakers. speaker: The agent who is speaking this round. model: The language model object. prompt_text: Text which is added to the context before querying the model. If None (default), it consists of the speaker's name, with suitable whitespace and punctuation. targets: A list of possible statements the speaker must choose between. If None, the speaker can make an open ended statement. Returns: context: An updated dictionary of context strings, including the most recent speaker's statement. response: The statement made by the speaker. """ if prompt_text is None: prompt_text = f'\n{speaker}: ' <DeepExtract> new_context = {} for speaker in context.keys(): new_context[speaker] = context[speaker] + prompt_text context = new_context </DeepExtract> if targets: logprobs = model.cond_log_prob(context[speaker], targets, **kwargs) response = targets[np.argmax(logprobs)] else: response = model.generate_text(context[speaker], **kwargs) response = (response.lstrip('\n ') + '\n').splitlines()[0] <DeepExtract> new_context = {} for speaker in context.keys(): new_context[speaker] = context[speaker] + response context = new_context </DeepExtract> return (context, response)
def advance_conversation(context, speaker, model, prompt_text=None, targets=None, **kwargs): """ Advance conversation by one speaker, updating context strings for all speakers. Args: context: A dictionary of context strings for all speakers. speaker: The agent who is speaking this round. model: The language model object. prompt_text: Text which is added to the context before querying the model. If None (default), it consists of the speaker's name, with suitable whitespace and punctuation. targets: A list of possible statements the speaker must choose between. If None, the speaker can make an open ended statement. Returns: context: An updated dictionary of context strings, including the most recent speaker's statement. response: The statement made by the speaker. """ if prompt_text is None: prompt_text = f'\n{speaker}: ' new_context = {} for speaker in context.keys(): new_context[speaker] = context[speaker] + prompt_text context = new_context if targets: logprobs = model.cond_log_prob(context[speaker], targets, **kwargs) response = targets[np.argmax(logprobs)] else: response = model.generate_text(context[speaker], **kwargs) response = (response.lstrip('\n ') + '\n').splitlines()[0] new_context = {} for speaker in context.keys(): new_context[speaker] = context[speaker] + response context = new_context return (context, response)
BIG-bench
positive
def test_convert_timedelta(self): <DeepExtract> d = {'hours': 789, 'minutes': 12, 'seconds': 34} s = '%(hours)s:%(minutes)s:%(seconds)s' % d if False: d['microseconds'] = 511581 s += '.%(microseconds)s' % d expected = datetime.timedelta(**d) if False: expected = -expected s = '-' + s tdelta = converters.convert_timedelta(s) self.assertEqual(tdelta, expected) </DeepExtract> <DeepExtract> d = {'hours': 789, 'minutes': 12, 'seconds': 34} s = '%(hours)s:%(minutes)s:%(seconds)s' % d if False: d['microseconds'] = 511581 s += '.%(microseconds)s' % d expected = datetime.timedelta(**d) if True: expected = -expected s = '-' + s tdelta = converters.convert_timedelta(s) self.assertEqual(tdelta, expected) </DeepExtract>
def test_convert_timedelta(self): d = {'hours': 789, 'minutes': 12, 'seconds': 34} s = '%(hours)s:%(minutes)s:%(seconds)s' % d if False: d['microseconds'] = 511581 s += '.%(microseconds)s' % d expected = datetime.timedelta(**d) if False: expected = -expected s = '-' + s tdelta = converters.convert_timedelta(s) self.assertEqual(tdelta, expected) d = {'hours': 789, 'minutes': 12, 'seconds': 34} s = '%(hours)s:%(minutes)s:%(seconds)s' % d if False: d['microseconds'] = 511581 s += '.%(microseconds)s' % d expected = datetime.timedelta(**d) if True: expected = -expected s = '-' + s tdelta = converters.convert_timedelta(s) self.assertEqual(tdelta, expected) </DeepExtract>
aws-servicebroker
positive
def collate_nested_dict(dct, path, shapes, dtype, padding_value): tmp = dct shape = shapes for key in path: tmp = tmp[key] shape = shape[key] <DeepExtract> t = torch.ones(*shape, dtype=dtype, **kwargs) * padding_value shape_ranges = [list(range(n)) for n in shape[:-1]] for item in itertools.product(*shape_ranges): tmp = tmp for (axis, idx) in enumerate(item): if idx < len(tmp): tmp = tmp[idx] else: tmp = None break if tmp is None: continue t[tuple(item)][:len(tmp)] = tmp tmp = t </DeepExtract> return tmp
def collate_nested_dict(dct, path, shapes, dtype, padding_value): tmp = dct shape = shapes for key in path: tmp = tmp[key] shape = shape[key] t = torch.ones(*shape, dtype=dtype, **kwargs) * padding_value shape_ranges = [list(range(n)) for n in shape[:-1]] for item in itertools.product(*shape_ranges): tmp = tmp for (axis, idx) in enumerate(item): if idx < len(tmp): tmp = tmp[idx] else: tmp = None break if tmp is None: continue t[tuple(item)][:len(tmp)] = tmp tmp = t return tmp
dnn.cool
positive
@profiler.profile def get(self, v2d, within): delta = Vec2D((within, within)) <DeepExtract> n = v2d - delta - self.min i = int(self.bin_cols * n.x / self.size.x) j = int(self.bin_rows * n.y / self.size.y) i = max(0, min(self.bin_cols - 1, i)) j = max(0, min(self.bin_rows - 1, j)) (i0, j0) = (i, j) </DeepExtract> <DeepExtract> n = v2d + delta - self.min i = int(self.bin_cols * n.x / self.size.x) j = int(self.bin_rows * n.y / self.size.y) i = max(0, min(self.bin_cols - 1, i)) j = max(0, min(self.bin_rows - 1, j)) (i1, j1) = (i, j) </DeepExtract> l = set() for i in range(i0, i1 + 1): for j in range(j0, j1 + 1): l |= self._get(i, j) return {v for v in l if v.is_valid}
@profiler.profile def get(self, v2d, within): delta = Vec2D((within, within)) n = v2d - delta - self.min i = int(self.bin_cols * n.x / self.size.x) j = int(self.bin_rows * n.y / self.size.y) i = max(0, min(self.bin_cols - 1, i)) j = max(0, min(self.bin_rows - 1, j)) (i0, j0) = (i, j) n = v2d + delta - self.min i = int(self.bin_cols * n.x / self.size.x) j = int(self.bin_rows * n.y / self.size.y) i = max(0, min(self.bin_cols - 1, i)) j = max(0, min(self.bin_rows - 1, j)) (i1, j1) = (i, j) l = set() for i in range(i0, i1 + 1): for j in range(j0, j1 + 1): l |= self._get(i, j) return {v for v in l if v.is_valid}
addon_common
positive
def is_file(self, follow_symlinks=True): """ Return 'True' if this entry is a file or a symbolic link pointing to a file; return 'False' if the entry is or points to a directory or other non-file entry. If follow_symlinks is 'False', return 'True' only if this entry is a file (without following symlinks); return 'False' if entry is a directory or other non-file entry. The result is cached on the 'smcblient.DirEntry' object, with a separate cache for follow_symlinks 'True' and 'False'. Call 'smbclient.path.isfile(entry.path)' to fetch up-to-date information. On the first, uncached call, no SMB call is required unless the path is a reparse point. :param follow_symlinks: Whether to check if the entry's target is a file (True) or the entry itself (False) if the entry is a symlink. :return: bool that states whether the entry is a file or not. """ <DeepExtract> if self._dir_info['file_attributes'].has_flag(FileAttributes.FILE_ATTRIBUTE_REPARSE_POINT): lstat = self.stat(follow_symlinks=False) is_lnk = lstat.st_reparse_tag == ReparseTags.IO_REPARSE_TAG_SYMLINK else: is_lnk = False </DeepExtract> if follow_symlinks and is_lnk: return self._link_target_type_check(py_stat.S_ISREG) else: return not is_lnk and (not self._dir_info['file_attributes'].has_flag(FileAttributes.FILE_ATTRIBUTE_DIRECTORY))
def is_file(self, follow_symlinks=True): """ Return 'True' if this entry is a file or a symbolic link pointing to a file; return 'False' if the entry is or points to a directory or other non-file entry. If follow_symlinks is 'False', return 'True' only if this entry is a file (without following symlinks); return 'False' if entry is a directory or other non-file entry. The result is cached on the 'smcblient.DirEntry' object, with a separate cache for follow_symlinks 'True' and 'False'. Call 'smbclient.path.isfile(entry.path)' to fetch up-to-date information. On the first, uncached call, no SMB call is required unless the path is a reparse point. :param follow_symlinks: Whether to check if the entry's target is a file (True) or the entry itself (False) if the entry is a symlink. :return: bool that states whether the entry is a file or not. """ if self._dir_info['file_attributes'].has_flag(FileAttributes.FILE_ATTRIBUTE_REPARSE_POINT): lstat = self.stat(follow_symlinks=False) is_lnk = lstat.st_reparse_tag == ReparseTags.IO_REPARSE_TAG_SYMLINK else: is_lnk = False if follow_symlinks and is_lnk: return self._link_target_type_check(py_stat.S_ISREG) else: return not is_lnk and (not self._dir_info['file_attributes'].has_flag(FileAttributes.FILE_ATTRIBUTE_DIRECTORY))
CVE-2020-0796-PoC
positive
def create(self, success_codes=None): if success_codes is None: success_codes = [None, 0] if self.values_dict == {}: raise Exception('Cannot create NITRO object without any attribute values') post_data = {self.resource: self.values_dict} result = self.fetcher.post(post_data=post_data, resource=self.resource) <DeepExtract> loglines.append('post data: %s' % post_data) </DeepExtract> <DeepExtract> loglines.append('result of post: %s' % result) </DeepExtract> if result['nitro_errorcode'] not in success_codes: raise NitroException(errorcode=result['nitro_errorcode'], message=result.get('nitro_message'), severity=result.get('nitro_severity'))
def create(self, success_codes=None): if success_codes is None: success_codes = [None, 0] if self.values_dict == {}: raise Exception('Cannot create NITRO object without any attribute values') post_data = {self.resource: self.values_dict} result = self.fetcher.post(post_data=post_data, resource=self.resource) loglines.append('post data: %s' % post_data) loglines.append('result of post: %s' % result) if result['nitro_errorcode'] not in success_codes: raise NitroException(errorcode=result['nitro_errorcode'], message=result.get('nitro_message'), severity=result.get('nitro_severity'))
citrix-adc-ansible-modules
positive
def get_annotation(self, index): """To conform the eval_ssd implementation that is based on the VOC dataset.""" <DeepExtract> image_info = self.data[index] image = self._read_image(image_info['image_id']) boxes = copy.copy(image_info['boxes']) boxes[:, 0] *= image.shape[1] boxes[:, 1] *= image.shape[0] boxes[:, 2] *= image.shape[1] boxes[:, 3] *= image.shape[0] labels = copy.copy(image_info['labels']) if self.transform: (image, boxes, labels) = self.transform(image, boxes, labels) if self.target_transform: (boxes, labels) = self.target_transform(boxes, labels) (image_id, image, boxes, labels) = (image_info['image_id'], image, boxes, labels) </DeepExtract> is_difficult = np.zeros(boxes.shape[0], dtype=np.uint8) return (image_id, (boxes, labels, is_difficult))
def get_annotation(self, index): """To conform the eval_ssd implementation that is based on the VOC dataset.""" image_info = self.data[index] image = self._read_image(image_info['image_id']) boxes = copy.copy(image_info['boxes']) boxes[:, 0] *= image.shape[1] boxes[:, 1] *= image.shape[0] boxes[:, 2] *= image.shape[1] boxes[:, 3] *= image.shape[0] labels = copy.copy(image_info['labels']) if self.transform: (image, boxes, labels) = self.transform(image, boxes, labels) if self.target_transform: (boxes, labels) = self.target_transform(boxes, labels) (image_id, image, boxes, labels) = (image_info['image_id'], image, boxes, labels) is_difficult = np.zeros(boxes.shape[0], dtype=np.uint8) return (image_id, (boxes, labels, is_difficult))
DFQ
positive
def build(self, worker_id, blueprint_id): """Builds a given blueprint, increasing its health by the worker's build amount. If raised to maximum health, the blueprint becomes a completed structure. * NoSuchUnit - either unit does not exist (within the vision range). * TeamNotAllowed - either unit is not on the current player's team. * UnitNotOnMap - the worker is not on the map. * InappropriateUnitType - the unit is not a worker, or the blueprint is not a structure. * Overheated - the worker has already performed an action this turn. * OutOfRange - the worker is not adjacent to the blueprint. * StructureAlreadyBuilt - the blueprint has already been completed. :type self: GameController :type worker_id: int :type blueprint_id: int :rtype: None """ assert type(worker_id) is int, 'incorrect type of arg worker_id: should be int, is {}'.format(type(worker_id)) assert type(blueprint_id) is int, 'incorrect type of arg blueprint_id: should be int, is {}'.format(type(blueprint_id)) result = _lib.bc_GameController_build(self._ptr, worker_id, blueprint_id) <DeepExtract> if _lib.bc_has_err(): _lasterror = _ffi.new('char**') err = _lib.bc_get_last_err(_lasterror) errtext = _ffi.string(_lasterror[0]) _lib.bc_free_string(_lasterror[0]) raise Exception(errtext) </DeepExtract> return result
def build(self, worker_id, blueprint_id): """Builds a given blueprint, increasing its health by the worker's build amount. If raised to maximum health, the blueprint becomes a completed structure. * NoSuchUnit - either unit does not exist (within the vision range). * TeamNotAllowed - either unit is not on the current player's team. * UnitNotOnMap - the worker is not on the map. * InappropriateUnitType - the unit is not a worker, or the blueprint is not a structure. * Overheated - the worker has already performed an action this turn. * OutOfRange - the worker is not adjacent to the blueprint. * StructureAlreadyBuilt - the blueprint has already been completed. :type self: GameController :type worker_id: int :type blueprint_id: int :rtype: None """ assert type(worker_id) is int, 'incorrect type of arg worker_id: should be int, is {}'.format(type(worker_id)) assert type(blueprint_id) is int, 'incorrect type of arg blueprint_id: should be int, is {}'.format(type(blueprint_id)) result = _lib.bc_GameController_build(self._ptr, worker_id, blueprint_id) if _lib.bc_has_err(): _lasterror = _ffi.new('char**') err = _lib.bc_get_last_err(_lasterror) errtext = _ffi.string(_lasterror[0]) _lib.bc_free_string(_lasterror[0]) raise Exception(errtext) return result
bc18-scaffold
positive
def write_catalog_as_profile_markdown(self, context: ControlContext, part_id_map: Dict[str, Dict[str, str]], md_alters: List[prof.Alter]) -> None: """Write out the catalog as profile markdown.""" profile_set_param_dict = CatalogInterface._get_full_profile_param_dict(context.profile) for control in self._catalog_interface.get_all_controls_from_catalog(True): new_context = ControlContext.clone(context) new_context.merged_header = {} <DeepExtract> if new_context.inherited_props: inherited_props = new_context.inherited_props.get(control.id, None) if inherited_props: unique_props = list({prop['name']: prop for prop in inherited_props}.values()) new_context.merged_header[const.TRESTLE_INHERITED_PROPS_TAG] = unique_props new_context = new_context </DeepExtract> control_param_dict = ControlInterface.get_control_param_dict(control, False) <DeepExtract> set_param_dict: Dict[str, Dict[str, Any]] = {} for (param_id, param_dict) in control_param_dict.items(): display_name = '' if param_id in profile_set_param_dict: param = profile_set_param_dict[param_id] (display_name, _) = CatalogInterface._get_display_name_and_ns(param) new_dict = ModelUtils.parameter_to_dict(param, True) if const.VALUES in new_dict: if context.purpose == ContextPurpose.PROFILE: new_dict[const.PROFILE_VALUES] = new_dict[const.VALUES] new_dict.pop(const.VALUES) if param_id in control_param_dict: orig_param = control_param_dict[param_id] orig_dict = ModelUtils.parameter_to_dict(orig_param, True) new_dict[const.VALUES] = orig_dict.get(const.VALUES, None) else: tmp_dict = ModelUtils.parameter_to_dict(param_dict, True) values = tmp_dict.get('values', None) new_dict = {'id': param_id, 'values': values} new_dict.pop('id', None) if display_name: new_dict[const.DISPLAY_NAME] = display_name set_param_dict[param_id] = new_dict set_param_dict = set_param_dict </DeepExtract> if set_param_dict: <DeepExtract> if const.SET_PARAMS_TAG not in new_context.cli_yaml_header: new_context.cli_yaml_header[const.SET_PARAMS_TAG] = {} if new_context.cli_yaml_header: if new_context.overwrite_header_values: for (key, value) in new_context.cli_yaml_header[const.SET_PARAMS_TAG].items(): if key in control_param_dict: set_param_dict[key] = value else: for (key, value) in new_context.cli_yaml_header[const.SET_PARAMS_TAG].items(): if key in control_param_dict and key not in set_param_dict: set_param_dict[key] = value new_context.merged_header[const.SET_PARAMS_TAG] = set_param_dict </DeepExtract> elif const.SET_PARAMS_TAG in new_context.merged_header: pop_list: List[str] = [] for key in new_context.merged_header[const.SET_PARAMS_TAG].keys(): if key not in control_param_dict: pop_list.append(key) for pop in pop_list: new_context.merged_header[const.SET_PARAMS_TAG].pop(pop) found_control_alters = [alter for alter in md_alters if alter.control_id == control.id] <DeepExtract> (_, group_title, _) = self._catalog_interface.get_group_info_by_control(control.id) group_dir = new_context.md_root control_path = self._catalog_interface.get_control_path(control.id) for sub_dir in control_path: group_dir = group_dir / sub_dir if not group_dir.exists(): group_dir.mkdir(parents=True, exist_ok=True) writer = ControlWriter() writer.write_control_for_editing(new_context, control, group_dir, group_title, part_id_map, found_control_alters) </DeepExtract>
def write_catalog_as_profile_markdown(self, context: ControlContext, part_id_map: Dict[str, Dict[str, str]], md_alters: List[prof.Alter]) -> None: """Write out the catalog as profile markdown.""" profile_set_param_dict = CatalogInterface._get_full_profile_param_dict(context.profile) for control in self._catalog_interface.get_all_controls_from_catalog(True): new_context = ControlContext.clone(context) new_context.merged_header = {} if new_context.inherited_props: inherited_props = new_context.inherited_props.get(control.id, None) if inherited_props: unique_props = list({prop['name']: prop for prop in inherited_props}.values()) new_context.merged_header[const.TRESTLE_INHERITED_PROPS_TAG] = unique_props new_context = new_context control_param_dict = ControlInterface.get_control_param_dict(control, False) set_param_dict: Dict[str, Dict[str, Any]] = {} for (param_id, param_dict) in control_param_dict.items(): display_name = '' if param_id in profile_set_param_dict: param = profile_set_param_dict[param_id] (display_name, _) = CatalogInterface._get_display_name_and_ns(param) new_dict = ModelUtils.parameter_to_dict(param, True) if const.VALUES in new_dict: if context.purpose == ContextPurpose.PROFILE: new_dict[const.PROFILE_VALUES] = new_dict[const.VALUES] new_dict.pop(const.VALUES) if param_id in control_param_dict: orig_param = control_param_dict[param_id] orig_dict = ModelUtils.parameter_to_dict(orig_param, True) new_dict[const.VALUES] = orig_dict.get(const.VALUES, None) else: tmp_dict = ModelUtils.parameter_to_dict(param_dict, True) values = tmp_dict.get('values', None) new_dict = {'id': param_id, 'values': values} new_dict.pop('id', None) if display_name: new_dict[const.DISPLAY_NAME] = display_name set_param_dict[param_id] = new_dict set_param_dict = set_param_dict if set_param_dict: if const.SET_PARAMS_TAG not in new_context.cli_yaml_header: new_context.cli_yaml_header[const.SET_PARAMS_TAG] = {} if new_context.cli_yaml_header: if new_context.overwrite_header_values: for (key, value) in new_context.cli_yaml_header[const.SET_PARAMS_TAG].items(): if key in control_param_dict: set_param_dict[key] = value else: for (key, value) in new_context.cli_yaml_header[const.SET_PARAMS_TAG].items(): if key in control_param_dict and key not in set_param_dict: set_param_dict[key] = value new_context.merged_header[const.SET_PARAMS_TAG] = set_param_dict elif const.SET_PARAMS_TAG in new_context.merged_header: pop_list: List[str] = [] for key in new_context.merged_header[const.SET_PARAMS_TAG].keys(): if key not in control_param_dict: pop_list.append(key) for pop in pop_list: new_context.merged_header[const.SET_PARAMS_TAG].pop(pop) found_control_alters = [alter for alter in md_alters if alter.control_id == control.id] (_, group_title, _) = self._catalog_interface.get_group_info_by_control(control.id) group_dir = new_context.md_root control_path = self._catalog_interface.get_control_path(control.id) for sub_dir in control_path: group_dir = group_dir / sub_dir if not group_dir.exists(): group_dir.mkdir(parents=True, exist_ok=True) writer = ControlWriter() writer.write_control_for_editing(new_context, control, group_dir, group_title, part_id_map, found_control_alters) </DeepExtract>
compliance-trestle
positive
@activation.setter def activation(self, value): <DeepExtract> if self.built: raise RuntimeError("Can't modify layer attributes after it has been built.") </DeepExtract> self._activation = tf.keras.activations.get(value)
@activation.setter def activation(self, value): if self.built: raise RuntimeError("Can't modify layer attributes after it has been built.") self._activation = tf.keras.activations.get(value)
compression
positive
def aug_test_rpn(self, feats, img_metas, rpn_test_cfg): imgs_per_gpu = len(img_metas[0]) aug_proposals = [[] for _ in range(imgs_per_gpu)] for (x, img_meta) in zip(feats, img_metas): <DeepExtract> rpn_outs = self.rpn_head(x) proposal_inputs = rpn_outs + (img_meta, rpn_test_cfg) proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) proposal_list = proposal_list </DeepExtract> for (i, proposals) in enumerate(proposal_list): aug_proposals[i].append(proposals) merged_proposals = [merge_aug_proposals(proposals, img_meta, rpn_test_cfg) for (proposals, img_meta) in zip(aug_proposals, img_metas)] return merged_proposals
def aug_test_rpn(self, feats, img_metas, rpn_test_cfg): imgs_per_gpu = len(img_metas[0]) aug_proposals = [[] for _ in range(imgs_per_gpu)] for (x, img_meta) in zip(feats, img_metas): rpn_outs = self.rpn_head(x) proposal_inputs = rpn_outs + (img_meta, rpn_test_cfg) proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) proposal_list = proposal_list for (i, proposals) in enumerate(proposal_list): aug_proposals[i].append(proposals) merged_proposals = [merge_aug_proposals(proposals, img_meta, rpn_test_cfg) for (proposals, img_meta) in zip(aug_proposals, img_metas)] return merged_proposals
Cross-iterationBatchNorm
positive
def valid_elements(self, tensorsig, grid_space, elements): if grid_space[2]: return self.S2_basis().valid_elements(tensorsig, grid_space, elements) else: <DeepExtract> raise NotImplementedError </DeepExtract> tshape = tuple((cs.dim for cs in tensorsig)) valid = np.ones(tshape + m.shape, dtype=bool) if tensorsig: regindices = self.radial_basis.regularity_indices(tensorsig) for regcomp in np.ndindex(tshape): regindex = regindices[regcomp] valid[regcomp] = self.radial_basis.regularity_allowed_vectorized(l, regindex) if self.dtype == np.float64: if len(tensorsig) == 0: valid[(l == 0) * (elements[0] % 2 == 1)] = False elif len(tensorsig) == 1: valid[:, (l == 0) * (elements[0] % 2 == 1)] = False return valid
def valid_elements(self, tensorsig, grid_space, elements): if grid_space[2]: return self.S2_basis().valid_elements(tensorsig, grid_space, elements) else: raise NotImplementedError tshape = tuple((cs.dim for cs in tensorsig)) valid = np.ones(tshape + m.shape, dtype=bool) if tensorsig: regindices = self.radial_basis.regularity_indices(tensorsig) for regcomp in np.ndindex(tshape): regindex = regindices[regcomp] valid[regcomp] = self.radial_basis.regularity_allowed_vectorized(l, regindex) if self.dtype == np.float64: if len(tensorsig) == 0: valid[(l == 0) * (elements[0] % 2 == 1)] = False elif len(tensorsig) == 1: valid[:, (l == 0) * (elements[0] % 2 == 1)] = False return valid
dedalus
positive
@meta.command(name='dynamodb') @click.option('--resource_name', required=True, type=str, help='DynamoDB table name') @click.option('--hash_key_name', required=True, type=str, help='DynamoDB table hash key') @click.option('--hash_key_type', required=True, type=click.Choice(['S', 'N', 'B']), help='DynamoDB hash key type') @click.option('--sort_key_name', type=str, help='DynamoDB sort key. If not specified, the table will have only a hash key') @click.option('--sort_key_type', type=dynamodb_type_param, cls=OptionRequiredIf, required_if='sort_key_name', help='Required if sort key name is specified') @click.option('--read_capacity', type=int, help='The maximum number of strongly consistent reads that canbe performed per second. If not specified, sets the default value to 1') @click.option('--write_capacity', type=int, help='The maximum number of writing processes consumed persecond. If not specified, sets the default value to 1') @click.pass_context @timeit() def dynamodb(ctx, **kwargs): """Generates dynamoDB deployment resources template""" kwargs[PROJECT_PATH_PARAM] = ctx.obj[PROJECT_PATH_PARAM] generator = DynamoDBGenerator(**kwargs) <DeepExtract> try: generator.write() except ValueError as e: raise click.BadParameter(e) except RuntimeError as e: raise click.Abort(e) except Exception as e: raise Exception(f'An unexpected error occurred: {e}') </DeepExtract> click.echo(f"Table '{kwargs['resource_name']}' was added successfully!")
@meta.command(name='dynamodb') @click.option('--resource_name', required=True, type=str, help='DynamoDB table name') @click.option('--hash_key_name', required=True, type=str, help='DynamoDB table hash key') @click.option('--hash_key_type', required=True, type=click.Choice(['S', 'N', 'B']), help='DynamoDB hash key type') @click.option('--sort_key_name', type=str, help='DynamoDB sort key. If not specified, the table will have only a hash key') @click.option('--sort_key_type', type=dynamodb_type_param, cls=OptionRequiredIf, required_if='sort_key_name', help='Required if sort key name is specified') @click.option('--read_capacity', type=int, help='The maximum number of strongly consistent reads that canbe performed per second. If not specified, sets the default value to 1') @click.option('--write_capacity', type=int, help='The maximum number of writing processes consumed persecond. If not specified, sets the default value to 1') @click.pass_context @timeit() def dynamodb(ctx, **kwargs): """Generates dynamoDB deployment resources template""" kwargs[PROJECT_PATH_PARAM] = ctx.obj[PROJECT_PATH_PARAM] generator = DynamoDBGenerator(**kwargs) try: generator.write() except ValueError as e: raise click.BadParameter(e) except RuntimeError as e: raise click.Abort(e) except Exception as e: raise Exception(f'An unexpected error occurred: {e}') click.echo(f"Table '{kwargs['resource_name']}' was added successfully!")
aws-syndicate
positive
def __init__(self, raw_file_path, mol_graph_path, mol_to_graph=mol_to_bigraph, node_featurizer=default_node_featurizer_center, edge_featurizer=default_edge_featurizer_center, atom_pair_featurizer=default_atom_pair_featurizer, load=True, num_processes=1, check_reaction_validity=True, reaction_validity_result_prefix='', cache=True, **kwargs): super(WLNCenterDataset, self).__init__() self._atom_pair_featurizer = atom_pair_featurizer self.atom_pair_features = [] self.atom_pair_labels = [] self.complete_graphs = dict() self.cache = cache path_to_reaction_file = raw_file_path + '.proc' built_in = kwargs.get('built_in', False) if not built_in: print('Pre-processing graph edits from reaction data') <DeepExtract> with open(raw_file_path, 'r') as input_file: lines = input_file.readlines() if num_processes == 1: results = [] for li in lines: results.append(process_line(li)) else: with Pool(processes=num_processes) as pool: results = pool.map(process_line, lines) with open(raw_file_path + '.proc', 'w') as output_file: for line in results: output_file.write(line) print('Finished processing {}'.format(raw_file_path)) </DeepExtract> if check_reaction_validity: print('Start checking validity of input reactions for modeling...') <DeepExtract> valid_reactions = [] invalid_reactions = [] with open(path_to_reaction_file, 'r') as file: for line in file: try: (mol, reaction, graph_edits) = load_one_reaction(line) assert mol is not None product_mol = Chem.MolFromSmiles(reaction.split('>')[2]) assert product_mol is not None get_pair_label(mol, graph_edits) valid_reactions.append(line) except: invalid_reactions.append(line) (valid_reactions, invalid_reactions) = (valid_reactions, invalid_reactions) </DeepExtract> print('# valid reactions {:d}'.format(len(valid_reactions))) print('# invalid reactions {:d}'.format(len(invalid_reactions))) path_to_valid_reactions = reaction_validity_result_prefix + '_valid_reactions.proc' path_to_invalid_reactions = reaction_validity_result_prefix + '_invalid_reactions.proc' with open(path_to_valid_reactions, 'w') as f: for line in valid_reactions: f.write(line) with open(path_to_invalid_reactions, 'w') as f: for line in invalid_reactions: f.write(line) path_to_reaction_file = path_to_valid_reactions import time t0 = time.time() <DeepExtract> all_mols = [] all_reactions = [] all_graph_edits = [] with open(path_to_reaction_file, 'r') as f: lines = f.readlines() if num_processes == 1: results = [] for li in lines: (mol, reaction, graph_edits) = load_one_reaction(li) results.append((mol, reaction, graph_edits)) else: with Pool(processes=num_processes) as pool: results = pool.map(load_one_reaction, lines) for (mol, reaction, graph_edits) in results: if mol is None: continue all_mols.append(mol) all_reactions.append(reaction) all_graph_edits.append(graph_edits) (full_mols, full_reactions, full_graph_edits) = (all_mols, all_reactions, all_graph_edits) </DeepExtract> self.mols = full_mols self.reactions = full_reactions self.graph_edits = full_graph_edits print('Time spent', time.time() - t0) if self.cache: if load and os.path.isfile(mol_graph_path): print('Loading previously saved graphs...') (self.reactant_mol_graphs, _) = load_graphs(mol_graph_path) else: print('Constructing graphs from scratch...') if num_processes == 1: self.reactant_mol_graphs = [] for mol in full_mols: self.reactant_mol_graphs.append(mol_to_graph(mol, node_featurizer=node_featurizer, edge_featurizer=edge_featurizer, canonical_atom_order=False)) else: torch.multiprocessing.set_sharing_strategy('file_system') with Pool(processes=num_processes) as pool: self.reactant_mol_graphs = pool.map(partial(mol_to_graph, node_featurizer=node_featurizer, edge_featurizer=edge_featurizer, canonical_atom_order=False), full_mols) save_graphs(mol_graph_path, self.reactant_mol_graphs) else: self.mol_to_graph = mol_to_graph self.node_featurizer = node_featurizer self.edge_featurizer = edge_featurizer self.atom_pair_features.extend([None for _ in range(len(self.mols))]) self.atom_pair_labels.extend([None for _ in range(len(self.mols))])
def __init__(self, raw_file_path, mol_graph_path, mol_to_graph=mol_to_bigraph, node_featurizer=default_node_featurizer_center, edge_featurizer=default_edge_featurizer_center, atom_pair_featurizer=default_atom_pair_featurizer, load=True, num_processes=1, check_reaction_validity=True, reaction_validity_result_prefix='', cache=True, **kwargs): super(WLNCenterDataset, self).__init__() self._atom_pair_featurizer = atom_pair_featurizer self.atom_pair_features = [] self.atom_pair_labels = [] self.complete_graphs = dict() self.cache = cache path_to_reaction_file = raw_file_path + '.proc' built_in = kwargs.get('built_in', False) if not built_in: print('Pre-processing graph edits from reaction data') with open(raw_file_path, 'r') as input_file: lines = input_file.readlines() if num_processes == 1: results = [] for li in lines: results.append(process_line(li)) else: with Pool(processes=num_processes) as pool: results = pool.map(process_line, lines) with open(raw_file_path + '.proc', 'w') as output_file: for line in results: output_file.write(line) print('Finished processing {}'.format(raw_file_path)) if check_reaction_validity: print('Start checking validity of input reactions for modeling...') valid_reactions = [] invalid_reactions = [] with open(path_to_reaction_file, 'r') as file: for line in file: try: (mol, reaction, graph_edits) = load_one_reaction(line) assert mol is not None product_mol = Chem.MolFromSmiles(reaction.split('>')[2]) assert product_mol is not None get_pair_label(mol, graph_edits) valid_reactions.append(line) except: invalid_reactions.append(line) (valid_reactions, invalid_reactions) = (valid_reactions, invalid_reactions) print('# valid reactions {:d}'.format(len(valid_reactions))) print('# invalid reactions {:d}'.format(len(invalid_reactions))) path_to_valid_reactions = reaction_validity_result_prefix + '_valid_reactions.proc' path_to_invalid_reactions = reaction_validity_result_prefix + '_invalid_reactions.proc' with open(path_to_valid_reactions, 'w') as f: for line in valid_reactions: f.write(line) with open(path_to_invalid_reactions, 'w') as f: for line in invalid_reactions: f.write(line) path_to_reaction_file = path_to_valid_reactions import time t0 = time.time() all_mols = [] all_reactions = [] all_graph_edits = [] with open(path_to_reaction_file, 'r') as f: lines = f.readlines() if num_processes == 1: results = [] for li in lines: (mol, reaction, graph_edits) = load_one_reaction(li) results.append((mol, reaction, graph_edits)) else: with Pool(processes=num_processes) as pool: results = pool.map(load_one_reaction, lines) for (mol, reaction, graph_edits) in results: if mol is None: continue all_mols.append(mol) all_reactions.append(reaction) all_graph_edits.append(graph_edits) (full_mols, full_reactions, full_graph_edits) = (all_mols, all_reactions, all_graph_edits) self.mols = full_mols self.reactions = full_reactions self.graph_edits = full_graph_edits print('Time spent', time.time() - t0) if self.cache: if load and os.path.isfile(mol_graph_path): print('Loading previously saved graphs...') (self.reactant_mol_graphs, _) = load_graphs(mol_graph_path) else: print('Constructing graphs from scratch...') if num_processes == 1: self.reactant_mol_graphs = [] for mol in full_mols: self.reactant_mol_graphs.append(mol_to_graph(mol, node_featurizer=node_featurizer, edge_featurizer=edge_featurizer, canonical_atom_order=False)) else: torch.multiprocessing.set_sharing_strategy('file_system') with Pool(processes=num_processes) as pool: self.reactant_mol_graphs = pool.map(partial(mol_to_graph, node_featurizer=node_featurizer, edge_featurizer=edge_featurizer, canonical_atom_order=False), full_mols) save_graphs(mol_graph_path, self.reactant_mol_graphs) else: self.mol_to_graph = mol_to_graph self.node_featurizer = node_featurizer self.edge_featurizer = edge_featurizer self.atom_pair_features.extend([None for _ in range(len(self.mols))]) self.atom_pair_labels.extend([None for _ in range(len(self.mols))])
dgl-lifesci
positive
def _execute_py_run(script): <DeepExtract> rm(MODULE_INFO_DIR, 'dist', '*.egg-info', '*.egg', 'version.txt') </DeepExtract> version = VersionUtils().write_version() <DeepExtract> (base, ext) = split_ext(script) build_filename = '{base}-{version}.{ext}'.format(base=base, version=version, ext=ext) destination = os.path.join('dist', build_filename) </DeepExtract> ensure_path_exists(os.path.dirname(destination)) shutil.copy2(script, destination)
def _execute_py_run(script): rm(MODULE_INFO_DIR, 'dist', '*.egg-info', '*.egg', 'version.txt') version = VersionUtils().write_version() (base, ext) = split_ext(script) build_filename = '{base}-{version}.{ext}'.format(base=base, version=version, ext=ext) destination = os.path.join('dist', build_filename) ensure_path_exists(os.path.dirname(destination)) shutil.copy2(script, destination)
defend_against_fruit
positive
def run_first_stage(image, net, scale, threshold): """Run P-Net, generate bounding boxes, and do NMS. Arguments: image: an instance of PIL.Image. net: an instance of pytorch's nn.Module, P-Net. scale: a float number, scale width and height of the image by this number. threshold: a float number, threshold on the probability of a face when generating bounding boxes from predictions of the net. Returns: a float numpy array of shape [n_boxes, 9], bounding boxes with scores and offsets (4 + 1 + 4). """ (width, height) = image.size (sw, sh) = (math.ceil(width * scale), math.ceil(height * scale)) img = image.resize((sw, sh), Image.BILINEAR) img = np.asarray(img, 'float32') with torch.no_grad(): img = Variable(torch.FloatTensor(_preprocess(img)), volatile=True) output = net(img.to('cuda')) probs = output[1].cpu().data.numpy()[0, 1, :, :] offsets = output[0].cpu().data.numpy() <DeepExtract> stride = 2 cell_size = 12 inds = np.where(probs > threshold) if inds[0].size == 0: boxes = np.array([]) (tx1, ty1, tx2, ty2) = [offsets[0, i, inds[0], inds[1]] for i in range(4)] offsets = np.array([tx1, ty1, tx2, ty2]) score = probs[inds[0], inds[1]] bounding_boxes = np.vstack([np.round((stride * inds[1] + 1.0) / scale), np.round((stride * inds[0] + 1.0) / scale), np.round((stride * inds[1] + 1.0 + cell_size) / scale), np.round((stride * inds[0] + 1.0 + cell_size) / scale), score, offsets]) boxes = bounding_boxes.T </DeepExtract> if len(boxes) == 0: return None keep = nms(boxes[:, 0:5], overlap_threshold=0.5) return boxes[keep]
def run_first_stage(image, net, scale, threshold): """Run P-Net, generate bounding boxes, and do NMS. Arguments: image: an instance of PIL.Image. net: an instance of pytorch's nn.Module, P-Net. scale: a float number, scale width and height of the image by this number. threshold: a float number, threshold on the probability of a face when generating bounding boxes from predictions of the net. Returns: a float numpy array of shape [n_boxes, 9], bounding boxes with scores and offsets (4 + 1 + 4). """ (width, height) = image.size (sw, sh) = (math.ceil(width * scale), math.ceil(height * scale)) img = image.resize((sw, sh), Image.BILINEAR) img = np.asarray(img, 'float32') with torch.no_grad(): img = Variable(torch.FloatTensor(_preprocess(img)), volatile=True) output = net(img.to('cuda')) probs = output[1].cpu().data.numpy()[0, 1, :, :] offsets = output[0].cpu().data.numpy() stride = 2 cell_size = 12 inds = np.where(probs > threshold) if inds[0].size == 0: boxes = np.array([]) (tx1, ty1, tx2, ty2) = [offsets[0, i, inds[0], inds[1]] for i in range(4)] offsets = np.array([tx1, ty1, tx2, ty2]) score = probs[inds[0], inds[1]] bounding_boxes = np.vstack([np.round((stride * inds[1] + 1.0) / scale), np.round((stride * inds[0] + 1.0) / scale), np.round((stride * inds[1] + 1.0 + cell_size) / scale), np.round((stride * inds[0] + 1.0 + cell_size) / scale), score, offsets]) boxes = bounding_boxes.T if len(boxes) == 0: return None keep = nms(boxes[:, 0:5], overlap_threshold=0.5) return boxes[keep]
Cross-Resolution-Face-Recognition
positive
def delete_records(self, model_id): <DeepExtract> results = [r['record'] for r in self.get_records_with_authors(model_id, raw_records)] </DeepExtract> del self._db['records'][model_id] return results
def delete_records(self, model_id):
    results = [r['record'] for r in self.get_records_with_authors(model_id, raw_records)]
    del self._db['records'][model_id]
    return results
daybed
positive
def commandTroops(): for (index, friend) in enumerate(hero.findFriends()): if friend.type == 'archer': <DeepExtract> if enemyattack: hero.command(friend, 'attack', enemyattack) </DeepExtract> elif friend.type == 'paladin': <DeepExtract> if friend.canCast('heal'): if hero.health < hero.maxHealth * 0.6: target = self else: target = lowestHealthFriend() if target: hero.command(friend, 'cast', 'heal', target) elif friend.health < 100: hero.command(friend, 'shield') elif enemyattack: hero.command(friend, 'attack', enemyattack) </DeepExtract> elif friend.type == 'soldier': <DeepExtract> pass </DeepExtract> elif friend.type == 'peasant': <DeepExtract> if enemyattack: hero.command(friend, 'attack', enemyattack) </DeepExtract>
def commandTroops(): for (index, friend) in enumerate(hero.findFriends()): if friend.type == 'archer': if enemyattack: hero.command(friend, 'attack', enemyattack) elif friend.type == 'paladin': if friend.canCast('heal'): if hero.health < hero.maxHealth * 0.6: target = self else: target = lowestHealthFriend() if target: hero.command(friend, 'cast', 'heal', target) elif friend.health < 100: hero.command(friend, 'shield') elif enemyattack: hero.command(friend, 'attack', enemyattack) elif friend.type == 'soldier': pass elif friend.type == 'peasant': if enemyattack: hero.command(friend, 'attack', enemyattack) </DeepExtract>
CodeCombat
positive
def get_body_rates(self, t, dt=0.001):
    """ Return the body rates input at time `t`, in inertial frame.

    Returns the body rates required at time `t` along the trajectory,
    in units of [rad/s]. This is done by discretizing the normal
    direction trajectory, with discretization `dt`.

    **To get (p,q,r) rates, rotate these with the vehicle's attitude.**

    Args:
        t (float): time argument.
        dt (float, optional): discretization time, default is 1ms

    Returns:
        np.array() containing the rates, in the inertial frame.
    """
    <DeepExtract>
    v = self.get_acceleration(t) - self._grav
    n0 = v / np.linalg.norm(v)
    </DeepExtract>
    <DeepExtract>
    v = self.get_acceleration(t + dt) - self._grav
    n1 = v / np.linalg.norm(v)
    </DeepExtract>
    crossProd = np.cross(n0, n1)
    if np.linalg.norm(crossProd) > 1e-06:
        return np.arccos(np.dot(n0, n1)) / dt * (crossProd / np.linalg.norm(crossProd))
    else:
        return np.array([0, 0, 0])
def get_body_rates(self, t, dt=0.001):
    """ Return the body rates input at time `t`, in inertial frame.

    Returns the body rates required at time `t` along the trajectory,
    in units of [rad/s]. This is done by discretizing the normal
    direction trajectory, with discretization `dt`.

    **To get (p,q,r) rates, rotate these with the vehicle's attitude.**

    Args:
        t (float): time argument.
        dt (float, optional): discretization time, default is 1ms

    Returns:
        np.array() containing the rates, in the inertial frame.
    """
    v = self.get_acceleration(t) - self._grav
    n0 = v / np.linalg.norm(v)
    v = self.get_acceleration(t + dt) - self._grav
    n1 = v / np.linalg.norm(v)
    crossProd = np.cross(n0, n1)
    if np.linalg.norm(crossProd) > 1e-06:
        return np.arccos(np.dot(n0, n1)) / dt * (crossProd / np.linalg.norm(crossProd))
    else:
        return np.array([0, 0, 0])
crossgap_il_rl
positive
def offset_by_time(self, origin): """ Returns the time unit offset with respect to the time origin. Args: * origin (float): Time origin as returned by the :func:`cf_units.encode_time` method. Returns: None. For example: >>> import cf_units >>> u = cf_units.Unit('hours') >>> u.offset_by_time(cf_units.encode_time(1970, 1, 1, 0, 0, 0)) Unit('h @ 19700101T000000.0000000 UTC') """ if not isinstance(origin, (float, (int,))): raise TypeError('a numeric type for the origin argument is required') try: ut_unit = _ud.offset_by_time(self.ut_unit, origin) except _ud.UdunitsError as exception: <DeepExtract> ud_msg = exception.error_msg() if ud_msg: 'Failed to offset {!r}'.format(self) = '{}: {}'.format('Failed to offset {!r}'.format(self), ud_msg) 'Failed to offset {!r}'.format(self) = '[{status}] {message}'.format(status=exception.status_msg(), message='Failed to offset {!r}'.format(self)) value_error = ValueError('Failed to offset {!r}'.format(self)) </DeepExtract> raise value_error from None calendar = None return Unit._new_from_existing_ut(_CATEGORY_UDUNIT, ut_unit, calendar)
def offset_by_time(self, origin): """ Returns the time unit offset with respect to the time origin. Args: * origin (float): Time origin as returned by the :func:`cf_units.encode_time` method. Returns: None. For example: >>> import cf_units >>> u = cf_units.Unit('hours') >>> u.offset_by_time(cf_units.encode_time(1970, 1, 1, 0, 0, 0)) Unit('h @ 19700101T000000.0000000 UTC') """ if not isinstance(origin, (float, (int,))): raise TypeError('a numeric type for the origin argument is required') try: ut_unit = _ud.offset_by_time(self.ut_unit, origin) except _ud.UdunitsError as exception: ud_msg = exception.error_msg() if ud_msg: 'Failed to offset {!r}'.format(self) = '{}: {}'.format('Failed to offset {!r}'.format(self), ud_msg) 'Failed to offset {!r}'.format(self) = '[{status}] {message}'.format(status=exception.status_msg(), message='Failed to offset {!r}'.format(self)) value_error = ValueError('Failed to offset {!r}'.format(self)) raise value_error from None calendar = None return Unit._new_from_existing_ut(_CATEGORY_UDUNIT, ut_unit, calendar)
cf-units
positive
def get(self, query, *, promote=True):
    if not self._max_size:
        return
    entry = self._entries.get(query)
    if entry is None:
        return
    if entry._statement.closed:
        self._entries.pop(query)
        <DeepExtract>
        if entry._cleanup_cb is not None:
            entry._cleanup_cb.cancel()
        </DeepExtract>
        return
    if promote:
        self._entries.move_to_end(query, last=True)
    return entry._statement
def get(self, query, *, promote=True):
    if not self._max_size:
        return
    entry = self._entries.get(query)
    if entry is None:
        return
    if entry._statement.closed:
        self._entries.pop(query)
        if entry._cleanup_cb is not None:
            entry._cleanup_cb.cancel()
        return
    if promote:
        self._entries.move_to_end(query, last=True)
    return entry._statement
asyncpg
positive
def test_post_ratelimited(self): url = 'https://example.com' protocol = self.account.protocol credentials = protocol.credentials protocol.credentials = Credentials(username=credentials.username, password=credentials.password) session = protocol.get_session() <DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = 'foo'.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=200, headers={}, text='foo', content=c, request=req, history=None, url=url) </DeepExtract> (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') self.assertEqual(r.text, 'foo') for err_cls in CONNECTION_ERRORS: <DeepExtract> def raise_exc(**kwargs): raise err_cls() session.post = raise_exc </DeepExtract> with self.assertRaises(err_cls): (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') <DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=401, headers={}, text=text, content=c, request=req, history=None, url=url) </DeepExtract> with self.assertRaises(UnauthorizedError): (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') <DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=999, headers={'connection': 'close'}, text=text, content=c, request=req, history=None, url=url) </DeepExtract> with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') <DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=302, headers={'location': '/ews/genericerrorpage.htm?aspxerrorpath=/ews/exchange.asmx'}, text=text, content=c, request=req, history=None, url=url) </DeepExtract> with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') <DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=503, headers={}, text=text, content=c, request=req, history=None, url=url) </DeepExtract> with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') <DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=302, headers={}, text=text, content=c, request=req, history=None, url=url) </DeepExtract> with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='') <DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda 
**kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=302, headers={'location': url}, text=text, content=c, request=req, history=None, url=url) </DeepExtract> with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='') <DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=302, headers={'location': url + '/foo'}, text=text, content=c, request=req, history=None, url=url) </DeepExtract> with self.assertRaises(RedirectError): (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='') <DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=302, headers={'location': 'https://contoso.com'}, text=text, content=c, request=req, history=None, url=url) </DeepExtract> with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='') import exchangelib.util exchangelib.util.MAX_REDIRECTS = 0 <DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=302, headers={'location': 'https://contoso.com'}, text=text, content=c, request=req, history=None, url=url) </DeepExtract> with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='', allow_redirects=True) <DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=999, headers={'X-CasErrorCode': 'AAARGH!'}, text=text, content=c, request=req, history=None, url=url) </DeepExtract> with self.assertRaises(CASError): (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='') <DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = '<?xml version="1.0" ?><foo></foo>'.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=500, headers={}, text='<?xml version="1.0" ?><foo></foo>', content=c, request=req, history=None, url=url) </DeepExtract> (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='') self.assertEqual(r.text, '<?xml version="1.0" ?><foo></foo>') <DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=999, headers={}, text=text, content=c, request=req, history=None, url=url) </DeepExtract> with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='') protocol.credentials = ServiceAccount(username=credentials.username, password=credentials.password, max_wait=1) 
<DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=503, headers={'connection': 'close'}, text=text, content=c, request=req, history=None, url=url) </DeepExtract> protocol.renew_session = lambda s: s with self.assertRaises(RateLimitError) as rle: (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') self.assertEqual(rle.exception.url, url) self.assertEqual(rle.exception.status_code, 503) protocol.credentials.max_wait = 15 <DeepExtract> req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=503, headers={'connection': 'close'}, text=text, content=c, request=req, history=None, url=url) </DeepExtract> with self.assertRaises(RateLimitError) as rle: (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') self.assertEqual(rle.exception.url, url) self.assertEqual(rle.exception.status_code, 503) protocol.release_session(session) protocol.credentials = credentials
def test_post_ratelimited(self): url = 'https://example.com' protocol = self.account.protocol credentials = protocol.credentials protocol.credentials = Credentials(username=credentials.username, password=credentials.password) session = protocol.get_session() req = namedtuple('request', ['headers'])(headers={}) c = 'foo'.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=200, headers={}, text='foo', content=c, request=req, history=None, url=url) (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') self.assertEqual(r.text, 'foo') for err_cls in CONNECTION_ERRORS: def raise_exc(**kwargs): raise err_cls() session.post = raise_exc with self.assertRaises(err_cls): (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=401, headers={}, text=text, content=c, request=req, history=None, url=url) with self.assertRaises(UnauthorizedError): (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=999, headers={'connection': 'close'}, text=text, content=c, request=req, history=None, url=url) with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=302, headers={'location': '/ews/genericerrorpage.htm?aspxerrorpath=/ews/exchange.asmx'}, text=text, content=c, request=req, history=None, url=url) with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=503, headers={}, text=text, content=c, request=req, history=None, url=url) with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=302, headers={}, text=text, content=c, request=req, history=None, url=url) with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='') req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=302, headers={'location': url}, text=text, content=c, request=req, history=None, url=url) with 
self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='') req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=302, headers={'location': url + '/foo'}, text=text, content=c, request=req, history=None, url=url) with self.assertRaises(RedirectError): (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='') req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=302, headers={'location': 'https://contoso.com'}, text=text, content=c, request=req, history=None, url=url) with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='') import exchangelib.util exchangelib.util.MAX_REDIRECTS = 0 req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=302, headers={'location': 'https://contoso.com'}, text=text, content=c, request=req, history=None, url=url) with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='', allow_redirects=True) req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=999, headers={'X-CasErrorCode': 'AAARGH!'}, text=text, content=c, request=req, history=None, url=url) with self.assertRaises(CASError): (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='') req = namedtuple('request', ['headers'])(headers={}) c = '<?xml version="1.0" ?><foo></foo>'.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=500, headers={}, text='<?xml version="1.0" ?><foo></foo>', content=c, request=req, history=None, url=url) (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='') self.assertEqual(r.text, '<?xml version="1.0" ?><foo></foo>') req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=999, headers={}, text=text, content=c, request=req, history=None, url=url) with self.assertRaises(TransportError): (r, session) = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='') protocol.credentials = ServiceAccount(username=credentials.username, password=credentials.password, max_wait=1) req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=503, headers={'connection': 'close'}, text=text, content=c, request=req, history=None, url=url) protocol.renew_session = lambda s: s with self.assertRaises(RateLimitError) as rle: (r, session) = 
post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') self.assertEqual(rle.exception.url, url) self.assertEqual(rle.exception.status_code, 503) protocol.credentials.max_wait = 15 req = namedtuple('request', ['headers'])(headers={}) c = text.encode('utf-8') session.post = lambda **kwargs: namedtuple('response', ['status_code', 'headers', 'text', 'content', 'request', 'history', 'url'])(status_code=503, headers={'connection': 'close'}, text=text, content=c, request=req, history=None, url=url) with self.assertRaises(RateLimitError) as rle: (r, session) = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='') self.assertEqual(rle.exception.url, url) self.assertEqual(rle.exception.status_code, 503) protocol.release_session(session) protocol.credentials = credentials
exchangelib
positive
def token_oauth_query(self, access_token):
    <DeepExtract>
    self.headers.pop('Authorization', None)
    query = getattr(self, 'query', {})
    for k in ['client_id', 'client_secret', 'access_token']:
        query.pop(k, None)
    self.query = query
    </DeepExtract>
    query = getattr(self, 'query', {})
    query.update({'access_token': access_token})
    self.query = query
def token_oauth_query(self, access_token):
    self.headers.pop('Authorization', None)
    query = getattr(self, 'query', {})
    for k in ['client_id', 'client_secret', 'access_token']:
        query.pop(k, None)
    self.query = query
    query = getattr(self, 'query', {})
    query.update({'access_token': access_token})
    self.query = query
endpoints
positive
def __init__(self, filepath='animation.csv', config=None): <DeepExtract> self.id = None self.original_frames = [] self.transformed_frames = [] self.static_begin_index = 0 self.takeoff_index = 0 self.route_index = 0 self.land_index = 0 self.static_end_index = 0 self.output_frames = [] self.output_frames_min_z = None self.output_frames_takeoff = [] self.output_frames_takeoff_min_z = None self.start_time = None self.filepath = filepath self.config = config self.state = None </DeepExtract> if config is not None: <DeepExtract> if config is None: config = self.config self.reset(self.filepath, config) self.load() if self.original_frames: self.on_config_update(self.config) </DeepExtract>
def __init__(self, filepath='animation.csv', config=None): self.id = None self.original_frames = [] self.transformed_frames = [] self.static_begin_index = 0 self.takeoff_index = 0 self.route_index = 0 self.land_index = 0 self.static_end_index = 0 self.output_frames = [] self.output_frames_min_z = None self.output_frames_takeoff = [] self.output_frames_takeoff_min_z = None self.start_time = None self.filepath = filepath self.config = config self.state = None if config is not None: if config is None: config = self.config self.reset(self.filepath, config) self.load() if self.original_frames: self.on_config_update(self.config) </DeepExtract>
clever-show
positive
def test_None(cursor):
    <DeepExtract>
    cursor.execute(xddl1)
    cursor.execute(ddl1)
    </DeepExtract>
    cursor.execute('insert into %sbooze values (NULL)' % table_prefix)
    cursor.execute('select name from %sbooze' % table_prefix)
    r: typing.Tuple[typing.List[None]] = cursor.fetchall()
    assert len(r) == 1
    assert len(r[0]) == 1
    assert r[0][0] is None, 'NULL value not returned as None'
def test_None(cursor):
    cursor.execute(xddl1)
    cursor.execute(ddl1)
    cursor.execute('insert into %sbooze values (NULL)' % table_prefix)
    cursor.execute('select name from %sbooze' % table_prefix)
    r: typing.Tuple[typing.List[None]] = cursor.fetchall()
    assert len(r) == 1
    assert len(r[0]) == 1
    assert r[0][0] is None, 'NULL value not returned as None'
amazon-redshift-python-driver
positive
def getOrganizationConfigTemplates(apiKey, organizationId): endpoint = '/organizations/%s/configTemplates' % organizationId <DeepExtract> if p_retry > API_MAX_RETRIES: if FLAG_REQUEST_VERBOSE: print('ERROR: Reached max retries') (success, errors, headers, response) = (False, None, None, None) bearerString = 'Bearer ' + str(apiKey) headers = {'Authorization': bearerString} if not p_additionalHeaders is None: headers.update(p_additionalHeaders) query = '' if not p_queryItems is None: query = '?' + urlencode(p_queryItems, True) url = API_BASE_URL + endpoint + query verb = 'GET'.upper() session = NoRebuildAuthSession() try: if FLAG_REQUEST_VERBOSE: print(verb, url) if verb == 'GET': r = session.get(url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) elif verb == 'PUT': if not p_requestBody is None: if FLAG_REQUEST_VERBOSE: print('body', p_requestBody) r = session.put(url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) elif verb == 'POST': if not p_requestBody is None: if FLAG_REQUEST_VERBOSE: print('body', p_requestBody) r = session.post(url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) elif verb == 'DELETE': r = session.delete(url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: (success, errors, headers, response) = (False, None, None, None) except: (success, errors, headers, response) = (False, None, None, None) if FLAG_REQUEST_VERBOSE: print(r.status_code) success = r.status_code in range(200, 299) errors = None responseHeaders = None responseBody = None if r.status_code == API_STATUS_RATE_LIMIT: retryInterval = 2 if 'Retry-After' in r.headers: retryInterval = r.headers['Retry-After'] if 'retry-after' in r.headers: retryInterval = r.headers['retry-after'] if FLAG_REQUEST_VERBOSE: print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval)) time.sleep(int(retryInterval)) (success, errors, responseHeaders, responseBody) = merakiRequest(apiKey, 'GET', endpoint, p_additionalHeaders, p_queryItems, p_requestBody, FLAG_REQUEST_VERBOSE, p_retry + 1) (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) try: rjson = r.json() except: rjson = None if not rjson is None: if 'errors' in rjson: errors = rjson['errors'] if FLAG_REQUEST_VERBOSE: print(errors) else: responseBody = rjson if 'Link' in r.headers: parsedLinks = utils.parse_header_links(r.headers['Link']) for link in parsedLinks: if link['rel'] == 'next': if FLAG_REQUEST_VERBOSE: print('Next page:', link['url']) splitLink = link['url'].split('/api/v1') (success, errors, responseHeaders, nextBody) = merakiRequest(apiKey, 'GET', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=FLAG_REQUEST_VERBOSE) if success: if not responseBody is None: responseBody = responseBody + nextBody else: responseBody = None (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) </DeepExtract> return (success, errors, response)
def getOrganizationConfigTemplates(apiKey, organizationId): endpoint = '/organizations/%s/configTemplates' % organizationId if p_retry > API_MAX_RETRIES: if FLAG_REQUEST_VERBOSE: print('ERROR: Reached max retries') (success, errors, headers, response) = (False, None, None, None) bearerString = 'Bearer ' + str(apiKey) headers = {'Authorization': bearerString} if not p_additionalHeaders is None: headers.update(p_additionalHeaders) query = '' if not p_queryItems is None: query = '?' + urlencode(p_queryItems, True) url = API_BASE_URL + endpoint + query verb = 'GET'.upper() session = NoRebuildAuthSession() try: if FLAG_REQUEST_VERBOSE: print(verb, url) if verb == 'GET': r = session.get(url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) elif verb == 'PUT': if not p_requestBody is None: if FLAG_REQUEST_VERBOSE: print('body', p_requestBody) r = session.put(url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) elif verb == 'POST': if not p_requestBody is None: if FLAG_REQUEST_VERBOSE: print('body', p_requestBody) r = session.post(url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) elif verb == 'DELETE': r = session.delete(url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)) else: (success, errors, headers, response) = (False, None, None, None) except: (success, errors, headers, response) = (False, None, None, None) if FLAG_REQUEST_VERBOSE: print(r.status_code) success = r.status_code in range(200, 299) errors = None responseHeaders = None responseBody = None if r.status_code == API_STATUS_RATE_LIMIT: retryInterval = 2 if 'Retry-After' in r.headers: retryInterval = r.headers['Retry-After'] if 'retry-after' in r.headers: retryInterval = r.headers['retry-after'] if FLAG_REQUEST_VERBOSE: print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval)) time.sleep(int(retryInterval)) (success, errors, responseHeaders, responseBody) = merakiRequest(apiKey, 'GET', endpoint, p_additionalHeaders, p_queryItems, p_requestBody, FLAG_REQUEST_VERBOSE, p_retry + 1) (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) try: rjson = r.json() except: rjson = None if not rjson is None: if 'errors' in rjson: errors = rjson['errors'] if FLAG_REQUEST_VERBOSE: print(errors) else: responseBody = rjson if 'Link' in r.headers: parsedLinks = utils.parse_header_links(r.headers['Link']) for link in parsedLinks: if link['rel'] == 'next': if FLAG_REQUEST_VERBOSE: print('Next page:', link['url']) splitLink = link['url'].split('/api/v1') (success, errors, responseHeaders, nextBody) = merakiRequest(apiKey, 'GET', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=FLAG_REQUEST_VERBOSE) if success: if not responseBody is None: responseBody = responseBody + nextBody else: responseBody = None (success, errors, headers, response) = (success, errors, responseHeaders, responseBody) return (success, errors, response)
automation-scripts
positive
def test_resource_meta_unique(self):
    <DeepExtract>
    if resource_json_definition is None:
        resource_json_definition = {}
    if resource_json_definitions is None:
        resource_json_definitions = {}
    service_context = ServiceContext(service_name='test', resource_json_definitions=resource_json_definitions, service_model=service_model, service_waiter_model=None)
    queue_cls = self.factory.load_from_definition(resource_name='Queue', single_resource_json_definition=resource_json_definition, service_context=service_context)
    </DeepExtract>
    queue1 = queue_cls()
    queue2 = queue_cls()
    assert queue1.meta == queue2.meta
    queue1.meta.data = {'id': 'foo'}
    queue2.meta.data = {'id': 'bar'}
    assert queue_cls.meta != queue1.meta
    assert queue1.meta != queue2.meta
    assert queue1.meta != 'bad-value'
def test_resource_meta_unique(self):
    if resource_json_definition is None:
        resource_json_definition = {}
    if resource_json_definitions is None:
        resource_json_definitions = {}
    service_context = ServiceContext(service_name='test', resource_json_definitions=resource_json_definitions, service_model=service_model, service_waiter_model=None)
    queue_cls = self.factory.load_from_definition(resource_name='Queue', single_resource_json_definition=resource_json_definition, service_context=service_context)
    queue1 = queue_cls()
    queue2 = queue_cls()
    assert queue1.meta == queue2.meta
    queue1.meta.data = {'id': 'foo'}
    queue2.meta.data = {'id': 'bar'}
    assert queue_cls.meta != queue1.meta
    assert queue1.meta != queue2.meta
    assert queue1.meta != 'bad-value'
boto3
positive
def bulk_merge_concepts(self, input_ids, concept_lists):
    """ bulk merge concepts from a list of input ids

    Args:
        input_ids: a list of input IDs
        concept_lists: a list of concept lists, each one corresponding
                       to a listed input ID and filled with concepts to
                       be added to that input

    Returns:
        an Input object

    Examples:
        >>> app.inputs.bulk_merge_concepts('id', [[('cat',True), ('dog',False)]])
    """
    if len(input_ids) != len(concept_lists):
        raise UserError('Argument error. please check')
    inputs = []
    for (input_id, concept_list) in zip(input_ids, concept_lists):
        concepts = []
        not_concepts = []
        for (concept_id, value) in concept_list:
            if value is True:
                concepts.append(concept_id)
            else:
                not_concepts.append(concept_id)
        image = Image(image_id=input_id, concepts=concepts, not_concepts=not_concepts)
        inputs.append(image)
    <DeepExtract>
    ret = self.api.patch_inputs(action='merge', inputs=inputs)
    objs = [self._to_obj(item) for item in ret['inputs']]
    res = objs
    </DeepExtract>
    return res
def bulk_merge_concepts(self, input_ids, concept_lists):
    """ bulk merge concepts from a list of input ids

    Args:
        input_ids: a list of input IDs
        concept_lists: a list of concept lists, each one corresponding
                       to a listed input ID and filled with concepts to
                       be added to that input

    Returns:
        an Input object

    Examples:
        >>> app.inputs.bulk_merge_concepts('id', [[('cat',True), ('dog',False)]])
    """
    if len(input_ids) != len(concept_lists):
        raise UserError('Argument error. please check')
    inputs = []
    for (input_id, concept_list) in zip(input_ids, concept_lists):
        concepts = []
        not_concepts = []
        for (concept_id, value) in concept_list:
            if value is True:
                concepts.append(concept_id)
            else:
                not_concepts.append(concept_id)
        image = Image(image_id=input_id, concepts=concepts, not_concepts=not_concepts)
        inputs.append(image)
    ret = self.api.patch_inputs(action='merge', inputs=inputs)
    objs = [self._to_obj(item) for item in ret['inputs']]
    res = objs
    return res
clarifai-python
positive
def series(self):
    """Generator of single series data (no dates are included)."""
    <DeepExtract>
    raise NotImplementedError
    </DeepExtract>
    if len(data):
        for c in range(self.count()):
            yield data[:, c]
    else:
        raise StopIteration
def series(self):
    """Generator of single series data (no dates are included)."""
    raise NotImplementedError
    if len(data):
        for c in range(self.count()):
            yield data[:, c]
    else:
        raise StopIteration
dynts
positive
def open_sub_channel(self, term): """ Opens a sub-channel of communication by executing a new shell on the SSH server using OpenSSH's `Master mode <http://en.wikibooks.org/wiki/OpenSSH/Cookbook/Multiplexing>`_ capability (it spawns a new slave) and returns the resulting :class:`termio.Multiplex` instance. If a slave has already been opened for this purpose it will re-use the existing channel. """ term = int(term) global OPEN_SUBCHANNELS if term in OPEN_SUBCHANNELS and OPEN_SUBCHANNELS[term].isalive(): return OPEN_SUBCHANNELS[term] self.ssh_log.info('Opening SSH sub-channel', metadata={'term': term}) session = self.ws.session session_dir = self.ws.settings['session_dir'] session_path = os.path.join(session_dir, session) if not session_path: raise SSHMultiplexingException(_('SSH Plugin: Unable to open slave sub-channel.')) socket_path = self.loc_terms[term]['ssh_socket'] if not socket_path: raise SSHMultiplexingException(_('SSH Plugin: Unable to open slave sub-channel.')) <DeepExtract> user = self.current_user['upn'] users_dir = os.path.join(self.ws.settings['user_dir'], user) old_ssh_dir = os.path.join(users_dir, 'ssh') users_ssh_dir = os.path.join(users_dir, '.ssh') if os.path.exists(old_ssh_dir): if not os.path.exists(users_ssh_dir): self.ssh_log.info(_("Renaming %s's 'ssh' directory to '.ssh'." % user)) os.rename(old_ssh_dir, users_ssh_dir) else: self.ssh_log.warning(_("Both an 'ssh' and '.ssh' directory exist for user %s. Using the .ssh directory." % user)) users_ssh_dir = users_ssh_dir </DeepExtract> ssh_config_path = os.path.join(users_ssh_dir, 'config') if not os.path.exists(ssh_config_path): with open(ssh_config_path, 'w') as f: f.write('\n') ssh = which('ssh') ssh_command = "%s -x -S'%s' -F'%s' go_ssh_remote_cmd" % (ssh, socket_path, ssh_config_path) OPEN_SUBCHANNELS[term] = m = self.new_multiplex(ssh_command, '%s (sub)' % term) m.spawn(rows=100, cols=200) m.writeline(u'echo -e "\\033]0;Term %s sub-channel\\007"' % term) return m
def open_sub_channel(self, term): """ Opens a sub-channel of communication by executing a new shell on the SSH server using OpenSSH's `Master mode <http://en.wikibooks.org/wiki/OpenSSH/Cookbook/Multiplexing>`_ capability (it spawns a new slave) and returns the resulting :class:`termio.Multiplex` instance. If a slave has already been opened for this purpose it will re-use the existing channel. """ term = int(term) global OPEN_SUBCHANNELS if term in OPEN_SUBCHANNELS and OPEN_SUBCHANNELS[term].isalive(): return OPEN_SUBCHANNELS[term] self.ssh_log.info('Opening SSH sub-channel', metadata={'term': term}) session = self.ws.session session_dir = self.ws.settings['session_dir'] session_path = os.path.join(session_dir, session) if not session_path: raise SSHMultiplexingException(_('SSH Plugin: Unable to open slave sub-channel.')) socket_path = self.loc_terms[term]['ssh_socket'] if not socket_path: raise SSHMultiplexingException(_('SSH Plugin: Unable to open slave sub-channel.')) user = self.current_user['upn'] users_dir = os.path.join(self.ws.settings['user_dir'], user) old_ssh_dir = os.path.join(users_dir, 'ssh') users_ssh_dir = os.path.join(users_dir, '.ssh') if os.path.exists(old_ssh_dir): if not os.path.exists(users_ssh_dir): self.ssh_log.info(_("Renaming %s's 'ssh' directory to '.ssh'." % user)) os.rename(old_ssh_dir, users_ssh_dir) else: self.ssh_log.warning(_("Both an 'ssh' and '.ssh' directory exist for user %s. Using the .ssh directory." % user)) users_ssh_dir = users_ssh_dir ssh_config_path = os.path.join(users_ssh_dir, 'config') if not os.path.exists(ssh_config_path): with open(ssh_config_path, 'w') as f: f.write('\n') ssh = which('ssh') ssh_command = "%s -x -S'%s' -F'%s' go_ssh_remote_cmd" % (ssh, socket_path, ssh_config_path) OPEN_SUBCHANNELS[term] = m = self.new_multiplex(ssh_command, '%s (sub)' % term) m.spawn(rows=100, cols=200) m.writeline(u'echo -e "\\033]0;Term %s sub-channel\\007"' % term) return m
django-gateone
positive
def test_set_select(self):
    node = brewery.nodes.SetSelectNode(field='type', value_set=['a'])
    <DeepExtract>
    node.inputs = [self.input]
    node.outputs = [self.output]
    </DeepExtract>
    <DeepExtract>
    if not pipe:
        pipe = self.input
    pipe.empty()
    pipe.fields = brewery.FieldList(['id', 'id2', 'q', 'type', 'class'])
    for i in range(1, 10):
        pipe.put([i, i, float(i) / 4, 'a', 'x'])
        pipe.put([i, i * 10, float(i) / 4, 'a', 'y'])
        pipe.put([i * 10, i * 100, float(i) / 4, 'b', 'x'])
        pipe.put([i * 100, i * 1000, float(i) / 4, 'c', 'y'])
    </DeepExtract>
    <DeepExtract>
    node.initialize()
    for output in node.outputs:
        output.fields = node.output_fields
    </DeepExtract>
    node.run()
    node.finalize()
    self.assertEqual(18, len(self.output.buffer))
def test_set_select(self):
    node = brewery.nodes.SetSelectNode(field='type', value_set=['a'])
    node.inputs = [self.input]
    node.outputs = [self.output]
    if not pipe:
        pipe = self.input
    pipe.empty()
    pipe.fields = brewery.FieldList(['id', 'id2', 'q', 'type', 'class'])
    for i in range(1, 10):
        pipe.put([i, i, float(i) / 4, 'a', 'x'])
        pipe.put([i, i * 10, float(i) / 4, 'a', 'y'])
        pipe.put([i * 10, i * 100, float(i) / 4, 'b', 'x'])
        pipe.put([i * 100, i * 1000, float(i) / 4, 'c', 'y'])
    node.initialize()
    for output in node.outputs:
        output.fields = node.output_fields
    node.run()
    node.finalize()
    self.assertEqual(18, len(self.output.buffer))
brewery
positive
def train_model(): """Trains mobilenet_v1.""" <DeepExtract> g = tf.Graph() with g.as_default(), tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)): (inputs, labels) = imagenet_input(is_training=True) with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope(is_training=True)): (logits, _) = mobilenet_v1.mobilenet_v1(inputs, is_training=True, depth_multiplier=FLAGS.depth_multiplier, num_classes=FLAGS.num_classes) tf.losses.softmax_cross_entropy(labels, logits) if FLAGS.quantize: tf.contrib.quantize.create_training_graph(quant_delay=get_quant_delay()) total_loss = tf.losses.get_total_loss(name='total_loss') num_epochs_per_decay = 2.5 imagenet_size = 1271167 decay_steps = int(imagenet_size / FLAGS.batch_size * num_epochs_per_decay) learning_rate = tf.train.exponential_decay(get_learning_rate(), tf.train.get_or_create_global_step(), decay_steps, _LEARNING_RATE_DECAY_FACTOR, staircase=True) opt = tf.train.GradientDescentOptimizer(learning_rate) train_tensor = slim.learning.create_train_op(total_loss, optimizer=opt) slim.summaries.add_scalar_summary(total_loss, 'total_loss', 'losses') slim.summaries.add_scalar_summary(learning_rate, 'learning_rate', 'training') (g, train_tensor) = (g, train_tensor) </DeepExtract> with g.as_default(): slim.learning.train(train_tensor, FLAGS.checkpoint_dir, is_chief=FLAGS.task == 0, master=FLAGS.master, log_every_n_steps=FLAGS.log_every_n_steps, graph=g, number_of_steps=FLAGS.number_of_steps, save_summaries_secs=FLAGS.save_summaries_secs, save_interval_secs=FLAGS.save_interval_secs, init_fn=get_checkpoint_init_fn(), global_step=tf.train.get_global_step())
def train_model(): """Trains mobilenet_v1.""" g = tf.Graph() with g.as_default(), tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)): (inputs, labels) = imagenet_input(is_training=True) with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope(is_training=True)): (logits, _) = mobilenet_v1.mobilenet_v1(inputs, is_training=True, depth_multiplier=FLAGS.depth_multiplier, num_classes=FLAGS.num_classes) tf.losses.softmax_cross_entropy(labels, logits) if FLAGS.quantize: tf.contrib.quantize.create_training_graph(quant_delay=get_quant_delay()) total_loss = tf.losses.get_total_loss(name='total_loss') num_epochs_per_decay = 2.5 imagenet_size = 1271167 decay_steps = int(imagenet_size / FLAGS.batch_size * num_epochs_per_decay) learning_rate = tf.train.exponential_decay(get_learning_rate(), tf.train.get_or_create_global_step(), decay_steps, _LEARNING_RATE_DECAY_FACTOR, staircase=True) opt = tf.train.GradientDescentOptimizer(learning_rate) train_tensor = slim.learning.create_train_op(total_loss, optimizer=opt) slim.summaries.add_scalar_summary(total_loss, 'total_loss', 'losses') slim.summaries.add_scalar_summary(learning_rate, 'learning_rate', 'training') (g, train_tensor) = (g, train_tensor) with g.as_default(): slim.learning.train(train_tensor, FLAGS.checkpoint_dir, is_chief=FLAGS.task == 0, master=FLAGS.master, log_every_n_steps=FLAGS.log_every_n_steps, graph=g, number_of_steps=FLAGS.number_of_steps, save_summaries_secs=FLAGS.save_summaries_secs, save_interval_secs=FLAGS.save_interval_secs, init_fn=get_checkpoint_init_fn(), global_step=tf.train.get_global_step())
epos
positive
def train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs, devices=d2l.try_all_gpus()): """Train a model with mutiple GPUs (defined in Chapter 13). Defined in :numref:`sec_image_augmentation`""" (timer, num_batches) = (d2l.Timer(), len(train_iter)) animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0, 1], legend=['train loss', 'train acc', 'test acc']) net = nn.DataParallel(net, device_ids=devices).to(devices[0]) for epoch in range(num_epochs): metric = d2l.Accumulator(4) for (i, (features, labels)) in enumerate(train_iter): timer.start() <DeepExtract> if isinstance(features, list): features = [x.to(devices[0]) for x in features] else: features = features.to(devices[0]) labels = labels.to(devices[0]) net.train() trainer.zero_grad() pred = net(features) l = loss(pred, labels) l.sum().backward() trainer.step() train_loss_sum = l.sum() train_acc_sum = d2l.accuracy(pred, labels) (l, acc) = (train_loss_sum, train_acc_sum) </DeepExtract> metric.add(l, acc, labels.shape[0], labels.numel()) timer.stop() if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1: animator.add(epoch + (i + 1) / num_batches, (metric[0] / metric[2], metric[1] / metric[3], None)) test_acc = d2l.evaluate_accuracy_gpu(net, test_iter) animator.add(epoch + 1, (None, None, test_acc)) print(f'loss {metric[0] / metric[2]:.3f}, train acc {metric[1] / metric[3]:.3f}, test acc {test_acc:.3f}') print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec on {str(devices)}')
def train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs, devices=d2l.try_all_gpus()): """Train a model with mutiple GPUs (defined in Chapter 13). Defined in :numref:`sec_image_augmentation`""" (timer, num_batches) = (d2l.Timer(), len(train_iter)) animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0, 1], legend=['train loss', 'train acc', 'test acc']) net = nn.DataParallel(net, device_ids=devices).to(devices[0]) for epoch in range(num_epochs): metric = d2l.Accumulator(4) for (i, (features, labels)) in enumerate(train_iter): timer.start() if isinstance(features, list): features = [x.to(devices[0]) for x in features] else: features = features.to(devices[0]) labels = labels.to(devices[0]) net.train() trainer.zero_grad() pred = net(features) l = loss(pred, labels) l.sum().backward() trainer.step() train_loss_sum = l.sum() train_acc_sum = d2l.accuracy(pred, labels) (l, acc) = (train_loss_sum, train_acc_sum) metric.add(l, acc, labels.shape[0], labels.numel()) timer.stop() if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1: animator.add(epoch + (i + 1) / num_batches, (metric[0] / metric[2], metric[1] / metric[3], None)) test_acc = d2l.evaluate_accuracy_gpu(net, test_iter) animator.add(epoch + 1, (None, None, test_acc)) print(f'loss {metric[0] / metric[2]:.3f}, train acc {metric[1] / metric[3]:.3f}, test acc {test_acc:.3f}') print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec on {str(devices)}')
d2l-vn
positive
def __init__(self, keyword, comment): self.starttime = time.time() self.errors = 0 if keyword == '': self.oOutput = None else: self.oOutput = cOutput('%s-%s-%s.log' % (os.path.splitext(os.path.basename(sys.argv[0]))[0], keyword, self.FormatTime())) <DeepExtract> if self.head: if self.headCounter < 10: self.LineSub('Start', eol) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Start', eol]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Start', eol]] else: self.LineSub('Start', eol) </DeepExtract> <DeepExtract> if self.head: if self.headCounter < 10: self.LineSub('UTC', '%04d%02d%02d-%02d%02d%02d' % time.gmtime(time.time())[0:6]) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['UTC', '%04d%02d%02d-%02d%02d%02d' % time.gmtime(time.time())[0:6]]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['UTC', '%04d%02d%02d-%02d%02d%02d' % time.gmtime(time.time())[0:6]]] else: self.LineSub('UTC', '%04d%02d%02d-%02d%02d%02d' % time.gmtime(time.time())[0:6]) </DeepExtract> <DeepExtract> if self.head: if self.headCounter < 10: self.LineSub('Comment', comment) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Comment', comment]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Comment', comment]] else: self.LineSub('Comment', comment) </DeepExtract> <DeepExtract> if self.head: if self.headCounter < 10: self.LineSub('Args', repr(sys.argv)) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Args', repr(sys.argv)]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Args', repr(sys.argv)]] else: self.LineSub('Args', repr(sys.argv)) </DeepExtract> <DeepExtract> if self.head: if self.headCounter < 10: self.LineSub('Version', __version__) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Version', __version__]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Version', __version__]] else: self.LineSub('Version', __version__) </DeepExtract> <DeepExtract> if self.head: if self.headCounter < 10: self.LineSub('Python', repr(sys.version_info)) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Python', repr(sys.version_info)]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Python', repr(sys.version_info)]] else: self.LineSub('Python', repr(sys.version_info)) </DeepExtract> <DeepExtract> if self.head: if self.headCounter < 10: self.LineSub('Platform', sys.platform) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Platform', sys.platform]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Platform', sys.platform]] else: self.LineSub('Platform', sys.platform) </DeepExtract> <DeepExtract> if self.head: if self.headCounter < 10: self.LineSub('CWD', repr(os.getcwd())) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['CWD', repr(os.getcwd())]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['CWD', repr(os.getcwd())]] else: self.LineSub('CWD', repr(os.getcwd())) </DeepExtract>
def __init__(self, keyword, comment): self.starttime = time.time() self.errors = 0 if keyword == '': self.oOutput = None else: self.oOutput = cOutput('%s-%s-%s.log' % (os.path.splitext(os.path.basename(sys.argv[0]))[0], keyword, self.FormatTime())) if self.head: if self.headCounter < 10: self.LineSub('Start', eol) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Start', eol]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Start', eol]] else: self.LineSub('Start', eol) if self.head: if self.headCounter < 10: self.LineSub('UTC', '%04d%02d%02d-%02d%02d%02d' % time.gmtime(time.time())[0:6]) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['UTC', '%04d%02d%02d-%02d%02d%02d' % time.gmtime(time.time())[0:6]]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['UTC', '%04d%02d%02d-%02d%02d%02d' % time.gmtime(time.time())[0:6]]] else: self.LineSub('UTC', '%04d%02d%02d-%02d%02d%02d' % time.gmtime(time.time())[0:6]) if self.head: if self.headCounter < 10: self.LineSub('Comment', comment) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Comment', comment]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Comment', comment]] else: self.LineSub('Comment', comment) if self.head: if self.headCounter < 10: self.LineSub('Args', repr(sys.argv)) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Args', repr(sys.argv)]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Args', repr(sys.argv)]] else: self.LineSub('Args', repr(sys.argv)) if self.head: if self.headCounter < 10: self.LineSub('Version', __version__) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Version', __version__]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Version', __version__]] else: self.LineSub('Version', __version__) if self.head: if self.headCounter < 10: self.LineSub('Python', repr(sys.version_info)) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Python', repr(sys.version_info)]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Python', repr(sys.version_info)]] else: self.LineSub('Python', repr(sys.version_info)) if self.head: if self.headCounter < 10: self.LineSub('Platform', sys.platform) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Platform', sys.platform]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['Platform', sys.platform]] else: self.LineSub('Platform', sys.platform) if self.head: if self.headCounter < 10: self.LineSub('CWD', repr(os.getcwd())) elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['CWD', repr(os.getcwd())]] self.headCounter += 1 elif self.tail: self.tailQueue = self.tailQueue[-9:] + [['CWD', repr(os.getcwd())]] else: self.LineSub('CWD', repr(os.getcwd())) </DeepExtract>
Beta
positive
def get_node_list(config, label_selector=None):
    <DeepExtract>
    if config is None:
        k8sconfig.load_incluster_config()
        k8s_api = k8sclient.CoreV1Api()
    else:
        client = k8sclient.ApiClient(configuration=config)
        k8s_api = k8sclient.CoreV1Api(api_client=client)
    </DeepExtract>
    if label_selector:
        nodes = k8s_api.list_node(label_selector=label_selector).to_dict()
    else:
        nodes = k8s_api.list_node().to_dict()
    return nodes['items']
def get_node_list(config, label_selector=None):
    if config is None:
        k8sconfig.load_incluster_config()
        k8s_api = k8sclient.CoreV1Api()
    else:
        client = k8sclient.ApiClient(configuration=config)
        k8s_api = k8sclient.CoreV1Api(api_client=client)
    if label_selector:
        nodes = k8s_api.list_node(label_selector=label_selector).to_dict()
    else:
        nodes = k8s_api.list_node().to_dict()
    return nodes['items']
CPU-Manager-for-Kubernetes
positive
def test_unbounded_stop(self):
    <DeepExtract>
    self.spec = mod.Specifications(spec_type='incl_rec', specs_strings=['5:'], header=None, infile_item_count=100)
    </DeepExtract>
    assert len(self.spec.specs_final) == 1
    assert self.flatten_spec(0) == (5, 100, 1)
    <DeepExtract>
    self.spec = mod.Specifications(spec_type='incl_rec', specs_strings=['5::'], header=None, infile_item_count=100)
    </DeepExtract>
    assert len(self.spec.specs_final) == 1
    assert self.flatten_spec(0) == (5, 100, 1)
def test_unbounded_stop(self):
    self.spec = mod.Specifications(spec_type='incl_rec', specs_strings=['5:'], header=None, infile_item_count=100)
    assert len(self.spec.specs_final) == 1
    assert self.flatten_spec(0) == (5, 100, 1)
    self.spec = mod.Specifications(spec_type='incl_rec', specs_strings=['5::'], header=None, infile_item_count=100)
    assert len(self.spec.specs_final) == 1
    assert self.flatten_spec(0) == (5, 100, 1)
DataGristle
positive
def onepredict(self): prefix = 'onetrain' if self.test else '' self.prev_name = None ds_cls = ValDataset if not self.test else SequentialDataset val_index = [i for i in range(4)] val_dataset = ds_cls(self.ds, val_index, stage='test', config=self.config) val_dl = PytorchDataLoader(val_dataset, batch_size=self.config.test_batch_size, num_workers=self.num_workers, drop_last=False) <DeepExtract> with warnings.catch_warnings(): warnings.simplefilter('ignore', SourceChangeWarning) model_path = self.config.pretrain_model_path model = None if '_checkpoint' in model_path: checkpoint = torch.load(os.path.join(self.config.results_dir, 'weights', self.config.folder, 'onetrain_checkpoint.pth')) if self.config.folder == 'resnet34_': model = resnet_unet.ResnetUNet(num_classes=1, num_channels=5) elif self.config.folder == 'denseunet_': model = dense_unet.DenseUNet(in_channels=5, n_classes=1) model = torch.nn.DataParallel(model).cuda() pretrained_dict = checkpoint['state_dict'] model_dict = model.state_dict() model_dict.update(pretrained_dict) else: model = torch.load(model_path) model.eval() model = model </DeepExtract> pbar = val_dl if self.config.dbg else tqdm.tqdm(val_dl, total=len(val_dl)) for data in pbar: self.show_mask = 'mask' in data and self.show_mask if 'mask' not in data: self.need_dice = False <DeepExtract> dsize = data['image'].size() (xinsidx, xineidx, xcpsidx, xcpeidx, xoutsidx, xouteidx) = self.index_in_copy_out(dsize[2]) (yinsidx, yineidx, ycpsidx, ycpeidx, youtsidx, youteidx) = self.index_in_copy_out(dsize[3]) predicted = np.zeros((dsize[0], 1, dsize[2], dsize[3])) for i in range(len(xinsidx)): for j in range(len(yinsidx)): samples = torch.autograd.Variable(data['image'][:, :, xinsidx[i]:xineidx[i], yinsidx[j]:yineidx[j]], volatile=True).cuda() prediction = predict(model, samples, flips=self.flips) predicted[:, :, xoutsidx[i]:xouteidx[i], youtsidx[j]:youteidx[j]] = prediction[:, :, xcpsidx[i]:xcpeidx[i], ycpsidx[j]:ycpeidx[j]] predicted = predicted </DeepExtract> <DeepExtract> raise NotImplementedError </DeepExtract> if not self.config.dbg and self.need_dice: pbar.set_postfix(dice='{:.5f}'.format(np.mean(self.dice))) <DeepExtract> self.full_pred = self.cut_border(self.full_pred) if self.full_image is not None: self.full_image = self.cut_border(self.full_image) if self.full_mask is not None: self.full_mask = self.cut_border(self.full_mask) if np.any(self.full_pred > 0.5) or np.any(self.full_mask >= 1): d = 1 - dice(self.full_pred.flatten() > 0.5, self.full_mask.flatten() >= 1) self.dice.append(d) if self.config.dbg: print(self.prev_name, ' dice: ', d) else: return if self.config.dbg: self.visualize(show_light=True) if self.config.save_images: self.save(self.prev_name, prefix=prefix) </DeepExtract> if self.need_dice: print(np.mean(self.dice))
def onepredict(self): prefix = 'onetrain' if self.test else '' self.prev_name = None ds_cls = ValDataset if not self.test else SequentialDataset val_index = [i for i in range(4)] val_dataset = ds_cls(self.ds, val_index, stage='test', config=self.config) val_dl = PytorchDataLoader(val_dataset, batch_size=self.config.test_batch_size, num_workers=self.num_workers, drop_last=False) with warnings.catch_warnings(): warnings.simplefilter('ignore', SourceChangeWarning) model_path = self.config.pretrain_model_path model = None if '_checkpoint' in model_path: checkpoint = torch.load(os.path.join(self.config.results_dir, 'weights', self.config.folder, 'onetrain_checkpoint.pth')) if self.config.folder == 'resnet34_': model = resnet_unet.ResnetUNet(num_classes=1, num_channels=5) elif self.config.folder == 'denseunet_': model = dense_unet.DenseUNet(in_channels=5, n_classes=1) model = torch.nn.DataParallel(model).cuda() pretrained_dict = checkpoint['state_dict'] model_dict = model.state_dict() model_dict.update(pretrained_dict) else: model = torch.load(model_path) model.eval() model = model pbar = val_dl if self.config.dbg else tqdm.tqdm(val_dl, total=len(val_dl)) for data in pbar: self.show_mask = 'mask' in data and self.show_mask if 'mask' not in data: self.need_dice = False dsize = data['image'].size() (xinsidx, xineidx, xcpsidx, xcpeidx, xoutsidx, xouteidx) = self.index_in_copy_out(dsize[2]) (yinsidx, yineidx, ycpsidx, ycpeidx, youtsidx, youteidx) = self.index_in_copy_out(dsize[3]) predicted = np.zeros((dsize[0], 1, dsize[2], dsize[3])) for i in range(len(xinsidx)): for j in range(len(yinsidx)): samples = torch.autograd.Variable(data['image'][:, :, xinsidx[i]:xineidx[i], yinsidx[j]:yineidx[j]], volatile=True).cuda() prediction = predict(model, samples, flips=self.flips) predicted[:, :, xoutsidx[i]:xouteidx[i], youtsidx[j]:youteidx[j]] = prediction[:, :, xcpsidx[i]:xcpeidx[i], ycpsidx[j]:ycpeidx[j]] predicted = predicted raise NotImplementedError if not self.config.dbg and self.need_dice: pbar.set_postfix(dice='{:.5f}'.format(np.mean(self.dice))) self.full_pred = self.cut_border(self.full_pred) if self.full_image is not None: self.full_image = self.cut_border(self.full_image) if self.full_mask is not None: self.full_mask = self.cut_border(self.full_mask) if np.any(self.full_pred > 0.5) or np.any(self.full_mask >= 1): d = 1 - dice(self.full_pred.flatten() > 0.5, self.full_mask.flatten() >= 1) self.dice.append(d) if self.config.dbg: print(self.prev_name, ' dice: ', d) else: return if self.config.dbg: self.visualize(show_light=True) if self.config.save_images: self.save(self.prev_name, prefix=prefix) if self.need_dice: print(np.mean(self.dice))
Danesfield
positive
def __call__(self): <DeepExtract> self.tile_type = self.request.form.get('tile-type') self.tile_id = self.request.form.get('tile-id') self.uuid = self.request.form.get('uuid') </DeepExtract> return self.render()
def __call__(self): self.tile_type = self.request.form.get('tile-type') self.tile_id = self.request.form.get('tile-id') self.uuid = self.request.form.get('uuid') return self.render()
collective.cover
positive
def test_perspective_primary_draw_metadata(display): <DeepExtract> perspective = display.perspectives[1] </DeepExtract> feed = Feed(url='feed url', title='feed title', description='feed description', link='feed link', last_build_date='feed last_build_date', copyright='feed copyright', episodes=[]) episode = Episode(feed, title='episode title', description='episode description', link='episode link', pubdate='episode pubdate', copyright='episode copyright', enclosure='episode enclosure') display.database.replace_feed(feed) display.database.replace_episode(feed, episode) perspective._draw_metadata(perspective._metadata_window) perspective._draw_metadata(perspective._metadata_window)
def test_perspective_primary_draw_metadata(display): perspective = display.perspectives[1] feed = Feed(url='feed url', title='feed title', description='feed description', link='feed link', last_build_date='feed last_build_date', copyright='feed copyright', episodes=[]) episode = Episode(feed, title='episode title', description='episode description', link='episode link', pubdate='episode pubdate', copyright='episode copyright', enclosure='episode enclosure') display.database.replace_feed(feed) display.database.replace_episode(feed, episode) perspective._draw_metadata(perspective._metadata_window) perspective._draw_metadata(perspective._metadata_window)
castero
positive
def delete_config(self, conf_name, category): """ Deletes a configuration from file system. """ <DeepExtract> category_dir = '' if category == 'Default' else '/' + category log_path = os.path.join(cfclient.config_path, 'log' + category_dir) </DeepExtract> conf_path = os.path.join(log_path, conf_name) + '.json' if not os.path.exists(conf_path): conf_path = os.path.join(log_path, conf_name[0].lower() + conf_name[1:] + '.json') if not os.path.exists(conf_path): logger.warning('Failed to find log-config %s' % conf_path) return os.remove(conf_path) for conf in self._log_configs[category]: if conf.name == conf_name: self._log_configs[category].remove(conf)
def delete_config(self, conf_name, category): """ Deletes a configuration from file system. """ category_dir = '' if category == 'Default' else '/' + category log_path = os.path.join(cfclient.config_path, 'log' + category_dir) conf_path = os.path.join(log_path, conf_name) + '.json' if not os.path.exists(conf_path): conf_path = os.path.join(log_path, conf_name[0].lower() + conf_name[1:] + '.json') if not os.path.exists(conf_path): logger.warning('Failed to find log-config %s' % conf_path) return os.remove(conf_path) for conf in self._log_configs[category]: if conf.name == conf_name: self._log_configs[category].remove(conf)
crazyflie-clients-python
positive
def download(self, **kwargs): if 'json_output' in kwargs and kwargs['json_output']: json_output.output(self) elif 'info_only' in kwargs and kwargs['info_only']: if 'stream_id' in kwargs and kwargs['stream_id']: stream_id = kwargs['stream_id'] if 'index' not in kwargs: <DeepExtract> maybe_print('site: %s' % self.__class__.name) maybe_print('title: %s' % self.title) if stream_id: print('stream:') self.p_stream(stream_id) elif stream_id is None: print('stream: # Best quality') stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag'] self.p_stream(stream_id) elif stream_id == []: print('streams: # Available quality and codecs') if self.dash_streams: print(' [ DASH ] %s' % ('_' * 36)) itags = sorted(self.dash_streams, key=lambda i: -self.dash_streams[i]['size']) for stream in itags: self.p_stream(stream) print(' [ DEFAULT ] %s' % ('_' * 33)) for stream in self.streams_sorted: self.p_stream(stream['id'] if 'id' in stream else stream['itag']) if self.audiolang: print('audio-languages:') for i in self.audiolang: print(' - lang: {}'.format(i['lang'])) print(' download-url: {}\n'.format(i['url'])) </DeepExtract> else: <DeepExtract> if stream_id in self.streams: stream = self.streams[stream_id] else: stream = self.dash_streams[stream_id] maybe_print(' - title: %s' % self.title) print(' size: %s MiB (%s bytes)' % (round(stream['size'] / 1048576, 1), stream['size'])) print(' url: %s' % self.url) print() </DeepExtract> elif 'index' not in kwargs: <DeepExtract> maybe_print('site: %s' % self.__class__.name) maybe_print('title: %s' % self.title) if []: print('stream:') self.p_stream([]) elif [] is None: print('stream: # Best quality') [] = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag'] self.p_stream([]) elif [] == []: print('streams: # Available quality and codecs') if self.dash_streams: print(' [ DASH ] %s' % ('_' * 36)) itags = sorted(self.dash_streams, key=lambda i: -self.dash_streams[i]['size']) for stream in itags: self.p_stream(stream) print(' [ DEFAULT ] %s' % ('_' * 33)) for stream in self.streams_sorted: self.p_stream(stream['id'] if 'id' in stream else stream['itag']) if self.audiolang: print('audio-languages:') for i in self.audiolang: print(' - lang: {}'.format(i['lang'])) print(' download-url: {}\n'.format(i['url'])) </DeepExtract> else: stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag'] <DeepExtract> if stream_id in self.streams: stream = self.streams[stream_id] else: stream = self.dash_streams[stream_id] maybe_print(' - title: %s' % self.title) print(' size: %s MiB (%s bytes)' % (round(stream['size'] / 1048576, 1), stream['size'])) print(' url: %s' % self.url) print() </DeepExtract> else: if 'stream_id' in kwargs and kwargs['stream_id']: stream_id = kwargs['stream_id'] else: stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag'] if 'index' not in kwargs: <DeepExtract> maybe_print('site: %s' % self.__class__.name) maybe_print('title: %s' % self.title) if stream_id: print('stream:') self.p_stream(stream_id) elif stream_id is None: print('stream: # Best quality') stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag'] self.p_stream(stream_id) elif stream_id == []: print('streams: # Available quality and codecs') if self.dash_streams: print(' [ DASH ] %s' % ('_' * 36)) itags = sorted(self.dash_streams, key=lambda i: -self.dash_streams[i]['size']) for stream in itags: self.p_stream(stream) print(' [ DEFAULT ] %s' % ('_' * 33)) for stream in self.streams_sorted: self.p_stream(stream['id'] if 'id' in stream else stream['itag']) if self.audiolang: print('audio-languages:') for i in self.audiolang: print(' - lang: {}'.format(i['lang'])) print(' download-url: {}\n'.format(i['url'])) </DeepExtract> else: <DeepExtract> if stream_id in self.streams: stream = self.streams[stream_id] else: stream = self.dash_streams[stream_id] maybe_print(' - title: %s' % self.title) print(' size: %s MiB (%s bytes)' % (round(stream['size'] / 1048576, 1), stream['size'])) print(' url: %s' % self.url) print() </DeepExtract> if stream_id in self.streams: urls = self.streams[stream_id]['src'] ext = self.streams[stream_id]['container'] total_size = self.streams[stream_id]['size'] else: urls = self.dash_streams[stream_id]['src'] ext = self.dash_streams[stream_id]['container'] total_size = self.dash_streams[stream_id]['size'] if not urls: log.wtf('[Failed] Cannot extract video source.') download_urls(urls, self.title, ext, total_size, output_dir=kwargs['output_dir'], merge=kwargs['merge'], av=stream_id in self.dash_streams) if 'caption' not in kwargs or not kwargs['caption']: print('Skipping captions.') return for lang in self.caption_tracks: filename = '%s.%s.srt' % (get_filename(self.title), lang) print('Saving %s ... ' % filename, end='', flush=True) srt = self.caption_tracks[lang] with open(os.path.join(kwargs['output_dir'], filename), 'w', encoding='utf-8') as x: x.write(srt) print('Done.') <DeepExtract> self.url = None self.title = None self.vid = None self.streams = {} self.streams_sorted = [] if args: self.url = args[0] </DeepExtract>
def download(self, **kwargs): if 'json_output' in kwargs and kwargs['json_output']: json_output.output(self) elif 'info_only' in kwargs and kwargs['info_only']: if 'stream_id' in kwargs and kwargs['stream_id']: stream_id = kwargs['stream_id'] if 'index' not in kwargs: maybe_print('site: %s' % self.__class__.name) maybe_print('title: %s' % self.title) if stream_id: print('stream:') self.p_stream(stream_id) elif stream_id is None: print('stream: # Best quality') stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag'] self.p_stream(stream_id) elif stream_id == []: print('streams: # Available quality and codecs') if self.dash_streams: print(' [ DASH ] %s' % ('_' * 36)) itags = sorted(self.dash_streams, key=lambda i: -self.dash_streams[i]['size']) for stream in itags: self.p_stream(stream) print(' [ DEFAULT ] %s' % ('_' * 33)) for stream in self.streams_sorted: self.p_stream(stream['id'] if 'id' in stream else stream['itag']) if self.audiolang: print('audio-languages:') for i in self.audiolang: print(' - lang: {}'.format(i['lang'])) print(' download-url: {}\n'.format(i['url'])) else: if stream_id in self.streams: stream = self.streams[stream_id] else: stream = self.dash_streams[stream_id] maybe_print(' - title: %s' % self.title) print(' size: %s MiB (%s bytes)' % (round(stream['size'] / 1048576, 1), stream['size'])) print(' url: %s' % self.url) print() elif 'index' not in kwargs: maybe_print('site: %s' % self.__class__.name) maybe_print('title: %s' % self.title) if []: print('stream:') self.p_stream([]) elif [] is None: print('stream: # Best quality') [] = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag'] self.p_stream([]) elif [] == []: print('streams: # Available quality and codecs') if self.dash_streams: print(' [ DASH ] %s' % ('_' * 36)) itags = sorted(self.dash_streams, key=lambda i: -self.dash_streams[i]['size']) for stream in itags: self.p_stream(stream) print(' [ DEFAULT ] %s' % ('_' * 33)) for stream in self.streams_sorted: self.p_stream(stream['id'] if 'id' in stream else stream['itag']) if self.audiolang: print('audio-languages:') for i in self.audiolang: print(' - lang: {}'.format(i['lang'])) print(' download-url: {}\n'.format(i['url'])) else: stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag'] if stream_id in self.streams: stream = self.streams[stream_id] else: stream = self.dash_streams[stream_id] maybe_print(' - title: %s' % self.title) print(' size: %s MiB (%s bytes)' % (round(stream['size'] / 1048576, 1), stream['size'])) print(' url: %s' % self.url) print() else: if 'stream_id' in kwargs and kwargs['stream_id']: stream_id = kwargs['stream_id'] else: stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag'] if 'index' not in kwargs: maybe_print('site: %s' % self.__class__.name) maybe_print('title: %s' % self.title) if stream_id: print('stream:') self.p_stream(stream_id) elif stream_id is None: print('stream: # Best quality') stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag'] self.p_stream(stream_id) elif stream_id == []: print('streams: # Available quality and codecs') if self.dash_streams: print(' [ DASH ] %s' % ('_' * 36)) itags = sorted(self.dash_streams, key=lambda i: -self.dash_streams[i]['size']) for stream in itags: self.p_stream(stream) print(' [ DEFAULT ] %s' % ('_' * 33)) for stream in self.streams_sorted: self.p_stream(stream['id'] if 'id' in stream else stream['itag']) if self.audiolang: print('audio-languages:') for i in self.audiolang: print(' - lang: {}'.format(i['lang'])) print(' download-url: {}\n'.format(i['url'])) else: if stream_id in self.streams: stream = self.streams[stream_id] else: stream = self.dash_streams[stream_id] maybe_print(' - title: %s' % self.title) print(' size: %s MiB (%s bytes)' % (round(stream['size'] / 1048576, 1), stream['size'])) print(' url: %s' % self.url) print() if stream_id in self.streams: urls = self.streams[stream_id]['src'] ext = self.streams[stream_id]['container'] total_size = self.streams[stream_id]['size'] else: urls = self.dash_streams[stream_id]['src'] ext = self.dash_streams[stream_id]['container'] total_size = self.dash_streams[stream_id]['size'] if not urls: log.wtf('[Failed] Cannot extract video source.') download_urls(urls, self.title, ext, total_size, output_dir=kwargs['output_dir'], merge=kwargs['merge'], av=stream_id in self.dash_streams) if 'caption' not in kwargs or not kwargs['caption']: print('Skipping captions.') return for lang in self.caption_tracks: filename = '%s.%s.srt' % (get_filename(self.title), lang) print('Saving %s ... ' % filename, end='', flush=True) srt = self.caption_tracks[lang] with open(os.path.join(kwargs['output_dir'], filename), 'w', encoding='utf-8') as x: x.write(srt) print('Done.') self.url = None self.title = None self.vid = None self.streams = {} self.streams_sorted = [] if args: self.url = args[0] </DeepExtract>
acmpv
positive
def difference(self, other): """ :param other: an IP set. :return: the difference between this IP set and another as a new IP set (all IP addresses and subnets that are in this IP set but not found in the other.) """ result_ranges = [] result_cidrs = {} own_nets = sorted(self._cidrs) other_nets = sorted(other._cidrs) own_idx = 0 other_idx = 0 own_len = len(own_nets) other_len = len(other_nets) while own_idx < own_len and other_idx < other_len: own_cur = own_nets[own_idx] other_cur = other_nets[other_idx] if own_cur == other_cur: own_idx += 1 other_idx += 1 elif own_cur in other_cur: own_idx += 1 elif other_cur in own_cur: <DeepExtract> version = own_cur._module.version subnet = other_nets[other_idx] if subnet.first > own_cur.first: result_ranges.append((version, own_cur.first, subnet.first - 1)) other_idx += 1 prev_subnet = subnet while other_idx < len(other_nets): cur_subnet = other_nets[other_idx] if cur_subnet not in own_cur: break if prev_subnet.last + 1 == cur_subnet.first: pass else: result_ranges.append((version, prev_subnet.last + 1, cur_subnet.first - 1)) other_idx += 1 prev_subnet = cur_subnet first = prev_subnet.last + 1 last = own_cur.last if first <= last: result_ranges.append((version, first, last)) other_idx = other_idx </DeepExtract> own_idx += 1 elif own_cur < other_cur: result_cidrs[own_cur] = True own_idx += 1 else: other_idx += 1 while own_idx < own_len: result_cidrs[own_nets[own_idx]] = True own_idx += 1 for (start, stop) in _iter_merged_ranges(result_ranges): for cidr in iprange_to_cidrs(start, stop): result_cidrs[cidr] = True result = IPSet() result._cidrs = result_cidrs return result
def difference(self, other): """ :param other: an IP set. :return: the difference between this IP set and another as a new IP set (all IP addresses and subnets that are in this IP set but not found in the other.) """ result_ranges = [] result_cidrs = {} own_nets = sorted(self._cidrs) other_nets = sorted(other._cidrs) own_idx = 0 other_idx = 0 own_len = len(own_nets) other_len = len(other_nets) while own_idx < own_len and other_idx < other_len: own_cur = own_nets[own_idx] other_cur = other_nets[other_idx] if own_cur == other_cur: own_idx += 1 other_idx += 1 elif own_cur in other_cur: own_idx += 1 elif other_cur in own_cur: version = own_cur._module.version subnet = other_nets[other_idx] if subnet.first > own_cur.first: result_ranges.append((version, own_cur.first, subnet.first - 1)) other_idx += 1 prev_subnet = subnet while other_idx < len(other_nets): cur_subnet = other_nets[other_idx] if cur_subnet not in own_cur: break if prev_subnet.last + 1 == cur_subnet.first: pass else: result_ranges.append((version, prev_subnet.last + 1, cur_subnet.first - 1)) other_idx += 1 prev_subnet = cur_subnet first = prev_subnet.last + 1 last = own_cur.last if first <= last: result_ranges.append((version, first, last)) other_idx = other_idx own_idx += 1 elif own_cur < other_cur: result_cidrs[own_cur] = True own_idx += 1 else: other_idx += 1 while own_idx < own_len: result_cidrs[own_nets[own_idx]] = True own_idx += 1 for (start, stop) in _iter_merged_ranges(result_ranges): for cidr in iprange_to_cidrs(start, stop): result_cidrs[cidr] = True result = IPSet() result._cidrs = result_cidrs return result
AdvancedCloudFormation
positive
def compute(self, *x): """ Return network output. """ <DeepExtract> if not hasattr(self, '_compute'): if isinstance(self._test_outputs, dict): (self._output_keys, out_tensors) = map(list, zip(*self._test_outputs.items())) if self._test_output: self._output_keys.insert(0, 'cost') out_tensors.insert(0, self._test_output) elif isinstance(self._test_outputs, list) and self._test_outputs: out_tensors = self._test_outputs if self._test_output: out_tensors.insert(0, self._test_output) else: out_tensors = self._test_output self._compute = theano.function(filter(lambda x: x not in self.target_variables, self.input_variables), out_tensors, updates=self.updates, allow_input_downcast=True) </DeepExtract> outs = self._compute(*x) if self._output_keys: return MapDict(dict(zip(self._output_keys, outs))) else: return outs
def compute(self, *x): """ Return network output. """ if not hasattr(self, '_compute'): if isinstance(self._test_outputs, dict): (self._output_keys, out_tensors) = map(list, zip(*self._test_outputs.items())) if self._test_output: self._output_keys.insert(0, 'cost') out_tensors.insert(0, self._test_output) elif isinstance(self._test_outputs, list) and self._test_outputs: out_tensors = self._test_outputs if self._test_output: out_tensors.insert(0, self._test_output) else: out_tensors = self._test_output self._compute = theano.function(filter(lambda x: x not in self.target_variables, self.input_variables), out_tensors, updates=self.updates, allow_input_downcast=True) outs = self._compute(*x) if self._output_keys: return MapDict(dict(zip(self._output_keys, outs))) else: return outs
deepy
positive
def update_or_create(self): log('ModuleExecutor.update_or_create()') if not self.managed_device_exists(): self.module_result['changed'] = True if not self.module.check_mode: <DeepExtract> log('ModuleExecutor.create_managed_device()') post_data = {'managed_device': self.configured_managed_device} log('post data: %s' % post_data) result = self.fetcher.post(post_data=post_data, resource='managed_device', action='add_device') log('result of post: %s' % result) if result['http_response_data']['status'] == 200: if result.get('nitro_errorcode') is not None: if result['nitro_errorcode'] != 0: raise NitroException(errorcode=result['nitro_errorcode'], message=result.get('nitro_message'), severity=result.get('nitro_severity')) elif 400 <= result['http_response_data']['status'] <= 599: raise NitroException(errorcode=result.get('nitro_errorcode'), message=result.get('nitro_message'), severity=result.get('nitro_severity')) else: msg = 'Did not get nitro errorcode and http status was not 200 or 4xx (%s)' % result['http_response_data']['status'] self.module.fail_json(msg=msg, **self.module_result) </DeepExtract> elif not self.managed_device_identical(): self.module_result['changed'] = True if not self.module.check_mode: <DeepExtract> log('ModuleExecutor.update_managed_device()') put_payload = self.configured_managed_device put_data = {'managed_device': put_payload} log('request put data: %s' % put_data) id = self.fetched_managed_device['id'] result = self.fetcher.put(put_data=put_data, resource='managed_device', id=id) log('result of put: %s' % result) if result['http_response_data']['status'] == 200: if result.get('nitro_errorcode') is not None: if result['nitro_errorcode'] != 0: raise NitroException(errorcode=result['nitro_errorcode'], message=result.get('nitro_message'), severity=result.get('nitro_severity')) elif 400 <= result['http_response_data']['status'] <= 599: raise NitroException(errorcode=result.get('nitro_errorcode'), message=result.get('nitro_message'), severity=result.get('nitro_severity')) else: msg = 'Did not get nitro errorcode and http status was not 200 or 4xx (%s)' % result['http_response_data']['status'] </DeepExtract> <DeepExtract> log('ModuleExecutor.fetch_managed_device()') self.fetched_managed_device = {} result = self.fetcher.get('managed_device') log('get result %s' % result) for managed_device in result['data']['managed_device']: match = True for get_id_attribute in self.attribute_config['managed_device']['get_id_attributes']: fetched_value = managed_device.get(get_id_attribute) configured_value = self.configured_managed_device.get(get_id_attribute) if configured_value is None: continue if configured_value != fetched_value: match = False if match: self.fetched_managed_device = managed_device log('fetched managed device %s' % self.fetched_managed_device) </DeepExtract> self.module_result['managed_device'] = self.fetched_managed_device
def update_or_create(self): log('ModuleExecutor.update_or_create()') if not self.managed_device_exists(): self.module_result['changed'] = True if not self.module.check_mode: log('ModuleExecutor.create_managed_device()') post_data = {'managed_device': self.configured_managed_device} log('post data: %s' % post_data) result = self.fetcher.post(post_data=post_data, resource='managed_device', action='add_device') log('result of post: %s' % result) if result['http_response_data']['status'] == 200: if result.get('nitro_errorcode') is not None: if result['nitro_errorcode'] != 0: raise NitroException(errorcode=result['nitro_errorcode'], message=result.get('nitro_message'), severity=result.get('nitro_severity')) elif 400 <= result['http_response_data']['status'] <= 599: raise NitroException(errorcode=result.get('nitro_errorcode'), message=result.get('nitro_message'), severity=result.get('nitro_severity')) else: msg = 'Did not get nitro errorcode and http status was not 200 or 4xx (%s)' % result['http_response_data']['status'] self.module.fail_json(msg=msg, **self.module_result) elif not self.managed_device_identical(): self.module_result['changed'] = True if not self.module.check_mode: log('ModuleExecutor.update_managed_device()') put_payload = self.configured_managed_device put_data = {'managed_device': put_payload} log('request put data: %s' % put_data) id = self.fetched_managed_device['id'] result = self.fetcher.put(put_data=put_data, resource='managed_device', id=id) log('result of put: %s' % result) if result['http_response_data']['status'] == 200: if result.get('nitro_errorcode') is not None: if result['nitro_errorcode'] != 0: raise NitroException(errorcode=result['nitro_errorcode'], message=result.get('nitro_message'), severity=result.get('nitro_severity')) elif 400 <= result['http_response_data']['status'] <= 599: raise NitroException(errorcode=result.get('nitro_errorcode'), message=result.get('nitro_message'), severity=result.get('nitro_severity')) else: msg = 'Did not get nitro errorcode and http status was not 200 or 4xx (%s)' % result['http_response_data']['status'] log('ModuleExecutor.fetch_managed_device()') self.fetched_managed_device = {} result = self.fetcher.get('managed_device') log('get result %s' % result) for managed_device in result['data']['managed_device']: match = True for get_id_attribute in self.attribute_config['managed_device']['get_id_attributes']: fetched_value = managed_device.get(get_id_attribute) configured_value = self.configured_managed_device.get(get_id_attribute) if configured_value is None: continue if configured_value != fetched_value: match = False if match: self.fetched_managed_device = managed_device log('fetched managed device %s' % self.fetched_managed_device) self.module_result['managed_device'] = self.fetched_managed_device
citrix-adc-ansible-modules
positive
def forward_train(self, img, img_metas, gt_masks, **kwargs): """Forward training process and loss computing Args: img (list[Tensor]): input images img_metas(dict) : image meta-info gt_masks(np.ndarray): ground-truth label for training Returns: dict: losses for training data """ losses = dict() <DeepExtract> x = self.backbone(img) if self.with_neck: x = self.neck(x) x = x </DeepExtract> mask_pred = self.mask_head(x) mask_targets = self.mask_head.get_target(gt_masks) loss_mask = self.mask_head.loss(mask_pred, mask_targets) losses.update(loss_mask) return losses
def forward_train(self, img, img_metas, gt_masks, **kwargs): """Forward training process and loss computing Args: img (list[Tensor]): input images img_metas(dict) : image meta-info gt_masks(np.ndarray): ground-truth label for training Returns: dict: losses for training data """ losses = dict() x = self.backbone(img) if self.with_neck: x = self.neck(x) x = x mask_pred = self.mask_head(x) mask_targets = self.mask_head.get_target(gt_masks) loss_mask = self.mask_head.loss(mask_pred, mask_targets) losses.update(loss_mask) return losses
DAVAR-Lab-OCR
positive
def insertBefore(self, child, beforeChild): """ insertBefore - Inserts a child before #beforeChild @param child <AdvancedTag/str> - Child block to insert @param beforeChild <AdvancedTag/str> - Child block to insert before. if None, will be appended @return - The added child. Note, if it is a text block (str), the return isl NOT be linked by reference. @raises ValueError - If #beforeChild is defined and is not a child of this node """ if beforeChild is None: return self.appendBlock(child) <DeepExtract> isChildTag = issubclass(child.__class__, AdvancedTag) </DeepExtract> myBlocks = self.blocks myChildren = self.children try: blocksIdx = myBlocks.index(beforeChild) if isChildTag: childrenIdx = myChildren.index(beforeChild) except ValueError: raise ValueError('Provided "beforeChild" is not a child of element, cannot insert.') self.blocks = myBlocks[:blocksIdx] + [child] + myBlocks[blocksIdx:] if isChildTag: self.children = myChildren[:childrenIdx] + [child] + myChildren[childrenIdx:] return child
def insertBefore(self, child, beforeChild): """ insertBefore - Inserts a child before #beforeChild @param child <AdvancedTag/str> - Child block to insert @param beforeChild <AdvancedTag/str> - Child block to insert before. if None, will be appended @return - The added child. Note, if it is a text block (str), the return isl NOT be linked by reference. @raises ValueError - If #beforeChild is defined and is not a child of this node """ if beforeChild is None: return self.appendBlock(child) isChildTag = issubclass(child.__class__, AdvancedTag) myBlocks = self.blocks myChildren = self.children try: blocksIdx = myBlocks.index(beforeChild) if isChildTag: childrenIdx = myChildren.index(beforeChild) except ValueError: raise ValueError('Provided "beforeChild" is not a child of element, cannot insert.') self.blocks = myBlocks[:blocksIdx] + [child] + myBlocks[blocksIdx:] if isChildTag: self.children = myChildren[:childrenIdx] + [child] + myChildren[childrenIdx:] return child
AdvancedHTMLParser
positive
def test_ctor_no_title(self): child = DummySchemaNode(None, name='fred') <DeepExtract> from colander import Invalid exc = Invalid(None, child, val) None = exc </DeepExtract> self.assertEqual(node.typ, None) self.assertEqual(node.children, [child]) self.assertEqual(node.validator, 1) self.assertEqual(node.default, 2) self.assertEqual(node.missing, 'missing') self.assertEqual(node.name, 'name_a') self.assertEqual(node.title, 'Name A')
def test_ctor_no_title(self): child = DummySchemaNode(None, name='fred') from colander import Invalid exc = Invalid(None, child, val) None = exc self.assertEqual(node.typ, None) self.assertEqual(node.children, [child]) self.assertEqual(node.validator, 1) self.assertEqual(node.default, 2) self.assertEqual(node.missing, 'missing') self.assertEqual(node.name, 'name_a') self.assertEqual(node.title, 'Name A')
colander
positive