Dataset columns:
  before: stringlengths, 0 to 955k
  after: stringlengths, 0 to 877k
  repo: stringlengths, 1 to 74
  type: stringclasses, 1 value
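Each record pairs a "before" snippet, in which inlined helper code is wrapped in <DeepExtract> ... </DeepExtract> markers, with an "after" snippet where those markers are stripped and the code is left inline, plus the source repo name and a type label (only the value "positive" occurs). As a minimal sketch, assuming only the schema above, a record can be modeled and sanity-checked as follows; the field values in the example are hypothetical placeholders, not rows taken from the data:

# Minimal sketch of one record and a check against the column constraints above.
# All field values below are hypothetical placeholders, not actual dataset rows.
from typing import TypedDict

class Record(TypedDict):
    before: str  # code containing <DeepExtract> ... </DeepExtract> blocks
    after: str   # the same code with the markers stripped
    repo: str    # source repository name, 1 to 74 characters
    type: str    # label column; only the single class 'positive' appears

def validate(rec: Record) -> bool:
    # Enforce the ranges listed in the schema header.
    return 1 <= len(rec['repo']) <= 74 and rec['type'] == 'positive'

example: Record = {
    'before': "def f():\n    <DeepExtract>\n    x = 1\n    </DeepExtract>\n    return x",
    'after': "def f():\n    x = 1\n    return x",
    'repo': 'example-repo',  # hypothetical name
    'type': 'positive',
}
assert validate(example)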
def check(seqdataentry): print(seqdataentry['contig']['name']) <DeepExtract> seqdataentry['reference']['dna'] = seqdataentry['reference']['dna'].upper() seqdataentry['contig']['dna'] = seqdataentry['contig']['dna'].upper() for i in range(len(seqdataentry['reference']['dna'])): if seqdataentry['reference']['dna'][i:i + 3] == 'ATG': seqdataentry['reference']['dna'] = seqdataentry['reference']['dna'][i:len(seqdataentry['reference']['dna'])] break for i in range(len(seqdataentry['contig']['dna'])): if seqdataentry['reference']['dna'][0:10] == seqdataentry['contig']['dna'][i:i + 10]: seqdataentry['contig']['dna'] = seqdataentry['contig']['dna'][i:len(seqdataentry['contig']['dna'])] if seqdataentry['reference']['dna'][len(seqdataentry['reference']['dna']) - 11:len(seqdataentry['reference']['dna']) - 1] == seqdataentry['contig']['dna'][i:i + 10]: seqdataentry['contig']['dna'] = seqdataentry['contig']['dna'][0:i + 10] dna = (seqdataentry['reference']['dna'], seqdataentry['contig']['dna']) </DeepExtract> <DeepExtract> dna[0] = dna[0].upper() dna[1] = dna[1].upper() counter = 0 for i in range(len(dna[1])): if i % 3 == 0 or i == 0: if dna[0][i:i + 3] == dna[1][i:i + 3]: counter = 0 pass elif i + 3 > len(dna[1]): pass elif counter <= 5: print('DNA pos %d, %s mutated to %s --- %s%d%s' % (i + 3 - 1, dna[0][i:i + 3], dna[1][i:i + 3], dna.translate(dna[0][i:i + 3]), int((i + 3) / 3), dna.translate(dna[1][i:i + 3]))) counter += 1 else: print('over %d consecutive mismatches, rest of construct is likely out of frame' % (counter - 1)) break print('\n') </DeepExtract>
def check(seqdataentry): print(seqdataentry['contig']['name']) seqdataentry['reference']['dna'] = seqdataentry['reference']['dna'].upper() seqdataentry['contig']['dna'] = seqdataentry['contig']['dna'].upper() for i in range(len(seqdataentry['reference']['dna'])): if seqdataentry['reference']['dna'][i:i + 3] == 'ATG': seqdataentry['reference']['dna'] = seqdataentry['reference']['dna'][i:len(seqdataentry['reference']['dna'])] break for i in range(len(seqdataentry['contig']['dna'])): if seqdataentry['reference']['dna'][0:10] == seqdataentry['contig']['dna'][i:i + 10]: seqdataentry['contig']['dna'] = seqdataentry['contig']['dna'][i:len(seqdataentry['contig']['dna'])] if seqdataentry['reference']['dna'][len(seqdataentry['reference']['dna']) - 11:len(seqdataentry['reference']['dna']) - 1] == seqdataentry['contig']['dna'][i:i + 10]: seqdataentry['contig']['dna'] = seqdataentry['contig']['dna'][0:i + 10] dna = (seqdataentry['reference']['dna'], seqdataentry['contig']['dna']) dna[0] = dna[0].upper() dna[1] = dna[1].upper() counter = 0 for i in range(len(dna[1])): if i % 3 == 0 or i == 0: if dna[0][i:i + 3] == dna[1][i:i + 3]: counter = 0 pass elif i + 3 > len(dna[1]): pass elif counter <= 5: print('DNA pos %d, %s mutated to %s --- %s%d%s' % (i + 3 - 1, dna[0][i:i + 3], dna[1][i:i + 3], dna.translate(dna[0][i:i + 3]), int((i + 3) / 3), dna.translate(dna[1][i:i + 3]))) counter += 1 else: print('over %d consecutive mismatches, rest of construct is likely out of frame' % (counter - 1)) break print('\n') </DeepExtract>
DNApy
positive
def _confirm_application_information(bundle_info): app_info = bundle_info.app_info <DeepExtract> app_info.id = self._ui.ask_text('ID [Eg: com.example.app]:', default=app_info.id) </DeepExtract> <DeepExtract> app_info.name = self._ui.ask_text('Application Name:', default=app_info.name) </DeepExtract> <DeepExtract> if not app_info.icon: app_info.icon = 'application-vnd.appimage' app_info.icon = self._ui.ask_text('Icon:', default=app_info.icon) </DeepExtract> <DeepExtract> if app_info.exec: options = self._resolve_exec_path(bundle_info.app_dir, app_info.exec) if options: app_info.exec = self._ui.ask_select('Executable path:', choices=options) app_info.exec = self._ui.ask_text('Executable path relative to AppDir [usr/bin/app]:', default=app_info.exec) </DeepExtract> <DeepExtract> if not app_info.exec_args: app_info.exec_args = '$@' app_info.exec_args = self._ui.ask_text('Arguments [Default: $@]:', default=app_info.exec_args) </DeepExtract> <DeepExtract> if not app_info.version: app_info.version = 'latest' app_info.version = self._ui.ask_text('Version [Eg: 1.0.0]:', default=app_info.version) </DeepExtract>
def _confirm_application_information(bundle_info): app_info = bundle_info.app_info app_info.id = self._ui.ask_text('ID [Eg: com.example.app]:', default=app_info.id) app_info.name = self._ui.ask_text('Application Name:', default=app_info.name) if not app_info.icon: app_info.icon = 'application-vnd.appimage' app_info.icon = self._ui.ask_text('Icon:', default=app_info.icon) if app_info.exec: options = self._resolve_exec_path(bundle_info.app_dir, app_info.exec) if options: app_info.exec = self._ui.ask_select('Executable path:', choices=options) app_info.exec = self._ui.ask_text('Executable path relative to AppDir [usr/bin/app]:', default=app_info.exec) if not app_info.exec_args: app_info.exec_args = '$@' app_info.exec_args = self._ui.ask_text('Arguments [Default: $@]:', default=app_info.exec_args) if not app_info.version: app_info.version = 'latest' app_info.version = self._ui.ask_text('Version [Eg: 1.0.0]:', default=app_info.version) </DeepExtract>
appimage-builder
positive
def set_to_public_tree_node_state(self, node): """ Sets the internal env wrapper to the state ""node"" is in. Args: node: Any node (of any type) in a PublicTree instance. """ state_seq = [] def add(_node): if _node is not None: if _node.p_id_acting_next != _node.tree.CHANCE_ID: state_seq.insert(0, _node.env_state) <DeepExtract> if _node.parent is not None: if _node.parent.p_id_acting_next != _node.parent.tree.CHANCE_ID: state_seq.insert(0, _node.parent.env_state) add(_node.parent.parent) </DeepExtract> <DeepExtract> if node is not None: if node.p_id_acting_next != node.tree.CHANCE_ID: state_seq.insert(0, node.env_state) add(node.parent) </DeepExtract> self.reset() <DeepExtract> self._action_count_this_round = [0, 0] self._game_round_last_tick = Poker.PREFLOP self._action_history_vector = np.zeros(shape=self._action_vector_size, dtype=np.float32) </DeepExtract> for sd in state_seq: self.env.load_state_dict(sd, blank_private_info=True) <DeepExtract> _last_a = self.env.last_action[0] if _last_a is not None: _last_actor = self.env.last_action[2] idx = self.env_bldr.get_vector_idx(round_=self._game_round_last_tick, p_id=_last_actor, nth_action_this_round=self._action_count_this_round[_last_actor], action_idx=_last_a) self._action_history_vector[idx] = 1 self._action_count_this_round[_last_actor] += 1 if self.env.current_round != self._game_round_last_tick: self._game_round_last_tick = self.env.current_round self._action_count_this_round = [0, 0] </DeepExtract>
def set_to_public_tree_node_state(self, node): """ Sets the internal env wrapper to the state ""node"" is in. Args: node: Any node (of any type) in a PublicTree instance. """ state_seq = [] def add(_node): if _node is not None: if _node.p_id_acting_next != _node.tree.CHANCE_ID: state_seq.insert(0, _node.env_state) if _node.parent is not None: if _node.parent.p_id_acting_next != _node.parent.tree.CHANCE_ID: state_seq.insert(0, _node.parent.env_state) add(_node.parent.parent) if node is not None: if node.p_id_acting_next != node.tree.CHANCE_ID: state_seq.insert(0, node.env_state) add(node.parent) self.reset() self._action_count_this_round = [0, 0] self._game_round_last_tick = Poker.PREFLOP self._action_history_vector = np.zeros(shape=self._action_vector_size, dtype=np.float32) for sd in state_seq: self.env.load_state_dict(sd, blank_private_info=True) _last_a = self.env.last_action[0] if _last_a is not None: _last_actor = self.env.last_action[2] idx = self.env_bldr.get_vector_idx(round_=self._game_round_last_tick, p_id=_last_actor, nth_action_this_round=self._action_count_this_round[_last_actor], action_idx=_last_a) self._action_history_vector[idx] = 1 self._action_count_this_round[_last_actor] += 1 if self.env.current_round != self._game_round_last_tick: self._game_round_last_tick = self.env.current_round self._action_count_this_round = [0, 0] </DeepExtract>
DREAM
positive
def write_proxy_exports(self): """Write proxy exports to localhost to block build/check calls to internet.""" <DeepExtract> self.specfile.write_strip('export http_proxy=http://127.0.0.1:9/') </DeepExtract> <DeepExtract> self.specfile.write_strip('export https_proxy=http://127.0.0.1:9/') </DeepExtract> <DeepExtract> self.specfile.write_strip('export no_proxy=localhost,127.0.0.1,0.0.0.0') </DeepExtract>
def write_proxy_exports(self): """Write proxy exports to localhost to block build/check calls to internet.""" self.specfile.write_strip('export http_proxy=http://127.0.0.1:9/') self.specfile.write_strip('export https_proxy=http://127.0.0.1:9/') self.specfile.write_strip('export no_proxy=localhost,127.0.0.1,0.0.0.0') </DeepExtract>
autospec
positive
def test_get_cube(self): <DeepExtract> model_name = model_name or 'model.json' config = read_slicer_config(self.data_path('slicer.ini')) ws = Workspace(config=config) ws.import_model(self.model_path('model.json')) ws = ws </DeepExtract> cube = ws.cube('contracts') self.assertEqual('contracts', cube.name) self.assertEqual(1, len(cube.measures))
def test_get_cube(self): model_name = model_name or 'model.json' config = read_slicer_config(self.data_path('slicer.ini')) ws = Workspace(config=config) ws.import_model(self.model_path('model.json')) ws = ws cube = ws.cube('contracts') self.assertEqual('contracts', cube.name) self.assertEqual(1, len(cube.measures))
cubes
positive
def get_config(fname, overrides=None, show=True): """ Read config from file """ assert os.path.exists(fname), 'config file({}) is not exist'.format(fname) <DeepExtract> with open(fname, 'r') as fopen: yaml_config = AttrDict(yaml.load(fopen, Loader=yaml.SafeLoader)) create_attr_dict(yaml_config) config = yaml_config </DeepExtract> <DeepExtract> if overrides is not None: for opt in overrides: assert isinstance(opt, str), 'option({}) should be a str'.format(opt) assert '=' in opt, 'option({}) should contain a =to distinguish between key and value'.format(opt) pair = opt.split('=') assert len(pair) == 2, 'there can be only a = in the option' (key, value) = pair keys = key.split('.') override(config, keys, value) return config </DeepExtract> return config
def get_config(fname, overrides=None, show=True): """ Read config from file """ assert os.path.exists(fname), 'config file({}) is not exist'.format(fname) with open(fname, 'r') as fopen: yaml_config = AttrDict(yaml.load(fopen, Loader=yaml.SafeLoader)) create_attr_dict(yaml_config) config = yaml_config if overrides is not None: for opt in overrides: assert isinstance(opt, str), 'option({}) should be a str'.format(opt) assert '=' in opt, 'option({}) should contain a =to distinguish between key and value'.format(opt) pair = opt.split('=') assert len(pair) == 2, 'there can be only a = in the option' (key, value) = pair keys = key.split('.') override(config, keys, value) return config return config
-AI-emmmm
positive
def connect(self): <DeepExtract> extra_kw = {} if self.source_address: extra_kw['source_address'] = self.source_address if self.socket_options: extra_kw['socket_options'] = self.socket_options try: conn = connection.create_connection((self.host, self.port), self.timeout, **extra_kw) except SocketTimeout: raise ConnectTimeoutError(self, 'Connection to %s timed out. (connect timeout=%s)' % (self.host, self.timeout)) conn = conn </DeepExtract> resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs) resolved_ssl_version = resolve_ssl_version(self.ssl_version) hostname = self.host if getattr(self, '_tunnel_host', None): self.sock = conn self._tunnel() self.auto_open = 0 hostname = self._tunnel_host is_time_off = datetime.date.today() < RECENT_DATE if is_time_off: warnings.warn('System time is way off (before {0}). This will probably lead to SSL verification errors'.format(RECENT_DATE), SystemTimeWarning) self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file, cert_reqs=resolved_cert_reqs, ca_certs=self.ca_certs, server_hostname=hostname, ssl_version=resolved_ssl_version) if self.assert_fingerprint: assert_fingerprint(self.sock.getpeercert(binary_form=True), self.assert_fingerprint) elif resolved_cert_reqs != ssl.CERT_NONE and self.assert_hostname is not False: cert = self.sock.getpeercert() if not cert.get('subjectAltName', ()): warnings.warn('Certificate has no `subjectAltName`, falling back to check for a `commonName` for now. This feature is being removed by major browsers and deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 for details.)', SecurityWarning) match_hostname(cert, self.assert_hostname or hostname) self.is_verified = resolved_cert_reqs == ssl.CERT_REQUIRED or self.assert_fingerprint is not None
def connect(self): extra_kw = {} if self.source_address: extra_kw['source_address'] = self.source_address if self.socket_options: extra_kw['socket_options'] = self.socket_options try: conn = connection.create_connection((self.host, self.port), self.timeout, **extra_kw) except SocketTimeout: raise ConnectTimeoutError(self, 'Connection to %s timed out. (connect timeout=%s)' % (self.host, self.timeout)) conn = conn resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs) resolved_ssl_version = resolve_ssl_version(self.ssl_version) hostname = self.host if getattr(self, '_tunnel_host', None): self.sock = conn self._tunnel() self.auto_open = 0 hostname = self._tunnel_host is_time_off = datetime.date.today() < RECENT_DATE if is_time_off: warnings.warn('System time is way off (before {0}). This will probably lead to SSL verification errors'.format(RECENT_DATE), SystemTimeWarning) self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file, cert_reqs=resolved_cert_reqs, ca_certs=self.ca_certs, server_hostname=hostname, ssl_version=resolved_ssl_version) if self.assert_fingerprint: assert_fingerprint(self.sock.getpeercert(binary_form=True), self.assert_fingerprint) elif resolved_cert_reqs != ssl.CERT_NONE and self.assert_hostname is not False: cert = self.sock.getpeercert() if not cert.get('subjectAltName', ()): warnings.warn('Certificate has no `subjectAltName`, falling back to check for a `commonName` for now. This feature is being removed by major browsers and deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 for details.)', SecurityWarning) match_hostname(cert, self.assert_hostname or hostname) self.is_verified = resolved_cert_reqs == ssl.CERT_REQUIRED or self.assert_fingerprint is not None
BruteSploit
positive
@gui_exception def predict_annotations_using_extr_points(points): pass @work_exception def do_work(): <DeepExtract> import torch from collections import OrderedDict from PIL import Image import numpy as np from torch.nn.functional import upsample from contrib.dextr import deeplab_resnet as resnet from contrib.dextr import helpers modelName = 'dextr_pascal-sbd' pad = 50 thres = 0.8 gpu_id = 0 device = torch.device('cuda:' + str(gpu_id) if torch.cuda.is_available() else 'cpu') model = resnet.resnet101(1, nInputChannels=4, classifier='psp') model_path = os.path.abspath('./models/{}.pth'.format(modelName)) state_dict_checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage) if 'module.' in list(state_dict_checkpoint.keys())[0]: new_state_dict = OrderedDict() for (k, v) in state_dict_checkpoint.items(): name = k[7:] new_state_dict[name] = v else: new_state_dict = state_dict_checkpoint model.load_state_dict(new_state_dict) model.eval() model.to(device) image = np.array(Image.open(self.tag.file_path)) extreme_points_ori = np.asarray(points).astype(np.int) with torch.no_grad(): bbox = helpers.get_bbox(image, points=extreme_points_ori, pad=pad, zero_pad=True) crop_image = helpers.crop_from_bbox(image, bbox, zero_pad=True) resize_image = helpers.fixed_resize(crop_image, (512, 512)).astype(np.float32) extreme_points = extreme_points_ori - [np.min(extreme_points_ori[:, 0]), np.min(extreme_points_ori[:, 1])] + [pad, pad] extreme_points = (512 * extreme_points * [1 / crop_image.shape[1], 1 / crop_image.shape[0]]).astype(np.int) extreme_heatmap = helpers.make_gt(resize_image, extreme_points, sigma=10) extreme_heatmap = helpers.cstm_normalize(extreme_heatmap, 255) input_dextr = np.concatenate((resize_image, extreme_heatmap[:, :, np.newaxis]), axis=2) inputs = torch.from_numpy(input_dextr.transpose((2, 0, 1))[np.newaxis, ...]) inputs = inputs.to(device) outputs = model.forward(inputs) outputs = upsample(outputs, size=(512, 512), mode='bilinear', align_corners=True) outputs = outputs.to(torch.device('cpu')) pred = np.transpose(outputs.data.numpy()[0, ...], (1, 2, 0)) pred = 1 / (1 + np.exp(-pred)) pred = np.squeeze(pred) result = helpers.crop2fullmask(pred, bbox, im_size=image.shape[:2], zero_pad=True, relax=pad) > thres binary = np.zeros_like(result, dtype=np.uint8) binary[result] = 255 contour_list = cv2.findContours(binary.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) contour_list = imutils.grab_contours(contour_list) contours = [] for contour in contour_list: c_points = np.vstack(contour).squeeze().tolist() contours.append(c_points) contours = contours </DeepExtract> return (contours, None) @gui_exception def done_work(result): self._loading_dialog.hide() (pred_out, err) = result if err: raise err if pred_out: for c in pred_out: c_points = [] for i in range(0, len(c), 10): c_points.append(c[i]) if len(c_points) > 5: polygon = EditablePolygon() polygon.tag = self.tag.dataset self.image_viewer._scene.addItem(polygon) bbox: QRectF = self.image_viewer.pixmap.boundingRect() offset = QPointF(bbox.width() / 2, bbox.height() / 2) for point in c_points: polygon.addPoint(QPoint(point[0] - offset.x(), point[1] - offset.y())) self._loading_dialog.show() worker = Worker(do_work) worker.signals.result.connect(done_work) self._thread_pool.start(worker)
@gui_exception def predict_annotations_using_extr_points(points): pass @work_exception def do_work(): import torch from collections import OrderedDict from PIL import Image import numpy as np from torch.nn.functional import upsample from contrib.dextr import deeplab_resnet as resnet from contrib.dextr import helpers modelName = 'dextr_pascal-sbd' pad = 50 thres = 0.8 gpu_id = 0 device = torch.device('cuda:' + str(gpu_id) if torch.cuda.is_available() else 'cpu') model = resnet.resnet101(1, nInputChannels=4, classifier='psp') model_path = os.path.abspath('./models/{}.pth'.format(modelName)) state_dict_checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage) if 'module.' in list(state_dict_checkpoint.keys())[0]: new_state_dict = OrderedDict() for (k, v) in state_dict_checkpoint.items(): name = k[7:] new_state_dict[name] = v else: new_state_dict = state_dict_checkpoint model.load_state_dict(new_state_dict) model.eval() model.to(device) image = np.array(Image.open(self.tag.file_path)) extreme_points_ori = np.asarray(points).astype(np.int) with torch.no_grad(): bbox = helpers.get_bbox(image, points=extreme_points_ori, pad=pad, zero_pad=True) crop_image = helpers.crop_from_bbox(image, bbox, zero_pad=True) resize_image = helpers.fixed_resize(crop_image, (512, 512)).astype(np.float32) extreme_points = extreme_points_ori - [np.min(extreme_points_ori[:, 0]), np.min(extreme_points_ori[:, 1])] + [pad, pad] extreme_points = (512 * extreme_points * [1 / crop_image.shape[1], 1 / crop_image.shape[0]]).astype(np.int) extreme_heatmap = helpers.make_gt(resize_image, extreme_points, sigma=10) extreme_heatmap = helpers.cstm_normalize(extreme_heatmap, 255) input_dextr = np.concatenate((resize_image, extreme_heatmap[:, :, np.newaxis]), axis=2) inputs = torch.from_numpy(input_dextr.transpose((2, 0, 1))[np.newaxis, ...]) inputs = inputs.to(device) outputs = model.forward(inputs) outputs = upsample(outputs, size=(512, 512), mode='bilinear', align_corners=True) outputs = outputs.to(torch.device('cpu')) pred = np.transpose(outputs.data.numpy()[0, ...], (1, 2, 0)) pred = 1 / (1 + np.exp(-pred)) pred = np.squeeze(pred) result = helpers.crop2fullmask(pred, bbox, im_size=image.shape[:2], zero_pad=True, relax=pad) > thres binary = np.zeros_like(result, dtype=np.uint8) binary[result] = 255 contour_list = cv2.findContours(binary.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) contour_list = imutils.grab_contours(contour_list) contours = [] for contour in contour_list: c_points = np.vstack(contour).squeeze().tolist() contours.append(c_points) contours = contours return (contours, None) @gui_exception def done_work(result): self._loading_dialog.hide() (pred_out, err) = result if err: raise err if pred_out: for c in pred_out: c_points = [] for i in range(0, len(c), 10): c_points.append(c[i]) if len(c_points) > 5: polygon = EditablePolygon() polygon.tag = self.tag.dataset self.image_viewer._scene.addItem(polygon) bbox: QRectF = self.image_viewer.pixmap.boundingRect() offset = QPointF(bbox.width() / 2, bbox.height() / 2) for point in c_points: polygon.addPoint(QPoint(point[0] - offset.x(), point[1] - offset.y())) self._loading_dialog.show() worker = Worker(do_work) worker.signals.result.connect(done_work) self._thread_pool.start(worker)
CvStudio
positive
def is_active_modules(self, *modules): """ [UNIT].. -- check if these units are in active state implements True if all is-active = True """ units = [] results = [] for module in modules: <DeepExtract> found = [] for unit in self.match_sysd_units(to_list(module), suffix): if unit not in found: found.append(unit) for unit in self.match_sysd_templates(to_list(module), suffix): if unit not in found: found.append(unit) for unit in self.match_sysv_units(to_list(module), suffix): if unit not in found: found.append(unit) units = found </DeepExtract> if not units: logg.error('Unit %s not found.', unit_of(module)) self.error |= NOT_ACTIVE results += ['inactive'] continue for unit in units: <DeepExtract> conf = self.load_unit_conf(unit) if not conf: logg.warning('Unit %s not found.', unit) active = 'unknown' else: active = self.get_active_from(conf) </DeepExtract> <DeepExtract> conf = self.get_unit_conf(unit) enabled = self.enabled_from(conf) </DeepExtract> if enabled != 'enabled' and ACTIVE_IF_ENABLED: active = 'inactive' results += [active] break status = 'active' in results non_active = [result for result in results if result != 'active'] if non_active: self.error |= NOT_ACTIVE if non_active: self.error |= NOT_OK if _quiet: return [] return results
def is_active_modules(self, *modules): """ [UNIT].. -- check if these units are in active state implements True if all is-active = True """ units = [] results = [] for module in modules: found = [] for unit in self.match_sysd_units(to_list(module), suffix): if unit not in found: found.append(unit) for unit in self.match_sysd_templates(to_list(module), suffix): if unit not in found: found.append(unit) for unit in self.match_sysv_units(to_list(module), suffix): if unit not in found: found.append(unit) units = found if not units: logg.error('Unit %s not found.', unit_of(module)) self.error |= NOT_ACTIVE results += ['inactive'] continue for unit in units: conf = self.load_unit_conf(unit) if not conf: logg.warning('Unit %s not found.', unit) active = 'unknown' else: active = self.get_active_from(conf) conf = self.get_unit_conf(unit) enabled = self.enabled_from(conf) if enabled != 'enabled' and ACTIVE_IF_ENABLED: active = 'inactive' results += [active] break status = 'active' in results non_active = [result for result in results if result != 'active'] if non_active: self.error |= NOT_ACTIVE if non_active: self.error |= NOT_OK if _quiet: return [] return results
deployment
positive
def evaluateTriplet(amb_codon): """ Evaluate the ambiguous codon by computing which amino acids it codes for. The input is a string, three letters long and comprising only IUPAC Nucleotide ambiguity code. The valid values is any combination of three of the following: GATCRYWSMKHBVDN """ assert type(amb_codon) is str and len(amb_codon) == 3, 'Error, the ambiguous codon must be a string three characters long.' m = re.match('^[GATCRYWSMKHBVDN]{3}$', amb_codon) assert m != None, 'Error, the codon %s is not valid. It may only use the chracters GATCRYWSMKHBVDN.' % amb_codon self.target = list(set([dna.Translate(s, self.getTable()) for s in dna.UnAmb(amb_codon)])) <DeepExtract> assert type(amb_codon) is str and len(amb_codon) == 3, 'Error, the ambiguous codon must be a string three characters long.' m = re.match('^[GATCRYWSMKHBVDN]{3}$', amb_codon) assert m != None, 'Error, the codon %s is not valid. It may only use the chracters GATCRYWSMKHBVDN.' % amb_codon self.triplet = amb_codon </DeepExtract> <DeepExtract> assert all([s in 'FLSYCWPHERIMTNKVADQG*' for s in []]), 'Error, one or more of the amino acids %s are not valid.' % [] self.offtarget = [] </DeepExtract> <DeepExtract> assert all([s in 'FLSYCWPHERIMTNKVADQG*' for s in self.next_steps()]), 'Error, one or more of the amino acids %s are not valid.' % self.next_steps() self.possible = self.next_steps() </DeepExtract>
def evaluateTriplet(amb_codon): """ Evaluate the ambiguous codon by computing which amino acids it codes for. The input is a string, three letters long and comprising only IUPAC Nucleotide ambiguity code. The valid values is any combination of three of the following: GATCRYWSMKHBVDN """ assert type(amb_codon) is str and len(amb_codon) == 3, 'Error, the ambiguous codon must be a string three characters long.' m = re.match('^[GATCRYWSMKHBVDN]{3}$', amb_codon) assert m != None, 'Error, the codon %s is not valid. It may only use the chracters GATCRYWSMKHBVDN.' % amb_codon self.target = list(set([dna.Translate(s, self.getTable()) for s in dna.UnAmb(amb_codon)])) assert type(amb_codon) is str and len(amb_codon) == 3, 'Error, the ambiguous codon must be a string three characters long.' m = re.match('^[GATCRYWSMKHBVDN]{3}$', amb_codon) assert m != None, 'Error, the codon %s is not valid. It may only use the chracters GATCRYWSMKHBVDN.' % amb_codon self.triplet = amb_codon assert all([s in 'FLSYCWPHERIMTNKVADQG*' for s in []]), 'Error, one or more of the amino acids %s are not valid.' % [] self.offtarget = [] assert all([s in 'FLSYCWPHERIMTNKVADQG*' for s in self.next_steps()]), 'Error, one or more of the amino acids %s are not valid.' % self.next_steps() self.possible = self.next_steps() </DeepExtract>
DNApy
positive
def test_confirmation_page_logged_in_with_location(self): session = self.client.session <DeepExtract> email = 'test@test.com' registrant = Registrant.objects.create(email=email) session = session session['registrant_id'] = str(registrant.id) session['registrant_email'] = registrant.email session.save() return registrant </DeepExtract> location = Location.objects.create(category='category', name='Name of venue', address='Address line 1', city='Ottawa', province='ON', postal_code='K1K 1K1', contact_email='test@test.com', contact_phone='613-555-5555') session['location_id'] = str(location.id) session.save() response = self.client.get(reverse('register:confirmation')) self.assertEqual(response.status_code, 200)
def test_confirmation_page_logged_in_with_location(self): session = self.client.session email = 'test@test.com' registrant = Registrant.objects.create(email=email) session = session session['registrant_id'] = str(registrant.id) session['registrant_email'] = registrant.email session.save() return registrant location = Location.objects.create(category='category', name='Name of venue', address='Address line 1', city='Ottawa', province='ON', postal_code='K1K 1K1', contact_email='test@test.com', contact_phone='613-555-5555') session['location_id'] = str(location.id) session.save() response = self.client.get(reverse('register:confirmation')) self.assertEqual(response.status_code, 200)
covid-alert-portal
positive
def build(self, node, gds_collector_=None): self.gds_collector_ = gds_collector_ if SaveElementTreeNode: self.gds_elementtree_node_ = node already_processed = set() self.ns_prefix_ = node.prefix <DeepExtract> value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: already_processed.add('id') self.id = value value = find_attr_value_('version', node) if value is not None and 'version' not in already_processed: already_processed.add('version') self.version = value value = find_attr_value_('include', node) if value is not None and 'include' not in already_processed: already_processed.add('include') self.include = value value = find_attr_value_('exclude', node) if value is not None and 'exclude' not in already_processed: already_processed.add('exclude') self.exclude = value </DeepExtract> <DeepExtract> if node.text is not None: text = node.text else: text = '' for child in node: if child.tail is not None: text += child.tail self.valueOf_ = text </DeepExtract> for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] <DeepExtract> pass </DeepExtract> return self
def build(self, node, gds_collector_=None): self.gds_collector_ = gds_collector_ if SaveElementTreeNode: self.gds_elementtree_node_ = node already_processed = set() self.ns_prefix_ = node.prefix value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: already_processed.add('id') self.id = value value = find_attr_value_('version', node) if value is not None and 'version' not in already_processed: already_processed.add('version') self.version = value value = find_attr_value_('include', node) if value is not None and 'include' not in already_processed: already_processed.add('include') self.include = value value = find_attr_value_('exclude', node) if value is not None and 'exclude' not in already_processed: already_processed.add('exclude') self.exclude = value if node.text is not None: text = node.text else: text = '' for child in node: if child.tail is not None: text += child.tail self.valueOf_ = text for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] pass return self
autopkg
positive
def _initialize(peer_cid: bytes) -> None: self.tls = tls.Context(alpn_protocols=self._configuration.alpn_protocols, cadata=self._configuration.cadata, cafile=self._configuration.cafile, capath=self._configuration.capath, cipher_suites=self.configuration.cipher_suites, is_client=self._is_client, logger=self._logger, max_early_data=None if self._is_client else MAX_EARLY_DATA, server_name=self._configuration.server_name, verify_mode=self._configuration.verify_mode) self.tls.certificate = self._configuration.certificate self.tls.certificate_chain = self._configuration.certificate_chain self.tls.certificate_private_key = self._configuration.private_key self.tls.handshake_extensions = [(get_transport_parameters_extension(self._version), self._serialize_transport_parameters())] session_ticket = self._configuration.session_ticket if self._is_client and session_ticket is not None and session_ticket.is_valid and (session_ticket.server_name == self._configuration.server_name): self.tls.session_ticket = self._configuration.session_ticket if session_ticket.max_early_data_size == MAX_EARLY_DATA: for (ext_type, ext_data) in session_ticket.other_extensions: if ext_type == get_transport_parameters_extension(self._version): <DeepExtract> try: quic_transport_parameters = pull_quic_transport_parameters(Buffer(data=ext_data)) except ValueError: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='Could not parse QUIC transport parameters') if self._quic_logger is not None and (not True): self._quic_logger.log_event(category='transport', event='parameters_set', data=self._quic_logger.encode_transport_parameters(owner='remote', parameters=quic_transport_parameters)) if not self._is_client: for attr in ['original_destination_connection_id', 'preferred_address', 'retry_source_connection_id', 'stateless_reset_token']: if getattr(quic_transport_parameters, attr) is not None: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='%s is not allowed for clients' % attr) if not True: if quic_transport_parameters.initial_source_connection_id != self._remote_initial_source_connection_id: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='initial_source_connection_id does not match') if self._is_client and quic_transport_parameters.original_destination_connection_id != self._original_destination_connection_id: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='original_destination_connection_id does not match') if self._is_client and quic_transport_parameters.retry_source_connection_id != self._retry_source_connection_id: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='retry_source_connection_id does not match') if quic_transport_parameters.active_connection_id_limit is not None and quic_transport_parameters.active_connection_id_limit < 2: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='active_connection_id_limit must be no less than 2') if quic_transport_parameters.ack_delay_exponent is not None and quic_transport_parameters.ack_delay_exponent > 20: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, 
reason_phrase='ack_delay_exponent must be <= 20') if quic_transport_parameters.max_ack_delay is not None and quic_transport_parameters.max_ack_delay >= 2 ** 14: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='max_ack_delay must be < 2^14') if quic_transport_parameters.max_udp_payload_size is not None and quic_transport_parameters.max_udp_payload_size < 1200: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='max_udp_payload_size must be >= 1200') if not True: if quic_transport_parameters.ack_delay_exponent is not None: self._remote_ack_delay_exponent = self._remote_ack_delay_exponent if quic_transport_parameters.max_ack_delay is not None: self._loss.max_ack_delay = quic_transport_parameters.max_ack_delay / 1000.0 if self._is_client and self._peer_cid.sequence_number == 0 and (quic_transport_parameters.stateless_reset_token is not None): self._peer_cid.stateless_reset_token = quic_transport_parameters.stateless_reset_token if quic_transport_parameters.active_connection_id_limit is not None: self._remote_active_connection_id_limit = quic_transport_parameters.active_connection_id_limit if quic_transport_parameters.max_idle_timeout is not None: self._remote_max_idle_timeout = quic_transport_parameters.max_idle_timeout / 1000.0 self._remote_max_datagram_frame_size = quic_transport_parameters.max_datagram_frame_size for param in ['max_data', 'max_stream_data_bidi_local', 'max_stream_data_bidi_remote', 'max_stream_data_uni', 'max_streams_bidi', 'max_streams_uni']: value = getattr(quic_transport_parameters, 'initial_' + param) if value is not None: setattr(self, '_remote_' + param, value) </DeepExtract> break self.tls.alpn_cb = self._alpn_handler if self._session_ticket_fetcher is not None: self.tls.get_session_ticket_cb = self._session_ticket_fetcher if self._session_ticket_handler is not None: self.tls.new_session_ticket_cb = self._handle_session_ticket self.tls.update_traffic_key_cb = self._update_traffic_key def create_crypto_pair(epoch: tls.Epoch) -> CryptoPair: epoch_name = ['initial', '0rtt', 'handshake', '1rtt'][epoch.value] secret_names = ['server_%s_secret' % epoch_name, 'client_%s_secret' % epoch_name] recv_secret_name = secret_names[not self._is_client] send_secret_name = secret_names[self._is_client] return CryptoPair(recv_setup_cb=partial(self._log_key_updated, recv_secret_name), recv_teardown_cb=partial(self._log_key_retired, recv_secret_name), send_setup_cb=partial(self._log_key_updated, send_secret_name), send_teardown_cb=partial(self._log_key_retired, send_secret_name)) self._cryptos = dict(((epoch, create_crypto_pair(epoch)) for epoch in (tls.Epoch.INITIAL, tls.Epoch.ZERO_RTT, tls.Epoch.HANDSHAKE, tls.Epoch.ONE_RTT))) self._crypto_buffers = {tls.Epoch.INITIAL: Buffer(capacity=CRYPTO_BUFFER_SIZE), tls.Epoch.HANDSHAKE: Buffer(capacity=CRYPTO_BUFFER_SIZE), tls.Epoch.ONE_RTT: Buffer(capacity=CRYPTO_BUFFER_SIZE)} self._crypto_streams = {tls.Epoch.INITIAL: QuicStream(), tls.Epoch.HANDSHAKE: QuicStream(), tls.Epoch.ONE_RTT: QuicStream()} self._spaces = {tls.Epoch.INITIAL: QuicPacketSpace(), tls.Epoch.HANDSHAKE: QuicPacketSpace(), tls.Epoch.ONE_RTT: QuicPacketSpace()} self._cryptos[tls.Epoch.INITIAL].setup_initial(cid=peer_cid, is_client=self._is_client, version=self._version) self._loss.spaces = list(self._spaces.values())
def _initialize(peer_cid: bytes) -> None: self.tls = tls.Context(alpn_protocols=self._configuration.alpn_protocols, cadata=self._configuration.cadata, cafile=self._configuration.cafile, capath=self._configuration.capath, cipher_suites=self.configuration.cipher_suites, is_client=self._is_client, logger=self._logger, max_early_data=None if self._is_client else MAX_EARLY_DATA, server_name=self._configuration.server_name, verify_mode=self._configuration.verify_mode) self.tls.certificate = self._configuration.certificate self.tls.certificate_chain = self._configuration.certificate_chain self.tls.certificate_private_key = self._configuration.private_key self.tls.handshake_extensions = [(get_transport_parameters_extension(self._version), self._serialize_transport_parameters())] session_ticket = self._configuration.session_ticket if self._is_client and session_ticket is not None and session_ticket.is_valid and (session_ticket.server_name == self._configuration.server_name): self.tls.session_ticket = self._configuration.session_ticket if session_ticket.max_early_data_size == MAX_EARLY_DATA: for (ext_type, ext_data) in session_ticket.other_extensions: if ext_type == get_transport_parameters_extension(self._version): try: quic_transport_parameters = pull_quic_transport_parameters(Buffer(data=ext_data)) except ValueError: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='Could not parse QUIC transport parameters') if self._quic_logger is not None and (not True): self._quic_logger.log_event(category='transport', event='parameters_set', data=self._quic_logger.encode_transport_parameters(owner='remote', parameters=quic_transport_parameters)) if not self._is_client: for attr in ['original_destination_connection_id', 'preferred_address', 'retry_source_connection_id', 'stateless_reset_token']: if getattr(quic_transport_parameters, attr) is not None: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='%s is not allowed for clients' % attr) if not True: if quic_transport_parameters.initial_source_connection_id != self._remote_initial_source_connection_id: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='initial_source_connection_id does not match') if self._is_client and quic_transport_parameters.original_destination_connection_id != self._original_destination_connection_id: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='original_destination_connection_id does not match') if self._is_client and quic_transport_parameters.retry_source_connection_id != self._retry_source_connection_id: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='retry_source_connection_id does not match') if quic_transport_parameters.active_connection_id_limit is not None and quic_transport_parameters.active_connection_id_limit < 2: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='active_connection_id_limit must be no less than 2') if quic_transport_parameters.ack_delay_exponent is not None and quic_transport_parameters.ack_delay_exponent > 20: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='ack_delay_exponent 
must be <= 20') if quic_transport_parameters.max_ack_delay is not None and quic_transport_parameters.max_ack_delay >= 2 ** 14: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='max_ack_delay must be < 2^14') if quic_transport_parameters.max_udp_payload_size is not None and quic_transport_parameters.max_udp_payload_size < 1200: raise QuicConnectionError(error_code=QuicErrorCode.TRANSPORT_PARAMETER_ERROR, frame_type=QuicFrameType.CRYPTO, reason_phrase='max_udp_payload_size must be >= 1200') if not True: if quic_transport_parameters.ack_delay_exponent is not None: self._remote_ack_delay_exponent = self._remote_ack_delay_exponent if quic_transport_parameters.max_ack_delay is not None: self._loss.max_ack_delay = quic_transport_parameters.max_ack_delay / 1000.0 if self._is_client and self._peer_cid.sequence_number == 0 and (quic_transport_parameters.stateless_reset_token is not None): self._peer_cid.stateless_reset_token = quic_transport_parameters.stateless_reset_token if quic_transport_parameters.active_connection_id_limit is not None: self._remote_active_connection_id_limit = quic_transport_parameters.active_connection_id_limit if quic_transport_parameters.max_idle_timeout is not None: self._remote_max_idle_timeout = quic_transport_parameters.max_idle_timeout / 1000.0 self._remote_max_datagram_frame_size = quic_transport_parameters.max_datagram_frame_size for param in ['max_data', 'max_stream_data_bidi_local', 'max_stream_data_bidi_remote', 'max_stream_data_uni', 'max_streams_bidi', 'max_streams_uni']: value = getattr(quic_transport_parameters, 'initial_' + param) if value is not None: setattr(self, '_remote_' + param, value) break self.tls.alpn_cb = self._alpn_handler if self._session_ticket_fetcher is not None: self.tls.get_session_ticket_cb = self._session_ticket_fetcher if self._session_ticket_handler is not None: self.tls.new_session_ticket_cb = self._handle_session_ticket self.tls.update_traffic_key_cb = self._update_traffic_key def create_crypto_pair(epoch: tls.Epoch) -> CryptoPair: epoch_name = ['initial', '0rtt', 'handshake', '1rtt'][epoch.value] secret_names = ['server_%s_secret' % epoch_name, 'client_%s_secret' % epoch_name] recv_secret_name = secret_names[not self._is_client] send_secret_name = secret_names[self._is_client] return CryptoPair(recv_setup_cb=partial(self._log_key_updated, recv_secret_name), recv_teardown_cb=partial(self._log_key_retired, recv_secret_name), send_setup_cb=partial(self._log_key_updated, send_secret_name), send_teardown_cb=partial(self._log_key_retired, send_secret_name)) self._cryptos = dict(((epoch, create_crypto_pair(epoch)) for epoch in (tls.Epoch.INITIAL, tls.Epoch.ZERO_RTT, tls.Epoch.HANDSHAKE, tls.Epoch.ONE_RTT))) self._crypto_buffers = {tls.Epoch.INITIAL: Buffer(capacity=CRYPTO_BUFFER_SIZE), tls.Epoch.HANDSHAKE: Buffer(capacity=CRYPTO_BUFFER_SIZE), tls.Epoch.ONE_RTT: Buffer(capacity=CRYPTO_BUFFER_SIZE)} self._crypto_streams = {tls.Epoch.INITIAL: QuicStream(), tls.Epoch.HANDSHAKE: QuicStream(), tls.Epoch.ONE_RTT: QuicStream()} self._spaces = {tls.Epoch.INITIAL: QuicPacketSpace(), tls.Epoch.HANDSHAKE: QuicPacketSpace(), tls.Epoch.ONE_RTT: QuicPacketSpace()} self._cryptos[tls.Epoch.INITIAL].setup_initial(cid=peer_cid, is_client=self._is_client, version=self._version) self._loss.spaces = list(self._spaces.values())
aioquic
positive
def test_execution_order_flat(tmpdir, mocker): image_dir = str(tmpdir.mkdir('source')) <DeepExtract> shutil.copytree(os.path.join(os.path.dirname(__file__), 'modules'), os.path.join(image_dir, 'tests', 'modules')) </DeepExtract> img_desc = image_descriptor.copy() img_desc['modules']['install'] = [{'name': 'mod_1'}, {'name': 'mod_2'}, {'name': 'mod_3'}, {'name': 'mod_4'}] img_desc['modules']['repositories'] = [{'name': 'modules', 'path': 'tests/modules/repo_4'}] with open(os.path.join(image_dir, 'image.yaml'), 'w') as fd: yaml.dump(img_desc, fd, default_flow_style=False) <DeepExtract> if parameters is None: parameters = ['build', '--dry-run', '--container-file', 'Dockerfile', 'podman'] if env is None: env = {} with Chdir(image_dir): result = CliRunner(env=env).invoke(cli, parameters, catch_exceptions=False, input=input) sys.stdout.write('\n') sys.stdout.write(result.output) assert result.exit_code == 0 if message: assert message in result.output return result </DeepExtract> expected_modules_order = '\n###### START module \'mod_1:1.0\'\n###### \\\n # Copy \'mod_1\' module content\n COPY modules/mod_1 /tmp/scripts/mod_1\n # Set \'mod_1\' module defined environment variables\n ENV \\\n foo="mod_1"\n # Custom scripts from \'mod_1\' module\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_1/a" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_1/b" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_1/c" ]\n###### /\n###### END module \'mod_1:1.0\'\n\n###### START module \'mod_2:1.0\'\n###### \\\n # Copy \'mod_2\' module content\n COPY modules/mod_2 /tmp/scripts/mod_2\n # Set \'mod_2\' module defined environment variables\n ENV \\\n foo="mod_2"\n # Custom scripts from \'mod_2\' module\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_2/a" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_2/b" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_2/c" ]\n###### /\n###### END module \'mod_2:1.0\'\n\n###### START module \'mod_3:1.0\'\n###### \\\n # Copy \'mod_3\' module content\n COPY modules/mod_3 /tmp/scripts/mod_3\n # Custom scripts from \'mod_3\' module\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_3/a" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_3/b" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_3/c" ]\n###### /\n###### END module \'mod_3:1.0\'\n\n###### START module \'mod_4:1.0\'\n###### \\\n # Copy \'mod_4\' module content\n COPY modules/mod_4 /tmp/scripts/mod_4\n # Custom scripts from \'mod_4\' module\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_4/a" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_4/b" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_4/c" ]\n###### /\n###### END module \'mod_4:1.0\'\n' assert check_dockerfile_text(image_dir, expected_modules_order) assert not check_dockerfile_text(image_dir, 'RUN yum clean all')
def test_execution_order_flat(tmpdir, mocker): image_dir = str(tmpdir.mkdir('source')) shutil.copytree(os.path.join(os.path.dirname(__file__), 'modules'), os.path.join(image_dir, 'tests', 'modules')) img_desc = image_descriptor.copy() img_desc['modules']['install'] = [{'name': 'mod_1'}, {'name': 'mod_2'}, {'name': 'mod_3'}, {'name': 'mod_4'}] img_desc['modules']['repositories'] = [{'name': 'modules', 'path': 'tests/modules/repo_4'}] with open(os.path.join(image_dir, 'image.yaml'), 'w') as fd: yaml.dump(img_desc, fd, default_flow_style=False) if parameters is None: parameters = ['build', '--dry-run', '--container-file', 'Dockerfile', 'podman'] if env is None: env = {} with Chdir(image_dir): result = CliRunner(env=env).invoke(cli, parameters, catch_exceptions=False, input=input) sys.stdout.write('\n') sys.stdout.write(result.output) assert result.exit_code == 0 if message: assert message in result.output return result expected_modules_order = '\n###### START module \'mod_1:1.0\'\n###### \\\n # Copy \'mod_1\' module content\n COPY modules/mod_1 /tmp/scripts/mod_1\n # Set \'mod_1\' module defined environment variables\n ENV \\\n foo="mod_1"\n # Custom scripts from \'mod_1\' module\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_1/a" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_1/b" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_1/c" ]\n###### /\n###### END module \'mod_1:1.0\'\n\n###### START module \'mod_2:1.0\'\n###### \\\n # Copy \'mod_2\' module content\n COPY modules/mod_2 /tmp/scripts/mod_2\n # Set \'mod_2\' module defined environment variables\n ENV \\\n foo="mod_2"\n # Custom scripts from \'mod_2\' module\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_2/a" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_2/b" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_2/c" ]\n###### /\n###### END module \'mod_2:1.0\'\n\n###### START module \'mod_3:1.0\'\n###### \\\n # Copy \'mod_3\' module content\n COPY modules/mod_3 /tmp/scripts/mod_3\n # Custom scripts from \'mod_3\' module\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_3/a" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_3/b" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_3/c" ]\n###### /\n###### END module \'mod_3:1.0\'\n\n###### START module \'mod_4:1.0\'\n###### \\\n # Copy \'mod_4\' module content\n COPY modules/mod_4 /tmp/scripts/mod_4\n # Custom scripts from \'mod_4\' module\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_4/a" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_4/b" ]\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/mod_4/c" ]\n###### /\n###### END module \'mod_4:1.0\'\n' assert check_dockerfile_text(image_dir, expected_modules_order) assert not check_dockerfile_text(image_dir, 'RUN yum clean all')
cekit
positive
def transform_instance_annotations(annotation, transforms, image_size, *, keypoint_hflip_indices=None): annotation = d2_transform_inst_anno(annotation, transforms, image_size, keypoint_hflip_indices=keypoint_hflip_indices) if 'beziers' in annotation: <DeepExtract> annotation['beziers'] = np.asarray(annotation['beziers'], dtype='float64').reshape(-1, 2) annotation['beziers'] = transforms.apply_coords(annotation['beziers']).reshape(-1) do_hflip = sum((isinstance(t, T.HFlipTransform) for t in transforms.transforms)) % 2 == 1 if do_hflip: raise ValueError('Flipping text data is not supported (also disencouraged).') annotation['beziers'] = annotation['beziers'] </DeepExtract> annotation['beziers'] = beziers return annotation
def transform_instance_annotations(annotation, transforms, image_size, *, keypoint_hflip_indices=None): annotation = d2_transform_inst_anno(annotation, transforms, image_size, keypoint_hflip_indices=keypoint_hflip_indices) if 'beziers' in annotation: annotation['beziers'] = np.asarray(annotation['beziers'], dtype='float64').reshape(-1, 2) annotation['beziers'] = transforms.apply_coords(annotation['beziers']).reshape(-1) do_hflip = sum((isinstance(t, T.HFlipTransform) for t in transforms.transforms)) % 2 == 1 if do_hflip: raise ValueError('Flipping text data is not supported (also disencouraged).') annotation['beziers'] = annotation['beziers'] annotation['beziers'] = beziers return annotation
AdelaiDet
positive
def test_end_to_end_wrong_credentials_mundi(self): <DeepExtract> search_criteria = {'productType': product_type, 'start': start, 'end': end, 'geom': geom, 'raise_errors': True, **search_kwargs_dict} if items_per_page: search_criteria['items_per_page'] = items_per_page if page: search_criteria['page'] = page self.eodag.set_preferred_provider(*MUNDI_SEARCH_ARGS) (results, nb_results) = self.eodag.search(**search_criteria) if offline: results = [prod for prod in results if prod.properties.get('storageStatus', '') != ONLINE_STATUS] if check_product: self.assertGreater(len(results), 0) one_product = results[0] self.assertEqual(one_product.provider, *MUNDI_SEARCH_ARGS) product = one_product else: product = results </DeepExtract> with self.assertRaises(AuthenticationError): self.eodag.download(product)
def test_end_to_end_wrong_credentials_mundi(self): search_criteria = {'productType': product_type, 'start': start, 'end': end, 'geom': geom, 'raise_errors': True, **search_kwargs_dict} if items_per_page: search_criteria['items_per_page'] = items_per_page if page: search_criteria['page'] = page self.eodag.set_preferred_provider(*MUNDI_SEARCH_ARGS) (results, nb_results) = self.eodag.search(**search_criteria) if offline: results = [prod for prod in results if prod.properties.get('storageStatus', '') != ONLINE_STATUS] if check_product: self.assertGreater(len(results), 0) one_product = results[0] self.assertEqual(one_product.provider, *MUNDI_SEARCH_ARGS) product = one_product else: product = results with self.assertRaises(AuthenticationError): self.eodag.download(product)
eodag
positive
def __enter__(self): <DeepExtract> self._count += 1 self._start_time = self._get_time() </DeepExtract> return self
def __enter__(self): self._count += 1 self._start_time = self._get_time() return self
dtr-prototype
positive
def _get_specs(layout, surfs, array_name, cbar_range, nvals=256): """Get array specifications. Parameters ---------- layout : ndarray, shape = (n_rows, n_cols) Array of surface keys in `surfs`. Specifies how window is arranged. surfs : dict[str, BSPolyData] Dictionary of surfaces. array_name : ndarray Names of point data array to plot for each layout entry. cbar_range : {'sym'} or tuple, Range for each array. If 'sym', uses a symmetric range. Only used is array has positive and negative values. nvals : int, optional Number of lookup table values for continuous arrays. Default is 256. Returns ------- specs : ndarray Array with specifications for each array entry. """ (nrow, ncol) = layout.shape n_overlays = max([len(a) for a in array_name.ravel()]) def _set_spec(x, rg): if rg is None or rg == 'sym': (a, b) = (np.nanmin(x), np.nanmax(x)) if rg == 'sym' and np.sign(a) != np.sign(b): b = max(np.abs(a), b) a = -b rg = (a, b) if np.issubdtype(x.dtype, np.floating): return (*rg, nvals, np.array([]), False) vals = np.unique(x) return (*rg, vals.size, vals, True) dt = np.dtype([('min', 'f8'), ('max', 'f8'), ('nval', 'i8'), ('val', 'O'), ('disc', '?')]) specs = np.zeros((n_overlays, nrow, ncol), dtype=dt) specs[:] = (np.nan, np.nan, nvals, np.array([]), False) map_sp = {k: {} for k in surfs.keys()} for (idx, k) in np.ndenumerate(layout): if k is None: continue for (ia, name) in enumerate(array_name[idx]): if name not in surfs[k].point_keys: continue if name not in map_sp[k]: arr = surfs[k].PointData[name] <DeepExtract> if cbar_range[idx][ia] is None or cbar_range[idx][ia] == 'sym': (a, b) = (np.nanmin(arr), np.nanmax(arr)) if cbar_range[idx][ia] == 'sym' and np.sign(a) != np.sign(b): b = max(np.abs(a), b) a = -b cbar_range[idx][ia] = (a, b) if np.issubdtype(arr.dtype, np.floating): map_sp[k][name] = (*cbar_range[idx][ia], nvals, np.array([]), False) vals = np.unique(arr) map_sp[k][name] = (*cbar_range[idx][ia], vals.size, vals, True) </DeepExtract> specs[(ia,) + idx] = map_sp[k][name] return specs
def _get_specs(layout, surfs, array_name, cbar_range, nvals=256): """Get array specifications. Parameters ---------- layout : ndarray, shape = (n_rows, n_cols) Array of surface keys in `surfs`. Specifies how window is arranged. surfs : dict[str, BSPolyData] Dictionary of surfaces. array_name : ndarray Names of point data array to plot for each layout entry. cbar_range : {'sym'} or tuple, Range for each array. If 'sym', uses a symmetric range. Only used is array has positive and negative values. nvals : int, optional Number of lookup table values for continuous arrays. Default is 256. Returns ------- specs : ndarray Array with specifications for each array entry. """ (nrow, ncol) = layout.shape n_overlays = max([len(a) for a in array_name.ravel()]) def _set_spec(x, rg): if rg is None or rg == 'sym': (a, b) = (np.nanmin(x), np.nanmax(x)) if rg == 'sym' and np.sign(a) != np.sign(b): b = max(np.abs(a), b) a = -b rg = (a, b) if np.issubdtype(x.dtype, np.floating): return (*rg, nvals, np.array([]), False) vals = np.unique(x) return (*rg, vals.size, vals, True) dt = np.dtype([('min', 'f8'), ('max', 'f8'), ('nval', 'i8'), ('val', 'O'), ('disc', '?')]) specs = np.zeros((n_overlays, nrow, ncol), dtype=dt) specs[:] = (np.nan, np.nan, nvals, np.array([]), False) map_sp = {k: {} for k in surfs.keys()} for (idx, k) in np.ndenumerate(layout): if k is None: continue for (ia, name) in enumerate(array_name[idx]): if name not in surfs[k].point_keys: continue if name not in map_sp[k]: arr = surfs[k].PointData[name] if cbar_range[idx][ia] is None or cbar_range[idx][ia] == 'sym': (a, b) = (np.nanmin(arr), np.nanmax(arr)) if cbar_range[idx][ia] == 'sym' and np.sign(a) != np.sign(b): b = max(np.abs(a), b) a = -b cbar_range[idx][ia] = (a, b) if np.issubdtype(arr.dtype, np.floating): map_sp[k][name] = (*cbar_range[idx][ia], nvals, np.array([]), False) vals = np.unique(arr) map_sp[k][name] = (*cbar_range[idx][ia], vals.size, vals, True) specs[(ia,) + idx] = map_sp[k][name] return specs
BrainSpace
positive
def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, LoggingArguments, PathArguments)) (model_args, data_args, train_args, log_args, path_args, remaining_strings) = parser.parse_args_into_dataclasses(return_remaining_strings=True) assert len(remaining_strings) == 0, f'The args {remaining_strings} could not be parsed.' tf.random.set_seed(train_args.seed) tf.autograph.set_verbosity(0) parse_bool = lambda arg: arg == 'true' do_gradient_accumulation = train_args.gradient_accumulation_steps > 1 do_xla = not parse_bool(train_args.skip_xla) do_eager = parse_bool(train_args.eager) skip_sop = parse_bool(train_args.skip_sop) skip_mlm = parse_bool(train_args.skip_mlm) pre_layer_norm = parse_bool(model_args.pre_layer_norm) fast_squad = parse_bool(log_args.fast_squad) dummy_eval = parse_bool(log_args.dummy_eval) is_sagemaker = path_args.filesystem_prefix.startswith('/opt/ml') disable_tqdm = is_sagemaker global max_grad_norm max_grad_norm = train_args.max_grad_norm hvd.init() gpus = tf.config.list_physical_devices('GPU') for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) if gpus: tf.config.set_visible_devices(gpus[hvd.local_rank()], 'GPU') tf.config.optimizer.set_jit(do_xla) tf.config.experimental_run_functions_eagerly(do_eager) if hvd.rank() == 0: current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S') platform = 'sm' if is_sagemaker else 'eks' if skip_sop: loss_str = '-skipsop' elif skip_mlm: loss_str = '-skipmlm' else: loss_str = '' if log_args.run_name is None: metadata = f"{model_args.model_type}-{model_args.model_size}-{model_args.load_from}-{hvd.size()}gpus-{train_args.per_gpu_batch_size * hvd.size() * train_args.gradient_accumulation_steps}globalbatch-{train_args.learning_rate}maxlr-{train_args.learning_rate_decay_power}power-{train_args.optimizer}opt-{train_args.total_steps}steps-{('preln' if pre_layer_norm else 'postln')}{loss_str}-{model_args.hidden_dropout_prob}dropout" run_name = f"{current_time}-{platform}-{metadata}-{(train_args.name if train_args.name else 'unnamed')}" else: run_name = log_args.run_name level = logging.INFO format = '%(asctime)-15s %(name)-12s: %(levelname)-8s %(message)s' handlers = [logging.FileHandler(os.path.join(path_args.filesystem_prefix, path_args.log_dir, f'{run_name}.log')), TqdmLoggingHandler()] logging.basicConfig(level=level, format=format, handlers=handlers) assert not (skip_sop and skip_mlm), 'Cannot use --skip_sop and --skip_mlm' <DeepExtract> global validation_batch validation_batch = rewrap_tf_function(validation_batch) if do_gradient_accumulation: global train_batch train_batch = rewrap_tf_function(train_batch) global allreduce allreduce = rewrap_tf_function(allreduce) else: global train_step train_step = rewrap_tf_function(train_step) </DeepExtract> if train_args.optimizer == 'lamb': optimizer = get_lamb_optimizer(train_args) elif train_args.optimizer == 'adamw': optimizer = get_adamw_optimizer(train_args) optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer, loss_scale='dynamic') gradient_accumulator = GradientAccumulator() loaded_optimizer_weights = None model = create_model(model_class=TFAutoModelForPreTraining, model_args=model_args) tokenizer = create_tokenizer(model_args.model_type) if model_args.load_from == 'checkpoint': checkpoint_path = os.path.join(path_args.filesystem_prefix, model_args.checkpoint_path) <DeepExtract> (model_ckpt, optimizer_ckpt) = (f'{checkpoint_path}.ckpt', f'{checkpoint_path}-optimizer.npy') </DeepExtract> if hvd.rank() == 
0: model.load_weights(model_ckpt) if model_args.load_optimizer_state == 'true': loaded_optimizer_weights = np.load(optimizer_ckpt, allow_pickle=True) train_glob = os.path.join(path_args.filesystem_prefix, path_args.train_dir, '*.tfrecord') validation_glob = os.path.join(path_args.filesystem_prefix, path_args.val_dir, '*.tfrecord') train_filenames = glob.glob(train_glob) validation_filenames = glob.glob(validation_glob) train_dataset = get_dataset_from_tfrecords(model_type=model_args.model_type, filenames=train_filenames, max_seq_length=data_args.max_seq_length, max_predictions_per_seq=data_args.max_predictions_per_seq, per_gpu_batch_size=train_args.per_gpu_batch_size) train_dataset = train_dataset.batch(train_args.gradient_accumulation_steps) train_dataset = train_dataset.prefetch(buffer_size=8) if hvd.rank() == 0: validation_dataset = get_dataset_from_tfrecords(model_type=model_args.model_type, filenames=validation_filenames, max_seq_length=data_args.max_seq_length, max_predictions_per_seq=data_args.max_predictions_per_seq, per_gpu_batch_size=train_args.per_gpu_batch_size) validation_dataset = validation_dataset.prefetch(buffer_size=8) pbar = tqdm.tqdm(total=train_args.total_steps, disable=disable_tqdm) summary_writer = None logger.info(f'Starting training, job name {run_name}') i = 1 start_time = time.perf_counter() for batch in train_dataset: learning_rate = optimizer.learning_rate(step=tf.constant(i, dtype=tf.float32)) loss_scale = optimizer.loss_scale <DeepExtract> (total_loss, total_mlm_loss, total_mlm_acc, total_sop_loss, total_sop_acc) = (tf.constant(0, dtype=tf.float32), tf.constant(0, dtype=tf.float32), tf.constant(0, dtype=tf.float32), tf.constant(0, dtype=tf.float32), tf.constant(0, dtype=tf.float32)) for step in range(train_args.gradient_accumulation_steps): (loss, mlm_loss, mlm_acc, sop_loss, sop_acc) = train_batch(model=model, optimizer=optimizer, gradient_accumulator=gradient_accumulator, input_dict={'input_ids': batch['input_ids'][step], 'attention_mask': batch['input_mask'][step], 'token_type_ids': batch['segment_ids'][step]}, label_positions=batch['masked_lm_positions'][step], label_ids=batch['masked_lm_ids'][step], label_weights=batch['masked_lm_weights'][step], next_sentence_labels=batch['next_sentence_labels'][step], skip_sop=skip_sop, skip_mlm=skip_mlm) total_loss += tf.cast(loss, total_loss.dtype) total_mlm_loss += tf.cast(mlm_loss, total_mlm_loss.dtype) total_mlm_acc += tf.cast(mlm_acc, total_mlm_acc.dtype) total_sop_loss += tf.cast(sop_loss, total_sop_loss.dtype) total_sop_acc += tf.cast(sop_acc, total_sop_acc.dtype) total_loss /= train_args.gradient_accumulation_steps total_mlm_loss /= train_args.gradient_accumulation_steps total_mlm_acc /= train_args.gradient_accumulation_steps total_sop_loss /= train_args.gradient_accumulation_steps total_sop_acc /= train_args.gradient_accumulation_steps return_tuple = allreduce(model=model, optimizer=optimizer, gradient_accumulator=gradient_accumulator, loss=total_loss, mlm_loss=total_mlm_loss, mlm_acc=total_mlm_acc, sop_loss=total_sop_loss, sop_acc=total_sop_acc) (loss, mlm_loss, mlm_acc, sop_loss, sop_acc, grad_norm, weight_norm) = return_tuple </DeepExtract> if i == 1: if hvd.rank() == 0 and loaded_optimizer_weights is not None: optimizer.set_weights(loaded_optimizer_weights) hvd.broadcast_variables(model.variables, root_rank=0) hvd.broadcast_variables(optimizer.variables(), root_rank=0) i = optimizer.get_weights()[0] is_final_step = i >= train_args.total_steps do_squad = log_args.squad_frequency != 0 and (i % 
log_args.squad_frequency == 0 or is_final_step) if do_squad: squad_results = get_squad_results_while_pretraining(model=model, tokenizer=tokenizer, model_size=model_args.model_size, filesystem_prefix=path_args.filesystem_prefix, step=i, dataset=data_args.squad_version, fast=log_args.fast_squad, dummy_eval=log_args.dummy_eval) if hvd.rank() == 0: (squad_exact, squad_f1) = (squad_results['exact'], squad_results['f1']) logger.info(f'SQuAD step {i} -- F1: {squad_f1:.3f}, Exact: {squad_exact:.3f}') <DeepExtract> global validation_batch validation_batch = rewrap_tf_function(validation_batch) if do_gradient_accumulation: global train_batch train_batch = rewrap_tf_function(train_batch) global allreduce allreduce = rewrap_tf_function(allreduce) else: global train_step train_step = rewrap_tf_function(train_step) </DeepExtract> gc.collect() if hvd.rank() == 0: do_log = i % log_args.log_frequency == 0 do_checkpoint = log_args.checkpoint_frequency != 0 and (i % log_args.checkpoint_frequency == 0 or is_final_step) do_validation = log_args.validation_frequency != 0 and (i % log_args.validation_frequency == 0 or is_final_step) pbar.update(1) description = f'Loss: {loss:.3f}, MLM: {mlm_loss:.3f}, SOP: {sop_loss:.3f}, MLM_acc: {mlm_acc:.3f}, SOP_acc: {sop_acc:.3f}' pbar.set_description(description) if do_log: elapsed_time = time.perf_counter() - start_time if i == 1: logger.info(f'First step: {elapsed_time:.3f} secs') else: it_per_sec = log_args.log_frequency / elapsed_time logger.info(f'Train step {i} -- {description} -- It/s: {it_per_sec:.2f}') start_time = time.perf_counter() if do_checkpoint: checkpoint_prefix = os.path.join(path_args.filesystem_prefix, path_args.checkpoint_dir, f'{run_name}-step{i}') model_ckpt = f'{checkpoint_prefix}.ckpt' optimizer_ckpt = f'{checkpoint_prefix}-optimizer.npy' logger.info(f'Saving model at {model_ckpt}, optimizer at {optimizer_ckpt}') model.save_weights(model_ckpt) optimizer_weights = optimizer.get_weights() np.save(optimizer_ckpt, optimizer_weights) if do_validation: <DeepExtract> num_batches = 100 (val_loss, val_mlm_loss, val_mlm_acc, val_sop_loss, val_sop_acc) = (0, 0, 0, 0, 0) for batch in validation_dataset.take(num_batches): (loss, mlm_loss, mlm_acc, sop_loss, sop_acc) = validation_batch(model=model, batch=batch, skip_sop=skip_sop, skip_mlm=skip_mlm) val_loss += loss val_mlm_loss += mlm_loss val_mlm_acc += mlm_acc val_sop_loss += sop_loss val_sop_acc += sop_acc val_loss /= num_batches val_mlm_loss /= num_batches val_mlm_acc /= num_batches val_sop_loss /= num_batches val_sop_acc /= num_batches (val_loss, val_mlm_loss, val_mlm_acc, val_sop_loss, val_sop_acc) = (val_loss, val_mlm_loss, val_mlm_acc, val_sop_loss, val_sop_acc) </DeepExtract> description = f'Loss: {val_loss:.3f}, MLM: {val_mlm_loss:.3f}, SOP: {val_sop_loss:.3f}, MLM_acc: {val_mlm_acc:.3f}, SOP_acc: {val_sop_acc:.3f}' logger.info(f'Validation step {i} -- {description}') if summary_writer is None: summary_writer = tf.summary.create_file_writer(os.path.join(path_args.filesystem_prefix, path_args.log_dir, run_name)) config = {**asdict(model_args), **asdict(data_args), **asdict(train_args), **asdict(log_args), 'global_batch_size': train_args.per_gpu_batch_size * hvd.size()} if is_wandb_available(): wandb.init(config=config, project=model_args.model_type) wandb.run.save() wandb_run_name = wandb.run.name train_metrics = {'weight_norm': weight_norm, 'grad_norm': grad_norm, 'loss_scale': loss_scale, 'learning_rate': learning_rate, 'train/loss': loss, 'train/mlm_loss': mlm_loss, 'train/mlm_acc': mlm_acc, 
'train/sop_loss': sop_loss, 'train/sop_acc': sop_acc} all_metrics = {**train_metrics} if do_validation: val_metrics = {'val/loss': val_loss, 'val/mlm_loss': val_mlm_loss, 'val/mlm_acc': val_mlm_acc, 'val/sop_loss': val_sop_loss, 'val/sop_acc': val_sop_acc} all_metrics = {**all_metrics, **val_metrics} if do_squad: squad_metrics = {'squad/f1': squad_f1, 'squad/exact': squad_exact} all_metrics = {**all_metrics, **squad_metrics} with summary_writer.as_default(): for (name, val) in all_metrics.items(): tf.summary.scalar(name, val, step=i) if is_wandb_available(): wandb.log({'step': i, **all_metrics}) i += 1 if is_final_step: break if hvd.rank() == 0: pbar.close() logger.info(f'Finished pretraining, job name {run_name}')
def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, LoggingArguments, PathArguments)) (model_args, data_args, train_args, log_args, path_args, remaining_strings) = parser.parse_args_into_dataclasses(return_remaining_strings=True) assert len(remaining_strings) == 0, f'The args {remaining_strings} could not be parsed.' tf.random.set_seed(train_args.seed) tf.autograph.set_verbosity(0) parse_bool = lambda arg: arg == 'true' do_gradient_accumulation = train_args.gradient_accumulation_steps > 1 do_xla = not parse_bool(train_args.skip_xla) do_eager = parse_bool(train_args.eager) skip_sop = parse_bool(train_args.skip_sop) skip_mlm = parse_bool(train_args.skip_mlm) pre_layer_norm = parse_bool(model_args.pre_layer_norm) fast_squad = parse_bool(log_args.fast_squad) dummy_eval = parse_bool(log_args.dummy_eval) is_sagemaker = path_args.filesystem_prefix.startswith('/opt/ml') disable_tqdm = is_sagemaker global max_grad_norm max_grad_norm = train_args.max_grad_norm hvd.init() gpus = tf.config.list_physical_devices('GPU') for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) if gpus: tf.config.set_visible_devices(gpus[hvd.local_rank()], 'GPU') tf.config.optimizer.set_jit(do_xla) tf.config.experimental_run_functions_eagerly(do_eager) if hvd.rank() == 0: current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S') platform = 'sm' if is_sagemaker else 'eks' if skip_sop: loss_str = '-skipsop' elif skip_mlm: loss_str = '-skipmlm' else: loss_str = '' if log_args.run_name is None: metadata = f"{model_args.model_type}-{model_args.model_size}-{model_args.load_from}-{hvd.size()}gpus-{train_args.per_gpu_batch_size * hvd.size() * train_args.gradient_accumulation_steps}globalbatch-{train_args.learning_rate}maxlr-{train_args.learning_rate_decay_power}power-{train_args.optimizer}opt-{train_args.total_steps}steps-{('preln' if pre_layer_norm else 'postln')}{loss_str}-{model_args.hidden_dropout_prob}dropout" run_name = f"{current_time}-{platform}-{metadata}-{(train_args.name if train_args.name else 'unnamed')}" else: run_name = log_args.run_name level = logging.INFO format = '%(asctime)-15s %(name)-12s: %(levelname)-8s %(message)s' handlers = [logging.FileHandler(os.path.join(path_args.filesystem_prefix, path_args.log_dir, f'{run_name}.log')), TqdmLoggingHandler()] logging.basicConfig(level=level, format=format, handlers=handlers) assert not (skip_sop and skip_mlm), 'Cannot use --skip_sop and --skip_mlm' global validation_batch validation_batch = rewrap_tf_function(validation_batch) if do_gradient_accumulation: global train_batch train_batch = rewrap_tf_function(train_batch) global allreduce allreduce = rewrap_tf_function(allreduce) else: global train_step train_step = rewrap_tf_function(train_step) if train_args.optimizer == 'lamb': optimizer = get_lamb_optimizer(train_args) elif train_args.optimizer == 'adamw': optimizer = get_adamw_optimizer(train_args) optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer, loss_scale='dynamic') gradient_accumulator = GradientAccumulator() loaded_optimizer_weights = None model = create_model(model_class=TFAutoModelForPreTraining, model_args=model_args) tokenizer = create_tokenizer(model_args.model_type) if model_args.load_from == 'checkpoint': checkpoint_path = os.path.join(path_args.filesystem_prefix, model_args.checkpoint_path) (model_ckpt, optimizer_ckpt) = (f'{checkpoint_path}.ckpt', f'{checkpoint_path}-optimizer.npy') if hvd.rank() == 0: model.load_weights(model_ckpt) if 
model_args.load_optimizer_state == 'true': loaded_optimizer_weights = np.load(optimizer_ckpt, allow_pickle=True) train_glob = os.path.join(path_args.filesystem_prefix, path_args.train_dir, '*.tfrecord') validation_glob = os.path.join(path_args.filesystem_prefix, path_args.val_dir, '*.tfrecord') train_filenames = glob.glob(train_glob) validation_filenames = glob.glob(validation_glob) train_dataset = get_dataset_from_tfrecords(model_type=model_args.model_type, filenames=train_filenames, max_seq_length=data_args.max_seq_length, max_predictions_per_seq=data_args.max_predictions_per_seq, per_gpu_batch_size=train_args.per_gpu_batch_size) train_dataset = train_dataset.batch(train_args.gradient_accumulation_steps) train_dataset = train_dataset.prefetch(buffer_size=8) if hvd.rank() == 0: validation_dataset = get_dataset_from_tfrecords(model_type=model_args.model_type, filenames=validation_filenames, max_seq_length=data_args.max_seq_length, max_predictions_per_seq=data_args.max_predictions_per_seq, per_gpu_batch_size=train_args.per_gpu_batch_size) validation_dataset = validation_dataset.prefetch(buffer_size=8) pbar = tqdm.tqdm(total=train_args.total_steps, disable=disable_tqdm) summary_writer = None logger.info(f'Starting training, job name {run_name}') i = 1 start_time = time.perf_counter() for batch in train_dataset: learning_rate = optimizer.learning_rate(step=tf.constant(i, dtype=tf.float32)) loss_scale = optimizer.loss_scale (total_loss, total_mlm_loss, total_mlm_acc, total_sop_loss, total_sop_acc) = (tf.constant(0, dtype=tf.float32), tf.constant(0, dtype=tf.float32), tf.constant(0, dtype=tf.float32), tf.constant(0, dtype=tf.float32), tf.constant(0, dtype=tf.float32)) for step in range(train_args.gradient_accumulation_steps): (loss, mlm_loss, mlm_acc, sop_loss, sop_acc) = train_batch(model=model, optimizer=optimizer, gradient_accumulator=gradient_accumulator, input_dict={'input_ids': batch['input_ids'][step], 'attention_mask': batch['input_mask'][step], 'token_type_ids': batch['segment_ids'][step]}, label_positions=batch['masked_lm_positions'][step], label_ids=batch['masked_lm_ids'][step], label_weights=batch['masked_lm_weights'][step], next_sentence_labels=batch['next_sentence_labels'][step], skip_sop=skip_sop, skip_mlm=skip_mlm) total_loss += tf.cast(loss, total_loss.dtype) total_mlm_loss += tf.cast(mlm_loss, total_mlm_loss.dtype) total_mlm_acc += tf.cast(mlm_acc, total_mlm_acc.dtype) total_sop_loss += tf.cast(sop_loss, total_sop_loss.dtype) total_sop_acc += tf.cast(sop_acc, total_sop_acc.dtype) total_loss /= train_args.gradient_accumulation_steps total_mlm_loss /= train_args.gradient_accumulation_steps total_mlm_acc /= train_args.gradient_accumulation_steps total_sop_loss /= train_args.gradient_accumulation_steps total_sop_acc /= train_args.gradient_accumulation_steps return_tuple = allreduce(model=model, optimizer=optimizer, gradient_accumulator=gradient_accumulator, loss=total_loss, mlm_loss=total_mlm_loss, mlm_acc=total_mlm_acc, sop_loss=total_sop_loss, sop_acc=total_sop_acc) (loss, mlm_loss, mlm_acc, sop_loss, sop_acc, grad_norm, weight_norm) = return_tuple if i == 1: if hvd.rank() == 0 and loaded_optimizer_weights is not None: optimizer.set_weights(loaded_optimizer_weights) hvd.broadcast_variables(model.variables, root_rank=0) hvd.broadcast_variables(optimizer.variables(), root_rank=0) i = optimizer.get_weights()[0] is_final_step = i >= train_args.total_steps do_squad = log_args.squad_frequency != 0 and (i % log_args.squad_frequency == 0 or is_final_step) if do_squad: squad_results = 
get_squad_results_while_pretraining(model=model, tokenizer=tokenizer, model_size=model_args.model_size, filesystem_prefix=path_args.filesystem_prefix, step=i, dataset=data_args.squad_version, fast=log_args.fast_squad, dummy_eval=log_args.dummy_eval) if hvd.rank() == 0: (squad_exact, squad_f1) = (squad_results['exact'], squad_results['f1']) logger.info(f'SQuAD step {i} -- F1: {squad_f1:.3f}, Exact: {squad_exact:.3f}') global validation_batch validation_batch = rewrap_tf_function(validation_batch) if do_gradient_accumulation: global train_batch train_batch = rewrap_tf_function(train_batch) global allreduce allreduce = rewrap_tf_function(allreduce) else: global train_step train_step = rewrap_tf_function(train_step) gc.collect() if hvd.rank() == 0: do_log = i % log_args.log_frequency == 0 do_checkpoint = log_args.checkpoint_frequency != 0 and (i % log_args.checkpoint_frequency == 0 or is_final_step) do_validation = log_args.validation_frequency != 0 and (i % log_args.validation_frequency == 0 or is_final_step) pbar.update(1) description = f'Loss: {loss:.3f}, MLM: {mlm_loss:.3f}, SOP: {sop_loss:.3f}, MLM_acc: {mlm_acc:.3f}, SOP_acc: {sop_acc:.3f}' pbar.set_description(description) if do_log: elapsed_time = time.perf_counter() - start_time if i == 1: logger.info(f'First step: {elapsed_time:.3f} secs') else: it_per_sec = log_args.log_frequency / elapsed_time logger.info(f'Train step {i} -- {description} -- It/s: {it_per_sec:.2f}') start_time = time.perf_counter() if do_checkpoint: checkpoint_prefix = os.path.join(path_args.filesystem_prefix, path_args.checkpoint_dir, f'{run_name}-step{i}') model_ckpt = f'{checkpoint_prefix}.ckpt' optimizer_ckpt = f'{checkpoint_prefix}-optimizer.npy' logger.info(f'Saving model at {model_ckpt}, optimizer at {optimizer_ckpt}') model.save_weights(model_ckpt) optimizer_weights = optimizer.get_weights() np.save(optimizer_ckpt, optimizer_weights) if do_validation: num_batches = 100 (val_loss, val_mlm_loss, val_mlm_acc, val_sop_loss, val_sop_acc) = (0, 0, 0, 0, 0) for batch in validation_dataset.take(num_batches): (loss, mlm_loss, mlm_acc, sop_loss, sop_acc) = validation_batch(model=model, batch=batch, skip_sop=skip_sop, skip_mlm=skip_mlm) val_loss += loss val_mlm_loss += mlm_loss val_mlm_acc += mlm_acc val_sop_loss += sop_loss val_sop_acc += sop_acc val_loss /= num_batches val_mlm_loss /= num_batches val_mlm_acc /= num_batches val_sop_loss /= num_batches val_sop_acc /= num_batches (val_loss, val_mlm_loss, val_mlm_acc, val_sop_loss, val_sop_acc) = (val_loss, val_mlm_loss, val_mlm_acc, val_sop_loss, val_sop_acc) description = f'Loss: {val_loss:.3f}, MLM: {val_mlm_loss:.3f}, SOP: {val_sop_loss:.3f}, MLM_acc: {val_mlm_acc:.3f}, SOP_acc: {val_sop_acc:.3f}' logger.info(f'Validation step {i} -- {description}') if summary_writer is None: summary_writer = tf.summary.create_file_writer(os.path.join(path_args.filesystem_prefix, path_args.log_dir, run_name)) config = {**asdict(model_args), **asdict(data_args), **asdict(train_args), **asdict(log_args), 'global_batch_size': train_args.per_gpu_batch_size * hvd.size()} if is_wandb_available(): wandb.init(config=config, project=model_args.model_type) wandb.run.save() wandb_run_name = wandb.run.name train_metrics = {'weight_norm': weight_norm, 'grad_norm': grad_norm, 'loss_scale': loss_scale, 'learning_rate': learning_rate, 'train/loss': loss, 'train/mlm_loss': mlm_loss, 'train/mlm_acc': mlm_acc, 'train/sop_loss': sop_loss, 'train/sop_acc': sop_acc} all_metrics = {**train_metrics} if do_validation: val_metrics = {'val/loss': val_loss, 
'val/mlm_loss': val_mlm_loss, 'val/mlm_acc': val_mlm_acc, 'val/sop_loss': val_sop_loss, 'val/sop_acc': val_sop_acc} all_metrics = {**all_metrics, **val_metrics} if do_squad: squad_metrics = {'squad/f1': squad_f1, 'squad/exact': squad_exact} all_metrics = {**all_metrics, **squad_metrics} with summary_writer.as_default(): for (name, val) in all_metrics.items(): tf.summary.scalar(name, val, step=i) if is_wandb_available(): wandb.log({'step': i, **all_metrics}) i += 1 if is_final_step: break if hvd.rank() == 0: pbar.close() logger.info(f'Finished pretraining, job name {run_name}')
deep-learning-models
positive
def visit_Gather(self, operation): x_ = operation.x if isinstance(x_, Operation): <DeepExtract> if x_ not in self.results: result = super().visit(x_) self.results[x_] = result x_ = self.results[x_] </DeepExtract> indices_ = operation.indices if isinstance(indices_, Operation): <DeepExtract> if indices_ not in self.results: result = super().visit(indices_) self.results[indices_] = result indices_ = self.results[indices_] </DeepExtract> @self._cached def gather_func(*inputs): <DeepExtract> concrete_values = [] for variable in [x_, indices_]: if callable(variable): concrete_values.append(variable(*inputs)) else: concrete_values.append(variable) if len(concrete_values) == 1: (x, indices) = concrete_values[0] (x, indices) = concrete_values </DeepExtract> result = tf.gather(x, indices, axis=operation.axis) return result return gather_func
def visit_Gather(self, operation): x_ = operation.x if isinstance(x_, Operation): if x_ not in self.results: result = super().visit(x_) self.results[x_] = result x_ = self.results[x_] indices_ = operation.indices if isinstance(indices_, Operation): if indices_ not in self.results: result = super().visit(indices_) self.results[indices_] = result indices_ = self.results[indices_] @self._cached def gather_func(*inputs): concrete_values = [] for variable in [x_, indices_]: if callable(variable): concrete_values.append(variable(*inputs)) else: concrete_values.append(variable) if len(concrete_values) == 1: (x, indices) = concrete_values[0] (x, indices) = concrete_values result = tf.gather(x, indices, axis=operation.axis) return result return gather_func
DNNV
positive
def intrusive_container_has_size_member(intrusive_container_type): <DeepExtract> n = 0 while True: try: arg = intrusive_container_type.strip_typedefs().template_argument(n) if str(arg).startswith('boost::intrusive::constant_time_size'): constant_size_arg = arg n += 1 except RuntimeError: constant_size_arg = None </DeepExtract> if not constant_size_arg: return True if str(constant_size_arg.template_argument(0)) == 'false': return False return True
def intrusive_container_has_size_member(intrusive_container_type): n = 0 while True: try: arg = intrusive_container_type.strip_typedefs().template_argument(n) if str(arg).startswith('boost::intrusive::constant_time_size'): constant_size_arg = arg n += 1 except RuntimeError: constant_size_arg = None if not constant_size_arg: return True if str(constant_size_arg.template_argument(0)) == 'false': return False return True
Boost-Pretty-Printer
positive
def autoshape(self): <DeepExtract> self.display(pprint=True) </DeepExtract> return self
def autoshape(self): self.display(pprint=True) return self
2nd-place-solution-for-VinBigData-Chest-X-ray-Abnormalities-Detection
positive
def get_current_value(self, asset, quantity): <DeepExtract> asset_price_ccy = None if asset == 'BTC' or asset in config.fiat_list: (asset_price_ccy, name, data_source) = self.price_data.get_latest(asset, config.ccy) else: (asset_price_btc, name, data_source) = self.price_data.get_latest(asset, 'BTC') if asset_price_btc is not None: (btc_price_ccy, _, _) = self.price_data.get_latest('BTC', config.ccy) if btc_price_ccy is not None: asset_price_ccy = btc_price_ccy * asset_price_btc (asset_price_ccy, name, data_source) = (asset_price_ccy, name, data_source) </DeepExtract> if asset_price_ccy is not None: return (asset_price_ccy * quantity, name, data_source) return (None, None, None)
def get_current_value(self, asset, quantity): asset_price_ccy = None if asset == 'BTC' or asset in config.fiat_list: (asset_price_ccy, name, data_source) = self.price_data.get_latest(asset, config.ccy) else: (asset_price_btc, name, data_source) = self.price_data.get_latest(asset, 'BTC') if asset_price_btc is not None: (btc_price_ccy, _, _) = self.price_data.get_latest('BTC', config.ccy) if btc_price_ccy is not None: asset_price_ccy = btc_price_ccy * asset_price_btc (asset_price_ccy, name, data_source) = (asset_price_ccy, name, data_source) if asset_price_ccy is not None: return (asset_price_ccy * quantity, name, data_source) return (None, None, None)
BittyTax
positive
@cli.action('create-profile', BundleIdArgument.BUNDLE_ID_RESOURCE_ID, CertificateArgument.CERTIFICATE_RESOURCE_IDS, DeviceArgument.DEVICE_RESOURCE_IDS, ProfileArgument.PROFILE_TYPE, ProfileArgument.PROFILE_NAME, CommonArgument.SAVE) def create_profile(self, bundle_id_resource_id: ResourceId, certificate_resource_ids: Sequence[ResourceId], device_resource_ids: Sequence[ResourceId], profile_type: ProfileType=ProfileType.IOS_APP_DEVELOPMENT, profile_name: Optional[str]=None, save: bool=False, should_print: bool=True) -> Profile: """ Create provisioning profile of given type """ <DeepExtract> bundle_id = self._get_resource(bundle_id_resource_id, self.api_client.bundle_ids, False) </DeepExtract> if profile_name: name = profile_name elif profile_name is None: name = f'{bundle_id.attributes.name} {profile_type.value.lower()} {int(time.time())}' else: raise AppStoreConnectError(f'"{profile_name}" is not a valid {Profile} name') create_params = dict(name=name, profile_type=profile_type, bundle_id=bundle_id_resource_id, certificates=certificate_resource_ids, devices=[], omit_keys=['devices']) if profile_type.devices_required(): create_params['devices'] = device_resource_ids profile = self._create_resource(self.api_client.profiles, should_print, **create_params) if save: <DeepExtract> profile_path = self._get_unique_path(f'{profile.attributes.profileType}_{profile.id}{profile.profile_extension}', self.profiles_directory) profile_path.write_bytes(profile.profile_content) self.printer.log_saved(profile, profile_path) return profile_path </DeepExtract> return profile
@cli.action('create-profile', BundleIdArgument.BUNDLE_ID_RESOURCE_ID, CertificateArgument.CERTIFICATE_RESOURCE_IDS, DeviceArgument.DEVICE_RESOURCE_IDS, ProfileArgument.PROFILE_TYPE, ProfileArgument.PROFILE_NAME, CommonArgument.SAVE) def create_profile(self, bundle_id_resource_id: ResourceId, certificate_resource_ids: Sequence[ResourceId], device_resource_ids: Sequence[ResourceId], profile_type: ProfileType=ProfileType.IOS_APP_DEVELOPMENT, profile_name: Optional[str]=None, save: bool=False, should_print: bool=True) -> Profile: """ Create provisioning profile of given type """ bundle_id = self._get_resource(bundle_id_resource_id, self.api_client.bundle_ids, False) if profile_name: name = profile_name elif profile_name is None: name = f'{bundle_id.attributes.name} {profile_type.value.lower()} {int(time.time())}' else: raise AppStoreConnectError(f'"{profile_name}" is not a valid {Profile} name') create_params = dict(name=name, profile_type=profile_type, bundle_id=bundle_id_resource_id, certificates=certificate_resource_ids, devices=[], omit_keys=['devices']) if profile_type.devices_required(): create_params['devices'] = device_resource_ids profile = self._create_resource(self.api_client.profiles, should_print, **create_params) if save: profile_path = self._get_unique_path(f'{profile.attributes.profileType}_{profile.id}{profile.profile_extension}', self.profiles_directory) profile_path.write_bytes(profile.profile_content) self.printer.log_saved(profile, profile_path) return profile_path return profile
cli-tools
positive
@pytest.mark.parametrize(['vasprun_parser'], [('spin',)], indirect=True) def test_create_node_dos_spin(fresh_aiida_env, vasprun_parser): """Check that the node composer works for the density of states node and contain the correct density of states for spins.""" node_settings_key = 'dos' assert NODES[node_settings_key]['link_name'] == 'dos' assert NODES[node_settings_key]['type'] == 'core.array' <DeepExtract> requested_node = {NODES[node_settings_key]['link_name']: NODES[node_settings_key]} parsed_quantities = {} equivalent_keys = {} for parser in [vasprun_parser]: for item in NODES[node_settings_key]['quantities']: if item in parser.PARSABLE_QUANTITIES: parsed_quantities[item] = parser.get_quantity(item) equivalent_keys[item] = [item] composed_nodes = NodeComposer(requested_node, equivalent_keys, parsed_quantities) data_class = get_data_class(NODES[node_settings_key]['type']) assert NODES[node_settings_key]['link_name'] in composed_nodes.successful assert isinstance(composed_nodes.successful[NODES[node_settings_key]['link_name']], data_class) composed_nodes = composed_nodes </DeepExtract> data_obj = composed_nodes.successful['dos'] dos = data_obj.get_array('tdos') assert dos.shape == (2, 1000) assert dos[0, 500] == pytest.approx(0.9839) assert dos[1, 500] == pytest.approx(0.9844)
@pytest.mark.parametrize(['vasprun_parser'], [('spin',)], indirect=True) def test_create_node_dos_spin(fresh_aiida_env, vasprun_parser): """Check that the node composer works for the density of states node and contain the correct density of states for spins.""" node_settings_key = 'dos' assert NODES[node_settings_key]['link_name'] == 'dos' assert NODES[node_settings_key]['type'] == 'core.array' requested_node = {NODES[node_settings_key]['link_name']: NODES[node_settings_key]} parsed_quantities = {} equivalent_keys = {} for parser in [vasprun_parser]: for item in NODES[node_settings_key]['quantities']: if item in parser.PARSABLE_QUANTITIES: parsed_quantities[item] = parser.get_quantity(item) equivalent_keys[item] = [item] composed_nodes = NodeComposer(requested_node, equivalent_keys, parsed_quantities) data_class = get_data_class(NODES[node_settings_key]['type']) assert NODES[node_settings_key]['link_name'] in composed_nodes.successful assert isinstance(composed_nodes.successful[NODES[node_settings_key]['link_name']], data_class) composed_nodes = composed_nodes data_obj = composed_nodes.successful['dos'] dos = data_obj.get_array('tdos') assert dos.shape == (2, 1000) assert dos[0, 500] == pytest.approx(0.9839) assert dos[1, 500] == pytest.approx(0.9844)
aiida-vasp
positive
def generic_visit(self, node): """Called if no explicit visitor function exists for a node.""" for (field, value) in iter_fields(node): if isinstance(value, list): for item in value: if isinstance(item, AST): <DeepExtract> f = self.get_visitor(item) if f is not None: return f(item) return self.generic_visit(item) </DeepExtract> elif isinstance(value, AST): <DeepExtract> f = self.get_visitor(value) if f is not None: return f(value) return self.generic_visit(value) </DeepExtract>
def generic_visit(self, node): """Called if no explicit visitor function exists for a node.""" for (field, value) in iter_fields(node): if isinstance(value, list): for item in value: if isinstance(item, AST): f = self.get_visitor(item) if f is not None: return f(item) return self.generic_visit(item) elif isinstance(value, AST): f = self.get_visitor(value) if f is not None: return f(value) return self.generic_visit(value)
atsf4g-co
positive
def insert_axioms_in_coq_script(axioms, coq_script): coq_script_lines = coq_script.split('\n') <DeepExtract> for (i, line) in enumerate(coq_script_lines): if line.startswith('Theorem '): theorem_line = i assert False, 'There was no theorem defined in the coq script: {0}'.format('\n'.join(coq_script_lines)) </DeepExtract> for axiom in axioms: axiom_name = axiom.split()[1] coq_script_lines.insert(theorem_line, 'Hint Resolve {0}.'.format(axiom_name)) coq_script_lines.insert(theorem_line, axiom) new_coq_script = '\n'.join(coq_script_lines) return new_coq_script
def insert_axioms_in_coq_script(axioms, coq_script): coq_script_lines = coq_script.split('\n') for (i, line) in enumerate(coq_script_lines): if line.startswith('Theorem '): theorem_line = i assert False, 'There was no theorem defined in the coq script: {0}'.format('\n'.join(coq_script_lines)) for axiom in axioms: axiom_name = axiom.split()[1] coq_script_lines.insert(theorem_line, 'Hint Resolve {0}.'.format(axiom_name)) coq_script_lines.insert(theorem_line, axiom) new_coq_script = '\n'.join(coq_script_lines) return new_coq_script
ccg2lambda
positive
def get_symbol(self, cfg, is_train=True): num_classes = cfg.dataset.NUM_CLASSES num_reg_classes = 2 if cfg.CLASS_AGNOSTIC else num_classes num_anchors = cfg.network.NUM_ANCHORS if is_train: data = mx.sym.Variable(name='data') im_info = mx.sym.Variable(name='im_info') gt_boxes = mx.sym.Variable(name='gt_boxes') rpn_label = mx.sym.Variable(name='label') rpn_bbox_target = mx.sym.Variable(name='bbox_target') rpn_bbox_weight = mx.sym.Variable(name='bbox_weight') else: data = mx.sym.Variable(name='data') im_info = mx.sym.Variable(name='im_info') <DeepExtract> conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True) bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale_conv1 = bn_conv1 conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu') pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max') res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2a_branch1 = bn2a_branch1 res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2a_branch2a = bn2a_branch2a res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu') res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2a_branch2b = bn2a_branch2b res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu') res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2a_branch2c = bn2a_branch2c res2a = mx.symbol.broadcast_add(*[scale2a_branch1, scale2a_branch2c], name='res2a') res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu') res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2b_branch2a = bn2b_branch2a res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu') res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2b_branch2b = bn2b_branch2b res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, 
act_type='relu') res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2b_branch2c = bn2b_branch2c res2b = mx.symbol.broadcast_add(*[res2a_relu, scale2b_branch2c], name='res2b') res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu') res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2c_branch2a = bn2c_branch2a res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu') res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2c_branch2b = bn2c_branch2b res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu') res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2c_branch2c = bn2c_branch2c res2c = mx.symbol.broadcast_add(*[res2b_relu, scale2c_branch2c], name='res2c') res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu') res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3a_branch1 = bn3a_branch1 res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3a_branch2a = bn3a_branch2a res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu') res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3a_branch2b = bn3a_branch2b res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu') res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3a_branch2c = bn3a_branch2c res3a = mx.symbol.broadcast_add(*[scale3a_branch1, scale3a_branch2c], name='res3a') res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu') res3b1_branch2a = 
mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b1_branch2a = bn3b1_branch2a res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu') res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b1_branch2b = bn3b1_branch2b res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu') res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b1_branch2c = bn3b1_branch2c res3b1 = mx.symbol.broadcast_add(*[res3a_relu, scale3b1_branch2c], name='res3b1') res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu') res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b2_branch2a = bn3b2_branch2a res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu') res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b2_branch2b = bn3b2_branch2b res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu') res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b2_branch2c = bn3b2_branch2c res3b2 = mx.symbol.broadcast_add(*[res3b1_relu, scale3b2_branch2c], name='res3b2') res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu') res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b3_branch2a = bn3b3_branch2a res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu') res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b3_branch2b = bn3b3_branch2b res3b3_branch2b_relu = 
mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu') res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b3_branch2c = bn3b3_branch2c res3b3 = mx.symbol.broadcast_add(*[res3b2_relu, scale3b3_branch2c], name='res3b3') res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu') res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4a_branch1 = bn4a_branch1 res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4a_branch2a = bn4a_branch2a res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu') res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4a_branch2b = bn4a_branch2b res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu') res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4a_branch2c = bn4a_branch2c res4a = mx.symbol.broadcast_add(*[scale4a_branch1, scale4a_branch2c], name='res4a') res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu') res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b1_branch2a = bn4b1_branch2a res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu') res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b1_branch2b = bn4b1_branch2b res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu') res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b1_branch2c = bn4b1_branch2c res4b1 = mx.symbol.broadcast_add(*[res4a_relu, scale4b1_branch2c], 
name='res4b1') res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu') res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b2_branch2a = bn4b2_branch2a res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu') res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b2_branch2b = bn4b2_branch2b res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu') res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b2_branch2c = bn4b2_branch2c res4b2 = mx.symbol.broadcast_add(*[res4b1_relu, scale4b2_branch2c], name='res4b2') res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu') res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b3_branch2a = bn4b3_branch2a res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu') res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b3_branch2b = bn4b3_branch2b res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu') res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b3_branch2c = bn4b3_branch2c res4b3 = mx.symbol.broadcast_add(*[res4b2_relu, scale4b3_branch2c], name='res4b3') res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu') res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b4_branch2a = bn4b4_branch2a res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu') res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', 
data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b4_branch2b = bn4b4_branch2b res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu') res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b4_branch2c = bn4b4_branch2c res4b4 = mx.symbol.broadcast_add(*[res4b3_relu, scale4b4_branch2c], name='res4b4') res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu') res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b5_branch2a = bn4b5_branch2a res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu') res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b5_branch2b = bn4b5_branch2b res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu') res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b5_branch2c = bn4b5_branch2c res4b5 = mx.symbol.broadcast_add(*[res4b4_relu, scale4b5_branch2c], name='res4b5') res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu') res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b6_branch2a = bn4b6_branch2a res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu') res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b6_branch2b = bn4b6_branch2b res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu') res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b6_branch2c = bn4b6_branch2c res4b6 = mx.symbol.broadcast_add(*[res4b5_relu, scale4b6_branch2c], name='res4b6') res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu') res4b7_branch2a = 
mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b7_branch2a = bn4b7_branch2a res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a, act_type='relu') res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b7_branch2b = bn4b7_branch2b res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu') res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b7_branch2c = bn4b7_branch2c res4b7 = mx.symbol.broadcast_add(*[res4b6_relu, scale4b7_branch2c], name='res4b7') res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu') res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b8_branch2a = bn4b8_branch2a res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu') res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b8_branch2b = bn4b8_branch2b res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu') res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b8_branch2c = bn4b8_branch2c res4b8 = mx.symbol.broadcast_add(*[res4b7_relu, scale4b8_branch2c], name='res4b8') res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu') res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b9_branch2a = bn4b9_branch2a res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu') res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b9_branch2b = bn4b9_branch2b 
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu') res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b9_branch2c = bn4b9_branch2c res4b9 = mx.symbol.broadcast_add(*[res4b8_relu, scale4b9_branch2c], name='res4b9') res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu') res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b10_branch2a = bn4b10_branch2a res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu') res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b10_branch2b = bn4b10_branch2b res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu') res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b10_branch2c = bn4b10_branch2c res4b10 = mx.symbol.broadcast_add(*[res4b9_relu, scale4b10_branch2c], name='res4b10') res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu') res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b11_branch2a = bn4b11_branch2a res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu') res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b11_branch2b = bn4b11_branch2b res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu') res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b11_branch2c = bn4b11_branch2c res4b11 = mx.symbol.broadcast_add(*[res4b10_relu, scale4b11_branch2c], name='res4b11') res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu') res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, 
num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b12_branch2a = bn4b12_branch2a res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu') res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b12_branch2b = bn4b12_branch2b res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu') res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b12_branch2c = bn4b12_branch2c res4b12 = mx.symbol.broadcast_add(*[res4b11_relu, scale4b12_branch2c], name='res4b12') res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu') res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b13_branch2a = bn4b13_branch2a res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu') res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b13_branch2b = bn4b13_branch2b res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu') res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b13_branch2c = bn4b13_branch2c res4b13 = mx.symbol.broadcast_add(*[res4b12_relu, scale4b13_branch2c], name='res4b13') res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu') res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b14_branch2a = bn4b14_branch2a res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu') res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b14_branch2b = 
bn4b14_branch2b res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu') res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b14_branch2c = bn4b14_branch2c res4b14 = mx.symbol.broadcast_add(*[res4b13_relu, scale4b14_branch2c], name='res4b14') res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu') res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b15_branch2a = bn4b15_branch2a res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu') res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b15_branch2b = bn4b15_branch2b res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu') res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b15_branch2c = bn4b15_branch2c res4b15 = mx.symbol.broadcast_add(*[res4b14_relu, scale4b15_branch2c], name='res4b15') res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu') res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b16_branch2a = bn4b16_branch2a res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu') res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b16_branch2b = bn4b16_branch2b res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu') res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b16_branch2c = bn4b16_branch2c res4b16 = mx.symbol.broadcast_add(*[res4b15_relu, scale4b16_branch2c], name='res4b16') res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu') res4b17_branch2a = 
mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b17_branch2a = bn4b17_branch2a res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu') res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b17_branch2b = bn4b17_branch2b res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu') res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b17_branch2c = bn4b17_branch2c res4b17 = mx.symbol.broadcast_add(*[res4b16_relu, scale4b17_branch2c], name='res4b17') res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu') res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b18_branch2a = bn4b18_branch2a res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu') res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b18_branch2b = bn4b18_branch2b res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b, act_type='relu') res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b18_branch2c = bn4b18_branch2c res4b18 = mx.symbol.broadcast_add(*[res4b17_relu, scale4b18_branch2c], name='res4b18') res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu') res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b19_branch2a = bn4b19_branch2a res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu') res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, 
use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b19_branch2b = bn4b19_branch2b res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu') res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b19_branch2c = bn4b19_branch2c res4b19 = mx.symbol.broadcast_add(*[res4b18_relu, scale4b19_branch2c], name='res4b19') res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu') res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b20_branch2a = bn4b20_branch2a res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu') res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b20_branch2b = bn4b20_branch2b res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu') res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b20_branch2c = bn4b20_branch2c res4b20 = mx.symbol.broadcast_add(*[res4b19_relu, scale4b20_branch2c], name='res4b20') res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu') res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b21_branch2a = bn4b21_branch2a res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu') res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b21_branch2b = bn4b21_branch2b res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu') res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b21_branch2c = bn4b21_branch2c res4b21 = mx.symbol.broadcast_add(*[res4b20_relu, scale4b21_branch2c], name='res4b21') res4b21_relu = mx.symbol.Activation(name='res4b21_relu', 
data=res4b21, act_type='relu') res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b22_branch2a = bn4b22_branch2a res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu') res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b22_branch2b = bn4b22_branch2b res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu') res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b22_branch2c = bn4b22_branch2c res4b22 = mx.symbol.broadcast_add(*[res4b21_relu, scale4b22_branch2c], name='res4b22') res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu') conv_feat = res4b22_relu </DeepExtract> <DeepExtract> res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=conv_feat, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5a_branch1 = bn5a_branch1 res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=conv_feat, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5a_branch2a = bn5a_branch2a res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu') res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), cudnn_off=True) res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512, pad=(2, 2), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=(2, 2), no_bias=True) bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5a_branch2b = bn5a_branch2b res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu') res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5a_branch2c = bn5a_branch2c res5a = mx.symbol.broadcast_add(*[scale5a_branch1, scale5a_branch2c], name='res5a') res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu') res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, 
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5b_branch2a = bn5b_branch2a res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu') res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), cudnn_off=True) res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512, pad=(2, 2), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=(2, 2), no_bias=True) bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5b_branch2b = bn5b_branch2b res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu') res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5b_branch2c = bn5b_branch2c res5b = mx.symbol.broadcast_add(*[res5a_relu, scale5b_branch2c], name='res5b') res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu') res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5c_branch2a = bn5c_branch2a res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu') res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), cudnn_off=True) res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512, pad=(2, 2), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=(2, 2), no_bias=True) bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5c_branch2b = bn5c_branch2b res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu') res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5c_branch2c = bn5c_branch2c res5c = mx.symbol.broadcast_add(*[res5b_relu, scale5c_branch2c], name='res5c') res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu') relu1 = res5c_relu </DeepExtract> <DeepExtract> rpn_conv = mx.sym.Convolution(data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name='rpn_conv_3x3') rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name='rpn_relu') rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name='rpn_cls_score') rpn_bbox_pred = 
mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name='rpn_bbox_pred')
(rpn_cls_score, rpn_bbox_pred) = (rpn_cls_score, rpn_bbox_pred)
</DeepExtract>
if is_train:
    rpn_cls_score_reshape = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_reshape')
    rpn_cls_prob = mx.sym.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=-1, name='rpn_cls_prob')
    rpn_bbox_loss_ = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=rpn_bbox_pred - rpn_bbox_target)
    rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE)
    rpn_cls_act = mx.sym.SoftmaxActivation(data=rpn_cls_score_reshape, mode='channel', name='rpn_cls_act')
    rpn_cls_act_reshape = mx.sym.Reshape(data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape')
    if cfg.TRAIN.CXX_PROPOSAL:
        rois = mx.contrib.sym.Proposal(cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', feature_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS), rpn_pre_nms_top_n=cfg.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TRAIN.RPN_POST_NMS_TOP_N, threshold=cfg.TRAIN.RPN_NMS_THRESH, rpn_min_size=cfg.TRAIN.RPN_MIN_SIZE)
    else:
        rois = mx.sym.Custom(cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', op_type='proposal', feat_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS), rpn_pre_nms_top_n=cfg.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TRAIN.RPN_POST_NMS_TOP_N, threshold=cfg.TRAIN.RPN_NMS_THRESH, rpn_min_size=cfg.TRAIN.RPN_MIN_SIZE)
    gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
    (rois, label, bbox_target, bbox_weight) = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES, batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
else:
    rpn_cls_score_reshape = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_reshape')
    rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_reshape, mode='channel', name='rpn_cls_prob')
    rpn_cls_prob_reshape = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
    if cfg.TEST.CXX_PROPOSAL:
        rois = mx.contrib.sym.Proposal(cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', feature_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS), rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N, threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE)
    else:
        rois = mx.sym.Custom(cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', op_type='proposal', feat_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS), rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N, threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE)
conv_new_1 = mx.sym.Convolution(data=relu1, kernel=(1, 1), num_filter=256, name='conv_new_1')
conv_new_1_relu = mx.sym.Activation(data=conv_new_1, act_type='relu', name='conv_new_1_relu')
offset_t = mx.contrib.sym.DeformablePSROIPooling(name='offset_t', data=conv_new_1_relu, rois=rois, group_size=1, pooled_size=7, sample_per_part=4, no_trans=True, part_size=7, output_dim=256, spatial_scale=0.0625)
offset = mx.sym.FullyConnected(name='offset', data=offset_t, num_hidden=7 * 7 * 2, lr_mult=0.01)
offset_reshape = mx.sym.Reshape(data=offset, shape=(-1, 2, 7, 7), name='offset_reshape')
deformable_roi_pool = mx.contrib.sym.DeformablePSROIPooling(name='deformable_roi_pool', data=conv_new_1_relu, rois=rois, trans=offset_reshape, group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7, output_dim=256, spatial_scale=0.0625, trans_std=0.1)
fc_new_1 = mx.sym.FullyConnected(name='fc_new_1', data=deformable_roi_pool, num_hidden=1024)
fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
fc_new_2 = mx.sym.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
cls_score = mx.sym.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)
bbox_pred = mx.sym.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=num_reg_classes * 4)
if is_train:
    if cfg.TRAIN.ENABLE_OHEM:
        (labels_ohem, bbox_weights_ohem) = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight)
        cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1)
        bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=bbox_pred - bbox_target)
        bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
        rcnn_label = labels_ohem
    else:
        cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
        bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=bbox_pred - bbox_target)
        bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
        rcnn_label = label
    rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
    cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
    bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
    group = mx.sym.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
else:
    cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
    cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
    bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
    group = mx.sym.Group([rois, cls_prob, bbox_pred])
self.sym = group
return group
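# Note: the res4b1 ... res4b22 chain above repeats one hand-written bottleneck
# residual unit (1x1 conv -> BN -> ReLU, 3x3 conv -> BN -> ReLU, 1x1 conv -> BN,
# identity add, ReLU) 22 times with frozen BatchNorm statistics. A minimal,
# illustrative helper that builds one such unit is sketched below; the helper
# name, its signature, the eps default, and the generated layer names are
# assumptions for illustration only and are not part of this repository's code.
import mxnet as mx

def bottleneck_unit(data, prefix, num_mid, num_out, eps=2e-5):
    """Identity-shortcut bottleneck unit mirroring the res4b* blocks above."""
    def conv_bn(d, suffix, num_filter, kernel, pad):
        conv = mx.symbol.Convolution(name=prefix + suffix, data=d, num_filter=num_filter,
                                     pad=pad, kernel=kernel, stride=(1, 1), no_bias=True)
        return mx.symbol.BatchNorm(name='bn_' + prefix + suffix, data=conv,
                                   use_global_stats=True, fix_gamma=False, eps=eps)

    b2a = mx.symbol.Activation(name=prefix + '_branch2a_relu',
                               data=conv_bn(data, '_branch2a', num_mid, (1, 1), (0, 0)),
                               act_type='relu')
    b2b = mx.symbol.Activation(name=prefix + '_branch2b_relu',
                               data=conv_bn(b2a, '_branch2b', num_mid, (3, 3), (1, 1)),
                               act_type='relu')
    b2c = conv_bn(b2b, '_branch2c', num_out, (1, 1), (0, 0))
    out = mx.symbol.broadcast_add(*[data, b2c], name=prefix)
    return mx.symbol.Activation(name=prefix + '_relu', data=out, act_type='relu')

# Usage sketch: unit = res4a_relu
#               for i in range(1, 23): unit = bottleneck_unit(unit, 'res4b%d' % i, 256, 1024)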
def get_symbol(self, cfg, is_train=True): num_classes = cfg.dataset.NUM_CLASSES num_reg_classes = 2 if cfg.CLASS_AGNOSTIC else num_classes num_anchors = cfg.network.NUM_ANCHORS if is_train: data = mx.sym.Variable(name='data') im_info = mx.sym.Variable(name='im_info') gt_boxes = mx.sym.Variable(name='gt_boxes') rpn_label = mx.sym.Variable(name='label') rpn_bbox_target = mx.sym.Variable(name='bbox_target') rpn_bbox_weight = mx.sym.Variable(name='bbox_weight') else: data = mx.sym.Variable(name='data') im_info = mx.sym.Variable(name='im_info') conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True) bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale_conv1 = bn_conv1 conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu') pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max') res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2a_branch1 = bn2a_branch1 res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2a_branch2a = bn2a_branch2a res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu') res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2a_branch2b = bn2a_branch2b res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu') res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2a_branch2c = bn2a_branch2c res2a = mx.symbol.broadcast_add(*[scale2a_branch1, scale2a_branch2c], name='res2a') res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu') res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2b_branch2a = bn2b_branch2a res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu') res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2b_branch2b = bn2b_branch2b res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, 
act_type='relu') res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2b_branch2c = bn2b_branch2c res2b = mx.symbol.broadcast_add(*[res2a_relu, scale2b_branch2c], name='res2b') res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu') res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2c_branch2a = bn2c_branch2a res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu') res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2c_branch2b = bn2c_branch2b res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu') res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2c_branch2c = bn2c_branch2c res2c = mx.symbol.broadcast_add(*[res2b_relu, scale2c_branch2c], name='res2c') res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu') res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3a_branch1 = bn3a_branch1 res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3a_branch2a = bn3a_branch2a res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu') res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3a_branch2b = bn3a_branch2b res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu') res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3a_branch2c = bn3a_branch2c res3a = mx.symbol.broadcast_add(*[scale3a_branch1, scale3a_branch2c], name='res3a') res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu') res3b1_branch2a = 
mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b1_branch2a = bn3b1_branch2a res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu') res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b1_branch2b = bn3b1_branch2b res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu') res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b1_branch2c = bn3b1_branch2c res3b1 = mx.symbol.broadcast_add(*[res3a_relu, scale3b1_branch2c], name='res3b1') res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu') res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b2_branch2a = bn3b2_branch2a res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu') res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b2_branch2b = bn3b2_branch2b res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu') res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b2_branch2c = bn3b2_branch2c res3b2 = mx.symbol.broadcast_add(*[res3b1_relu, scale3b2_branch2c], name='res3b2') res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu') res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b3_branch2a = bn3b3_branch2a res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu') res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b3_branch2b = bn3b3_branch2b res3b3_branch2b_relu = 
mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu') res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b3_branch2c = bn3b3_branch2c res3b3 = mx.symbol.broadcast_add(*[res3b2_relu, scale3b3_branch2c], name='res3b3') res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu') res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4a_branch1 = bn4a_branch1 res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4a_branch2a = bn4a_branch2a res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu') res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4a_branch2b = bn4a_branch2b res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu') res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4a_branch2c = bn4a_branch2c res4a = mx.symbol.broadcast_add(*[scale4a_branch1, scale4a_branch2c], name='res4a') res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu') res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b1_branch2a = bn4b1_branch2a res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu') res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b1_branch2b = bn4b1_branch2b res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu') res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b1_branch2c = bn4b1_branch2c res4b1 = mx.symbol.broadcast_add(*[res4a_relu, scale4b1_branch2c], 
name='res4b1') res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu') res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b2_branch2a = bn4b2_branch2a res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu') res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b2_branch2b = bn4b2_branch2b res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu') res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b2_branch2c = bn4b2_branch2c res4b2 = mx.symbol.broadcast_add(*[res4b1_relu, scale4b2_branch2c], name='res4b2') res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu') res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b3_branch2a = bn4b3_branch2a res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu') res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b3_branch2b = bn4b3_branch2b res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu') res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b3_branch2c = bn4b3_branch2c res4b3 = mx.symbol.broadcast_add(*[res4b2_relu, scale4b3_branch2c], name='res4b3') res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu') res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b4_branch2a = bn4b4_branch2a res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu') res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', 
data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b4_branch2b = bn4b4_branch2b res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu') res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b4_branch2c = bn4b4_branch2c res4b4 = mx.symbol.broadcast_add(*[res4b3_relu, scale4b4_branch2c], name='res4b4') res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu') res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b5_branch2a = bn4b5_branch2a res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu') res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b5_branch2b = bn4b5_branch2b res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu') res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b5_branch2c = bn4b5_branch2c res4b5 = mx.symbol.broadcast_add(*[res4b4_relu, scale4b5_branch2c], name='res4b5') res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu') res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b6_branch2a = bn4b6_branch2a res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu') res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b6_branch2b = bn4b6_branch2b res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu') res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b6_branch2c = bn4b6_branch2c res4b6 = mx.symbol.broadcast_add(*[res4b5_relu, scale4b6_branch2c], name='res4b6') res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu') res4b7_branch2a = 
mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b7_branch2a = bn4b7_branch2a res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a, act_type='relu') res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b7_branch2b = bn4b7_branch2b res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu') res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b7_branch2c = bn4b7_branch2c res4b7 = mx.symbol.broadcast_add(*[res4b6_relu, scale4b7_branch2c], name='res4b7') res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu') res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b8_branch2a = bn4b8_branch2a res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu') res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b8_branch2b = bn4b8_branch2b res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu') res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b8_branch2c = bn4b8_branch2c res4b8 = mx.symbol.broadcast_add(*[res4b7_relu, scale4b8_branch2c], name='res4b8') res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu') res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b9_branch2a = bn4b9_branch2a res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu') res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b9_branch2b = bn4b9_branch2b 
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu') res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b9_branch2c = bn4b9_branch2c res4b9 = mx.symbol.broadcast_add(*[res4b8_relu, scale4b9_branch2c], name='res4b9') res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu') res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b10_branch2a = bn4b10_branch2a res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu') res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b10_branch2b = bn4b10_branch2b res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu') res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b10_branch2c = bn4b10_branch2c res4b10 = mx.symbol.broadcast_add(*[res4b9_relu, scale4b10_branch2c], name='res4b10') res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu') res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b11_branch2a = bn4b11_branch2a res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu') res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b11_branch2b = bn4b11_branch2b res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu') res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b11_branch2c = bn4b11_branch2c res4b11 = mx.symbol.broadcast_add(*[res4b10_relu, scale4b11_branch2c], name='res4b11') res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu') res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, 
num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b12_branch2a = bn4b12_branch2a res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu') res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b12_branch2b = bn4b12_branch2b res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu') res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b12_branch2c = bn4b12_branch2c res4b12 = mx.symbol.broadcast_add(*[res4b11_relu, scale4b12_branch2c], name='res4b12') res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu') res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b13_branch2a = bn4b13_branch2a res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu') res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b13_branch2b = bn4b13_branch2b res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu') res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b13_branch2c = bn4b13_branch2c res4b13 = mx.symbol.broadcast_add(*[res4b12_relu, scale4b13_branch2c], name='res4b13') res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu') res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b14_branch2a = bn4b14_branch2a res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu') res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b14_branch2b = 
bn4b14_branch2b res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu') res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b14_branch2c = bn4b14_branch2c res4b14 = mx.symbol.broadcast_add(*[res4b13_relu, scale4b14_branch2c], name='res4b14') res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu') res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b15_branch2a = bn4b15_branch2a res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu') res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b15_branch2b = bn4b15_branch2b res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu') res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b15_branch2c = bn4b15_branch2c res4b15 = mx.symbol.broadcast_add(*[res4b14_relu, scale4b15_branch2c], name='res4b15') res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu') res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b16_branch2a = bn4b16_branch2a res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu') res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b16_branch2b = bn4b16_branch2b res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu') res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b16_branch2c = bn4b16_branch2c res4b16 = mx.symbol.broadcast_add(*[res4b15_relu, scale4b16_branch2c], name='res4b16') res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu') res4b17_branch2a = 
mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b17_branch2a = bn4b17_branch2a res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu') res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b17_branch2b = bn4b17_branch2b res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu') res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b17_branch2c = bn4b17_branch2c res4b17 = mx.symbol.broadcast_add(*[res4b16_relu, scale4b17_branch2c], name='res4b17') res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu') res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b18_branch2a = bn4b18_branch2a res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu') res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b18_branch2b = bn4b18_branch2b res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b, act_type='relu') res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b18_branch2c = bn4b18_branch2c res4b18 = mx.symbol.broadcast_add(*[res4b17_relu, scale4b18_branch2c], name='res4b18') res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu') res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b19_branch2a = bn4b19_branch2a res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu') res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, 
use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b19_branch2b = bn4b19_branch2b res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu') res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b19_branch2c = bn4b19_branch2c res4b19 = mx.symbol.broadcast_add(*[res4b18_relu, scale4b19_branch2c], name='res4b19') res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu') res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b20_branch2a = bn4b20_branch2a res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu') res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b20_branch2b = bn4b20_branch2b res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu') res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b20_branch2c = bn4b20_branch2c res4b20 = mx.symbol.broadcast_add(*[res4b19_relu, scale4b20_branch2c], name='res4b20') res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu') res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b21_branch2a = bn4b21_branch2a res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu') res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b21_branch2b = bn4b21_branch2b res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu') res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b21_branch2c = bn4b21_branch2c res4b21 = mx.symbol.broadcast_add(*[res4b20_relu, scale4b21_branch2c], name='res4b21') res4b21_relu = mx.symbol.Activation(name='res4b21_relu', 
data=res4b21, act_type='relu') res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b22_branch2a = bn4b22_branch2a res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu') res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b22_branch2b = bn4b22_branch2b res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu') res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b22_branch2c = bn4b22_branch2c res4b22 = mx.symbol.broadcast_add(*[res4b21_relu, scale4b22_branch2c], name='res4b22') res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu') conv_feat = res4b22_relu res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=conv_feat, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5a_branch1 = bn5a_branch1 res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=conv_feat, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5a_branch2a = bn5a_branch2a res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu') res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), cudnn_off=True) res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512, pad=(2, 2), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=(2, 2), no_bias=True) bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5a_branch2b = bn5a_branch2b res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu') res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5a_branch2c = bn5a_branch2c res5a = mx.symbol.broadcast_add(*[scale5a_branch1, scale5a_branch2c], name='res5a') res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu') res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), 
stride=(1, 1), no_bias=True) bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5b_branch2a = bn5b_branch2a res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu') res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), cudnn_off=True) res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512, pad=(2, 2), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=(2, 2), no_bias=True) bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5b_branch2b = bn5b_branch2b res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu') res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5b_branch2c = bn5b_branch2c res5b = mx.symbol.broadcast_add(*[res5a_relu, scale5b_branch2c], name='res5b') res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu') res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5c_branch2a = bn5c_branch2a res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu') res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), cudnn_off=True) res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512, pad=(2, 2), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=(2, 2), no_bias=True) bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5c_branch2b = bn5c_branch2b res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu') res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5c_branch2c = bn5c_branch2c res5c = mx.symbol.broadcast_add(*[res5b_relu, scale5c_branch2c], name='res5c') res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu') relu1 = res5c_relu rpn_conv = mx.sym.Convolution(data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name='rpn_conv_3x3') rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name='rpn_relu') rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name='rpn_cls_score') rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), 
num_filter=4 * num_anchors, name='rpn_bbox_pred') (rpn_cls_score, rpn_bbox_pred) = (rpn_cls_score, rpn_bbox_pred) if is_train: rpn_cls_score_reshape = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_reshape') rpn_cls_prob = mx.sym.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=-1, name='rpn_cls_prob') rpn_bbox_loss_ = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=rpn_bbox_pred - rpn_bbox_target) rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE) rpn_cls_act = mx.sym.SoftmaxActivation(data=rpn_cls_score_reshape, mode='channel', name='rpn_cls_act') rpn_cls_act_reshape = mx.sym.Reshape(data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape') if cfg.TRAIN.CXX_PROPOSAL: rois = mx.contrib.sym.Proposal(cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', feature_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS), rpn_pre_nms_top_n=cfg.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TRAIN.RPN_POST_NMS_TOP_N, threshold=cfg.TRAIN.RPN_NMS_THRESH, rpn_min_size=cfg.TRAIN.RPN_MIN_SIZE) else: rois = mx.sym.Custom(cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', op_type='proposal', feat_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS), rpn_pre_nms_top_n=cfg.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TRAIN.RPN_POST_NMS_TOP_N, threshold=cfg.TRAIN.RPN_NMS_THRESH, rpn_min_size=cfg.TRAIN.RPN_MIN_SIZE) gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape') (rois, label, bbox_target, bbox_weight) = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES, batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION) else: rpn_cls_score_reshape = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_reshape') rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_reshape, mode='channel', name='rpn_cls_prob') rpn_cls_prob_reshape = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape') if cfg.TEST.CXX_PROPOSAL: rois = mx.contrib.sym.Proposal(cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', feature_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS), rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N, threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE) else: rois = mx.sym.Custom(cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', op_type='proposal', feat_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS), rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N, threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE) conv_new_1 = mx.sym.Convolution(data=relu1, kernel=(1, 1), num_filter=256, name='conv_new_1') conv_new_1_relu = mx.sym.Activation(data=conv_new_1, act_type='relu', name='conv_new_1_relu') offset_t = 
mx.contrib.sym.DeformablePSROIPooling(name='offset_t', data=conv_new_1_relu, rois=rois, group_size=1, pooled_size=7, sample_per_part=4, no_trans=True, part_size=7, output_dim=256, spatial_scale=0.0625) offset = mx.sym.FullyConnected(name='offset', data=offset_t, num_hidden=7 * 7 * 2, lr_mult=0.01) offset_reshape = mx.sym.Reshape(data=offset, shape=(-1, 2, 7, 7), name='offset_reshape') deformable_roi_pool = mx.contrib.sym.DeformablePSROIPooling(name='deformable_roi_pool', data=conv_new_1_relu, rois=rois, trans=offset_reshape, group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7, output_dim=256, spatial_scale=0.0625, trans_std=0.1) fc_new_1 = mx.sym.FullyConnected(name='fc_new_1', data=deformable_roi_pool, num_hidden=1024) fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu') fc_new_2 = mx.sym.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024) fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu') cls_score = mx.sym.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes) bbox_pred = mx.sym.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=num_reg_classes * 4) if is_train: if cfg.TRAIN.ENABLE_OHEM: (labels_ohem, bbox_weights_ohem) = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight) cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1) bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=bbox_pred - bbox_target) bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM) rcnn_label = labels_ohem else: cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid') bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=bbox_pred - bbox_target) bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS) rcnn_label = label rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape') cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape') bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape') group = mx.sym.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)]) else: cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score) cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape') bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape') group = mx.sym.Group([rois, cls_prob, bbox_pred]) self.sym = group return group
Deformable-ConvNets
positive
def _get_adc_value(self, average=None):
    def read_data():
        self._intf.write(self._base_addr + self.MAX11644_ADD, array('B', pack('B', self.ADC_CONF)))
        data = self._intf.read(self._base_addr + self.MAX11644_ADD | 1, size=4)
        raw = unpack_from('>HH', data)
        return (raw[0] & 4095, raw[1] & 4095)
    if average:
        raw_ch0 = 0
        raw_ch1 = 0
        for _ in range(average):
            <DeepExtract>
            self._intf.write(self._base_addr + self.MAX11644_ADD, array('B', pack('B', self.ADC_CONF)))
            data = self._intf.read(self._base_addr + self.MAX11644_ADD | 1, size=4)
            raw = unpack_from('>HH', data)
            (tmp_raw_ch0, tmp_raw_ch1) = (raw[0] & 4095, raw[1] & 4095)
            </DeepExtract>
            raw_ch0 += tmp_raw_ch0
            raw_ch1 += tmp_raw_ch1
        raw_ch0 /= average
        raw_ch1 /= average
    else:
        <DeepExtract>
        self._intf.write(self._base_addr + self.MAX11644_ADD, array('B', pack('B', self.ADC_CONF)))
        data = self._intf.read(self._base_addr + self.MAX11644_ADD | 1, size=4)
        raw = unpack_from('>HH', data)
        (raw_ch0, raw_ch1) = (raw[0] & 4095, raw[1] & 4095)
        </DeepExtract>
    return (raw_ch0, raw_ch1)
def _get_adc_value(self, average=None):
    def read_data():
        self._intf.write(self._base_addr + self.MAX11644_ADD, array('B', pack('B', self.ADC_CONF)))
        data = self._intf.read(self._base_addr + self.MAX11644_ADD | 1, size=4)
        raw = unpack_from('>HH', data)
        return (raw[0] & 4095, raw[1] & 4095)
    if average:
        raw_ch0 = 0
        raw_ch1 = 0
        for _ in range(average):
            self._intf.write(self._base_addr + self.MAX11644_ADD, array('B', pack('B', self.ADC_CONF)))
            data = self._intf.read(self._base_addr + self.MAX11644_ADD | 1, size=4)
            raw = unpack_from('>HH', data)
            (tmp_raw_ch0, tmp_raw_ch1) = (raw[0] & 4095, raw[1] & 4095)
            raw_ch0 += tmp_raw_ch0
            raw_ch1 += tmp_raw_ch1
        raw_ch0 /= average
        raw_ch1 /= average
    else:
        self._intf.write(self._base_addr + self.MAX11644_ADD, array('B', pack('B', self.ADC_CONF)))
        data = self._intf.read(self._base_addr + self.MAX11644_ADD | 1, size=4)
        raw = unpack_from('>HH', data)
        (raw_ch0, raw_ch1) = (raw[0] & 4095, raw[1] & 4095)
    return (raw_ch0, raw_ch1)
basil
positive
def initScriptT(context, self): obj = bpy.context.active_object <DeepExtract> is_editmode = obj.mode == 'EDIT' if is_editmode: bpy.ops.object.mode_set(mode='OBJECT') (edges, meshMatrix) = ([], []) mesh = obj.data verts = mesh.vertices for e in mesh.edges: if e.select: edges.append(e) edgenum = 0 for edge_to_test in edges: p1 = verts[edge_to_test.vertices[0]].co p2 = verts[edge_to_test.vertices[1]].co meshMatrix.append([Vector(p1), Vector(p2)]) edgenum += 1 meshMatrix = meshMatrix </DeepExtract> bpy.ops.object.mode_set(mode='EDIT') vSel = bpy.context.active_object.data.total_vert_sel if len(meshMatrix) != 2: print(str(len(meshMatrix)) + ' select 2 edges') else: <DeepExtract> count = 0 if isPointOnEdge(checkEdges(meshMatrix, obj), meshMatrix[0][0], meshMatrix[0][1]): count += 1 if isPointOnEdge(checkEdges(meshMatrix, obj), meshMatrix[1][0], meshMatrix[1][1]): count += 1 count = count </DeepExtract> if count == 1: print('Good, Intersection point lies on one of the two edges!') <DeepExtract> (o, vert_count, edge_count) = GetActiveObject() (vA, vB, vC, vD) = edges_to_points(meshMatrix) AddVertsToObject(vert_count, o, checkEdges(meshMatrix, obj), vA, vB, vC, vD) oe = o.data.edges oe.add(4) if isPointOnEdge(checkEdges(meshMatrix, obj), vA, vB): oe[edge_count].vertices = [vert_count, vert_count + 1] oe[edge_count + 1].vertices = [vert_count, vert_count + 2] if mDist(vD, checkEdges(meshMatrix, obj)) > mDist(vC, checkEdges(meshMatrix, obj)): oe[edge_count + 2].vertices = [vert_count, vert_count + 3] oe[edge_count + 3].vertices = [vert_count + 3, vert_count + 4] if mDist(vC, checkEdges(meshMatrix, obj)) > mDist(vD, checkEdges(meshMatrix, obj)): oe[edge_count + 2].vertices = [vert_count, vert_count + 4] oe[edge_count + 3].vertices = [vert_count + 3, vert_count + 4] if isPointOnEdge(checkEdges(meshMatrix, obj), vC, vD): oe[edge_count].vertices = [vert_count, vert_count + 3] oe[edge_count + 1].vertices = [vert_count, vert_count + 4] if mDist(vB, checkEdges(meshMatrix, obj)) > mDist(vA, checkEdges(meshMatrix, obj)): oe[edge_count + 2].vertices = [vert_count, vert_count + 1] oe[edge_count + 3].vertices = [vert_count + 1, vert_count + 2] if mDist(vA, checkEdges(meshMatrix, obj)) > mDist(vB, checkEdges(meshMatrix, obj)): oe[edge_count + 2].vertices = [vert_count, vert_count + 2] oe[edge_count + 3].vertices = [vert_count + 1, vert_count + 2] </DeepExtract> <DeepExtract> bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.select_all(action='TOGGLE') bpy.ops.mesh.select_all(action='TOGGLE') bpy.ops.mesh.remove_doubles(threshold=VTX_PRECISION) bpy.ops.mesh.select_all(action='TOGGLE') </DeepExtract> else: print('Intersection point not on chosen edges')
def initScriptT(context, self): obj = bpy.context.active_object is_editmode = obj.mode == 'EDIT' if is_editmode: bpy.ops.object.mode_set(mode='OBJECT') (edges, meshMatrix) = ([], []) mesh = obj.data verts = mesh.vertices for e in mesh.edges: if e.select: edges.append(e) edgenum = 0 for edge_to_test in edges: p1 = verts[edge_to_test.vertices[0]].co p2 = verts[edge_to_test.vertices[1]].co meshMatrix.append([Vector(p1), Vector(p2)]) edgenum += 1 meshMatrix = meshMatrix bpy.ops.object.mode_set(mode='EDIT') vSel = bpy.context.active_object.data.total_vert_sel if len(meshMatrix) != 2: print(str(len(meshMatrix)) + ' select 2 edges') else: count = 0 if isPointOnEdge(checkEdges(meshMatrix, obj), meshMatrix[0][0], meshMatrix[0][1]): count += 1 if isPointOnEdge(checkEdges(meshMatrix, obj), meshMatrix[1][0], meshMatrix[1][1]): count += 1 count = count if count == 1: print('Good, Intersection point lies on one of the two edges!') (o, vert_count, edge_count) = GetActiveObject() (vA, vB, vC, vD) = edges_to_points(meshMatrix) AddVertsToObject(vert_count, o, checkEdges(meshMatrix, obj), vA, vB, vC, vD) oe = o.data.edges oe.add(4) if isPointOnEdge(checkEdges(meshMatrix, obj), vA, vB): oe[edge_count].vertices = [vert_count, vert_count + 1] oe[edge_count + 1].vertices = [vert_count, vert_count + 2] if mDist(vD, checkEdges(meshMatrix, obj)) > mDist(vC, checkEdges(meshMatrix, obj)): oe[edge_count + 2].vertices = [vert_count, vert_count + 3] oe[edge_count + 3].vertices = [vert_count + 3, vert_count + 4] if mDist(vC, checkEdges(meshMatrix, obj)) > mDist(vD, checkEdges(meshMatrix, obj)): oe[edge_count + 2].vertices = [vert_count, vert_count + 4] oe[edge_count + 3].vertices = [vert_count + 3, vert_count + 4] if isPointOnEdge(checkEdges(meshMatrix, obj), vC, vD): oe[edge_count].vertices = [vert_count, vert_count + 3] oe[edge_count + 1].vertices = [vert_count, vert_count + 4] if mDist(vB, checkEdges(meshMatrix, obj)) > mDist(vA, checkEdges(meshMatrix, obj)): oe[edge_count + 2].vertices = [vert_count, vert_count + 1] oe[edge_count + 3].vertices = [vert_count + 1, vert_count + 2] if mDist(vA, checkEdges(meshMatrix, obj)) > mDist(vB, checkEdges(meshMatrix, obj)): oe[edge_count + 2].vertices = [vert_count, vert_count + 2] oe[edge_count + 3].vertices = [vert_count + 1, vert_count + 2] bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.select_all(action='TOGGLE') bpy.ops.mesh.select_all(action='TOGGLE') bpy.ops.mesh.remove_doubles(threshold=VTX_PRECISION) bpy.ops.mesh.select_all(action='TOGGLE') else: print('Intersection point not on chosen edges')
blender-architecture-scripts
positive
def parse_equations(self, equation_expression_parser: Callable, backend_object_names: Iterable[str]) -> list[ParsedBackendEquation]: f'Parse `expression` and `where` strings of backend object configuration dictionary:\n\n {self._unparsed}\n\n Args:\n equation_expression_parser (Callable): Parsing rule to apply to the string expressions under the `equation(s)` key.\n backend_object_names (Iterable[str]):\n strings referring to valid backend objects to allow the parser to differentiate between them and generic strings.\n\n Returns:\n list[ParsedBackendEquation]:\n List of parsed equations ready to be evaluated.\n The length of the list depends on the product of provided equations and component/index_slice references.\n ' equation_expression_list: list[UnparsedEquationDict] if 'equation' in self._unparsed.keys(): equation_expression_list = [{'expression': self._unparsed['equation']}] else: equation_expression_list = self._unparsed.get('equations', []) <DeepExtract> parsed_equation_list = [] for (idx, expression_data) in enumerate(equation_expression_list): parsed_where = self.parse_where_string(expression_data.get('where', 'True')) parsed_expression = self._parse_string(equation_expression_parser(backend_object_names), expression_data['expression'], 'equations') if parsed_expression is not None and parsed_where is not None: parsed_equation_list.append(ParsedBackendEquation(equation_name=':'.join(filter(None, [self.name, str(idx)])), sets=self.sets, where_list=[parsed_where], expression=parsed_expression)) equations = parsed_equation_list </DeepExtract> component_dict = {c_name: self.generate_expression_list(expression_parser=equation_parser.generate_component_parser(backend_object_names), expression_list=c_list, expression_group='components', id_prefix=c_name) for (c_name, c_list) in self._unparsed.get('components', {}).items()} index_slice_dict = {idx_name: self.generate_expression_list(expression_parser=equation_parser.generate_index_slice_parser(backend_object_names), expression_list=idx_list, expression_group='index_slices', id_prefix=idx_name) for (idx_name, idx_list) in self._unparsed.get('index_slices', {}).items()} if not self._is_valid: exceptions.print_warnings_and_raise_errors(errors=self._errors, during='string parsing') equations_with_components = [] for equation in equations: equations_with_components.extend(self.extend_equation_list_with_expression_group(equation, component_dict, 'components')) equations_with_components_and_index_slices: list[ParsedBackendEquation] = [] for equation in equations_with_components: equations_with_components_and_index_slices.extend(self.extend_equation_list_with_expression_group(equation, index_slice_dict, 'index_slices')) return equations_with_components_and_index_slices
def parse_equations(self, equation_expression_parser: Callable, backend_object_names: Iterable[str]) -> list[ParsedBackendEquation]: f'Parse `expression` and `where` strings of backend object configuration dictionary:\n\n {self._unparsed}\n\n Args:\n equation_expression_parser (Callable): Parsing rule to apply to the string expressions under the `equation(s)` key.\n backend_object_names (Iterable[str]):\n strings referring to valid backend objects to allow the parser to differentiate between them and generic strings.\n\n Returns:\n list[ParsedBackendEquation]:\n List of parsed equations ready to be evaluated.\n The length of the list depends on the product of provided equations and component/index_slice references.\n ' equation_expression_list: list[UnparsedEquationDict] if 'equation' in self._unparsed.keys(): equation_expression_list = [{'expression': self._unparsed['equation']}] else: equation_expression_list = self._unparsed.get('equations', []) parsed_equation_list = [] for (idx, expression_data) in enumerate(equation_expression_list): parsed_where = self.parse_where_string(expression_data.get('where', 'True')) parsed_expression = self._parse_string(equation_expression_parser(backend_object_names), expression_data['expression'], 'equations') if parsed_expression is not None and parsed_where is not None: parsed_equation_list.append(ParsedBackendEquation(equation_name=':'.join(filter(None, [self.name, str(idx)])), sets=self.sets, where_list=[parsed_where], expression=parsed_expression)) equations = parsed_equation_list component_dict = {c_name: self.generate_expression_list(expression_parser=equation_parser.generate_component_parser(backend_object_names), expression_list=c_list, expression_group='components', id_prefix=c_name) for (c_name, c_list) in self._unparsed.get('components', {}).items()} index_slice_dict = {idx_name: self.generate_expression_list(expression_parser=equation_parser.generate_index_slice_parser(backend_object_names), expression_list=idx_list, expression_group='index_slices', id_prefix=idx_name) for (idx_name, idx_list) in self._unparsed.get('index_slices', {}).items()} if not self._is_valid: exceptions.print_warnings_and_raise_errors(errors=self._errors, during='string parsing') equations_with_components = [] for equation in equations: equations_with_components.extend(self.extend_equation_list_with_expression_group(equation, component_dict, 'components')) equations_with_components_and_index_slices: list[ParsedBackendEquation] = [] for equation in equations_with_components: equations_with_components_and_index_slices.extend(self.extend_equation_list_with_expression_group(equation, index_slice_dict, 'index_slices')) return equations_with_components_and_index_slices
calliope
positive
def test_generate_query_simple():
    columns = ['a', 'b']
    <DeepExtract>
    acc = ''
    prev = []
    for col in sorted(columns):
        split = col.split('.')
        if len(split) > 1 and len(split) > len(prev):
            if len(prev) == 0:
                k = len(split) - 1
            else:
                k = len(split) - len(prev)
            acc += 'struct(' * k
        if len(split) > 1 and len(split) == len(prev):
            depth = 0
            for (a, b) in list(zip(split[:-1], prev[:-1])):
                if a != b:
                    break
                depth += 1
            for alias in reversed(prev[depth:-1]):
                acc = acc.rstrip(',')
                acc += f') as {alias},'
            acc += 'struct(' * (len(split) - 1 - depth)
        if len(split) < len(prev):
            diff = len(prev) - len(split)
            prev.pop()
            for _ in range(diff):
                c = prev.pop()
                acc = acc.rstrip(',')
                acc += f') as {c},'
        acc += (replacements.get(col, col) if replacements else col) + ','
        prev = split
    if len(prev) > 1:
        prev.pop()
        for c in reversed(prev):
            acc = acc.rstrip(',')
            acc += f') as {c},'
    acc = acc.rstrip(',')
    res = reformat(f"select {acc} from `{'test'}`")
    </DeepExtract>
    expect = reformat('select a, b from `test`')
    assert res == expect, f'expected:\n{expect}\ngot:\n{res}'
def test_generate_query_simple():
    columns = ['a', 'b']
    acc = ''
    prev = []
    for col in sorted(columns):
        split = col.split('.')
        if len(split) > 1 and len(split) > len(prev):
            if len(prev) == 0:
                k = len(split) - 1
            else:
                k = len(split) - len(prev)
            acc += 'struct(' * k
        if len(split) > 1 and len(split) == len(prev):
            depth = 0
            for (a, b) in list(zip(split[:-1], prev[:-1])):
                if a != b:
                    break
                depth += 1
            for alias in reversed(prev[depth:-1]):
                acc = acc.rstrip(',')
                acc += f') as {alias},'
            acc += 'struct(' * (len(split) - 1 - depth)
        if len(split) < len(prev):
            diff = len(prev) - len(split)
            prev.pop()
            for _ in range(diff):
                c = prev.pop()
                acc = acc.rstrip(',')
                acc += f') as {c},'
        acc += (replacements.get(col, col) if replacements else col) + ','
        prev = split
    if len(prev) > 1:
        prev.pop()
        for c in reversed(prev):
            acc = acc.rstrip(',')
            acc += f') as {c},'
    acc = acc.rstrip(',')
    res = reformat(f"select {acc} from `{'test'}`")
    expect = reformat('select a, b from `test`')
    assert res == expect, f'expected:\n{expect}\ngot:\n{res}'
bigquery-etl
positive
def _unwrap_input_data(args, kwargs, *xargs, vtype=False, skip=False): """ Unwrap (return the wrapped vtk object) wrappers in `args` and `kwargs`. E.g., ``xargs=(0, 2, 'key1')`` unwrap positional arguments in positions 0 and 2, and keyword arg 'key1'. Parameters ---------- args : tuple Function args. kwargs : dict Keyword args. xargs : sequence of int and str Positional indices (integers) and keys as strings (for keyword args) to unwrap. If not specified, try to unwrap all arguments. If ``skip == True``, unwrap all arguments except these ones. skip : bool, optional Unwrap all arguments except those in `wrap_args`. Default is False. Returns ------- unwrapped_args : args Return args with unwrapped vtk objects. unwrapped_kwargs: kwargs Return keyword args with unwrapped vtk objects. """ dv = False if not isinstance(vtype, dict): if vtype in [True, None]: dv = None vtype = {} list_args = list(range(len(args))) + list(kwargs.keys()) if len(xargs) == 0: xargs = list_args if skip: xargs = [a for a in list_args if a not in xargs] new_args = list(args) for (i, a) in enumerate(new_args): if i in xargs: <DeepExtract> try: new_args[i] = unwrap_vtk(a, vtype=vtype.get(i, dv)) except: pass new_args[i] = a </DeepExtract> for (k, v) in kwargs.items(): if k in xargs: <DeepExtract> try: kwargs[k] = unwrap_vtk(v, vtype=vtype.get(k, dv)) except: pass kwargs[k] = v </DeepExtract> return (new_args, kwargs)
def _unwrap_input_data(args, kwargs, *xargs, vtype=False, skip=False): """ Unwrap (return the wrapped vtk object) wrappers in `args` and `kwargs`. E.g., ``xargs=(0, 2, 'key1')`` unwrap positional arguments in positions 0 and 2, and keyword arg 'key1'. Parameters ---------- args : tuple Function args. kwargs : dict Keyword args. xargs : sequence of int and str Positional indices (integers) and keys as strings (for keyword args) to unwrap. If not specified, try to unwrap all arguments. If ``skip == True``, unwrap all arguments except these ones. skip : bool, optional Unwrap all arguments except those in `wrap_args`. Default is False. Returns ------- unwrapped_args : args Return args with unwrapped vtk objects. unwrapped_kwargs: kwargs Return keyword args with unwrapped vtk objects. """ dv = False if not isinstance(vtype, dict): if vtype in [True, None]: dv = None vtype = {} list_args = list(range(len(args))) + list(kwargs.keys()) if len(xargs) == 0: xargs = list_args if skip: xargs = [a for a in list_args if a not in xargs] new_args = list(args) for (i, a) in enumerate(new_args): if i in xargs: try: new_args[i] = unwrap_vtk(a, vtype=vtype.get(i, dv)) except: pass new_args[i] = a for (k, v) in kwargs.items(): if k in xargs: try: kwargs[k] = unwrap_vtk(v, vtype=vtype.get(k, dv)) except: pass kwargs[k] = v return (new_args, kwargs)
BrainSpace
positive
def start_response(self, status, response_headers):
    self.response_status = int(status.split(' ')[0])
    for (name, value) in response_headers:
        <DeepExtract>
        name = '-'.join([w.lower().capitalize() for w in name.split('-')])
        </DeepExtract>
        self.response_headers[name] = value.strip()
    self.start_response_called = True
def start_response(self, status, response_headers):
    self.response_status = int(status.split(' ')[0])
    for (name, value) in response_headers:
        name = '-'.join([w.lower().capitalize() for w in name.split('-')])
        self.response_headers[name] = value.strip()
    self.start_response_called = True
couchdbkit
positive
def __init__(self):
    self.__rootMo = topRoot(Dn())
    self.__classIndex = dict()
    self.__dnIndex = dict()
    self.__deletedIndex = dict()
    <DeepExtract>
    self.__updateClassIndex(self.__rootMo)
    self.__updateDnIndex(self.__rootMo)
    if None and None.status.deleted:
        self.__rootMo.delete()
    if self.__rootMo.status.deleted:
        self.__deletedIndex[self.__rootMo.dn] = self.__rootMo
    </DeepExtract>
    self.__index = 0
def __init__(self):
    self.__rootMo = topRoot(Dn())
    self.__classIndex = dict()
    self.__dnIndex = dict()
    self.__deletedIndex = dict()
    self.__updateClassIndex(self.__rootMo)
    self.__updateDnIndex(self.__rootMo)
    if None and None.status.deleted:
        self.__rootMo.delete()
    if self.__rootMo.status.deleted:
        self.__deletedIndex[self.__rootMo.dn] = self.__rootMo
    self.__index = 0
cobra
positive
def bboxes_iou(boxes1, boxes2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-07):
    """
    box1 is N,4 xyxy format
    box2 is N,4 xyxy format
    """
    n = boxes1.shape[0]
    res = torch.zeros([n, boxes2.shape[0]])
    for i in range(n):
        box1 = boxes1[i]
        <DeepExtract>
        boxes2 = boxes2.T
        if x1y1x2y2:
            (b1_x1, b1_y1, b1_x2, b1_y2) = (box1[0], box1[1], box1[2], box1[3])
            (b2_x1, b2_y1, b2_x2, b2_y2) = (boxes2[0], boxes2[1], boxes2[2], boxes2[3])
        else:
            (b1_x1, b1_x2) = (box1[0] - box1[2] / 2, box1[0] + box1[2] / 2)
            (b1_y1, b1_y2) = (box1[1] - box1[3] / 2, box1[1] + box1[3] / 2)
            (b2_x1, b2_x2) = (boxes2[0] - boxes2[2] / 2, boxes2[0] + boxes2[2] / 2)
            (b2_y1, b2_y2) = (boxes2[1] - boxes2[3] / 2, boxes2[1] + boxes2[3] / 2)
        inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
        (w1, h1) = (b1_x2 - b1_x1, b1_y2 - b1_y1 + eps)
        (w2, h2) = (b2_x2 - b2_x1, b2_y2 - b2_y1 + eps)
        union = w1 * h1 + w2 * h2 - inter + eps
        iou = inter / union
        if GIoU or DIoU or CIoU:
            cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)
            ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)
            if CIoU or DIoU:
                c2 = cw ** 2 + ch ** 2 + eps
                rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4
                if DIoU:
                    a = iou - rho2 / c2
                elif CIoU:
                    v = 4 / math.pi ** 2 * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                    with torch.no_grad():
                        alpha = v / (v - iou + (1 + eps))
                    a = iou - (rho2 / c2 + v * alpha)
            else:
                c_area = cw * ch + eps
                a = iou - (c_area - union) / c_area
        else:
            a = iou
        </DeepExtract>
        res[i, :] = a
    return res
def bboxes_iou(boxes1, boxes2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-07):
    """
    box1 is N,4 xyxy format
    box2 is N,4 xyxy format
    """
    n = boxes1.shape[0]
    res = torch.zeros([n, boxes2.shape[0]])
    for i in range(n):
        box1 = boxes1[i]
        boxes2 = boxes2.T
        if x1y1x2y2:
            (b1_x1, b1_y1, b1_x2, b1_y2) = (box1[0], box1[1], box1[2], box1[3])
            (b2_x1, b2_y1, b2_x2, b2_y2) = (boxes2[0], boxes2[1], boxes2[2], boxes2[3])
        else:
            (b1_x1, b1_x2) = (box1[0] - box1[2] / 2, box1[0] + box1[2] / 2)
            (b1_y1, b1_y2) = (box1[1] - box1[3] / 2, box1[1] + box1[3] / 2)
            (b2_x1, b2_x2) = (boxes2[0] - boxes2[2] / 2, boxes2[0] + boxes2[2] / 2)
            (b2_y1, b2_y2) = (boxes2[1] - boxes2[3] / 2, boxes2[1] + boxes2[3] / 2)
        inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
        (w1, h1) = (b1_x2 - b1_x1, b1_y2 - b1_y1 + eps)
        (w2, h2) = (b2_x2 - b2_x1, b2_y2 - b2_y1 + eps)
        union = w1 * h1 + w2 * h2 - inter + eps
        iou = inter / union
        if GIoU or DIoU or CIoU:
            cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)
            ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)
            if CIoU or DIoU:
                c2 = cw ** 2 + ch ** 2 + eps
                rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4
                if DIoU:
                    a = iou - rho2 / c2
                elif CIoU:
                    v = 4 / math.pi ** 2 * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                    with torch.no_grad():
                        alpha = v / (v - iou + (1 + eps))
                    a = iou - (rho2 / c2 + v * alpha)
            else:
                c_area = cw * ch + eps
                a = iou - (c_area - union) / c_area
        else:
            a = iou
        res[i, :] = a
    return res
alfred
positive
def centroid_box_iou(box1, box2):
    def _interval_overlap(interval_a, interval_b):
        (x1, x2) = interval_a
        (x3, x4) = interval_b
        if x3 < x1:
            if x4 < x1:
                return 0
            else:
                return min(x2, x4) - x1
        elif x2 < x3:
            return 0
        else:
            return min(x2, x4) - x3
    (_, _, w1, h1) = box1.reshape(-1)
    (_, _, w2, h2) = box2.reshape(-1)
    (x1_min, y1_min, x1_max, y1_max) = to_minmax(box1.reshape(-1, 4)).reshape(-1)
    (x2_min, y2_min, x2_max, y2_max) = to_minmax(box2.reshape(-1, 4)).reshape(-1)
    <DeepExtract>
    (x1, x2) = [x1_min, x1_max]
    (x3, x4) = [x2_min, x2_max]
    if x3 < x1:
        if x4 < x1:
            intersect_w = 0
        else:
            intersect_w = min(x2, x4) - x1
    elif x2 < x3:
        intersect_w = 0
    else:
        intersect_w = min(x2, x4) - x3
    </DeepExtract>
    <DeepExtract>
    (x1, x2) = [y1_min, y1_max]
    (x3, x4) = [y2_min, y2_max]
    if x3 < x1:
        if x4 < x1:
            intersect_h = 0
        else:
            intersect_h = min(x2, x4) - x1
    elif x2 < x3:
        intersect_h = 0
    else:
        intersect_h = min(x2, x4) - x3
    </DeepExtract>
    intersect = intersect_w * intersect_h
    union = w1 * h1 + w2 * h2 - intersect
    return float(intersect) / union
def centroid_box_iou(box1, box2):
    def _interval_overlap(interval_a, interval_b):
        (x1, x2) = interval_a
        (x3, x4) = interval_b
        if x3 < x1:
            if x4 < x1:
                return 0
            else:
                return min(x2, x4) - x1
        elif x2 < x3:
            return 0
        else:
            return min(x2, x4) - x3
    (_, _, w1, h1) = box1.reshape(-1)
    (_, _, w2, h2) = box2.reshape(-1)
    (x1_min, y1_min, x1_max, y1_max) = to_minmax(box1.reshape(-1, 4)).reshape(-1)
    (x2_min, y2_min, x2_max, y2_max) = to_minmax(box2.reshape(-1, 4)).reshape(-1)
    (x1, x2) = [x1_min, x1_max]
    (x3, x4) = [x2_min, x2_max]
    if x3 < x1:
        if x4 < x1:
            intersect_w = 0
        else:
            intersect_w = min(x2, x4) - x1
    elif x2 < x3:
        intersect_w = 0
    else:
        intersect_w = min(x2, x4) - x3
    (x1, x2) = [y1_min, y1_max]
    (x3, x4) = [y2_min, y2_max]
    if x3 < x1:
        if x4 < x1:
            intersect_h = 0
        else:
            intersect_h = min(x2, x4) - x1
    elif x2 < x3:
        intersect_h = 0
    else:
        intersect_h = min(x2, x4) - x3
    intersect = intersect_w * intersect_h
    union = w1 * h1 + w2 * h2 - intersect
    return float(intersect) / union
aXeleRate
positive
def init_dataset(self): opts = self.opts if opts.pascal_class == 'bird': self.pascal_dataloader = cub_data.cub_dataloader(opts) self.all_dataloloader = cub_data.cub_dataloader(opts) else: opts_copy = copy.deepcopy(opts) opts_copy.dl_out_imnet = False self.pascal_dataloader = pascal_data.pascal_dataloader(opts_copy) self.all_dataloloader = pascal_data.pascal_dataloader(opts) self.dataloader = self.all_dataloloader self.resnet_transform = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) if opts.warmup_semi_supv > 0: self.dataloader = self.pascal_dataloader self.kp_perm = self.pascal_dataloader.dataset.kp_perm self.kp_perm = torch.LongTensor(self.kp_perm) <DeepExtract> opts = self.opts (model_dir, self.mean_shape, self.mean_shape_np) = model_utils.load_template_shapes(opts) (dpm, parts_data, self.kp_vertex_ids) = model_utils.init_dpm(self.dataloader.dataset.kp_names, model_dir, self.mean_shape, opts.parts_file) opts.nparts = self.mean_shape['alpha'].shape[1] (self.part_active_state, self.part_axis_init, self.part_perm) = model_utils.load_active_parts(model_dir, self.save_dir, dpm, parts_data, suffix='') return </DeepExtract> <DeepExtract> opts = self.opts self.model_obj = pymesh.form_mesh(self.mean_shape['verts'].data.cpu().numpy(), self.mean_shape['faces'].data.cpu().numpy()) model_obj_dir = osp.join(self.save_dir, 'model') visutil.mkdir(model_obj_dir) self.model_obj_path = osp.join(model_obj_dir, 'mean_{}.obj'.format(opts.pascal_class)) pymesh.meshio.save_mesh(self.model_obj_path, self.model_obj) nkps = len(self.kp_vertex_ids) self.keypoint_cmap = [cm(i * 255 // nkps) for i in range(nkps)] faces_np = self.mean_shape['faces'].data.cpu().numpy() verts_np = self.mean_shape['sphere_verts'].data.cpu().numpy() uv_sampler = mesh.compute_uvsampler(verts_np, faces_np, tex_size=opts.tex_size) uv_sampler = torch.from_numpy(uv_sampler).float().cuda() self.uv_sampler = uv_sampler.view(-1, len(faces_np), opts.tex_size * opts.tex_size, 2) self.verts_uv = self.mean_shape['uv_verts'] self.verts_obj = self.mean_shape['verts'] vis_rend = bird_vis.VisRenderer(opts.img_size, faces_np) self.visdom_renderer = visdom_render.VisdomRenderer(vis_rend, self.verts_obj, self.uv_sampler, self.offset_z, self.mean_shape_np, self.model_obj_path, self.keypoint_cmap, self.opts) vis_rend = bird_vis.VisRenderer(opts.img_size, faces_np) renderer_no_light = visdom_render.VisdomRenderer(vis_rend, self.verts_obj, self.uv_sampler, self.offset_z, self.mean_shape_np, self.model_obj_path, self.keypoint_cmap, self.opts) renderer_no_light.vis_rend.set_light_status(False) renderer_no_light.vis_rend.set_bgcolor((255, 255, 255)) self.renderer_no_light = renderer_no_light self.sphere_uv_img = scipy.misc.imread(osp.join(opts.cachedir, 'color_maps', 'sphere.png')) self.sphere_uv_img = torch.FloatTensor(self.sphere_uv_img) / 255 self.sphere_uv_img = self.sphere_uv_img.permute(2, 0, 1) return </DeepExtract> return
def init_dataset(self): opts = self.opts if opts.pascal_class == 'bird': self.pascal_dataloader = cub_data.cub_dataloader(opts) self.all_dataloloader = cub_data.cub_dataloader(opts) else: opts_copy = copy.deepcopy(opts) opts_copy.dl_out_imnet = False self.pascal_dataloader = pascal_data.pascal_dataloader(opts_copy) self.all_dataloloader = pascal_data.pascal_dataloader(opts) self.dataloader = self.all_dataloloader self.resnet_transform = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) if opts.warmup_semi_supv > 0: self.dataloader = self.pascal_dataloader self.kp_perm = self.pascal_dataloader.dataset.kp_perm self.kp_perm = torch.LongTensor(self.kp_perm) opts = self.opts (model_dir, self.mean_shape, self.mean_shape_np) = model_utils.load_template_shapes(opts) (dpm, parts_data, self.kp_vertex_ids) = model_utils.init_dpm(self.dataloader.dataset.kp_names, model_dir, self.mean_shape, opts.parts_file) opts.nparts = self.mean_shape['alpha'].shape[1] (self.part_active_state, self.part_axis_init, self.part_perm) = model_utils.load_active_parts(model_dir, self.save_dir, dpm, parts_data, suffix='') return opts = self.opts self.model_obj = pymesh.form_mesh(self.mean_shape['verts'].data.cpu().numpy(), self.mean_shape['faces'].data.cpu().numpy()) model_obj_dir = osp.join(self.save_dir, 'model') visutil.mkdir(model_obj_dir) self.model_obj_path = osp.join(model_obj_dir, 'mean_{}.obj'.format(opts.pascal_class)) pymesh.meshio.save_mesh(self.model_obj_path, self.model_obj) nkps = len(self.kp_vertex_ids) self.keypoint_cmap = [cm(i * 255 // nkps) for i in range(nkps)] faces_np = self.mean_shape['faces'].data.cpu().numpy() verts_np = self.mean_shape['sphere_verts'].data.cpu().numpy() uv_sampler = mesh.compute_uvsampler(verts_np, faces_np, tex_size=opts.tex_size) uv_sampler = torch.from_numpy(uv_sampler).float().cuda() self.uv_sampler = uv_sampler.view(-1, len(faces_np), opts.tex_size * opts.tex_size, 2) self.verts_uv = self.mean_shape['uv_verts'] self.verts_obj = self.mean_shape['verts'] vis_rend = bird_vis.VisRenderer(opts.img_size, faces_np) self.visdom_renderer = visdom_render.VisdomRenderer(vis_rend, self.verts_obj, self.uv_sampler, self.offset_z, self.mean_shape_np, self.model_obj_path, self.keypoint_cmap, self.opts) vis_rend = bird_vis.VisRenderer(opts.img_size, faces_np) renderer_no_light = visdom_render.VisdomRenderer(vis_rend, self.verts_obj, self.uv_sampler, self.offset_z, self.mean_shape_np, self.model_obj_path, self.keypoint_cmap, self.opts) renderer_no_light.vis_rend.set_light_status(False) renderer_no_light.vis_rend.set_bgcolor((255, 255, 255)) self.renderer_no_light = renderer_no_light self.sphere_uv_img = scipy.misc.imread(osp.join(opts.cachedir, 'color_maps', 'sphere.png')) self.sphere_uv_img = torch.FloatTensor(self.sphere_uv_img) / 255 self.sphere_uv_img = self.sphere_uv_img.permute(2, 0, 1) return return
acsm
positive
def test_array_to_event(): test_cases = [([], None, []), ([0], None, []), ([1], None, [[0, 0]]), ([0, 0, 1, 1], None, [[2, 3]]), ([1, 1, 1, 0, 0], None, [[0, 2]]), ([0, 0, 1, 1, 1, 0, 0, 0], None, [[2, 4]]), ([1, 1, 1, 0, 1, 1], None, [[0, 2], [4, 5]]), ([0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1], None, [[2, 4], [8, 9], [11, 11]]), ([], 3, []), ([0], 3, []), ([1], 3, [[0, 0]]), ([0, 0, 1, 1], 3, [[2, 3]]), ([0, 0, 1, 1], 1, [[2, 2], [3, 3]]), ([1, 1, 1, 0, 0], 3, [[0, 2]]), ([1, 1, 1, 1, 0, 0], 4, [[0, 3]]), ([1, 1, 1, 1, 0, 0], 3, [[0, 2], [3, 3]]), ([1, 1, 1, 1, 0, 0], 2, [[0, 1], [2, 3]]), ([1, 1, 1, 1, 0, 0], 1, [[0, 0], [1, 1], [2, 2], [3, 3]]), ([0, 0, 1, 1, 1, 0, 0, 0], 4, [[2, 4]]), ([0, 0, 1, 1, 1, 0, 0, 0], 3, [[2, 4]]), ([0, 0, 1, 1, 1, 0, 0, 0], 2, [[2, 3], [4, 4]]), ([0, 0, 1, 1, 1, 0, 0, 0], 1, [[2, 2], [3, 3], [4, 4]]), ([0, 1, 1, 1, 1, 1, 0, 0, 0], 3, [[1, 3], [4, 5]]), ([1, 1, 1, 0, 1, 1], 4, [[0, 2], [4, 5]]), ([1, 1, 1, 0, 1, 1], 3, [[0, 2], [4, 5]]), ([1, 1, 1, 0, 1, 1], 2, [[0, 1], [2, 2], [4, 5]]), ([1, 1, 1, 0, 1, 1], 1, [[0, 0], [1, 1], [2, 2], [4, 4], [5, 5]]), ([0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1], 3, [[2, 4], [8, 9], [11, 11]]), ([0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1], 3, [[2, 4], [8, 10], [11, 13], [14, 15], [17, 17]]), ([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 3, [[0, 2], [3, 5], [6, 8], [9, 9]])] for c in test_cases: <DeepExtract> event = [] c[0] = copy.deepcopy(c[0]) c[0].insert(0, 0) if c[1] is not None and c[1] < 1: c[1] = None for i in range(len(c[0]) - 1): a_i1 = c[0][i + 1] diff = a_i1 - c[0][i] if diff == 1: event.append([i, i]) if c[1] == 1: c[0][i + 1] = 0 elif diff == 0: if a_i1 == 1: event[-1][1] = i if c[1] is not None and i - event[-1][0] + 1 >= c[1]: c[0][i + 1] = 0 output_c = event </DeepExtract> if output_c == c[2]: print('pass') else: print('WRONG!') print('Input: %r max_len=%r' % (c[0], c[1])) print('Output: %r' % array_to_event(c[0], max_len=c[1])) print('Desired output: %r' % c[2]) print('-' * 50)
def test_array_to_event(): test_cases = [([], None, []), ([0], None, []), ([1], None, [[0, 0]]), ([0, 0, 1, 1], None, [[2, 3]]), ([1, 1, 1, 0, 0], None, [[0, 2]]), ([0, 0, 1, 1, 1, 0, 0, 0], None, [[2, 4]]), ([1, 1, 1, 0, 1, 1], None, [[0, 2], [4, 5]]), ([0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1], None, [[2, 4], [8, 9], [11, 11]]), ([], 3, []), ([0], 3, []), ([1], 3, [[0, 0]]), ([0, 0, 1, 1], 3, [[2, 3]]), ([0, 0, 1, 1], 1, [[2, 2], [3, 3]]), ([1, 1, 1, 0, 0], 3, [[0, 2]]), ([1, 1, 1, 1, 0, 0], 4, [[0, 3]]), ([1, 1, 1, 1, 0, 0], 3, [[0, 2], [3, 3]]), ([1, 1, 1, 1, 0, 0], 2, [[0, 1], [2, 3]]), ([1, 1, 1, 1, 0, 0], 1, [[0, 0], [1, 1], [2, 2], [3, 3]]), ([0, 0, 1, 1, 1, 0, 0, 0], 4, [[2, 4]]), ([0, 0, 1, 1, 1, 0, 0, 0], 3, [[2, 4]]), ([0, 0, 1, 1, 1, 0, 0, 0], 2, [[2, 3], [4, 4]]), ([0, 0, 1, 1, 1, 0, 0, 0], 1, [[2, 2], [3, 3], [4, 4]]), ([0, 1, 1, 1, 1, 1, 0, 0, 0], 3, [[1, 3], [4, 5]]), ([1, 1, 1, 0, 1, 1], 4, [[0, 2], [4, 5]]), ([1, 1, 1, 0, 1, 1], 3, [[0, 2], [4, 5]]), ([1, 1, 1, 0, 1, 1], 2, [[0, 1], [2, 2], [4, 5]]), ([1, 1, 1, 0, 1, 1], 1, [[0, 0], [1, 1], [2, 2], [4, 4], [5, 5]]), ([0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1], 3, [[2, 4], [8, 9], [11, 11]]), ([0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1], 3, [[2, 4], [8, 10], [11, 13], [14, 15], [17, 17]]), ([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 3, [[0, 2], [3, 5], [6, 8], [9, 9]])] for c in test_cases: event = [] c[0] = copy.deepcopy(c[0]) c[0].insert(0, 0) if c[1] is not None and c[1] < 1: c[1] = None for i in range(len(c[0]) - 1): a_i1 = c[0][i + 1] diff = a_i1 - c[0][i] if diff == 1: event.append([i, i]) if c[1] == 1: c[0][i + 1] = 0 elif diff == 0: if a_i1 == 1: event[-1][1] = i if c[1] is not None and i - event[-1][0] + 1 >= c[1]: c[0][i + 1] = 0 output_c = event if output_c == c[2]: print('pass') else: print('WRONG!') print('Input: %r max_len=%r' % (c[0], c[1])) print('Output: %r' % array_to_event(c[0], max_len=c[1])) print('Desired output: %r' % c[2]) print('-' * 50)
deep-smoke-machine
positive
def cluster(new_articles, snip=True): """ Clusters a list of Articles into Events. Set `snip=False` if you do not want the events and stories generated. """ if os.path.exists(LOCK): raise LockException('The hierarchy is locked.') open(LOCK, 'a').close() try: <DeepExtract> PATH = os.path.expanduser(conf['hierarchy_path']) if os.path.exists(PATH): h = Hierarchy.load(PATH) else: h = Hierarchy(metric=conf['metric'], lower_limit_scale=conf['lower_limit_scale'], upper_limit_scale=conf['upper_limit_scale']) </DeepExtract> <DeepExtract> (pub_vecs, bow_vecs, con_vecs) = ([], [], []) for a in new_articles: pub_vecs.append(np.array([a.published])) bow_vecs.append(vectorize(a.text)) con_vecs.append(concept_vectorize([c.slug for c in a.concepts])) pub_vecs = normalize(csr_matrix(pub_vecs), copy=False) bow_vecs = normalize(csr_matrix(bow_vecs), copy=False) con_vecs = normalize(csr_matrix(con_vecs), copy=False) vecs = hstack([pub_vecs, bow_vecs, con_vecs]) vecs = vecs.tolil() vecs[:, 0] *= conf['weights'][0] vecs[:, 1:101] *= conf['weights'][1] vecs[:, 101:] *= conf['weights'][2] vecs = vecs.toarray() </DeepExtract> node_ids = h.fit(vecs) for (i, a) in enumerate(new_articles): a.node_id = int(node_ids[i]) db.session.commit() h.save(os.path.expanduser(conf['hierarchy_path'])) if snip: <DeepExtract> h = h event_clusters = h.clusters(distance_threshold=conf['event_threshold'], with_labels=False) story_clusters = h.clusters(distance_threshold=conf['story_threshold'], with_labels=False) event_clusters = [clus for clus in event_clusters if len(clus) >= conf['min_articles']] process_events(h, event_clusters) story_clusters_ = [] for clus in story_clusters: events = [] processed_articles = [] for a_id in clus: a_id = a_id.item() if a_id not in processed_articles: a = Article.query.filter_by(node_id=a_id).first() if a.events: e = Article.query.filter_by(node_id=a_id).first().events[0] processed_articles += [a.node_id for a in e.articles] events.append(e.id) else: processed_articles.append(a_id) story_clusters_.append(events) story_clusters = [clus for clus in story_clusters_ if len(clus) >= conf['min_events']] process_stories(story_clusters) </DeepExtract> finally: os.remove(LOCK)
def cluster(new_articles, snip=True): """ Clusters a list of Articles into Events. Set `snip=False` if you do not want the events and stories generated. """ if os.path.exists(LOCK): raise LockException('The hierarchy is locked.') open(LOCK, 'a').close() try: PATH = os.path.expanduser(conf['hierarchy_path']) if os.path.exists(PATH): h = Hierarchy.load(PATH) else: h = Hierarchy(metric=conf['metric'], lower_limit_scale=conf['lower_limit_scale'], upper_limit_scale=conf['upper_limit_scale']) (pub_vecs, bow_vecs, con_vecs) = ([], [], []) for a in new_articles: pub_vecs.append(np.array([a.published])) bow_vecs.append(vectorize(a.text)) con_vecs.append(concept_vectorize([c.slug for c in a.concepts])) pub_vecs = normalize(csr_matrix(pub_vecs), copy=False) bow_vecs = normalize(csr_matrix(bow_vecs), copy=False) con_vecs = normalize(csr_matrix(con_vecs), copy=False) vecs = hstack([pub_vecs, bow_vecs, con_vecs]) vecs = vecs.tolil() vecs[:, 0] *= conf['weights'][0] vecs[:, 1:101] *= conf['weights'][1] vecs[:, 101:] *= conf['weights'][2] vecs = vecs.toarray() node_ids = h.fit(vecs) for (i, a) in enumerate(new_articles): a.node_id = int(node_ids[i]) db.session.commit() h.save(os.path.expanduser(conf['hierarchy_path'])) if snip: h = h event_clusters = h.clusters(distance_threshold=conf['event_threshold'], with_labels=False) story_clusters = h.clusters(distance_threshold=conf['story_threshold'], with_labels=False) event_clusters = [clus for clus in event_clusters if len(clus) >= conf['min_articles']] process_events(h, event_clusters) story_clusters_ = [] for clus in story_clusters: events = [] processed_articles = [] for a_id in clus: a_id = a_id.item() if a_id not in processed_articles: a = Article.query.filter_by(node_id=a_id).first() if a.events: e = Article.query.filter_by(node_id=a_id).first().events[0] processed_articles += [a.node_id for a in e.articles] events.append(e.id) else: processed_articles.append(a_id) story_clusters_.append(events) story_clusters = [clus for clus in story_clusters_ if len(clus) >= conf['min_events']] process_stories(story_clusters) finally: os.remove(LOCK)
argos
positive
def test_version_for_specified_package_not_found(mocker): dummy_packages = [{'name': 'package1', 'version': '1.2.3', 'releaseVersion': 0}, {'name': 'package2', 'version': '1.2.4', 'releaseVersion': 0}, {'name': 'package1', 'version': '1.2.5', 'releaseVersion': 0}] <DeepExtract> pm = PackageManager() pm._get_packages = mocker.MagicMock(return_value=dummy_packages) pm = pm </DeepExtract> versions = pm.get_package_versions(package_name='package_not_found') assert versions == []
def test_version_for_specified_package_not_found(mocker): dummy_packages = [{'name': 'package1', 'version': '1.2.3', 'releaseVersion': 0}, {'name': 'package2', 'version': '1.2.4', 'releaseVersion': 0}, {'name': 'package1', 'version': '1.2.5', 'releaseVersion': 0}] pm = PackageManager() pm._get_packages = mocker.MagicMock(return_value=dummy_packages) pm = pm versions = pm.get_package_versions(package_name='package_not_found') assert versions == []
dcos-jenkins-service
positive
def __init__(self, modeldir=None, segarch=None, segvocab=None, segsizes=None, segdiv=None, epoch=None): if modeldir == None: modeldir = 'datasets/segmodel' if segvocab == None: segvocab = 'baseline' if segarch == None: segarch = ('resnet50_dilated8', 'ppm_bilinear_deepsup') if segdiv == None: segdiv = 'undivided' elif isinstance(segarch, str): segarch = segarch.split(',') <DeepExtract> segmodel_dir = 'datasets/segmodel/%s-%s-%s' % ((segvocab,) + segarch) with open(os.path.join(segmodel_dir, 'labels.json')) as f: labeldata = EasyDict(json.load(f)) if epoch is None: choices = [os.path.basename(n)[14:-4] for n in glob.glob(os.path.join(segmodel_dir, 'encoder_epoch_*.pth'))] epoch = max([int(c) for c in choices if c.isdigit()]) segbuilder = segmodel_module.ModelBuilder() seg_encoder = segbuilder.build_encoder(arch=segarch[0], weights=os.path.join(segmodel_dir, 'encoder_epoch_%d.pth' % epoch)) seg_decoder = segbuilder.build_decoder(arch=segarch[1], use_softmax=True, num_class=len(labeldata.labels), weights=os.path.join(segmodel_dir, 'decoder_epoch_%d.pth' % epoch)) segmodel = segmodel_module.SegmentationModule(seg_encoder, seg_decoder, torch.nn.NLLLoss(ignore_index=-1)) segmodel.categories = [cat.name for cat in labeldata.categories] segmodel.labels = [label.name for label in labeldata.labels] categories = OrderedDict() label_category = numpy.zeros(len(segmodel.labels), dtype=int) for (i, label) in enumerate(labeldata.labels): label_category[i] = segmodel.categories.index(label.category) segmodel.meta = labeldata segmodel.eval() segmodel = segmodel </DeepExtract> if segsizes is None: segsizes = getattr(segmodel.meta, 'segsizes', [256]) self.segsizes = segsizes assert len(segmodel.meta.labels) == list((c for c in segmodel.modules() if isinstance(c, torch.nn.Conv2d)))[-1].out_channels segmodel.cuda() self.segmodel = segmodel self.segdiv = segdiv self.bgr = segmodel.meta.imageformat.byteorder == 'BGR' self.imagemean = torch.tensor(segmodel.meta.imageformat.mean) self.imagestd = torch.tensor(segmodel.meta.imageformat.stdev) self.labelmap = {'-': 0} self.channelmap = {'-': []} self.labels = [('-', '-')] num_labels = 1 self.num_underlying_classes = len(segmodel.meta.labels) for (i, label) in enumerate(segmodel.meta.labels): if label.name not in self.channelmap: self.channelmap[label.name] = [] self.channelmap[label.name].append(i) if getattr(label, 'internal', None) or label.name in self.labelmap: continue self.labelmap[label.name] = num_labels num_labels += 1 self.labels.append((label.name, label.category)) self.category_indexes = {category.name: [i for (i, label) in enumerate(segmodel.meta.labels) if label.category == category.name] for category in segmodel.meta.categories} self.catindexmap = {} for (catname, indexlist) in self.category_indexes.items(): for (index, i) in enumerate(indexlist): self.catindexmap[segmodel.meta.labels[i].name] = (catname, index) self.category_map = {catname: torch.tensor([self.labelmap.get(segmodel.meta.labels[ind].name, 0) for ind in catindex]) for (catname, catindex) in self.category_indexes.items()} self.category_rules = segmodel.meta.categories mult = 1 if self.segdiv == 'quad': mult = 5 suffixes = ['t', 'l', 'b', 'r'] divided_labels = [] for suffix in suffixes: divided_labels.extend([('%s-%s' % (label, suffix), cat) for (label, cat) in self.labels[1:]]) self.channelmap.update({'%s-%s' % (label, suffix): self.channelmap[label] for (label, cat) in self.labels[1:]}) self.labels.extend(divided_labels) self.channellist = [self.channelmap[name] for (name, _) in 
self.labels]
def __init__(self, modeldir=None, segarch=None, segvocab=None, segsizes=None, segdiv=None, epoch=None): if modeldir == None: modeldir = 'datasets/segmodel' if segvocab == None: segvocab = 'baseline' if segarch == None: segarch = ('resnet50_dilated8', 'ppm_bilinear_deepsup') if segdiv == None: segdiv = 'undivided' elif isinstance(segarch, str): segarch = segarch.split(',') segmodel_dir = 'datasets/segmodel/%s-%s-%s' % ((segvocab,) + segarch) with open(os.path.join(segmodel_dir, 'labels.json')) as f: labeldata = EasyDict(json.load(f)) if epoch is None: choices = [os.path.basename(n)[14:-4] for n in glob.glob(os.path.join(segmodel_dir, 'encoder_epoch_*.pth'))] epoch = max([int(c) for c in choices if c.isdigit()]) segbuilder = segmodel_module.ModelBuilder() seg_encoder = segbuilder.build_encoder(arch=segarch[0], weights=os.path.join(segmodel_dir, 'encoder_epoch_%d.pth' % epoch)) seg_decoder = segbuilder.build_decoder(arch=segarch[1], use_softmax=True, num_class=len(labeldata.labels), weights=os.path.join(segmodel_dir, 'decoder_epoch_%d.pth' % epoch)) segmodel = segmodel_module.SegmentationModule(seg_encoder, seg_decoder, torch.nn.NLLLoss(ignore_index=-1)) segmodel.categories = [cat.name for cat in labeldata.categories] segmodel.labels = [label.name for label in labeldata.labels] categories = OrderedDict() label_category = numpy.zeros(len(segmodel.labels), dtype=int) for (i, label) in enumerate(labeldata.labels): label_category[i] = segmodel.categories.index(label.category) segmodel.meta = labeldata segmodel.eval() segmodel = segmodel if segsizes is None: segsizes = getattr(segmodel.meta, 'segsizes', [256]) self.segsizes = segsizes assert len(segmodel.meta.labels) == list((c for c in segmodel.modules() if isinstance(c, torch.nn.Conv2d)))[-1].out_channels segmodel.cuda() self.segmodel = segmodel self.segdiv = segdiv self.bgr = segmodel.meta.imageformat.byteorder == 'BGR' self.imagemean = torch.tensor(segmodel.meta.imageformat.mean) self.imagestd = torch.tensor(segmodel.meta.imageformat.stdev) self.labelmap = {'-': 0} self.channelmap = {'-': []} self.labels = [('-', '-')] num_labels = 1 self.num_underlying_classes = len(segmodel.meta.labels) for (i, label) in enumerate(segmodel.meta.labels): if label.name not in self.channelmap: self.channelmap[label.name] = [] self.channelmap[label.name].append(i) if getattr(label, 'internal', None) or label.name in self.labelmap: continue self.labelmap[label.name] = num_labels num_labels += 1 self.labels.append((label.name, label.category)) self.category_indexes = {category.name: [i for (i, label) in enumerate(segmodel.meta.labels) if label.category == category.name] for category in segmodel.meta.categories} self.catindexmap = {} for (catname, indexlist) in self.category_indexes.items(): for (index, i) in enumerate(indexlist): self.catindexmap[segmodel.meta.labels[i].name] = (catname, index) self.category_map = {catname: torch.tensor([self.labelmap.get(segmodel.meta.labels[ind].name, 0) for ind in catindex]) for (catname, catindex) in self.category_indexes.items()} self.category_rules = segmodel.meta.categories mult = 1 if self.segdiv == 'quad': mult = 5 suffixes = ['t', 'l', 'b', 'r'] divided_labels = [] for suffix in suffixes: divided_labels.extend([('%s-%s' % (label, suffix), cat) for (label, cat) in self.labels[1:]]) self.channelmap.update({'%s-%s' % (label, suffix): self.channelmap[label] for (label, cat) in self.labels[1:]}) self.labels.extend(divided_labels) self.channellist = [self.channelmap[name] for (name, _) in self.labels]
dissect
positive
def export(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:mstns="http://schemas.microsoft.com/packaging/2015/06/nuspec.xsd" xmlns:None="http://schemas.microsoft.com/packaging/2015/06/nuspec.xsd" ', name_='filesType', pretty_print=True): imported_ns_def_ = GenerateDSNamespaceDefs_.get('filesType') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None and name_ == 'filesType': name_ = self.original_tagname_ if UseCapturedNS_ and self.ns_prefix_: namespaceprefix_ = self.ns_prefix_ + ':' <DeepExtract> if pretty_print: for idx in range(level): outfile.write(' ') </DeepExtract> outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '')) already_processed = set() <DeepExtract> if self.id is not None and 'id' not in already_processed: already_processed.add('id') outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')),)) if self.version is not None and 'version' not in already_processed: already_processed.add('version') outfile.write(' version=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.version), input_name='version')),)) if self.include is not None and 'include' not in already_processed: already_processed.add('include') outfile.write(' include=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.include), input_name='include')),)) if self.exclude is not None and 'exclude' not in already_processed: already_processed.add('exclude') outfile.write(' exclude=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.exclude), input_name='exclude')),)) </DeepExtract> if self.hasContent_(): outfile.write('>%s' % (eol_,)) <DeepExtract> pass </DeepExtract> <DeepExtract> if pretty_print: for idx in range(level): outfile.write(' ') </DeepExtract> outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_)) else: outfile.write('/>%s' % (eol_,))
def export(self, outfile, level, namespaceprefix_='', namespacedef_='xmlns:mstns="http://schemas.microsoft.com/packaging/2015/06/nuspec.xsd" xmlns:None="http://schemas.microsoft.com/packaging/2015/06/nuspec.xsd" ', name_='filesType', pretty_print=True): imported_ns_def_ = GenerateDSNamespaceDefs_.get('filesType') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None and name_ == 'filesType': name_ = self.original_tagname_ if UseCapturedNS_ and self.ns_prefix_: namespaceprefix_ = self.ns_prefix_ + ':' if pretty_print: for idx in range(level): outfile.write(' ') outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '')) already_processed = set() if self.id is not None and 'id' not in already_processed: already_processed.add('id') outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')),)) if self.version is not None and 'version' not in already_processed: already_processed.add('version') outfile.write(' version=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.version), input_name='version')),)) if self.include is not None and 'include' not in already_processed: already_processed.add('include') outfile.write(' include=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.include), input_name='include')),)) if self.exclude is not None and 'exclude' not in already_processed: already_processed.add('exclude') outfile.write(' exclude=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.exclude), input_name='exclude')),)) if self.hasContent_(): outfile.write('>%s' % (eol_,)) pass if pretty_print: for idx in range(level): outfile.write(' ') outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_)) else: outfile.write('/>%s' % (eol_,))
autopkg
positive
@pytest.mark.parametrize('timestepper', [timesteppers.SBDF1]) @pytest.mark.parametrize('Nphi', [16]) @pytest.mark.parametrize('Nr', [15]) @pytest.mark.parametrize('dtype', [np.complex128, np.float64]) @pytest.mark.parametrize('dealias', [1, 3 / 2]) def test_annulus_AdvectiveCFL(Nr, Nphi, timestepper, dtype, dealias): radii = (0.5, 2) <DeepExtract> c = coords.PolarCoordinates('phi', 'r') d = distributor.Distributor((c,)) dealias_tuple = (dealias, dealias) b = basis.AnnulusBasis(c, (Nphi, Nr), radii=radii, dealias=dealias_tuple, dtype=dtype) grid_scale_tuple = (1, 1) (phi, r) = b.local_grids(grid_scale_tuple) (x, y) = c.cartesian(phi, r) (c, d, db, phi, r, x, y) = (c, d, b, phi, r, x, y) </DeepExtract> f = field.Field(name='f', dist=d, bases=(db,), dtype=dtype) f['g'] = x * y u = operators.Gradient(f, c).evaluate() cfl = operators.AdvectiveCFL(u, c) cfl_freq = cfl.evaluate()['g'] comparison_freq = np.abs(u['g'][0]) / cfl.cfl_spacing()[0] comparison_freq += np.abs(u['g'][1]) / cfl.cfl_spacing()[1] assert np.allclose(cfl_freq, comparison_freq)
@pytest.mark.parametrize('timestepper', [timesteppers.SBDF1]) @pytest.mark.parametrize('Nphi', [16]) @pytest.mark.parametrize('Nr', [15]) @pytest.mark.parametrize('dtype', [np.complex128, np.float64]) @pytest.mark.parametrize('dealias', [1, 3 / 2]) def test_annulus_AdvectiveCFL(Nr, Nphi, timestepper, dtype, dealias): radii = (0.5, 2) c = coords.PolarCoordinates('phi', 'r') d = distributor.Distributor((c,)) dealias_tuple = (dealias, dealias) b = basis.AnnulusBasis(c, (Nphi, Nr), radii=radii, dealias=dealias_tuple, dtype=dtype) grid_scale_tuple = (1, 1) (phi, r) = b.local_grids(grid_scale_tuple) (x, y) = c.cartesian(phi, r) (c, d, db, phi, r, x, y) = (c, d, b, phi, r, x, y) f = field.Field(name='f', dist=d, bases=(db,), dtype=dtype) f['g'] = x * y u = operators.Gradient(f, c).evaluate() cfl = operators.AdvectiveCFL(u, c) cfl_freq = cfl.evaluate()['g'] comparison_freq = np.abs(u['g'][0]) / cfl.cfl_spacing()[0] comparison_freq += np.abs(u['g'][1]) / cfl.cfl_spacing()[1] assert np.allclose(cfl_freq, comparison_freq)
dedalus
positive
def test_cis_xlsx_to_oscal_catalog_simulate(tmp_path: pathlib.Path): """Test simulate call.""" <DeepExtract> config = configparser.ConfigParser() config_path = pathlib.Path(ocp_config) config.read(config_path) section = config['task.cis-xlsx-to-oscal-catalog'] section['output-dir'] = str(tmp_path) section = section </DeepExtract> tgt = cis_xlsx_to_oscal_catalog.CisXlsxToOscalCatalog(section) retval = tgt.simulate() assert retval == TaskOutcome.SIM_SUCCESS assert len(os.listdir(str(tmp_path))) == 0
def test_cis_xlsx_to_oscal_catalog_simulate(tmp_path: pathlib.Path): """Test simulate call.""" config = configparser.ConfigParser() config_path = pathlib.Path(ocp_config) config.read(config_path) section = config['task.cis-xlsx-to-oscal-catalog'] section['output-dir'] = str(tmp_path) section = section tgt = cis_xlsx_to_oscal_catalog.CisXlsxToOscalCatalog(section) retval = tgt.simulate() assert retval == TaskOutcome.SIM_SUCCESS assert len(os.listdir(str(tmp_path))) == 0
compliance-trestle
positive
def on_options_changed(self): super().on_options_changed() <DeepExtract> self._shape_id = self.get_option_value('shape_type') if self._shape_id == 'rectangle' or self._shape_id == 'polygon': self._join_id = cairo.LineJoin.MITER else: self._join_id = cairo.LineJoin.ROUND </DeepExtract> <DeepExtract> self._filling_id = self.get_option_value('shape_filling') </DeepExtract> <DeepExtract> self._outline_id = self.get_option_value('shape_outline') </DeepExtract> if self._path is None: return <DeepExtract> pixelart_mode = self.get_image().is_zoomed_surface_sharp() operation = {'tool_id': self.id, 'rgba_main': self.main_color, 'rgba_secd': self.secondary_color, 'antialias': self._use_antialias, 'operator': self._operator, 'line_join': self._join_id, 'line_width': self.tool_width, 'filling': self._filling_id, 'outline': self._outline_id, 'smooth': self._shape_id == 'freeshape' and (not pixelart_mode), 'closed': True, 'path': self._path} operation = operation </DeepExtract> if self._shape_id in ['polygon', 'freeshape']: operation['closed'] = False <DeepExtract> cairo_context = self.start_tool_operation(operation) line_width = operation['line_width'] cairo_context.set_line_width(line_width) cairo_context.set_line_join(operation['line_join']) if operation['smooth']: utilities_smooth_path(cairo_context, operation['path']) else: cairo_context.append_path(operation['path']) if operation['closed']: cairo_context.close_path() cairo_context.set_operator(operation['operator']) color_main = operation['rgba_main'] color_secd = operation['rgba_secd'] filling = operation['filling'] if filling == 'secondary': self._fill_plain(cairo_context, color_secd) elif filling == 'filled': self._fill_plain(cairo_context, color_main) elif filling == 'h-gradient': (x1, y1, x2, y2) = cairo_context.path_extents() pattern = self.get_pattern_h(x1, x2) self._fill_pattern(cairo_context, pattern, color_main, color_secd) elif filling == 'v-gradient': (x1, y1, x2, y2) = cairo_context.path_extents() pattern = self.get_pattern_v(y1, y2) self._fill_pattern(cairo_context, pattern, color_main, color_secd) elif filling == 'r-gradient': (x1, y1, x2, y2) = cairo_context.path_extents() ddx = abs(x1 - x2) / 2 ddy = abs(y1 - y2) / 2 center_x = min(x1, x2) + ddx center_y = min(y1, y2) + ddy rad = max(ddx, ddy) pattern = self.get_pattern_r(center_x, center_y, rad) self._fill_pattern(cairo_context, pattern, color_main, color_secd) else: pass outline = operation['outline'] cairo_context.set_source_rgba(*color_main) if outline == 'dashed': cairo_context.set_dash([2 * line_width, 2 * line_width]) if outline != 'none': cairo_context.stroke() cairo_context.new_path() </DeepExtract>
def on_options_changed(self): super().on_options_changed() self._shape_id = self.get_option_value('shape_type') if self._shape_id == 'rectangle' or self._shape_id == 'polygon': self._join_id = cairo.LineJoin.MITER else: self._join_id = cairo.LineJoin.ROUND self._filling_id = self.get_option_value('shape_filling') self._outline_id = self.get_option_value('shape_outline') if self._path is None: return pixelart_mode = self.get_image().is_zoomed_surface_sharp() operation = {'tool_id': self.id, 'rgba_main': self.main_color, 'rgba_secd': self.secondary_color, 'antialias': self._use_antialias, 'operator': self._operator, 'line_join': self._join_id, 'line_width': self.tool_width, 'filling': self._filling_id, 'outline': self._outline_id, 'smooth': self._shape_id == 'freeshape' and (not pixelart_mode), 'closed': True, 'path': self._path} operation = operation if self._shape_id in ['polygon', 'freeshape']: operation['closed'] = False cairo_context = self.start_tool_operation(operation) line_width = operation['line_width'] cairo_context.set_line_width(line_width) cairo_context.set_line_join(operation['line_join']) if operation['smooth']: utilities_smooth_path(cairo_context, operation['path']) else: cairo_context.append_path(operation['path']) if operation['closed']: cairo_context.close_path() cairo_context.set_operator(operation['operator']) color_main = operation['rgba_main'] color_secd = operation['rgba_secd'] filling = operation['filling'] if filling == 'secondary': self._fill_plain(cairo_context, color_secd) elif filling == 'filled': self._fill_plain(cairo_context, color_main) elif filling == 'h-gradient': (x1, y1, x2, y2) = cairo_context.path_extents() pattern = self.get_pattern_h(x1, x2) self._fill_pattern(cairo_context, pattern, color_main, color_secd) elif filling == 'v-gradient': (x1, y1, x2, y2) = cairo_context.path_extents() pattern = self.get_pattern_v(y1, y2) self._fill_pattern(cairo_context, pattern, color_main, color_secd) elif filling == 'r-gradient': (x1, y1, x2, y2) = cairo_context.path_extents() ddx = abs(x1 - x2) / 2 ddy = abs(y1 - y2) / 2 center_x = min(x1, x2) + ddx center_y = min(y1, y2) + ddy rad = max(ddx, ddy) pattern = self.get_pattern_r(center_x, center_y, rad) self._fill_pattern(cairo_context, pattern, color_main, color_secd) else: pass outline = operation['outline'] cairo_context.set_source_rgba(*color_main) if outline == 'dashed': cairo_context.set_dash([2 * line_width, 2 * line_width]) if outline != 'none': cairo_context.stroke() cairo_context.new_path() </DeepExtract>
drawing
positive
def _revlist_item_from_oid(repo, oid, require_meta): <DeepExtract> commit = cache_get_commit_item(oid, need_meta=require_meta) if commit and (not require_meta or isinstance(commit.meta, Metadata)): commit = commit it = repo.cat(hexlify(oid)) (_, typ, size) = next(it) assert typ == b'commit' commit = _commit_item_from_data(oid, b''.join(it)) if require_meta: meta = _find_treeish_oid_metadata(repo, commit.oid) if meta: commit = commit._replace(meta=meta) commit_key = b'itm:' + oid cache_notice(commit_key, commit, overwrite=True) commit = commit </DeepExtract> return RevList(oid=oid, meta=commit.meta)
def _revlist_item_from_oid(repo, oid, require_meta): commit = cache_get_commit_item(oid, need_meta=require_meta) if commit and (not require_meta or isinstance(commit.meta, Metadata)): commit = commit it = repo.cat(hexlify(oid)) (_, typ, size) = next(it) assert typ == b'commit' commit = _commit_item_from_data(oid, b''.join(it)) if require_meta: meta = _find_treeish_oid_metadata(repo, commit.oid) if meta: commit = commit._replace(meta=meta) commit_key = b'itm:' + oid cache_notice(commit_key, commit, overwrite=True) commit = commit return RevList(oid=oid, meta=commit.meta)
bup
positive
def Activated(self): import FreeCADGui sel = FreeCADGui.Selection.getSelection() if sel: <DeepExtract> import FreeCADGui from PySide import QtCore, QtGui mw = FreeCADGui.getMainWindow() if mw: st = mw.statusBar() statuswidget = st.findChild(QtGui.QToolBar, 'BIMStatusWidget') if statuswidget: nudgeValue = statuswidget.nudge.text().replace('&', '') dist = 0 if 'auto' in nudgeValue.lower(): unit = FreeCAD.ParamGet('User parameter:BaseApp/Preferences/Units').GetInt('UserSchema', 0) if unit in [2, 3, 5, 7]: scale = [1.5875, 3.175, 6.35, 25.4, 152.4, 304.8] else: scale = [1, 5, 10, 50, 100, 500] viewsize = FreeCADGui.ActiveDocument.ActiveView.getCameraNode().getViewVolume().getWidth() if viewsize < 250: dist = scale[0] elif viewsize < 750: dist = scale[1] elif viewsize < 4500: dist = scale[2] elif viewsize < 8000: dist = scale[3] elif viewsize < 25000: dist = scale[4] else: dist = scale[5] statuswidget.nudge.setText(translate('BIM', 'Auto')) else: try: dist = FreeCAD.Units.Quantity(nudgeValue) except ValueError: try: dist = float(nudgeValue) except ValueError: nudge = None else: dist = dist.Value if not dist: nudge = None if 'right' == 'dist': nudge = dist elif 'right' == 'up': nudge = FreeCAD.Vector(FreeCAD.DraftWorkingPlane.v).multiply(dist) elif 'right' == 'down': nudge = FreeCAD.Vector(FreeCAD.DraftWorkingPlane.v).negative().multiply(dist) elif 'right' == 'right': nudge = FreeCAD.Vector(FreeCAD.DraftWorkingPlane.u).multiply(dist) elif 'right' == 'left': nudge = FreeCAD.Vector(FreeCAD.DraftWorkingPlane.u).negative().multiply(dist) nudge = None </DeepExtract> if nudge: FreeCADGui.addModule('Draft') FreeCADGui.doCommand('Draft.move(' + self.toStr(sel) + ',FreeCAD.' + str(nudge) + ')') FreeCADGui.doCommand('FreeCAD.ActiveDocument.recompute()')
def Activated(self): import FreeCADGui sel = FreeCADGui.Selection.getSelection() if sel: import FreeCADGui from PySide import QtCore, QtGui mw = FreeCADGui.getMainWindow() if mw: st = mw.statusBar() statuswidget = st.findChild(QtGui.QToolBar, 'BIMStatusWidget') if statuswidget: nudgeValue = statuswidget.nudge.text().replace('&', '') dist = 0 if 'auto' in nudgeValue.lower(): unit = FreeCAD.ParamGet('User parameter:BaseApp/Preferences/Units').GetInt('UserSchema', 0) if unit in [2, 3, 5, 7]: scale = [1.5875, 3.175, 6.35, 25.4, 152.4, 304.8] else: scale = [1, 5, 10, 50, 100, 500] viewsize = FreeCADGui.ActiveDocument.ActiveView.getCameraNode().getViewVolume().getWidth() if viewsize < 250: dist = scale[0] elif viewsize < 750: dist = scale[1] elif viewsize < 4500: dist = scale[2] elif viewsize < 8000: dist = scale[3] elif viewsize < 25000: dist = scale[4] else: dist = scale[5] statuswidget.nudge.setText(translate('BIM', 'Auto')) else: try: dist = FreeCAD.Units.Quantity(nudgeValue) except ValueError: try: dist = float(nudgeValue) except ValueError: nudge = None else: dist = dist.Value if not dist: nudge = None if 'right' == 'dist': nudge = dist elif 'right' == 'up': nudge = FreeCAD.Vector(FreeCAD.DraftWorkingPlane.v).multiply(dist) elif 'right' == 'down': nudge = FreeCAD.Vector(FreeCAD.DraftWorkingPlane.v).negative().multiply(dist) elif 'right' == 'right': nudge = FreeCAD.Vector(FreeCAD.DraftWorkingPlane.u).multiply(dist) elif 'right' == 'left': nudge = FreeCAD.Vector(FreeCAD.DraftWorkingPlane.u).negative().multiply(dist) nudge = None if nudge: FreeCADGui.addModule('Draft') FreeCADGui.doCommand('Draft.move(' + self.toStr(sel) + ',FreeCAD.' + str(nudge) + ')') FreeCADGui.doCommand('FreeCAD.ActiveDocument.recompute()')
BIM_Workbench
positive
def unfreeze_path(self, t, Path): for i in range(self.M): <DeepExtract> if i in Path[0, :] and i not in self.bestPath[0:t, 0, :]: HATutils.set_req_grad(self.conv1[i], True) else: HATutils.set_req_grad(self.conv1[i], False) return </DeepExtract> <DeepExtract> if i in Path[1, :] and i not in self.bestPath[0:t, 1, :]: HATutils.set_req_grad(self.conv2[i], True) else: HATutils.set_req_grad(self.conv2[i], False) return </DeepExtract> <DeepExtract> if i in Path[2, :] and i not in self.bestPath[0:t, 2, :]: HATutils.set_req_grad(self.conv3[i], True) else: HATutils.set_req_grad(self.conv3[i], False) return </DeepExtract> <DeepExtract> if i in Path[3, :] and i not in self.bestPath[0:t, 3, :]: HATutils.set_req_grad(self.fc1[i], True) else: HATutils.set_req_grad(self.fc1[i], False) return </DeepExtract> <DeepExtract> if i in Path[4, :] and i not in self.bestPath[0:t, 4, :]: HATutils.set_req_grad(self.fc2[i], True) else: HATutils.set_req_grad(self.fc2[i], False) return </DeepExtract> return
def unfreeze_path(self, t, Path): for i in range(self.M): if i in Path[0, :] and i not in self.bestPath[0:t, 0, :]: HATutils.set_req_grad(self.conv1[i], True) else: HATutils.set_req_grad(self.conv1[i], False) return if i in Path[1, :] and i not in self.bestPath[0:t, 1, :]: HATutils.set_req_grad(self.conv2[i], True) else: HATutils.set_req_grad(self.conv2[i], False) return if i in Path[2, :] and i not in self.bestPath[0:t, 2, :]: HATutils.set_req_grad(self.conv3[i], True) else: HATutils.set_req_grad(self.conv3[i], False) return if i in Path[3, :] and i not in self.bestPath[0:t, 3, :]: HATutils.set_req_grad(self.fc1[i], True) else: HATutils.set_req_grad(self.fc1[i], False) return if i in Path[4, :] and i not in self.bestPath[0:t, 4, :]: HATutils.set_req_grad(self.fc2[i], True) else: HATutils.set_req_grad(self.fc2[i], False) return return
CLsurvey
positive
@maintain_gateway def list_identity(self): """List Identity for target device. Synchronous (waits for and returns response value). Updates self.identity w/ latest value returned. """ <DeepExtract> with self.gateway as connection: connection.list_identity(timeout=self.timeout) (rsp, ela) = client.await_response(connection, timeout=self.timeout) assert rsp, 'No response to List Identity within timeout: %r' % self.timeout (rsp, ela) = (rsp, ela) </DeepExtract> assert rsp.enip.status == 0, 'List Identity responded with EtherNet/IP error status: %r' % rsp.enip.status self.identity = rsp.enip.CIP.list_identity.CPF.item[0].identity_object log.normal('Device Identity: %r', self.identity) return self.identity
@maintain_gateway def list_identity(self): """List Identity for target device. Synchronous (waits for and returns response value). Updates self.identity w/ latest value returned. """ with self.gateway as connection: connection.list_identity(timeout=self.timeout) (rsp, ela) = client.await_response(connection, timeout=self.timeout) assert rsp, 'No response to List Identity within timeout: %r' % self.timeout (rsp, ela) = (rsp, ela) assert rsp.enip.status == 0, 'List Identity responded with EtherNet/IP error status: %r' % rsp.enip.status self.identity = rsp.enip.CIP.list_identity.CPF.item[0].identity_object log.normal('Device Identity: %r', self.identity) return self.identity
cpppo
positive
def generate_samples(model, tokenizer, args, device): context_count = 0 model.eval() with torch.no_grad(): while True: torch.distributed.barrier(group=mpu.get_model_parallel_group()) terminate_runs = 0 if mpu.get_model_parallel_rank() == 0: if args.input_text: raw_text = open(args.input_text).read().strip() else: raw_text = input('\nContext prompt (stop to exit) >>> ') while not raw_text: print('Prompt should not be empty!') raw_text = input('\nContext prompt (stop to exit) >>> ') if 'stop' in raw_text: terminate_runs = 1 else: context_tokens = tokenizer.encode(raw_text) context_length = len(context_tokens) if context_length >= args.seq_length // 2: print('\nContext length', context_length, '\nPlease give smaller context (half of the sequence length)!') continue else: context_tokens = tokenizer.encode('空文本') context_length = len(context_tokens) terminate_runs_tensor = torch.cuda.LongTensor([terminate_runs]) torch.distributed.broadcast(terminate_runs_tensor, mpu.get_model_parallel_src_rank(), group=mpu.get_model_parallel_group()) terminate_runs = terminate_runs_tensor[0].item() if terminate_runs == 1: return pad_id = tokenizer.encoder['<pad>'] args.eod_token = tokenizer.encoder['<eod>'] if context_length < args.seq_length: context_tokens.extend([pad_id] * (args.seq_length - context_length)) context_tokens_tensor = torch.cuda.LongTensor(context_tokens) context_length_tensor = torch.cuda.LongTensor([context_length]) torch.distributed.broadcast(context_length_tensor, mpu.get_model_parallel_src_rank(), group=mpu.get_model_parallel_group()) torch.distributed.broadcast(context_tokens_tensor, mpu.get_model_parallel_src_rank(), group=mpu.get_model_parallel_group()) context_length = context_length_tensor[0].item() <DeepExtract> tokens = context_tokens_tensor tokens = tokens.view(args.batch_size, -1).contiguous() tokens = tokens.to(device) (attention_mask, loss_mask, position_ids) = get_masks_and_position_ids(tokens, args.eod_token, args.reset_position_ids, args.reset_attention_mask) (tokens, attention_mask, position_ids) = (tokens, attention_mask, position_ids) </DeepExtract> start_time = time.time() counter = 0 org_context_length = context_length past_key_values = None while counter < org_context_length + args.out_seq_length: if counter == 0: (logits, past_key_values) = model(tokens[:, :context_length], position_ids[:, :context_length], attention_mask[:, :, :context_length, :context_length], past_key_values=past_key_values, use_cache=True) logits = logits[:, context_length - 1, :] else: (logits, past_key_values) = model(tokens[:, context_length - 1:context_length], position_ids[:, context_length - 1:context_length], attention_mask[:, :, context_length - 1, :context_length], past_key_values=past_key_values, use_cache=True) logits = logits[:, 0, :] if args.fp16: past_key_values = [x.half() for x in past_key_values] else: past_key_values = [x for x in past_key_values] <DeepExtract> if args.top_k > 0: indices_to_remove = logits < torch.topk(logits, args.top_k)[0][..., -1, None] logits[indices_to_remove] = filter_value if args.top_p > 0.0: logits = logits.view(logits.size()[1]).contiguous() (sorted_logits, sorted_indices) = torch.sort(logits, descending=True) cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) sorted_indices_to_remove = cumulative_probs > args.top_p sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 indices_to_remove = sorted_indices[sorted_indices_to_remove] logits[indices_to_remove] = filter_value 
logits = logits.view(1, -1).contiguous() logits = logits </DeepExtract> log_probs = F.softmax(logits / args.temperature, dim=-1) prev = torch.multinomial(log_probs, num_samples=1) tokens[0, context_length] = prev[0] torch.distributed.broadcast(tokens, mpu.get_model_parallel_src_rank(), group=mpu.get_model_parallel_group()) context_length += 1 counter += 1 output_tokens_list = tokens.view(-1).contiguous() decode_tokens = tokenizer.decode(output_tokens_list.tolist()) token_end = decode_tokens.find('<eod>') if mpu.get_model_parallel_rank() == 0 and (counter % 16 == 0 or token_end != -1): os.system('clear') print('\nTaken time {:.2f}\n'.format(time.time() - start_time), flush=True) print('\nContext:', raw_text, flush=True) trim_decode_tokens = decode_tokens[len(raw_text):decode_tokens.find('<eod>')] print('\nCPM:', trim_decode_tokens, flush=True) if token_end != -1: break if mpu.get_model_parallel_rank() == 0: os.system('clear') print('\nTaken time {:.2f}\n'.format(time.time() - start_time), flush=True) print('\nContext:', raw_text, flush=True) output_tokens_list = tokens.view(-1).contiguous() decode_tokens = tokenizer.decode(output_tokens_list.tolist()) trim_decode_tokens = decode_tokens[len(raw_text):decode_tokens.find('<eod>')] print('\nCPM:', trim_decode_tokens, flush=True) raw_text = None torch.distributed.barrier(group=mpu.get_model_parallel_group()) context_count += 1 if args.input_text: break
def generate_samples(model, tokenizer, args, device): context_count = 0 model.eval() with torch.no_grad(): while True: torch.distributed.barrier(group=mpu.get_model_parallel_group()) terminate_runs = 0 if mpu.get_model_parallel_rank() == 0: if args.input_text: raw_text = open(args.input_text).read().strip() else: raw_text = input('\nContext prompt (stop to exit) >>> ') while not raw_text: print('Prompt should not be empty!') raw_text = input('\nContext prompt (stop to exit) >>> ') if 'stop' in raw_text: terminate_runs = 1 else: context_tokens = tokenizer.encode(raw_text) context_length = len(context_tokens) if context_length >= args.seq_length // 2: print('\nContext length', context_length, '\nPlease give smaller context (half of the sequence length)!') continue else: context_tokens = tokenizer.encode('空文本') context_length = len(context_tokens) terminate_runs_tensor = torch.cuda.LongTensor([terminate_runs]) torch.distributed.broadcast(terminate_runs_tensor, mpu.get_model_parallel_src_rank(), group=mpu.get_model_parallel_group()) terminate_runs = terminate_runs_tensor[0].item() if terminate_runs == 1: return pad_id = tokenizer.encoder['<pad>'] args.eod_token = tokenizer.encoder['<eod>'] if context_length < args.seq_length: context_tokens.extend([pad_id] * (args.seq_length - context_length)) context_tokens_tensor = torch.cuda.LongTensor(context_tokens) context_length_tensor = torch.cuda.LongTensor([context_length]) torch.distributed.broadcast(context_length_tensor, mpu.get_model_parallel_src_rank(), group=mpu.get_model_parallel_group()) torch.distributed.broadcast(context_tokens_tensor, mpu.get_model_parallel_src_rank(), group=mpu.get_model_parallel_group()) context_length = context_length_tensor[0].item() tokens = context_tokens_tensor tokens = tokens.view(args.batch_size, -1).contiguous() tokens = tokens.to(device) (attention_mask, loss_mask, position_ids) = get_masks_and_position_ids(tokens, args.eod_token, args.reset_position_ids, args.reset_attention_mask) (tokens, attention_mask, position_ids) = (tokens, attention_mask, position_ids) start_time = time.time() counter = 0 org_context_length = context_length past_key_values = None while counter < org_context_length + args.out_seq_length: if counter == 0: (logits, past_key_values) = model(tokens[:, :context_length], position_ids[:, :context_length], attention_mask[:, :, :context_length, :context_length], past_key_values=past_key_values, use_cache=True) logits = logits[:, context_length - 1, :] else: (logits, past_key_values) = model(tokens[:, context_length - 1:context_length], position_ids[:, context_length - 1:context_length], attention_mask[:, :, context_length - 1, :context_length], past_key_values=past_key_values, use_cache=True) logits = logits[:, 0, :] if args.fp16: past_key_values = [x.half() for x in past_key_values] else: past_key_values = [x for x in past_key_values] if args.top_k > 0: indices_to_remove = logits < torch.topk(logits, args.top_k)[0][..., -1, None] logits[indices_to_remove] = filter_value if args.top_p > 0.0: logits = logits.view(logits.size()[1]).contiguous() (sorted_logits, sorted_indices) = torch.sort(logits, descending=True) cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) sorted_indices_to_remove = cumulative_probs > args.top_p sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 indices_to_remove = sorted_indices[sorted_indices_to_remove] logits[indices_to_remove] = filter_value logits = logits.view(1, -1).contiguous() 
logits = logits log_probs = F.softmax(logits / args.temperature, dim=-1) prev = torch.multinomial(log_probs, num_samples=1) tokens[0, context_length] = prev[0] torch.distributed.broadcast(tokens, mpu.get_model_parallel_src_rank(), group=mpu.get_model_parallel_group()) context_length += 1 counter += 1 output_tokens_list = tokens.view(-1).contiguous() decode_tokens = tokenizer.decode(output_tokens_list.tolist()) token_end = decode_tokens.find('<eod>') if mpu.get_model_parallel_rank() == 0 and (counter % 16 == 0 or token_end != -1): os.system('clear') print('\nTaken time {:.2f}\n'.format(time.time() - start_time), flush=True) print('\nContext:', raw_text, flush=True) trim_decode_tokens = decode_tokens[len(raw_text):decode_tokens.find('<eod>')] print('\nCPM:', trim_decode_tokens, flush=True) if token_end != -1: break if mpu.get_model_parallel_rank() == 0: os.system('clear') print('\nTaken time {:.2f}\n'.format(time.time() - start_time), flush=True) print('\nContext:', raw_text, flush=True) output_tokens_list = tokens.view(-1).contiguous() decode_tokens = tokenizer.decode(output_tokens_list.tolist()) trim_decode_tokens = decode_tokens[len(raw_text):decode_tokens.find('<eod>')] print('\nCPM:', trim_decode_tokens, flush=True) raw_text = None torch.distributed.barrier(group=mpu.get_model_parallel_group()) context_count += 1 if args.input_text: break
CPM-1-Generate
positive
def _set_backend_policy(self, port, policies): <DeepExtract> if not self.elb: backend_policies = {} server_descriptions = self.elb.get('BackendServerDescriptions', []) policies = {b['InstancePort']: b['PolicyNames'] for b in server_descriptions} backend_policies = policies </DeepExtract> current_policies = set(backend_policies.get(port, [])) if current_policies == set(policies): return False self.changed = True if self.check_mode: return True try: self.client.set_load_balancer_policies_for_backend_server(aws_retry=True, LoadBalancerName=self.name, InstancePort=port, PolicyNames=policies) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg='Failed to set load balancer backend policies', port=port, policies=policies) return True
def _set_backend_policy(self, port, policies): if not self.elb: backend_policies = {} server_descriptions = self.elb.get('BackendServerDescriptions', []) policies = {b['InstancePort']: b['PolicyNames'] for b in server_descriptions} backend_policies = policies current_policies = set(backend_policies.get(port, [])) if current_policies == set(policies): return False self.changed = True if self.check_mode: return True try: self.client.set_load_balancer_policies_for_backend_server(aws_retry=True, LoadBalancerName=self.name, InstancePort=port, PolicyNames=policies) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg='Failed to set load balancer backend policies', port=port, policies=policies) return True
amazon.aws
positive
def get_sensibility(self): reg = self.currentMaxSet_ref['read'] <DeepExtract> (self.sensibility, error) = self.i2c.readU8(reg) </DeepExtract> if self.debug & error: print('Error reading address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
def get_sensibility(self): reg = self.currentMaxSet_ref['read'] (self.sensibility, error) = self.i2c.readU8(reg) if self.debug & error: print('Error reading address: %d, reg: 0x%02X-%s' % (self.id, reg, self.reg_list[reg]))
choreograph-git
positive
def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file): """Write final predictions to the json file and log-odds of null if needed.""" tf.logging.info('Writing predictions to: %s' % output_prediction_file) tf.logging.info('Writing nbest to: %s' % output_nbest_file) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple('PrelimPrediction', ['feature_index', 'start_index', 'end_index', 'start_logit', 'end_logit']) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] score_null = 1000000 min_null_feature_index = 0 null_start_logit = 0 null_end_logit = 0 for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] <DeepExtract> index_and_score = sorted(enumerate(result.start_logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) start_indexes = best_indexes </DeepExtract> <DeepExtract> index_and_score = sorted(enumerate(result.end_logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) end_indexes = best_indexes </DeepExtract> if FLAGS.version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append(_PrelimPrediction(feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) if FLAGS.version_2_with_negative: prelim_predictions.append(_PrelimPrediction(feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit)) prelim_predictions = sorted(prelim_predictions, key=lambda x: x.start_logit + x.end_logit, reverse=True) _NbestPrediction = collections.namedtuple('NbestPrediction', ['text', 'start_logit', 'end_logit']) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: tok_tokens = feature.tokens[pred.start_index:pred.end_index + 1] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:orig_doc_end + 1] 
tok_text = ' '.join(tok_tokens) tok_text = tok_text.replace(' ##', '') tok_text = tok_text.replace('##', '') tok_text = tok_text.strip() tok_text = ' '.join(tok_text.split()) orig_text = ' '.join(orig_tokens) <DeepExtract> def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == ' ': continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = ''.join(ns_chars) final_text = (ns_text, ns_to_s_map) tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) tok_text = ' '.join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(tok_text) if start_position == -1: if FLAGS.verbose_logging: tf.logging.info("Unable to find text: '%s' in '%s'" % (tok_text, orig_text)) final_text = orig_text end_position = start_position + len(tok_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if FLAGS.verbose_logging: tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) final_text = orig_text tok_s_to_ns_map = {} for (i, tok_index) in six.iteritems(tok_ns_to_s_map): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if FLAGS.verbose_logging: tf.logging.info("Couldn't map start position") final_text = orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if FLAGS.verbose_logging: tf.logging.info("Couldn't map end position") final_text = orig_text output_text = orig_text[orig_start_position:orig_end_position + 1] final_text = output_text </DeepExtract> if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = '' seen_predictions[final_text] = True nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) if FLAGS.version_2_with_negative: if '' not in seen_predictions: nbest.append(_NbestPrediction(text='', start_logit=null_start_logit, end_logit=null_end_logit)) if not nbest: nbest.append(_NbestPrediction(text='empty', start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry <DeepExtract> if not total_scores: probs = [] max_score = None for score in total_scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in total_scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) probs = probs </DeepExtract> nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output['text'] = entry.text output['probability'] = probs[i] output['start_logit'] = entry.start_logit output['end_logit'] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 if not FLAGS.version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]['text'] else: score_diff = score_null - best_non_null_entry.start_logit - 
best_non_null_entry.end_logit scores_diff_json[example.qas_id] = score_diff if score_diff > FLAGS.null_score_diff_threshold: all_predictions[example.qas_id] = '' else: all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with tf.gfile.GFile(output_prediction_file, 'w') as writer: writer.write(json.dumps(all_predictions, indent=4) + '\n') with tf.gfile.GFile(output_nbest_file, 'w') as writer: writer.write(json.dumps(all_nbest_json, indent=4) + '\n') if FLAGS.version_2_with_negative: with tf.gfile.GFile(output_null_log_odds_file, 'w') as writer: writer.write(json.dumps(scores_diff_json, indent=4) + '\n')
def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file): """Write final predictions to the json file and log-odds of null if needed.""" tf.logging.info('Writing predictions to: %s' % output_prediction_file) tf.logging.info('Writing nbest to: %s' % output_nbest_file) example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple('PrelimPrediction', ['feature_index', 'start_index', 'end_index', 'start_logit', 'end_logit']) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] score_null = 1000000 min_null_feature_index = 0 null_start_logit = 0 null_end_logit = 0 for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] index_and_score = sorted(enumerate(result.start_logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) start_indexes = best_indexes index_and_score = sorted(enumerate(result.end_logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) end_indexes = best_indexes if FLAGS.version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append(_PrelimPrediction(feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) if FLAGS.version_2_with_negative: prelim_predictions.append(_PrelimPrediction(feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit)) prelim_predictions = sorted(prelim_predictions, key=lambda x: x.start_logit + x.end_logit, reverse=True) _NbestPrediction = collections.namedtuple('NbestPrediction', ['text', 'start_logit', 'end_logit']) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: tok_tokens = feature.tokens[pred.start_index:pred.end_index + 1] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:orig_doc_end + 1] tok_text = ' '.join(tok_tokens) tok_text = tok_text.replace(' ##', '') tok_text = tok_text.replace('##', '') tok_text = tok_text.strip() tok_text = ' '.join(tok_text.split()) orig_text = ' '.join(orig_tokens) def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == ' ': continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = ''.join(ns_chars) final_text = (ns_text, ns_to_s_map) tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) tok_text = ' '.join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(tok_text) if start_position == -1: if FLAGS.verbose_logging: tf.logging.info("Unable to find text: '%s' in '%s'" % (tok_text, orig_text)) final_text = orig_text end_position = start_position + len(tok_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if FLAGS.verbose_logging: tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) final_text = orig_text tok_s_to_ns_map = {} for (i, tok_index) in six.iteritems(tok_ns_to_s_map): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if FLAGS.verbose_logging: tf.logging.info("Couldn't map start position") final_text = orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if FLAGS.verbose_logging: tf.logging.info("Couldn't map end position") final_text = orig_text output_text = orig_text[orig_start_position:orig_end_position + 1] final_text = output_text if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = '' seen_predictions[final_text] = True nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) if FLAGS.version_2_with_negative: if '' not in seen_predictions: nbest.append(_NbestPrediction(text='', start_logit=null_start_logit, end_logit=null_end_logit)) if not nbest: nbest.append(_NbestPrediction(text='empty', start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry if not total_scores: probs = [] max_score = None for score in total_scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in total_scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) probs = probs nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output['text'] = entry.text output['probability'] = probs[i] output['start_logit'] = entry.start_logit output['end_logit'] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 if not FLAGS.version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]['text'] else: score_diff = score_null - best_non_null_entry.start_logit - best_non_null_entry.end_logit scores_diff_json[example.qas_id] = score_diff if score_diff > FLAGS.null_score_diff_threshold: all_predictions[example.qas_id] = '' else: all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json with tf.gfile.GFile(output_prediction_file, 'w') as writer: writer.write(json.dumps(all_predictions, indent=4) + '\n') with tf.gfile.GFile(output_nbest_file, 'w') as writer: writer.write(json.dumps(all_nbest_json, indent=4) + '\n') if FLAGS.version_2_with_negative: with tf.gfile.GFile(output_null_log_odds_file, 'w') as writer: writer.write(json.dumps(scores_diff_json, indent=4) + '\n')
coref
positive
def injecting_nodes(data): """ injecting nodes to adj, features, and assign labels to the injected nodes """ (adj, features, labels) = (data.adj, data.features, data.labels) N = adj.shape[0] D = features.shape[1] n_added = int(args.ratio * N) print('number of injected nodes: %s' % n_added) data.adj = reshape_mx(adj, shape=(N + n_added, N + n_added)) enlarged_features = reshape_mx(features, shape=(N + n_added, D)) <DeepExtract> enlarged_features = enlarged_features.tolil() avg = np.tile(enlarged_features.mean(0), (n_added, 1)) enlarged_features[-n_added:] = avg + np.random.normal(0, 1, (n_added, enlarged_features.shape[1])) data.features = enlarged_features </DeepExtract> data.features = normalize_feature(data.features) injected_labels = np.random.choice(labels.max() + 1, n_added) data.labels = np.hstack((labels, injected_labels))
def injecting_nodes(data): """ injecting nodes to adj, features, and assign labels to the injected nodes """ (adj, features, labels) = (data.adj, data.features, data.labels) N = adj.shape[0] D = features.shape[1] n_added = int(args.ratio * N) print('number of injected nodes: %s' % n_added) data.adj = reshape_mx(adj, shape=(N + n_added, N + n_added)) enlarged_features = reshape_mx(features, shape=(N + n_added, D)) enlarged_features = enlarged_features.tolil() avg = np.tile(enlarged_features.mean(0), (n_added, 1)) enlarged_features[-n_added:] = avg + np.random.normal(0, 1, (n_added, enlarged_features.shape[1])) data.features = enlarged_features data.features = normalize_feature(data.features) injected_labels = np.random.choice(labels.max() + 1, n_added) data.labels = np.hstack((labels, injected_labels))
DeepRobust
positive
def readSpectrumFromFile(self, fileName): f = open(fileName, 'r') data = [] for d in f: data.append(d.strip()) self.M = int(data[0]) self.N = int(data[1]) data = [int(s) for s in data[2].strip().split()] self.spectrum = sorted(data) self.parentMass = max(data) self.spectrumDict = dict() for s in data: self.spectrumDict[s] = self.spectrumDict.get(s, 0) + 1 <DeepExtract> l = len(self.spectrum) cDict = dict() for i in range(l): for j in range(i + 1, l): a = self.spectrum[j] - self.spectrum[i] if 57 <= a and a <= 200: cDict[a] = cDict.get(a, 0) + 1 sortedMass = sorted(cDict.items(), key=lambda a: a[1], reverse=True) mass = [str(a[0]) for a in sortedMass] multi = [a[1] for a in sortedMass] t = multi[self.M - 1] for j in range(self.M, l): if multi[j] < t: self.mass = mass[:j] self.mass = mass </DeepExtract> return
def readSpectrumFromFile(self, fileName): f = open(fileName, 'r') data = [] for d in f: data.append(d.strip()) self.M = int(data[0]) self.N = int(data[1]) data = [int(s) for s in data[2].strip().split()] self.spectrum = sorted(data) self.parentMass = max(data) self.spectrumDict = dict() for s in data: self.spectrumDict[s] = self.spectrumDict.get(s, 0) + 1 l = len(self.spectrum) cDict = dict() for i in range(l): for j in range(i + 1, l): a = self.spectrum[j] - self.spectrum[i] if 57 <= a and a <= 200: cDict[a] = cDict.get(a, 0) + 1 sortedMass = sorted(cDict.items(), key=lambda a: a[1], reverse=True) mass = [str(a[0]) for a in sortedMass] multi = [a[1] for a in sortedMass] t = multi[self.M - 1] for j in range(self.M, l): if multi[j] < t: self.mass = mass[:j] self.mass = mass return
Coursera-Bioinformatics
positive
def get_market_depth(self, market): """return sum of all bids and asks""" <DeepExtract> r = self.api({'command': 'returnOrderBook', 'currencyPair': self.format_pair(market), 'depth': depth}) order_book = {k: v for (k, v) in r.items() if k in ['asks', 'bids']} </DeepExtract> return {'bids': sum([Decimal(i[0]) * Decimal(i[1]) for i in order_book['bids']]), 'asks': sum([Decimal(i[1]) for i in order_book['asks']])}
def get_market_depth(self, market): """return sum of all bids and asks""" r = self.api({'command': 'returnOrderBook', 'currencyPair': self.format_pair(market), 'depth': depth}) order_book = {k: v for (k, v) in r.items() if k in ['asks', 'bids']} return {'bids': sum([Decimal(i[0]) * Decimal(i[1]) for i in order_book['bids']]), 'asks': sum([Decimal(i[1]) for i in order_book['asks']])}
cryptotik
positive
def __init__(self, pool_host, pool_port, cowrie_plugin): self.cowrie_plugin = cowrie_plugin self.pool_ip: str = pool_host self.pool_port: int = pool_port self.pool_ready: bool = False self.client_factory = PoolClientFactory(self) <DeepExtract> if not True and (not self.pool_ready): raise PoolNotReadyError() endpoint = TCP4ClientEndpoint(reactor, self.pool_ip, self.pool_port, timeout=10) d = endpoint.connect(self.client_factory) </DeepExtract> d.addCallback(self.initial_pool_connection_success) d.addErrback(self.initial_pool_connection_error)
def __init__(self, pool_host, pool_port, cowrie_plugin): self.cowrie_plugin = cowrie_plugin self.pool_ip: str = pool_host self.pool_port: int = pool_port self.pool_ready: bool = False self.client_factory = PoolClientFactory(self) if not True and (not self.pool_ready): raise PoolNotReadyError() endpoint = TCP4ClientEndpoint(reactor, self.pool_ip, self.pool_port, timeout=10) d = endpoint.connect(self.client_factory) d.addCallback(self.initial_pool_connection_success) d.addErrback(self.initial_pool_connection_error)
cowrie
positive
def __init__(self): global FPS_CLOCK, DISPLAYSURF, BASIC_FONT pygame.init() FPS_CLOCK = pygame.time.Clock() DISPLAYSURF = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT)) pygame.display.set_caption('Dodge') BASIC_FONT = pygame.font.Font('freesansbold.ttf', 16) self.init = True self.start_time = time.time() self.my_radius = 10 self.my_init_position = [CENTER_X - int(self.my_radius / 2), CENTER_Y - int(self.my_radius / 2)] self.my_position = self.my_init_position self.my_speed = 10 self.num_balls = 5 self.gap_balls = 50 self.min_ball_speed = 3.0 self.max_ball_speed = 6.0 <DeepExtract> rand_pos_x = 0 rand_pos_y = 0 rand_vel_x = 0 rand_vel_y = 0 ball_list = [] for i in range(self.num_balls): ball_list.append([]) rand_pos_x = random.random() rand_pos_y = random.random() rand_vel_x = random.random() rand_vel_y = random.random() ball_list[i].append(i) if rand_pos_x > 0.5: ball_list[i].append(random.randint(CENTER_X + self.gap_balls, WINDOW_WIDTH - self.gap_balls)) else: ball_list[i].append(random.randint(self.gap_balls, CENTER_X - self.gap_balls)) if rand_pos_y > 0.5: ball_list[i].append(random.randint(CENTER_Y + self.gap_balls, WINDOW_HEIGHT - self.gap_balls)) else: ball_list[i].append(random.randint(TOP_WIDTH + self.gap_balls, CENTER_Y - self.gap_balls)) if rand_vel_x > 0.5: ball_list[i].append(random.uniform(self.min_ball_speed, self.max_ball_speed)) else: ball_list[i].append(-random.uniform(self.min_ball_speed, self.max_ball_speed)) if rand_vel_y > 0.5: ball_list[i].append(random.uniform(self.min_ball_speed, self.max_ball_speed)) else: ball_list[i].append(-random.uniform(self.min_ball_speed, self.max_ball_speed)) self.ball_list = ball_list </DeepExtract> self.ball_radius = 5
def __init__(self): global FPS_CLOCK, DISPLAYSURF, BASIC_FONT pygame.init() FPS_CLOCK = pygame.time.Clock() DISPLAYSURF = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT)) pygame.display.set_caption('Dodge') BASIC_FONT = pygame.font.Font('freesansbold.ttf', 16) self.init = True self.start_time = time.time() self.my_radius = 10 self.my_init_position = [CENTER_X - int(self.my_radius / 2), CENTER_Y - int(self.my_radius / 2)] self.my_position = self.my_init_position self.my_speed = 10 self.num_balls = 5 self.gap_balls = 50 self.min_ball_speed = 3.0 self.max_ball_speed = 6.0 rand_pos_x = 0 rand_pos_y = 0 rand_vel_x = 0 rand_vel_y = 0 ball_list = [] for i in range(self.num_balls): ball_list.append([]) rand_pos_x = random.random() rand_pos_y = random.random() rand_vel_x = random.random() rand_vel_y = random.random() ball_list[i].append(i) if rand_pos_x > 0.5: ball_list[i].append(random.randint(CENTER_X + self.gap_balls, WINDOW_WIDTH - self.gap_balls)) else: ball_list[i].append(random.randint(self.gap_balls, CENTER_X - self.gap_balls)) if rand_pos_y > 0.5: ball_list[i].append(random.randint(CENTER_Y + self.gap_balls, WINDOW_HEIGHT - self.gap_balls)) else: ball_list[i].append(random.randint(TOP_WIDTH + self.gap_balls, CENTER_Y - self.gap_balls)) if rand_vel_x > 0.5: ball_list[i].append(random.uniform(self.min_ball_speed, self.max_ball_speed)) else: ball_list[i].append(-random.uniform(self.min_ball_speed, self.max_ball_speed)) if rand_vel_y > 0.5: ball_list[i].append(random.uniform(self.min_ball_speed, self.max_ball_speed)) else: ball_list[i].append(-random.uniform(self.min_ball_speed, self.max_ball_speed)) self.ball_list = ball_list self.ball_radius = 5
DRL
positive
def smallRotation(v): parent = v.parent if parent == None: return grandparent = v.parent.parent if parent.left == v: m = v.right v.right = parent parent.left = m else: m = v.left v.left = parent parent.right = m <DeepExtract> if parent == None: return parent.sum = parent.key + (parent.left.sum if parent.left != None else 0) + (parent.right.sum if parent.right != None else 0) if parent.left != None: parent.left.parent = parent if parent.right != None: parent.right.parent = parent </DeepExtract> <DeepExtract> if v == None: return v.sum = v.key + (v.left.sum if v.left != None else 0) + (v.right.sum if v.right != None else 0) if v.left != None: v.left.parent = v if v.right != None: v.right.parent = v </DeepExtract> v.parent = grandparent if grandparent != None: if grandparent.left == parent: grandparent.left = v else: grandparent.right = v
def smallRotation(v): parent = v.parent if parent == None: return grandparent = v.parent.parent if parent.left == v: m = v.right v.right = parent parent.left = m else: m = v.left v.left = parent parent.right = m if parent == None: return parent.sum = parent.key + (parent.left.sum if parent.left != None else 0) + (parent.right.sum if parent.right != None else 0) if parent.left != None: parent.left.parent = parent if parent.right != None: parent.right.parent = parent if v == None: return v.sum = v.key + (v.left.sum if v.left != None else 0) + (v.right.sum if v.right != None else 0) if v.left != None: v.left.parent = v if v.right != None: v.right.parent = v v.parent = grandparent if grandparent != None: if grandparent.left == parent: grandparent.left = v else: grandparent.right = v
Coursera_Data_Structures_and_Algorithms_Specialization
positive
def get_chunk(stream, name): <DeepExtract> DXBCHeader = namedtuple('DXBCHeader', ['signature', 'hash', 'unknown1', 'size', 'chunks']) header = DXBCHeader(*struct.unpack('<4s16s3I', stream.read(32))) assert header.signature == b'DXBC' assert header.unknown1 == 1 header = header </DeepExtract> <DeepExtract> chunk_offsets = struct.unpack('<{}I'.format(header.chunks), stream.read(4 * header.chunks)) </DeepExtract> for idx in range(header.chunks): <DeepExtract> stream.seek(chunk_offsets[idx]) (signature, size) = struct.unpack('<4sI', stream.read(8)) (signature, size) = (signature, size) </DeepExtract> if signature == name: return stream.read(size)
def get_chunk(stream, name): DXBCHeader = namedtuple('DXBCHeader', ['signature', 'hash', 'unknown1', 'size', 'chunks']) header = DXBCHeader(*struct.unpack('<4s16s3I', stream.read(32))) assert header.signature == b'DXBC' assert header.unknown1 == 1 header = header chunk_offsets = struct.unpack('<{}I'.format(header.chunks), stream.read(4 * header.chunks)) for idx in range(header.chunks): stream.seek(chunk_offsets[idx]) (signature, size) = struct.unpack('<4sI', stream.read(8)) (signature, size) = (signature, size) if signature == name: return stream.read(size)
3d-fixes
positive
def cursor(self, *cursors): """Instantiates and returns a cursor By default, :class:`Cursor` is returned. It is possible to also give a custom cursor through the cursor_class parameter, but it needs to be a subclass of :class:`Cursor` :param cursor: custom cursor class. :returns: instance of cursor, by default :class:`Cursor` :raises TypeError: cursor_class is not a subclass of Cursor. """ <DeepExtract> if not self._writer: if self._close_reason is None: raise InterfaceError("(0, 'Not connected')") else: raise InterfaceError(self._close_reason) </DeepExtract> self._last_usage = self._loop.time() try: if cursors and any((not issubclass(cursor, Cursor) for cursor in cursors)): raise TypeError('Custom cursor must be subclass of Cursor') except TypeError: raise TypeError('Custom cursor must be subclass of Cursor') if cursors and len(cursors) == 1: cur = cursors[0](self, self._echo) elif cursors: cursor_name = ''.join(map(lambda x: x.__name__, cursors)).replace('Cursor', '') + 'Cursor' cursor_class = type(cursor_name, cursors, {}) cur = cursor_class(self, self._echo) else: cur = self.cursorclass(self, self._echo) fut = self._loop.create_future() fut.set_result(cur) return _ContextManager(fut)
def cursor(self, *cursors): """Instantiates and returns a cursor By default, :class:`Cursor` is returned. It is possible to also give a custom cursor through the cursor_class parameter, but it needs to be a subclass of :class:`Cursor` :param cursor: custom cursor class. :returns: instance of cursor, by default :class:`Cursor` :raises TypeError: cursor_class is not a subclass of Cursor. """ if not self._writer: if self._close_reason is None: raise InterfaceError("(0, 'Not connected')") else: raise InterfaceError(self._close_reason) self._last_usage = self._loop.time() try: if cursors and any((not issubclass(cursor, Cursor) for cursor in cursors)): raise TypeError('Custom cursor must be subclass of Cursor') except TypeError: raise TypeError('Custom cursor must be subclass of Cursor') if cursors and len(cursors) == 1: cur = cursors[0](self, self._echo) elif cursors: cursor_name = ''.join(map(lambda x: x.__name__, cursors)).replace('Cursor', '') + 'Cursor' cursor_class = type(cursor_name, cursors, {}) cur = cursor_class(self, self._echo) else: cur = self.cursorclass(self, self._echo) fut = self._loop.create_future() fut.set_result(cur) return _ContextManager(fut)
aiomysql
positive
def depart_desc_signature(self, node): if not node.get('is_multiline'): <DeepExtract> pass </DeepExtract> if not self.v2: self.body.append(self.context.pop())
def depart_desc_signature(self, node): if not node.get('is_multiline'): pass if not self.v2: self.body.append(self.context.pop())
confluencebuilder
positive
def test_simple_signature(): @deal.ensure(lambda _: _.a > 0 and _.b > 0 and (_.result != 'same number')) def func(a, b) -> str: if a == b: return 'same number' else: return 'different numbers' assert func(1, 2) == 'different numbers' with pytest.raises(deal.PostContractError): <DeepExtract> if 0 == 1: return 'same number' else: return 'different numbers' </DeepExtract> with pytest.raises(deal.PostContractError): <DeepExtract> if 1 == 0: return 'same number' else: return 'different numbers' </DeepExtract> with pytest.raises(deal.PostContractError): <DeepExtract> if 1 == 1: return 'same number' else: return 'different numbers' </DeepExtract>
def test_simple_signature(): @deal.ensure(lambda _: _.a > 0 and _.b > 0 and (_.result != 'same number')) def func(a, b) -> str: if a == b: return 'same number' else: return 'different numbers' assert func(1, 2) == 'different numbers' with pytest.raises(deal.PostContractError): if 0 == 1: return 'same number' else: return 'different numbers' with pytest.raises(deal.PostContractError): if 1 == 0: return 'same number' else: return 'different numbers' with pytest.raises(deal.PostContractError): if 1 == 1: return 'same number' else: return 'different numbers'
deal
positive
def values_list(self, *fields, **kwargs): <DeepExtract> (fields, extra_filters) = [name if name in self.query.annotations else self.field_translator(name) for name in fields] </DeepExtract> return self._filter_extra(extra_filters).values_list(*fields, **kwargs)
def values_list(self, *fields, **kwargs): (fields, extra_filters) = [name if name in self.query.annotations else self.field_translator(name) for name in fields] return self._filter_extra(extra_filters).values_list(*fields, **kwargs)
django-hvad
positive
def process_other(in_path, member): <DeepExtract> if diff is None: return diff['before_header'] = in_path if member.isdir(): diff['before'] = '(directory)' elif member.issym() or member.islnk(): diff['before'] = member.linkname elif member.ischr(): diff['before'] = '(character device)' elif member.isblk(): diff['before'] = '(block device)' elif member.isfifo(): diff['before'] = '(fifo)' elif member.isdev(): diff['before'] = '(device)' elif member.isfile(): raise DockerUnexpectedError('should not be a regular file') else: diff['before'] = '(unknown filesystem object)' </DeepExtract> return (container_path, mode, False)
def process_other(in_path, member): if diff is None: return diff['before_header'] = in_path if member.isdir(): diff['before'] = '(directory)' elif member.issym() or member.islnk(): diff['before'] = member.linkname elif member.ischr(): diff['before'] = '(character device)' elif member.isblk(): diff['before'] = '(block device)' elif member.isfifo(): diff['before'] = '(fifo)' elif member.isdev(): diff['before'] = '(device)' elif member.isfile(): raise DockerUnexpectedError('should not be a regular file') else: diff['before'] = '(unknown filesystem object)' return (container_path, mode, False)
community.docker
positive
def conv_filter_tile(filters): (n_filters, n_channels, height, width) = filters.shape tile_shape = None if n_channels == 3: filters = np.transpose(filters, (0, 2, 3, 1)) else: tile_shape = (n_channels, n_filters) filters = np.transpose(filters, (1, 0, 2, 3)) filters = np.resize(filters, (n_filters * n_channels, height, width)) <DeepExtract> filters = filters.astype(float) filters -= np.min(filters) filters /= np.max(filters) + 1e-12 filters = filters </DeepExtract> return img_tile(filters, tile_shape=tile_shape)
def conv_filter_tile(filters): (n_filters, n_channels, height, width) = filters.shape tile_shape = None if n_channels == 3: filters = np.transpose(filters, (0, 2, 3, 1)) else: tile_shape = (n_channels, n_filters) filters = np.transpose(filters, (1, 0, 2, 3)) filters = np.resize(filters, (n_filters * n_channels, height, width)) filters = filters.astype(float) filters -= np.min(filters) filters /= np.max(filters) + 1e-12 filters = filters return img_tile(filters, tile_shape=tile_shape)
DualLearning
positive
def get_bboxes(self, cls_scores, pts_preds_init, pts_preds_refine, img_metas, cfg, rescale=False, nms=True): assert len(cls_scores) == len(pts_preds_refine) bbox_preds_refine = [self.points2bbox(pts_pred_refine) for pts_pred_refine in pts_preds_refine] num_levels = len(cls_scores) mlvl_points = [self.point_generators[i].grid_points(cls_scores[i].size()[-2:], self.point_strides[i]) for i in range(num_levels)] result_list = [] for img_id in range(len(img_metas)): cls_score_list = [cls_scores[i][img_id].detach() for i in range(num_levels)] bbox_pred_list = [bbox_preds_refine[i][img_id].detach() for i in range(num_levels)] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] <DeepExtract> assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_points) mlvl_bboxes = [] mlvl_scores = [] for (i_lvl, (cls_score, bbox_pred, points)) in enumerate(zip(cls_score_list, bbox_pred_list, mlvl_points)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: if self.use_sigmoid_cls: (max_scores, _) = scores.max(dim=1) else: (max_scores, _) = scores[:, 1:].max(dim=1) (_, topk_inds) = max_scores.topk(nms_pre) points = points[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1) bboxes = bbox_pred * self.point_strides[i_lvl] + bbox_pos_center x1 = bboxes[:, 0].clamp(min=0, max=img_shape[1]) y1 = bboxes[:, 1].clamp(min=0, max=img_shape[0]) x2 = bboxes[:, 2].clamp(min=0, max=img_shape[1]) y2 = bboxes[:, 3].clamp(min=0, max=img_shape[0]) bboxes = torch.stack([x1, y1, x2, y2], dim=-1) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) if self.use_sigmoid_cls: padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([padding, mlvl_scores], dim=1) if nms: (det_bboxes, det_labels) = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img) proposals = (det_bboxes, det_labels) else: proposals = (mlvl_bboxes, mlvl_scores) </DeepExtract> result_list.append(proposals) return result_list
def get_bboxes(self, cls_scores, pts_preds_init, pts_preds_refine, img_metas, cfg, rescale=False, nms=True): assert len(cls_scores) == len(pts_preds_refine) bbox_preds_refine = [self.points2bbox(pts_pred_refine) for pts_pred_refine in pts_preds_refine] num_levels = len(cls_scores) mlvl_points = [self.point_generators[i].grid_points(cls_scores[i].size()[-2:], self.point_strides[i]) for i in range(num_levels)] result_list = [] for img_id in range(len(img_metas)): cls_score_list = [cls_scores[i][img_id].detach() for i in range(num_levels)] bbox_pred_list = [bbox_preds_refine[i][img_id].detach() for i in range(num_levels)] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_points) mlvl_bboxes = [] mlvl_scores = [] for (i_lvl, (cls_score, bbox_pred, points)) in enumerate(zip(cls_score_list, bbox_pred_list, mlvl_points)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: if self.use_sigmoid_cls: (max_scores, _) = scores.max(dim=1) else: (max_scores, _) = scores[:, 1:].max(dim=1) (_, topk_inds) = max_scores.topk(nms_pre) points = points[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1) bboxes = bbox_pred * self.point_strides[i_lvl] + bbox_pos_center x1 = bboxes[:, 0].clamp(min=0, max=img_shape[1]) y1 = bboxes[:, 1].clamp(min=0, max=img_shape[0]) x2 = bboxes[:, 2].clamp(min=0, max=img_shape[1]) y2 = bboxes[:, 3].clamp(min=0, max=img_shape[0]) bboxes = torch.stack([x1, y1, x2, y2], dim=-1) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) if self.use_sigmoid_cls: padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([padding, mlvl_scores], dim=1) if nms: (det_bboxes, det_labels) = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img) proposals = (det_bboxes, det_labels) else: proposals = (mlvl_bboxes, mlvl_scores) result_list.append(proposals) return result_list
D2Det
positive
def _steps_stats(self, update_index=False): rewards = self.all_rewards[-self.interval:] dones = self.all_dones[-self.interval:] <DeepExtract> episode_rewards = [] episode_lengths = [] accum = 0.0 length = 0 for (r, d) in zip(rewards, dones): if not isinstance(d, bool): d = bool(d.flat[0]) r = float(r.flat[0]) if not d: accum += r length += 1 else: episode_rewards.append(accum) episode_lengths.append(length) accum = 0.0 length = 0 if length > 0: episode_rewards.append(accum) episode_lengths.append(length) (rewards, lengths) = (episode_rewards, episode_lengths) </DeepExtract> stats = {'episode_rewards': rewards, 'episode_lengths': lengths, 'num_episodes': len(rewards)} for key in self.values.keys(): idx = self.values_idx[key] stats[key] = self.values[key][idx:] if update_index: self.values_idx[key] = len(self.values[key]) - 1 return stats
def _steps_stats(self, update_index=False): rewards = self.all_rewards[-self.interval:] dones = self.all_dones[-self.interval:] episode_rewards = [] episode_lengths = [] accum = 0.0 length = 0 for (r, d) in zip(rewards, dones): if not isinstance(d, bool): d = bool(d.flat[0]) r = float(r.flat[0]) if not d: accum += r length += 1 else: episode_rewards.append(accum) episode_lengths.append(length) accum = 0.0 length = 0 if length > 0: episode_rewards.append(accum) episode_lengths.append(length) (rewards, lengths) = (episode_rewards, episode_lengths) stats = {'episode_rewards': rewards, 'episode_lengths': lengths, 'num_episodes': len(rewards)} for key in self.values.keys(): idx = self.values_idx[key] stats[key] = self.values[key][idx:] if update_index: self.values_idx[key] = len(self.values[key]) - 1 return stats
cherry
positive
def _create_test_attachment(self, path): url = reverse('wiki:attachments_index', kwargs={'path': path}) <DeepExtract> filename = kwargs.get('filename', 'test.txt') data = self.test_data.encode('utf-8') filedata = BytesIO(data) filestream = InMemoryUploadedFile(filedata, None, filename, 'text', len(data), None) filestream = filestream </DeepExtract> response = self.client.post(url, {'description': self.test_description, 'file': filestream, 'save': '1'}) self.assertRedirects(response, url)
def _create_test_attachment(self, path): url = reverse('wiki:attachments_index', kwargs={'path': path}) filename = kwargs.get('filename', 'test.txt') data = self.test_data.encode('utf-8') filedata = BytesIO(data) filestream = InMemoryUploadedFile(filedata, None, filename, 'text', len(data), None) filestream = filestream response = self.client.post(url, {'description': self.test_description, 'file': filestream, 'save': '1'}) self.assertRedirects(response, url)
django-wiki
positive
def mergeSort(arr, start, end): if start < end: mid = (start + end - 1) // 2 <DeepExtract> if start < mid: mid = (start + mid - 1) // 2 mergeSort(arr, start, mid) mergeSort(arr, mid + 1, mid) merge(arr, start, mid, mid) return arr </DeepExtract> <DeepExtract> if mid + 1 < end: mid = (mid + 1 + end - 1) // 2 mergeSort(arr, mid + 1, mid) mergeSort(arr, mid + 1, end) merge(arr, mid + 1, mid, end) return arr </DeepExtract> <DeepExtract> n1 = mid - start + 1 n2 = end - mid L = [0] * n1 R = [0] * n2 for i in range(0, n1): L[i] = arr[start + i] for j in range(0, n2): R[j] = arr[mid + 1 + j] i = 0 j = 0 k = start while i < n1 and j < n2: if L[i] <= R[j]: arr[k] = L[i] i += 1 else: arr[k] = R[j] j += 1 k += 1 while i < n1: arr[k] = L[i] i += 1 k += 1 while j < n2: arr[k] = R[j] j += 1 k += 1 </DeepExtract> return arr
def mergeSort(arr, start, end): if start < end: mid = (start + end - 1) // 2 if start < mid: mid = (start + mid - 1) // 2 mergeSort(arr, start, mid) mergeSort(arr, mid + 1, mid) merge(arr, start, mid, mid) return arr if mid + 1 < end: mid = (mid + 1 + end - 1) // 2 mergeSort(arr, mid + 1, mid) mergeSort(arr, mid + 1, end) merge(arr, mid + 1, mid, end) return arr n1 = mid - start + 1 n2 = end - mid L = [0] * n1 R = [0] * n2 for i in range(0, n1): L[i] = arr[start + i] for j in range(0, n2): R[j] = arr[mid + 1 + j] i = 0 j = 0 k = start while i < n1 and j < n2: if L[i] <= R[j]: arr[k] = L[i] i += 1 else: arr[k] = R[j] j += 1 k += 1 while i < n1: arr[k] = L[i] i += 1 k += 1 while j < n2: arr[k] = R[j] j += 1 k += 1 return arr
Competitive-Coding-Platforms
positive
def ScaleFrame(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: x = self._tab.Indirect(o + self._tab.Pos) from .ScaleFrame import ScaleFrame <DeepExtract> o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: x = self._tab.Indirect(o + self._tab.Pos) from .ScaleFrame import ScaleFrame obj = ScaleFrame() obj.Init(self._tab.Bytes, x) obj = obj obj = None </DeepExtract> obj.Init(self._tab.Bytes, x) return obj return None
def ScaleFrame(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: x = self._tab.Indirect(o + self._tab.Pos) from .ScaleFrame import ScaleFrame o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: x = self._tab.Indirect(o + self._tab.Pos) from .ScaleFrame import ScaleFrame obj = ScaleFrame() obj.Init(self._tab.Bytes, x) obj = obj obj = None obj.Init(self._tab.Bytes, x) return obj return None
csb2csd
positive
def dice_coefficient(target, pred, num_classes=2, mean=True, ignore=None): """Sørensen–Dice coefficient a.k.a. DSC a.k.a. F1 score (in %)""" <DeepExtract> cm = torch.empty(num_classes, 4, dtype=dtype, device=device) for c in range(num_classes): pos_pred = pred == c neg_pred = ~pos_pred pos_target = target == c if ignore is not None: ign_target = target == ignore else: ign_target = False ign_target = torch.tensor(ign_target, dtype=torch.bool, device=device) neg_target = ~pos_target true_pos = (pos_pred & pos_target & ~ign_target).sum(dtype=dtype) true_neg = (neg_pred & neg_target & ~ign_target).sum(dtype=dtype) false_pos = (pos_pred & neg_target & ~ign_target).sum(dtype=dtype) false_neg = (neg_pred & pos_target & ~ign_target).sum(dtype=dtype) cm[c] = torch.tensor([true_pos, true_neg, false_pos, false_neg]) if nan_when_empty and pos_target.sum(dtype=dtype) == 0: cm[c] = torch.tensor([float('nan')] * 4) cm = cm </DeepExtract> (tp, tn, fp, fn) = cm.transpose(0, 1) dsc = 2 * tp / (2 * tp + fp + fn + eps) if mean: dsc = dsc.mean().item() return dsc * 100
def dice_coefficient(target, pred, num_classes=2, mean=True, ignore=None): """Sørensen–Dice coefficient a.k.a. DSC a.k.a. F1 score (in %)""" cm = torch.empty(num_classes, 4, dtype=dtype, device=device) for c in range(num_classes): pos_pred = pred == c neg_pred = ~pos_pred pos_target = target == c if ignore is not None: ign_target = target == ignore else: ign_target = False ign_target = torch.tensor(ign_target, dtype=torch.bool, device=device) neg_target = ~pos_target true_pos = (pos_pred & pos_target & ~ign_target).sum(dtype=dtype) true_neg = (neg_pred & neg_target & ~ign_target).sum(dtype=dtype) false_pos = (pos_pred & neg_target & ~ign_target).sum(dtype=dtype) false_neg = (neg_pred & pos_target & ~ign_target).sum(dtype=dtype) cm[c] = torch.tensor([true_pos, true_neg, false_pos, false_neg]) if nan_when_empty and pos_target.sum(dtype=dtype) == 0: cm[c] = torch.tensor([float('nan')] * 4) cm = cm (tp, tn, fp, fn) = cm.transpose(0, 1) dsc = 2 * tp / (2 * tp + fp + fn + eps) if mean: dsc = dsc.mean().item() return dsc * 100
elektronn3
positive
def get_unet(nClasses, input_height=256, input_width=256, n_filters=16, dropout=0.1, batchnorm=True, n_channels=10): input_img = Input(shape=(input_height, input_width, n_channels)) <DeepExtract> x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(input_img) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c1 = x </DeepExtract> p1 = MaxPooling2D((2, 2))(c1) p1 = Dropout(dropout)(p1) <DeepExtract> x = Conv2D(filters=n_filters * 2, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(p1) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 2, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c2 = x </DeepExtract> p2 = MaxPooling2D((2, 2))(c2) p2 = Dropout(dropout)(p2) <DeepExtract> x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(p2) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c3 = x </DeepExtract> p3 = MaxPooling2D((2, 2))(c3) p3 = Dropout(dropout)(p3) <DeepExtract> x = Conv2D(filters=n_filters * 8, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(p3) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 8, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c4 = x </DeepExtract> p4 = MaxPooling2D(pool_size=(2, 2))(c4) p4 = Dropout(dropout)(p4) <DeepExtract> x = Conv2D(filters=n_filters * 16, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(p4) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 16, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c5 = x </DeepExtract> u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides=(2, 2), padding='same')(c5) u6 = concatenate([u6, c4]) u6 = Dropout(dropout)(u6) <DeepExtract> x = Conv2D(filters=n_filters * 8, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(u6) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 8, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c6 = x </DeepExtract> u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2), padding='same')(c6) u7 = concatenate([u7, c3]) u7 = Dropout(dropout)(u7) <DeepExtract> x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(u7) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c7 = x </DeepExtract> u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides=(2, 2), padding='same')(c7) u8 = concatenate([u8, c2]) u8 = Dropout(dropout)(u8) <DeepExtract> x = Conv2D(filters=n_filters * 2, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(u8) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 2, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c8 = x </DeepExtract> u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c8) u9 = concatenate([u9, c1], axis=3) u9 = Dropout(dropout)(u9) <DeepExtract> x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(u9) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c9 = x </DeepExtract> outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9) model = Model(inputs=[input_img], outputs=[outputs]) return model
def get_unet(nClasses, input_height=256, input_width=256, n_filters=16, dropout=0.1, batchnorm=True, n_channels=10): input_img = Input(shape=(input_height, input_width, n_channels)) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(input_img) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c1 = x p1 = MaxPooling2D((2, 2))(c1) p1 = Dropout(dropout)(p1) x = Conv2D(filters=n_filters * 2, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(p1) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 2, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c2 = x p2 = MaxPooling2D((2, 2))(c2) p2 = Dropout(dropout)(p2) x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(p2) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c3 = x p3 = MaxPooling2D((2, 2))(c3) p3 = Dropout(dropout)(p3) x = Conv2D(filters=n_filters * 8, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(p3) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 8, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c4 = x p4 = MaxPooling2D(pool_size=(2, 2))(c4) p4 = Dropout(dropout)(p4) x = Conv2D(filters=n_filters * 16, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(p4) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 16, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c5 = x u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides=(2, 2), padding='same')(c5) u6 = concatenate([u6, c4]) u6 = Dropout(dropout)(u6) x = Conv2D(filters=n_filters * 8, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(u6) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 8, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c6 = x u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2), padding='same')(c6) u7 = concatenate([u7, c3]) u7 = Dropout(dropout)(u7) x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(u7) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c7 = x u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides=(2, 2), padding='same')(c7) u8 = concatenate([u8, c2]) u8 = Dropout(dropout)(u8) x = Conv2D(filters=n_filters * 2, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(u8) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 2, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c8 = x u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c8) u9 = concatenate([u9, c1], axis=3) u9 = Dropout(dropout)(u9) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(u9) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c9 = x outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9) model = Model(inputs=[input_img], outputs=[outputs]) return model
activefire
positive
def applyFilter(self, event): self.opts.filterParameters['apply'] = True self.axstk.clear() self.ppm.axpp.clear() self.axs['Fron'].clear() self.axs['Prev'].clear() self.axs['Next'].clear() self.axs['Last'].clear() self.axs['Zoba'].clear() self.axs['Shdo'].clear() self.axs['Shfp'].clear() self.axs['Shod'].clear() self.axs['Quit'].clear() self.ppm = ppk.PickPhaseMenu(self.gsac, self.opts, self.axs) if self.opts.pick_on: self.ppm.axpp.get_legend().set_visible(False) <DeepExtract> colorwave = self.opts.pppara.colorwave stkybase = 0 ppstk = ppk.PickPhase(self.gsac.stkdh, self.opts, self.axstk, stkybase, colorwave, 1) ppstk.plotPicks() ppstk.disconnectPick() self.ppstk = ppstk self.axstk.set_title('Array Stack') self.ppstk.stalabel.set_visible(False) if self.opts.ynorm == 1.0: self.axstk.set_ylim(stkybase - 0.5, stkybase + 0.5) self.axstk.set_yticks([stkybase]) self.axstk.set_yticklabels([]) self.axstk.axvline(x=0, color='k', ls=':') pppara = self.opts.pppara putil.pickLegend(self.axstk, pppara.npick, pppara.pickcolors, pppara.pickstyles, False) self.plotSpan() </DeepExtract> self.ppm.axpp.figure.canvas.draw() self.axstk.figure.canvas.draw() self.bnorder.disconnect(self.cidorder) self.bnunapply.disconnect(self.cidunapply) self.bnband.disconnect(self.cidband) self.filterAxs['amVfreq'].figure.canvas.mpl_disconnect(self.cidSelectFreq) plt.close()
def applyFilter(self, event): self.opts.filterParameters['apply'] = True self.axstk.clear() self.ppm.axpp.clear() self.axs['Fron'].clear() self.axs['Prev'].clear() self.axs['Next'].clear() self.axs['Last'].clear() self.axs['Zoba'].clear() self.axs['Shdo'].clear() self.axs['Shfp'].clear() self.axs['Shod'].clear() self.axs['Quit'].clear() self.ppm = ppk.PickPhaseMenu(self.gsac, self.opts, self.axs) if self.opts.pick_on: self.ppm.axpp.get_legend().set_visible(False) colorwave = self.opts.pppara.colorwave stkybase = 0 ppstk = ppk.PickPhase(self.gsac.stkdh, self.opts, self.axstk, stkybase, colorwave, 1) ppstk.plotPicks() ppstk.disconnectPick() self.ppstk = ppstk self.axstk.set_title('Array Stack') self.ppstk.stalabel.set_visible(False) if self.opts.ynorm == 1.0: self.axstk.set_ylim(stkybase - 0.5, stkybase + 0.5) self.axstk.set_yticks([stkybase]) self.axstk.set_yticklabels([]) self.axstk.axvline(x=0, color='k', ls=':') pppara = self.opts.pppara putil.pickLegend(self.axstk, pppara.npick, pppara.pickcolors, pppara.pickstyles, False) self.plotSpan() self.ppm.axpp.figure.canvas.draw() self.axstk.figure.canvas.draw() self.bnorder.disconnect(self.cidorder) self.bnunapply.disconnect(self.cidunapply) self.bnband.disconnect(self.cidband) self.filterAxs['amVfreq'].figure.canvas.mpl_disconnect(self.cidSelectFreq) plt.close()
aimbat
positive
def destroy_team(context: Context, team_context: TeamContext) -> None: eks_stack_name: str = f'eksctl-orbit-{context.name}-cluster' _logger.debug('EKSCTL stack name: %s', eks_stack_name) if cfn.does_stack_exist(stack_name=eks_stack_name) and team_context.team_helm_repository: kubectl.write_kubeconfig(context=context) <DeepExtract> try: _logger.debug('Uninstalling all charts in namespace %s', team_context.name) sh.run(f'./uninstall_namespace_charts.sh {team_context.name}', cwd=CHARTS_PATH) except exceptions.FailedShellCommand as e: _logger.error(e) </DeepExtract> if team_context.team_helm_repository: sh.run(f'aws s3 rm --recursive {team_context.team_helm_repository}') if team_context.user_helm_repository: sh.run(f'aws s3 rm --recursive {team_context.user_helm_repository}')
def destroy_team(context: Context, team_context: TeamContext) -> None: eks_stack_name: str = f'eksctl-orbit-{context.name}-cluster' _logger.debug('EKSCTL stack name: %s', eks_stack_name) if cfn.does_stack_exist(stack_name=eks_stack_name) and team_context.team_helm_repository: kubectl.write_kubeconfig(context=context) try: _logger.debug('Uninstalling all charts in namespace %s', team_context.name) sh.run(f'./uninstall_namespace_charts.sh {team_context.name}', cwd=CHARTS_PATH) except exceptions.FailedShellCommand as e: _logger.error(e) if team_context.team_helm_repository: sh.run(f'aws s3 rm --recursive {team_context.team_helm_repository}') if team_context.user_helm_repository: sh.run(f'aws s3 rm --recursive {team_context.user_helm_repository}')
aws-orbit-workbench
positive
def draw_gene_with_introns(ax, bed, ypos, rgb, edgecolor): """ draws a gene like in flybase gbrowse. """ from matplotlib.patches import Polygon properties = self.properties height = float(properties['interval_height']) if bed.block_count == 0 and bed.thick_start == bed.start and (bed.thick_end == bed.end): <DeepExtract> from matplotlib.patches import Polygon properties = self.properties height = float(properties['interval_height']) if bed.strand not in ['+', '-']: ax.add_patch(Rectangle((bed.start, ypos), bed.end - bed.start, height, edgecolor=edgecolor, facecolor=rgb, linewidth=0.5)) else: vertices = self.draw_arrow(ax, bed.start, bed.end, bed.strand, ypos) ax.add_patch(Polygon(vertices, closed=True, fill=True, edgecolor=edgecolor, facecolor=rgb, linewidth=0.5)) </DeepExtract> return half_height = height / 2 quarter_height = height / 4 three_quarter_height = quarter_height * 3 ax.plot([bed.start, bed.end], [ypos + half_height, ypos + half_height], 'black', linewidth=0.5, zorder=-1) for idx in range(bed.block_count): x0 = bed.start + bed.block_starts[idx] x1 = x0 + bed.block_sizes[idx] if x1 < bed.thick_start or x0 > bed.thick_end: y0 = ypos + quarter_height y1 = ypos + three_quarter_height else: y0 = ypos y1 = ypos + height if x0 < bed.thick_start < x1: vertices = [(x0, ypos + quarter_height), (x0, ypos + three_quarter_height), (bed.thick_start, ypos + three_quarter_height), (bed.thick_start, ypos + height), (bed.thick_start, ypos + height), (x1, ypos + height), (x1, ypos), (bed.thick_start, ypos), (bed.thick_start, ypos + quarter_height)] elif x0 < bed.thick_end < x1: vertices = [(x0, ypos), (x0, ypos + height), (bed.thick_end, ypos + height), (bed.thick_end, ypos + three_quarter_height), (x1, ypos + three_quarter_height), (x1, ypos + quarter_height), (bed.thick_end, ypos + quarter_height), (bed.thick_end, ypos)] else: vertices = [(x0, y0), (x0, y1), (x1, y1), (x1, y0)] ax.add_patch(Polygon(vertices, closed=True, fill=True, linewidth=0.1, edgecolor='none', facecolor=rgb)) if idx < bed.block_count - 1: intron_length = bed.block_starts[idx + 1] - (bed.block_starts[idx] + bed.block_sizes[idx]) marker = 5 if bed.strand == '+' else 4 if intron_length > 3 * self.small_relative: pos = np.arange(x1 + 1 * self.small_relative, x1 + intron_length + self.small_relative, int(2 * self.small_relative)) ax.plot(pos, np.zeros(len(pos)) + ypos + half_height, '.', marker=marker, fillstyle='none', color='blue', markersize=3) elif intron_length > self.small_relative: intron_center = x1 + int(intron_length) / 2 ax.plot([intron_center], [ypos + half_height], '.', marker=5, fillstyle='none', color='blue', markersize=3)
def draw_gene_with_introns(ax, bed, ypos, rgb, edgecolor): """ draws a gene like in flybase gbrowse. """ from matplotlib.patches import Polygon properties = self.properties height = float(properties['interval_height']) if bed.block_count == 0 and bed.thick_start == bed.start and (bed.thick_end == bed.end): from matplotlib.patches import Polygon properties = self.properties height = float(properties['interval_height']) if bed.strand not in ['+', '-']: ax.add_patch(Rectangle((bed.start, ypos), bed.end - bed.start, height, edgecolor=edgecolor, facecolor=rgb, linewidth=0.5)) else: vertices = self.draw_arrow(ax, bed.start, bed.end, bed.strand, ypos) ax.add_patch(Polygon(vertices, closed=True, fill=True, edgecolor=edgecolor, facecolor=rgb, linewidth=0.5)) return half_height = height / 2 quarter_height = height / 4 three_quarter_height = quarter_height * 3 ax.plot([bed.start, bed.end], [ypos + half_height, ypos + half_height], 'black', linewidth=0.5, zorder=-1) for idx in range(bed.block_count): x0 = bed.start + bed.block_starts[idx] x1 = x0 + bed.block_sizes[idx] if x1 < bed.thick_start or x0 > bed.thick_end: y0 = ypos + quarter_height y1 = ypos + three_quarter_height else: y0 = ypos y1 = ypos + height if x0 < bed.thick_start < x1: vertices = [(x0, ypos + quarter_height), (x0, ypos + three_quarter_height), (bed.thick_start, ypos + three_quarter_height), (bed.thick_start, ypos + height), (bed.thick_start, ypos + height), (x1, ypos + height), (x1, ypos), (bed.thick_start, ypos), (bed.thick_start, ypos + quarter_height)] elif x0 < bed.thick_end < x1: vertices = [(x0, ypos), (x0, ypos + height), (bed.thick_end, ypos + height), (bed.thick_end, ypos + three_quarter_height), (x1, ypos + three_quarter_height), (x1, ypos + quarter_height), (bed.thick_end, ypos + quarter_height), (bed.thick_end, ypos)] else: vertices = [(x0, y0), (x0, y1), (x1, y1), (x1, y0)] ax.add_patch(Polygon(vertices, closed=True, fill=True, linewidth=0.1, edgecolor='none', facecolor=rgb)) if idx < bed.block_count - 1: intron_length = bed.block_starts[idx + 1] - (bed.block_starts[idx] + bed.block_sizes[idx]) marker = 5 if bed.strand == '+' else 4 if intron_length > 3 * self.small_relative: pos = np.arange(x1 + 1 * self.small_relative, x1 + intron_length + self.small_relative, int(2 * self.small_relative)) ax.plot(pos, np.zeros(len(pos)) + ypos + half_height, '.', marker=marker, fillstyle='none', color='blue', markersize=3) elif intron_length > self.small_relative: intron_center = x1 + int(intron_length) / 2 ax.plot([intron_center], [ypos + half_height], '.', marker=5, fillstyle='none', color='blue', markersize=3)
CoolBox
positive
def __len__(self): if self.count is None: if self.objects is not None: self.count = len(self.objects) else: with self.backend.transaction(): <DeepExtract> s = self.get_bare_select(columns=[self.table.c.pk]) count_select = select([func.count()]).select_from(s.alias()) count_select = count_select </DeepExtract> result = self.backend.connection.execute(count_select) self.count = result.first()[0] result.close() return self.count
def __len__(self): if self.count is None: if self.objects is not None: self.count = len(self.objects) else: with self.backend.transaction(): s = self.get_bare_select(columns=[self.table.c.pk]) count_select = select([func.count()]).select_from(s.alias()) count_select = count_select result = self.backend.connection.execute(count_select) self.count = result.first()[0] result.close() return self.count
blitzdb
positive
def initialize(self, opt): BaseModel.initialize(self, opt) if opt.resize_or_crop != 'none' or not opt.isTrain: torch.backends.cudnn.benchmark = True self.isTrain = opt.isTrain input_nc = opt.label_nc if opt.label_nc != 0 else opt.input_nc netG_input_nc = input_nc self.netG = networks.define_G(netG_input_nc, opt.output_nc, opt.ngf, opt.netG, opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers, opt.n_blocks_local, opt.norm, gpu_ids=self.gpu_ids) if self.isTrain: use_sigmoid = opt.no_lsgan netD_input_nc = input_nc + opt.output_nc netB_input_nc = opt.output_nc * 2 self.netD = networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm, use_sigmoid, opt.num_D, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids) self.netB = networks.define_B(netB_input_nc, opt.output_nc, 32, 3, 3, opt.norm, gpu_ids=self.gpu_ids) if self.opt.verbose: print('---------- Networks initialized -------------') if not self.isTrain or opt.continue_train or opt.load_pretrain: pretrained_path = '' if not self.isTrain else opt.load_pretrain print(pretrained_path) self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path) if self.isTrain: self.load_network(self.netB, 'B', opt.which_epoch, pretrained_path) self.load_network(self.netD, 'D', opt.which_epoch, pretrained_path) if self.isTrain: if opt.pool_size > 0 and len(self.gpu_ids) > 1: raise NotImplementedError('Fake Pool Not Implemented for MultiGPU') self.fake_pool = ImagePool(opt.pool_size) self.old_lr = opt.lr <DeepExtract> flags = (True, not opt.no_ganFeat_loss, not opt.no_vgg_loss, True, not opt.no_ganFeat_loss, not opt.no_vgg_loss, True, True, True, True) def loss_filter(g_gan, g_gan_feat, g_vgg, gb_gan, gb_gan_feat, gb_vgg, d_real, d_fake, d_blend): self.loss_filter = [l for (l, f) in zip((g_gan, g_gan_feat, g_vgg, gb_gan, gb_gan_feat, gb_vgg, d_real, d_fake, d_blend), flags) if f] self.loss_filter = loss_filter </DeepExtract> self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor) self.criterionFeat = torch.nn.L1Loss() if not opt.no_vgg_loss: self.criterionVGG = networks.VGGLoss(self.gpu_ids) <DeepExtract> self.loss_names = [l for (l, f) in zip(('G_GAN', 'G_GAN_Feat', 'G_VGG', 'GB_GAN', 'GB_GAN_Feat', 'GB_VGG', 'D_real', 'D_fake', 'D_blend'), flags) if f] </DeepExtract> if opt.niter_fix_global > 0: import sys if sys.version_info >= (3, 0): finetune_list = set() else: from sets import Set finetune_list = Set() params_dict = dict(self.netG.named_parameters()) params = [] for (key, value) in params_dict.items(): if key.startswith('model' + str(opt.n_local_enhancers)): params += [value] finetune_list.add(key.split('.')[0]) print('------------- Only training the local enhancer network (for %d epochs) ------------' % opt.niter_fix_global) print('The layers that are finetuned are ', sorted(finetune_list)) else: params = list(self.netG.parameters()) self.optimizer_G = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999)) params = list(self.netD.parameters()) self.optimizer_D = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999)) params = list(self.netG.parameters()) + list(self.netB.parameters()) self.optimizer_GB = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
def initialize(self, opt): BaseModel.initialize(self, opt) if opt.resize_or_crop != 'none' or not opt.isTrain: torch.backends.cudnn.benchmark = True self.isTrain = opt.isTrain input_nc = opt.label_nc if opt.label_nc != 0 else opt.input_nc netG_input_nc = input_nc self.netG = networks.define_G(netG_input_nc, opt.output_nc, opt.ngf, opt.netG, opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers, opt.n_blocks_local, opt.norm, gpu_ids=self.gpu_ids) if self.isTrain: use_sigmoid = opt.no_lsgan netD_input_nc = input_nc + opt.output_nc netB_input_nc = opt.output_nc * 2 self.netD = networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm, use_sigmoid, opt.num_D, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids) self.netB = networks.define_B(netB_input_nc, opt.output_nc, 32, 3, 3, opt.norm, gpu_ids=self.gpu_ids) if self.opt.verbose: print('---------- Networks initialized -------------') if not self.isTrain or opt.continue_train or opt.load_pretrain: pretrained_path = '' if not self.isTrain else opt.load_pretrain print(pretrained_path) self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path) if self.isTrain: self.load_network(self.netB, 'B', opt.which_epoch, pretrained_path) self.load_network(self.netD, 'D', opt.which_epoch, pretrained_path) if self.isTrain: if opt.pool_size > 0 and len(self.gpu_ids) > 1: raise NotImplementedError('Fake Pool Not Implemented for MultiGPU') self.fake_pool = ImagePool(opt.pool_size) self.old_lr = opt.lr flags = (True, not opt.no_ganFeat_loss, not opt.no_vgg_loss, True, not opt.no_ganFeat_loss, not opt.no_vgg_loss, True, True, True, True) def loss_filter(g_gan, g_gan_feat, g_vgg, gb_gan, gb_gan_feat, gb_vgg, d_real, d_fake, d_blend): self.loss_filter = [l for (l, f) in zip((g_gan, g_gan_feat, g_vgg, gb_gan, gb_gan_feat, gb_vgg, d_real, d_fake, d_blend), flags) if f] self.loss_filter = loss_filter self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor) self.criterionFeat = torch.nn.L1Loss() if not opt.no_vgg_loss: self.criterionVGG = networks.VGGLoss(self.gpu_ids) self.loss_names = [l for (l, f) in zip(('G_GAN', 'G_GAN_Feat', 'G_VGG', 'GB_GAN', 'GB_GAN_Feat', 'GB_VGG', 'D_real', 'D_fake', 'D_blend'), flags) if f] if opt.niter_fix_global > 0: import sys if sys.version_info >= (3, 0): finetune_list = set() else: from sets import Set finetune_list = Set() params_dict = dict(self.netG.named_parameters()) params = [] for (key, value) in params_dict.items(): if key.startswith('model' + str(opt.n_local_enhancers)): params += [value] finetune_list.add(key.split('.')[0]) print('------------- Only training the local enhancer network (for %d epochs) ------------' % opt.niter_fix_global) print('The layers that are finetuned are ', sorted(finetune_list)) else: params = list(self.netG.parameters()) self.optimizer_G = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999)) params = list(self.netD.parameters()) self.optimizer_D = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999)) params = list(self.netG.parameters()) + list(self.netB.parameters()) self.optimizer_GB = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
dualFace
positive
@arg.project @arg.email def project__user_remove(self) -> None: """Remove a user from the project""" <DeepExtract> if getattr(self.args, 'project', None) and self.args.project: project_name = self.args.project default_project = self.config.get('default_project', '') if raise_if_none and (not default_project): raise argx.UserError('Specify project: use --project in the command line or the default_project item in the config file.') project_name = default_project </DeepExtract> try: self.client.remove_project_user(project=project_name, user_email=self.args.email) except client.Error as ex: print(ex.response.text) raise argx.UserError("Project '{}' removal of user {} failed".format(project_name, self.args.email)) self.log.info('Removed %r from project %r', self.args.email, project_name)
@arg.project @arg.email def project__user_remove(self) -> None: """Remove a user from the project""" if getattr(self.args, 'project', None) and self.args.project: project_name = self.args.project default_project = self.config.get('default_project', '') if raise_if_none and (not default_project): raise argx.UserError('Specify project: use --project in the command line or the default_project item in the config file.') project_name = default_project try: self.client.remove_project_user(project=project_name, user_email=self.args.email) except client.Error as ex: print(ex.response.text) raise argx.UserError("Project '{}' removal of user {} failed".format(project_name, self.args.email)) self.log.info('Removed %r from project %r', self.args.email, project_name)
aiven-client
positive
def test_item_attachments(self): <DeepExtract> item_kwargs = self.get_random_insert_kwargs() item_kwargs['categories'] = categories or self.categories item = self.ITEM_CLASS(folder=self.test_folder or self.test_folder, **item_kwargs) </DeepExtract> item.attachments = [] <DeepExtract> item_kwargs = self.get_random_insert_kwargs() item_kwargs['categories'] = categories or self.categories attached_item1 = self.ITEM_CLASS(folder=self.test_folder or self.test_folder, **item_kwargs) </DeepExtract> attached_item1.attachments = [] if hasattr(attached_item1, 'is_all_day'): attached_item1.is_all_day = False attached_item1.save() attachment1 = ItemAttachment(name='attachment1', item=attached_item1) item.attach(attachment1) self.assertEqual(len(item.attachments), 1) item.save() fresh_item = list(self.account.fetch(ids=[item]))[0] self.assertEqual(len(fresh_item.attachments), 1) fresh_attachments = sorted(fresh_item.attachments, key=lambda a: a.name) self.assertEqual(fresh_attachments[0].name, 'attachment1') self.assertIsInstance(fresh_attachments[0].item, self.ITEM_CLASS) for f in self.ITEM_CLASS.FIELDS: if f.is_read_only: continue if self.ITEM_CLASS == CalendarItem and f in CalendarItem.timezone_fields(): continue if isinstance(f, ExtendedPropertyField): continue if f.name == 'is_read': continue if f.name == 'reminder_due_by': continue if f.name == 'mime_content': continue old_val = getattr(attached_item1, f.name) new_val = getattr(fresh_attachments[0].item, f.name) if f.is_list: (old_val, new_val) = (set(old_val or ()), set(new_val or ())) self.assertEqual(old_val, new_val, (f.name, old_val, new_val)) <DeepExtract> item_kwargs = self.get_random_insert_kwargs() item_kwargs['categories'] = categories or self.categories attached_item2 = self.ITEM_CLASS(folder=self.test_folder or self.test_folder, **item_kwargs) </DeepExtract> attached_item2.attachments = [] if hasattr(attached_item2, 'is_all_day'): attached_item2.is_all_day = False attached_item2.save() attachment2 = ItemAttachment(name='attachment2', item=attached_item2) item.attach(attachment2) self.assertEqual(len(item.attachments), 2) fresh_item = list(self.account.fetch(ids=[item]))[0] self.assertEqual(len(fresh_item.attachments), 2) fresh_attachments = sorted(fresh_item.attachments, key=lambda a: a.name) self.assertEqual(fresh_attachments[0].name, 'attachment1') self.assertIsInstance(fresh_attachments[0].item, self.ITEM_CLASS) for f in self.ITEM_CLASS.FIELDS: if f.is_read_only: continue if self.ITEM_CLASS == CalendarItem and f in CalendarItem.timezone_fields(): continue if isinstance(f, ExtendedPropertyField): continue if f.name == 'reminder_due_by': continue if f.name == 'is_read': continue if f.name == 'mime_content': continue old_val = getattr(attached_item1, f.name) new_val = getattr(fresh_attachments[0].item, f.name) if f.is_list: (old_val, new_val) = (set(old_val or ()), set(new_val or ())) self.assertEqual(old_val, new_val, (f.name, old_val, new_val)) self.assertEqual(fresh_attachments[1].name, 'attachment2') self.assertIsInstance(fresh_attachments[1].item, self.ITEM_CLASS) for f in self.ITEM_CLASS.FIELDS: if f.is_read_only: continue if self.ITEM_CLASS == CalendarItem and f in CalendarItem.timezone_fields(): continue if isinstance(f, ExtendedPropertyField): continue if f.name == 'reminder_due_by': continue if f.name == 'is_read': continue if f.name == 'mime_content': continue old_val = getattr(attached_item2, f.name) new_val = getattr(fresh_attachments[1].item, f.name) if f.is_list: (old_val, new_val) = (set(old_val or ()), set(new_val or 
())) self.assertEqual(old_val, new_val, (f.name, old_val, new_val)) item.detach(attachment2) self.assertTrue(attachment2.attachment_id is None) self.assertTrue(attachment2.parent_item is None) fresh_item = list(self.account.fetch(ids=[item]))[0] self.assertEqual(len(fresh_item.attachments), 1) fresh_attachments = sorted(fresh_item.attachments, key=lambda a: a.name) for f in self.ITEM_CLASS.FIELDS: if f.is_read_only: continue if self.ITEM_CLASS == CalendarItem and f in CalendarItem.timezone_fields(): continue if isinstance(f, ExtendedPropertyField): continue if f.name == 'reminder_due_by': continue if f.name == 'is_read': continue if f.name == 'mime_content': continue old_val = getattr(attached_item1, f.name) new_val = getattr(fresh_attachments[0].item, f.name) if f.is_list: (old_val, new_val) = (set(old_val or ()), set(new_val or ())) self.assertEqual(old_val, new_val, (f.name, old_val, new_val)) <DeepExtract> item_kwargs = self.get_random_insert_kwargs() item_kwargs['categories'] = categories or self.categories attached_item3 = self.ITEM_CLASS(folder=self.test_folder or self.test_folder, **item_kwargs) </DeepExtract> attached_item3.attachments = [] if hasattr(attached_item3, 'is_all_day'): attached_item3.is_all_day = False attachment3 = ItemAttachment(name='attachment2', item=attached_item3) item.attach(attachment3) item.detach(attachment3)
def test_item_attachments(self): item_kwargs = self.get_random_insert_kwargs() item_kwargs['categories'] = categories or self.categories item = self.ITEM_CLASS(folder=self.test_folder or self.test_folder, **item_kwargs) item.attachments = [] item_kwargs = self.get_random_insert_kwargs() item_kwargs['categories'] = categories or self.categories attached_item1 = self.ITEM_CLASS(folder=self.test_folder or self.test_folder, **item_kwargs) attached_item1.attachments = [] if hasattr(attached_item1, 'is_all_day'): attached_item1.is_all_day = False attached_item1.save() attachment1 = ItemAttachment(name='attachment1', item=attached_item1) item.attach(attachment1) self.assertEqual(len(item.attachments), 1) item.save() fresh_item = list(self.account.fetch(ids=[item]))[0] self.assertEqual(len(fresh_item.attachments), 1) fresh_attachments = sorted(fresh_item.attachments, key=lambda a: a.name) self.assertEqual(fresh_attachments[0].name, 'attachment1') self.assertIsInstance(fresh_attachments[0].item, self.ITEM_CLASS) for f in self.ITEM_CLASS.FIELDS: if f.is_read_only: continue if self.ITEM_CLASS == CalendarItem and f in CalendarItem.timezone_fields(): continue if isinstance(f, ExtendedPropertyField): continue if f.name == 'is_read': continue if f.name == 'reminder_due_by': continue if f.name == 'mime_content': continue old_val = getattr(attached_item1, f.name) new_val = getattr(fresh_attachments[0].item, f.name) if f.is_list: (old_val, new_val) = (set(old_val or ()), set(new_val or ())) self.assertEqual(old_val, new_val, (f.name, old_val, new_val)) item_kwargs = self.get_random_insert_kwargs() item_kwargs['categories'] = categories or self.categories attached_item2 = self.ITEM_CLASS(folder=self.test_folder or self.test_folder, **item_kwargs) attached_item2.attachments = [] if hasattr(attached_item2, 'is_all_day'): attached_item2.is_all_day = False attached_item2.save() attachment2 = ItemAttachment(name='attachment2', item=attached_item2) item.attach(attachment2) self.assertEqual(len(item.attachments), 2) fresh_item = list(self.account.fetch(ids=[item]))[0] self.assertEqual(len(fresh_item.attachments), 2) fresh_attachments = sorted(fresh_item.attachments, key=lambda a: a.name) self.assertEqual(fresh_attachments[0].name, 'attachment1') self.assertIsInstance(fresh_attachments[0].item, self.ITEM_CLASS) for f in self.ITEM_CLASS.FIELDS: if f.is_read_only: continue if self.ITEM_CLASS == CalendarItem and f in CalendarItem.timezone_fields(): continue if isinstance(f, ExtendedPropertyField): continue if f.name == 'reminder_due_by': continue if f.name == 'is_read': continue if f.name == 'mime_content': continue old_val = getattr(attached_item1, f.name) new_val = getattr(fresh_attachments[0].item, f.name) if f.is_list: (old_val, new_val) = (set(old_val or ()), set(new_val or ())) self.assertEqual(old_val, new_val, (f.name, old_val, new_val)) self.assertEqual(fresh_attachments[1].name, 'attachment2') self.assertIsInstance(fresh_attachments[1].item, self.ITEM_CLASS) for f in self.ITEM_CLASS.FIELDS: if f.is_read_only: continue if self.ITEM_CLASS == CalendarItem and f in CalendarItem.timezone_fields(): continue if isinstance(f, ExtendedPropertyField): continue if f.name == 'reminder_due_by': continue if f.name == 'is_read': continue if f.name == 'mime_content': continue old_val = getattr(attached_item2, f.name) new_val = getattr(fresh_attachments[1].item, f.name) if f.is_list: (old_val, new_val) = (set(old_val or ()), set(new_val or ())) self.assertEqual(old_val, new_val, (f.name, old_val, new_val)) 
item.detach(attachment2) self.assertTrue(attachment2.attachment_id is None) self.assertTrue(attachment2.parent_item is None) fresh_item = list(self.account.fetch(ids=[item]))[0] self.assertEqual(len(fresh_item.attachments), 1) fresh_attachments = sorted(fresh_item.attachments, key=lambda a: a.name) for f in self.ITEM_CLASS.FIELDS: if f.is_read_only: continue if self.ITEM_CLASS == CalendarItem and f in CalendarItem.timezone_fields(): continue if isinstance(f, ExtendedPropertyField): continue if f.name == 'reminder_due_by': continue if f.name == 'is_read': continue if f.name == 'mime_content': continue old_val = getattr(attached_item1, f.name) new_val = getattr(fresh_attachments[0].item, f.name) if f.is_list: (old_val, new_val) = (set(old_val or ()), set(new_val or ())) self.assertEqual(old_val, new_val, (f.name, old_val, new_val)) item_kwargs = self.get_random_insert_kwargs() item_kwargs['categories'] = categories or self.categories attached_item3 = self.ITEM_CLASS(folder=self.test_folder or self.test_folder, **item_kwargs) attached_item3.attachments = [] if hasattr(attached_item3, 'is_all_day'): attached_item3.is_all_day = False attachment3 = ItemAttachment(name='attachment2', item=attached_item3) item.attach(attachment3) item.detach(attachment3)
exchangelib
positive
def delete_eni(connection, module): <DeepExtract> if eni: if 'NetworkInterfaceId' in eni: eni_id = eni['NetworkInterfaceId'] else: eni_id = None else: eni_id = module.params.get('eni_id') private_ip_address = module.params.get('private_ip_address') subnet_id = module.params.get('subnet_id') instance_id = module.params.get('instance_id') device_index = module.params.get('device_index') attached = module.params.get('attached') name = module.params.get('name') filters = [] if eni_id is None and private_ip_address is None and (instance_id is None and device_index is None): eni = None if eni_id: filters.append({'Name': 'network-interface-id', 'Values': [eni_id]}) if private_ip_address and subnet_id and (not filters): filters.append({'Name': 'private-ip-address', 'Values': [private_ip_address]}) filters.append({'Name': 'subnet-id', 'Values': [subnet_id]}) if not attached and instance_id and device_index and (not filters): filters.append({'Name': 'attachment.instance-id', 'Values': [instance_id]}) filters.append({'Name': 'attachment.device-index', 'Values': [str(device_index)]}) if name and subnet_id and (not filters): filters.append({'Name': 'tag:Name', 'Values': [name]}) filters.append({'Name': 'subnet-id', 'Values': [subnet_id]}) if not filters: eni = None try: eni_result = connection.describe_network_interfaces(aws_retry=True, Filters=filters)['NetworkInterfaces'] if len(eni_result) == 1: eni = eni_result[0] else: eni = None except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, 'Failed to find unique eni with filters: {0}'.format(filters)) eni = None </DeepExtract> if not eni: module.exit_json(changed=False) if module.check_mode: module.exit_json(changed=True, msg='Would have deleted ENI if not in check mode.') eni_id = eni['NetworkInterfaceId'] force_detach = module.params.get('force_detach') try: if force_detach is True: if 'Attachment' in eni: connection.detach_network_interface(aws_retry=True, AttachmentId=eni['Attachment']['AttachmentId'], Force=True) <DeepExtract> try: get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id], WaiterConfig={'Delay': 5, 'MaxAttempts': 80}) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, 'Timeout waiting for ENI {0} to detach'.format(eni_id)) </DeepExtract> connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) changed = True else: connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) changed = True module.exit_json(changed=changed) except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'): module.exit_json(changed=False) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, 'Failure during delete of {0}'.format(eni_id))
def delete_eni(connection, module): if eni: if 'NetworkInterfaceId' in eni: eni_id = eni['NetworkInterfaceId'] else: eni_id = None else: eni_id = module.params.get('eni_id') private_ip_address = module.params.get('private_ip_address') subnet_id = module.params.get('subnet_id') instance_id = module.params.get('instance_id') device_index = module.params.get('device_index') attached = module.params.get('attached') name = module.params.get('name') filters = [] if eni_id is None and private_ip_address is None and (instance_id is None and device_index is None): eni = None if eni_id: filters.append({'Name': 'network-interface-id', 'Values': [eni_id]}) if private_ip_address and subnet_id and (not filters): filters.append({'Name': 'private-ip-address', 'Values': [private_ip_address]}) filters.append({'Name': 'subnet-id', 'Values': [subnet_id]}) if not attached and instance_id and device_index and (not filters): filters.append({'Name': 'attachment.instance-id', 'Values': [instance_id]}) filters.append({'Name': 'attachment.device-index', 'Values': [str(device_index)]}) if name and subnet_id and (not filters): filters.append({'Name': 'tag:Name', 'Values': [name]}) filters.append({'Name': 'subnet-id', 'Values': [subnet_id]}) if not filters: eni = None try: eni_result = connection.describe_network_interfaces(aws_retry=True, Filters=filters)['NetworkInterfaces'] if len(eni_result) == 1: eni = eni_result[0] else: eni = None except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, 'Failed to find unique eni with filters: {0}'.format(filters)) eni = None if not eni: module.exit_json(changed=False) if module.check_mode: module.exit_json(changed=True, msg='Would have deleted ENI if not in check mode.') eni_id = eni['NetworkInterfaceId'] force_detach = module.params.get('force_detach') try: if force_detach is True: if 'Attachment' in eni: connection.detach_network_interface(aws_retry=True, AttachmentId=eni['Attachment']['AttachmentId'], Force=True) try: get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id], WaiterConfig={'Delay': 5, 'MaxAttempts': 80}) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, 'Timeout waiting for ENI {0} to detach'.format(eni_id)) connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) changed = True else: connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) changed = True module.exit_json(changed=changed) except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'): module.exit_json(changed=False) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, 'Failure during delete of {0}'.format(eni_id))
amazon.aws
positive
def test_api_get_subject_versions(kafka_cluster, load_file): """ Registers a Schema with a subject, lists the versions associated with that subject and ensures the versions and their schemas match what was registered. Args: kafka_cluster (KafkaClusterFixture): Kafka Cluster fixture load_file (callable(str)): Schema fixture constructor. """ sr = kafka_cluster.schema_registry() <DeepExtract> subject = 'list-version-test' + '-' + str(uuid1()) </DeepExtract> sr.set_compatibility(level='NONE') avscs = ['basic_schema.avsc', 'primitive_string.avsc', 'primitive_bool.avsc', 'primitive_float.avsc'] schemas = [] for avsc in avscs: schema = Schema(load_file(avsc), schema_type='AVRO') schemas.append(schema) sr.register_schema(subject, schema) versions = sr.get_versions(subject) assert len(versions) == len(avscs) for schema in schemas: registered_schema = sr.lookup_schema(subject, schema) assert registered_schema.subject == subject assert registered_schema.version in versions sr.set_compatibility(level='BACKWARD')
def test_api_get_subject_versions(kafka_cluster, load_file): """ Registers a Schema with a subject, lists the versions associated with that subject and ensures the versions and their schemas match what was registered. Args: kafka_cluster (KafkaClusterFixture): Kafka Cluster fixture load_file (callable(str)): Schema fixture constructor. """ sr = kafka_cluster.schema_registry() subject = 'list-version-test' + '-' + str(uuid1()) sr.set_compatibility(level='NONE') avscs = ['basic_schema.avsc', 'primitive_string.avsc', 'primitive_bool.avsc', 'primitive_float.avsc'] schemas = [] for avsc in avscs: schema = Schema(load_file(avsc), schema_type='AVRO') schemas.append(schema) sr.register_schema(subject, schema) versions = sr.get_versions(subject) assert len(versions) == len(avscs) for schema in schemas: registered_schema = sr.lookup_schema(subject, schema) assert registered_schema.subject == subject assert registered_schema.version in versions sr.set_compatibility(level='BACKWARD')
confluent-kafka-python
positive
def _update_all_controls_in_list(self, controls: List[cat.Control]) -> List[cat.Control]: """ Given a list of controls, create fresh list pulled from the control dict. Args: controls: a list of controls in the original catalog Returns: The new list of updated controls, possibly with some missing if they have been removed from the dict. Children are inserted as needed into parent controls. """ new_list: List[cat.Control] = [] for control in controls: <DeepExtract> new_control = None if control.id not in self._control_dict else self._control_dict[control.id].control </DeepExtract> if new_control: <DeepExtract> new_list: List[cat.Control] = [] for control in as_list(control.controls): new_control = self.get_control(control.id) if new_control: new_control.controls = self._update_all_controls_in_list(as_list(control.controls)) new_control.controls = none_if_empty(new_control.controls) new_list.append(new_control) new_control.controls = new_list </DeepExtract> new_control.controls = none_if_empty(new_control.controls) new_list.append(new_control) return new_list
def _update_all_controls_in_list(self, controls: List[cat.Control]) -> List[cat.Control]: """ Given a list of controls, create fresh list pulled from the control dict. Args: controls: a list of controls in the original catalog Returns: The new list of updated controls, possibly with some missing if they have been removed from the dict. Children are inserted as needed into parent controls. """ new_list: List[cat.Control] = [] for control in controls: new_control = None if control.id not in self._control_dict else self._control_dict[control.id].control if new_control: new_list: List[cat.Control] = [] for control in as_list(control.controls): new_control = self.get_control(control.id) if new_control: new_control.controls = self._update_all_controls_in_list(as_list(control.controls)) new_control.controls = none_if_empty(new_control.controls) new_list.append(new_control) new_control.controls = new_list new_control.controls = none_if_empty(new_control.controls) new_list.append(new_control) return new_list
compliance-trestle
positive
def run(self, params=None): databus = conpot_core.get_databus() cmd_output = '' if params: cmd_output = '\r\nOK\r\n' if len(params) == 1 and params == '0': databus.set_value('access_control_status', 'DISABLED') elif len(params) == 1 and params == '1': databus.set_value('access_control_status', 'ENABLED') elif len(params.split(' ')) == 3: (cmd, acl_number, ip_address) = params.split(' ') if cmd == '0': <DeepExtract> databus = conpot_core.get_databus() if ip_address.count('.') == 3: if any((x in acl_number for x in ['1', '2', '3', '4', '5'])): acl_number = int(acl_number) final_ip = parse_ip(ip_address) databus.set_value('access_control_{0}'.format(acl_number), final_ip) </DeepExtract> return cmd_output + self.CMD_OUTPUT.format(access_control_status=databus.get_value('access_control_status'), access_control_1=databus.get_value('access_control_1'), access_control_2=databus.get_value('access_control_2'), access_control_3=databus.get_value('access_control_3'), access_control_4=databus.get_value('access_control_4'), access_control_5=databus.get_value('access_control_5'))
def run(self, params=None): databus = conpot_core.get_databus() cmd_output = '' if params: cmd_output = '\r\nOK\r\n' if len(params) == 1 and params == '0': databus.set_value('access_control_status', 'DISABLED') elif len(params) == 1 and params == '1': databus.set_value('access_control_status', 'ENABLED') elif len(params.split(' ')) == 3: (cmd, acl_number, ip_address) = params.split(' ') if cmd == '0': databus = conpot_core.get_databus() if ip_address.count('.') == 3: if any((x in acl_number for x in ['1', '2', '3', '4', '5'])): acl_number = int(acl_number) final_ip = parse_ip(ip_address) databus.set_value('access_control_{0}'.format(acl_number), final_ip) return cmd_output + self.CMD_OUTPUT.format(access_control_status=databus.get_value('access_control_status'), access_control_1=databus.get_value('access_control_1'), access_control_2=databus.get_value('access_control_2'), access_control_3=databus.get_value('access_control_3'), access_control_4=databus.get_value('access_control_4'), access_control_5=databus.get_value('access_control_5'))
conpot
positive
def evaluation(self): """ Admin function: Runs the whole evaluation """ aicrowd_helpers.execution_start() try: with time_limit(self.inference_setup_timeout): <DeepExtract> raise NotImplementedError </DeepExtract> except NotImplementedError: print("prediction_setup doesn't exist for this run, skipping...") aicrowd_helpers.execution_running() <DeepExtract> valid_music_names = None if self.partial_run: valid_music_names = self.partial_run.split(',') music_names = [] for folder in listdir(self.test_data_path): if not isfile(join(self.test_data_path, folder)): if valid_music_names is None or folder in valid_music_names: music_names.append(folder) music_names = music_names </DeepExtract> for music_name in music_names: with time_limit(self.inference_per_music_timeout): <DeepExtract> raise NotImplementedError </DeepExtract> if not self.verify_results(music_name): raise Exception('verification failed, demixed files not found.') aicrowd_helpers.execution_success()
def evaluation(self): """ Admin function: Runs the whole evaluation """ aicrowd_helpers.execution_start() try: with time_limit(self.inference_setup_timeout): raise NotImplementedError except NotImplementedError: print("prediction_setup doesn't exist for this run, skipping...") aicrowd_helpers.execution_running() valid_music_names = None if self.partial_run: valid_music_names = self.partial_run.split(',') music_names = [] for folder in listdir(self.test_data_path): if not isfile(join(self.test_data_path, folder)): if valid_music_names is None or folder in valid_music_names: music_names.append(folder) music_names = music_names for music_name in music_names: with time_limit(self.inference_per_music_timeout): raise NotImplementedError if not self.verify_results(music_name): raise Exception('verification failed, demixed files not found.') aicrowd_helpers.execution_success()
DNN-based_source_separation
positive
@mock.patch('drf_spectacular.settings.spectacular_settings.RENDERER_WHITELIST', [renderers.MultiPartRenderer]) @mock.patch('drf_spectacular.settings.spectacular_settings.PARSER_WHITELIST', [parsers.MultiPartParser]) def test_renderer_parser_whitelist(no_warnings): class XSerializer(serializers.Serializer): field = serializers.CharField() class XViewset(viewsets.ModelViewSet): serializer_class = XSerializer queryset = SimpleModel.objects.none() renderer_classes = [renderers.MultiPartRenderer, renderers.JSONRenderer] parser_classes = [parsers.MultiPartParser, parsers.JSONParser] schema = generate_schema('/x', XViewset) <DeepExtract> request_types = super().list(schema['paths']['/x/']['post']['requestBody']['content'].keys(), *args, **kwargs) </DeepExtract> <DeepExtract> response_types = super().list(schema['paths']['/x/']['post']['responses']['201']['content'].keys(), *args, **kwargs) </DeepExtract> assert response_types == request_types == ['multipart/form-data']
@mock.patch('drf_spectacular.settings.spectacular_settings.RENDERER_WHITELIST', [renderers.MultiPartRenderer]) @mock.patch('drf_spectacular.settings.spectacular_settings.PARSER_WHITELIST', [parsers.MultiPartParser]) def test_renderer_parser_whitelist(no_warnings): class XSerializer(serializers.Serializer): field = serializers.CharField() class XViewset(viewsets.ModelViewSet): serializer_class = XSerializer queryset = SimpleModel.objects.none() renderer_classes = [renderers.MultiPartRenderer, renderers.JSONRenderer] parser_classes = [parsers.MultiPartParser, parsers.JSONParser] schema = generate_schema('/x', XViewset) request_types = super().list(schema['paths']['/x/']['post']['requestBody']['content'].keys(), *args, **kwargs) response_types = super().list(schema['paths']['/x/']['post']['responses']['201']['content'].keys(), *args, **kwargs) assert response_types == request_types == ['multipart/form-data']
drf-spectacular
positive
def initialize_cds(self): for each in range(20): analog_name = 'A{}'.format(each) binary_name = 'B{}'.format(each) multistates_name = 'M{}'.format(each) multistates_states = 'M{}_state'.format(each) virtuals_name = 'V{}'.format(each) _temp = {analog_name: None, binary_name: None, multistates_name: None, multistates_states: None, virtuals_name: None} self._cds_struct.update(_temp) self._cds_struct.update({'index': 'index', 'time_s': 'time_s'}) <DeepExtract> def _translate_binary_values(val): if val == 'active:': df = 0 elif val == 'inactive': df = 1 elif isinstance(val, str) and ':' in val: df = int(val.split(':')[0]) else: df = val def _add_mv_states(val): try: df = val.split(':')[1] except AttributeError: df = val self._log.debug('Building dataframe') r = {} for point in self.network.trends: name = '{}/{}'.format(point.properties.device.properties.name, point.properties.name) if point.history.dtype == 'O': r[name] = point.history.apply(lambda x: _translate_binary_values(x)) else: r[name] = point.history if 'multi' in point.properties.type: _name = name + '_state' _duplicate = name + '_duplicate' r[_name] = point.history.apply(lambda x: _add_mv_states(x)) r[_duplicate] = r[name].eq(r[name].shift()) r[_name].loc[r[_duplicate]] = '' del r[_duplicate] try: df = pd.DataFrame(r) except ValueError: self._log.error('Problem with dataframe creation. {}'.format(r.keys())) self.trouble_r = r raise df = df.reset_index() df['time_s'] = df['index'].apply(str) try: df = df.fillna(method='ffill').fillna(method='bfill').replace(['inactive', 'active'], [0, 1]) except TypeError: df = df.fillna(method='ffill').fillna(method='bfill') df = df </DeepExtract> self.new_data = {} for (k, v) in self._cds_struct.items(): self.new_data[k] = None for (k, v) in self.new_data.items(): try: self.new_data[k] = df[self._cds_struct[k]].values except KeyError: self.new_data[k] = df['time_s'].values return ColumnDataSource(self.new_data)
def initialize_cds(self): for each in range(20): analog_name = 'A{}'.format(each) binary_name = 'B{}'.format(each) multistates_name = 'M{}'.format(each) multistates_states = 'M{}_state'.format(each) virtuals_name = 'V{}'.format(each) _temp = {analog_name: None, binary_name: None, multistates_name: None, multistates_states: None, virtuals_name: None} self._cds_struct.update(_temp) self._cds_struct.update({'index': 'index', 'time_s': 'time_s'}) def _translate_binary_values(val): if val == 'active:': df = 0 elif val == 'inactive': df = 1 elif isinstance(val, str) and ':' in val: df = int(val.split(':')[0]) else: df = val def _add_mv_states(val): try: df = val.split(':')[1] except AttributeError: df = val self._log.debug('Building dataframe') r = {} for point in self.network.trends: name = '{}/{}'.format(point.properties.device.properties.name, point.properties.name) if point.history.dtype == 'O': r[name] = point.history.apply(lambda x: _translate_binary_values(x)) else: r[name] = point.history if 'multi' in point.properties.type: _name = name + '_state' _duplicate = name + '_duplicate' r[_name] = point.history.apply(lambda x: _add_mv_states(x)) r[_duplicate] = r[name].eq(r[name].shift()) r[_name].loc[r[_duplicate]] = '' del r[_duplicate] try: df = pd.DataFrame(r) except ValueError: self._log.error('Problem with dataframe creation. {}'.format(r.keys())) self.trouble_r = r raise df = df.reset_index() df['time_s'] = df['index'].apply(str) try: df = df.fillna(method='ffill').fillna(method='bfill').replace(['inactive', 'active'], [0, 1]) except TypeError: df = df.fillna(method='ffill').fillna(method='bfill') df = df self.new_data = {} for (k, v) in self._cds_struct.items(): self.new_data[k] = None for (k, v) in self.new_data.items(): try: self.new_data[k] = df[self._cds_struct[k]].values except KeyError: self.new_data[k] = df['time_s'].values return ColumnDataSource(self.new_data)
BAC0
positive
@register_vcs_handler('git', 'pieces_from_vcs') def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ['git'] if sys.platform == 'win32': GITS = ['git.cmd', 'git.exe'] <DeepExtract> assert isinstance(GITS, list) p = None for c in GITS: try: dispcmd = str([c] + ['rev-parse', '--git-dir']) p = subprocess.Popen([c] + ['rev-parse', '--git-dir'], cwd=root, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE if True else None) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print('unable to run %s' % dispcmd) print(e) (out, rc) = (None, None) else: if verbose: print('unable to find command, tried %s' % (GITS,)) (out, rc) = (None, None) stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print('unable to run %s (error)' % dispcmd) print('stdout was %s' % stdout) (out, rc) = (None, p.returncode) (out, rc) = (stdout, p.returncode) </DeepExtract> if rc != 0: if verbose: print('Directory %s not under git control' % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") <DeepExtract> assert isinstance(GITS, list) p = None for c in GITS: try: dispcmd = str([c] + ['describe', '--tags', '--dirty', '--always', '--long', '--match', '%s*' % tag_prefix]) p = subprocess.Popen([c] + ['describe', '--tags', '--dirty', '--always', '--long', '--match', '%s*' % tag_prefix], cwd=root, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE if hide_stderr else None) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print('unable to run %s' % dispcmd) print(e) (describe_out, rc) = (None, None) else: if verbose: print('unable to find command, tried %s' % (GITS,)) (describe_out, rc) = (None, None) stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print('unable to run %s (error)' % dispcmd) print('stdout was %s' % stdout) (describe_out, rc) = (None, p.returncode) (describe_out, rc) = (stdout, p.returncode) </DeepExtract> if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() <DeepExtract> assert isinstance(GITS, list) p = None for c in GITS: try: dispcmd = str([c] + ['rev-parse', 'HEAD']) p = subprocess.Popen([c] + ['rev-parse', 'HEAD'], cwd=root, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE if hide_stderr else None) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print('unable to run %s' % dispcmd) print(e) (full_out, rc) = (None, None) else: if verbose: print('unable to find command, tried %s' % (GITS,)) (full_out, rc) = (None, None) stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print('unable to run %s (error)' % dispcmd) print('stdout was %s' % stdout) (full_out, rc) = (None, p.returncode) (full_out, rc) = (stdout, p.returncode) </DeepExtract> if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces['long'] = full_out pieces['short'] = full_out[:7] pieces['error'] = None git_describe = describe_out dirty = 
git_describe.endswith('-dirty') pieces['dirty'] = dirty if dirty: git_describe = git_describe[:git_describe.rindex('-dirty')] if '-' in git_describe: mo = re.search('^(.+)-(\\d+)-g([0-9a-f]+)$', git_describe) if not mo: pieces['error'] = "unable to parse git-describe output: '%s'" % describe_out return pieces full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces['error'] = "tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix) return pieces pieces['closest-tag'] = full_tag[len(tag_prefix):] pieces['distance'] = int(mo.group(2)) pieces['short'] = mo.group(3) else: pieces['closest-tag'] = None <DeepExtract> assert isinstance(GITS, list) p = None for c in GITS: try: dispcmd = str([c] + ['rev-list', 'HEAD', '--count']) p = subprocess.Popen([c] + ['rev-list', 'HEAD', '--count'], cwd=root, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE if hide_stderr else None) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print('unable to run %s' % dispcmd) print(e) (count_out, rc) = (None, None) else: if verbose: print('unable to find command, tried %s' % (GITS,)) (count_out, rc) = (None, None) stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print('unable to run %s (error)' % dispcmd) print('stdout was %s' % stdout) (count_out, rc) = (None, p.returncode) (count_out, rc) = (stdout, p.returncode) </DeepExtract> pieces['distance'] = int(count_out) date = run_command(GITS, ['show', '-s', '--format=%ci', 'HEAD'], cwd=root)[0].strip() pieces['date'] = date.strip().replace(' ', 'T', 1).replace(' ', '', 1) return pieces
@register_vcs_handler('git', 'pieces_from_vcs') def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ['git'] if sys.platform == 'win32': GITS = ['git.cmd', 'git.exe'] assert isinstance(GITS, list) p = None for c in GITS: try: dispcmd = str([c] + ['rev-parse', '--git-dir']) p = subprocess.Popen([c] + ['rev-parse', '--git-dir'], cwd=root, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE if True else None) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print('unable to run %s' % dispcmd) print(e) (out, rc) = (None, None) else: if verbose: print('unable to find command, tried %s' % (GITS,)) (out, rc) = (None, None) stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print('unable to run %s (error)' % dispcmd) print('stdout was %s' % stdout) (out, rc) = (None, p.returncode) (out, rc) = (stdout, p.returncode) if rc != 0: if verbose: print('Directory %s not under git control' % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") assert isinstance(GITS, list) p = None for c in GITS: try: dispcmd = str([c] + ['describe', '--tags', '--dirty', '--always', '--long', '--match', '%s*' % tag_prefix]) p = subprocess.Popen([c] + ['describe', '--tags', '--dirty', '--always', '--long', '--match', '%s*' % tag_prefix], cwd=root, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE if hide_stderr else None) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print('unable to run %s' % dispcmd) print(e) (describe_out, rc) = (None, None) else: if verbose: print('unable to find command, tried %s' % (GITS,)) (describe_out, rc) = (None, None) stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print('unable to run %s (error)' % dispcmd) print('stdout was %s' % stdout) (describe_out, rc) = (None, p.returncode) (describe_out, rc) = (stdout, p.returncode) if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() assert isinstance(GITS, list) p = None for c in GITS: try: dispcmd = str([c] + ['rev-parse', 'HEAD']) p = subprocess.Popen([c] + ['rev-parse', 'HEAD'], cwd=root, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE if hide_stderr else None) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print('unable to run %s' % dispcmd) print(e) (full_out, rc) = (None, None) else: if verbose: print('unable to find command, tried %s' % (GITS,)) (full_out, rc) = (None, None) stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print('unable to run %s (error)' % dispcmd) print('stdout was %s' % stdout) (full_out, rc) = (None, p.returncode) (full_out, rc) = (stdout, p.returncode) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces['long'] = full_out pieces['short'] = full_out[:7] pieces['error'] = None git_describe = describe_out dirty = git_describe.endswith('-dirty') pieces['dirty'] = dirty if dirty: git_describe = 
git_describe[:git_describe.rindex('-dirty')] if '-' in git_describe: mo = re.search('^(.+)-(\\d+)-g([0-9a-f]+)$', git_describe) if not mo: pieces['error'] = "unable to parse git-describe output: '%s'" % describe_out return pieces full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces['error'] = "tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix) return pieces pieces['closest-tag'] = full_tag[len(tag_prefix):] pieces['distance'] = int(mo.group(2)) pieces['short'] = mo.group(3) else: pieces['closest-tag'] = None assert isinstance(GITS, list) p = None for c in GITS: try: dispcmd = str([c] + ['rev-list', 'HEAD', '--count']) p = subprocess.Popen([c] + ['rev-list', 'HEAD', '--count'], cwd=root, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE if hide_stderr else None) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print('unable to run %s' % dispcmd) print(e) (count_out, rc) = (None, None) else: if verbose: print('unable to find command, tried %s' % (GITS,)) (count_out, rc) = (None, None) stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print('unable to run %s (error)' % dispcmd) print('stdout was %s' % stdout) (count_out, rc) = (None, p.returncode) (count_out, rc) = (stdout, p.returncode) pieces['distance'] = int(count_out) date = run_command(GITS, ['show', '-s', '--format=%ci', 'HEAD'], cwd=root)[0].strip() pieces['date'] = date.strip().replace(' ', 'T', 1).replace(' ', '', 1) return pieces
digital_rf
positive
def save(self, dset, frame, field_name): <DeepExtract> hdf5_file_path = Path(self.resolve_template(self.file_path_tmpl, dset, frame)) </DeepExtract> hdf5_file_path.parent.mkdir(exist_ok=True, parents=True) with h5py.File(hdf5_file_path, 'w') as hdf5_file_handle: <DeepExtract> var_name = self.resolve_var_name(dset, frame, field_name) value_to_write = frame[field_name] if self.write_as_type: value_to_write = value_to_write.astype(self.write_as_type) if var_name in hdf5_file_handle: if self.index_func: index = self.resolve_index(dset, frame, field_name) hdf5_file_handle[var_name][index][:] = value_to_write else: hdf5_file_handle[var_name][:] = value_to_write else: if self.index_func: raise NotImplementedError('Writing to new HDF5 dataset with index_func') hdf5_file_handle.create_dataset(var_name, data=value_to_write, compression=self.compression) </DeepExtract>
def save(self, dset, frame, field_name): hdf5_file_path = Path(self.resolve_template(self.file_path_tmpl, dset, frame)) hdf5_file_path.parent.mkdir(exist_ok=True, parents=True) with h5py.File(hdf5_file_path, 'w') as hdf5_file_handle: var_name = self.resolve_var_name(dset, frame, field_name) value_to_write = frame[field_name] if self.write_as_type: value_to_write = value_to_write.astype(self.write_as_type) if var_name in hdf5_file_handle: if self.index_func: index = self.resolve_index(dset, frame, field_name) hdf5_file_handle[var_name][index][:] = value_to_write else: hdf5_file_handle[var_name][:] = value_to_write else: if self.index_func: raise NotImplementedError('Writing to new HDF5 dataset with index_func') hdf5_file_handle.create_dataset(var_name, data=value_to_write, compression=self.compression)
detecting-the-unexpected
positive
def backup(self, name, mode): try: <DeepExtract> stub = medusa_pb2_grpc.MedusaStub(self.channel) if mode == 'differential': backup_mode = 0 elif mode == 'full': backup_mode = 1 else: raise RuntimeError('{} is not a recognized backup mode'.format(mode)) (backup_mode, stub) = (backup_mode, stub) </DeepExtract> request = medusa_pb2.BackupRequest(name=name, mode=backup_mode) return stub.Backup(request) except grpc.RpcError as e: logging.error('Failed sync backup for name: {} and mode: {} due to error: {}'.format(name, mode, e)) return None
def backup(self, name, mode): try: stub = medusa_pb2_grpc.MedusaStub(self.channel) if mode == 'differential': backup_mode = 0 elif mode == 'full': backup_mode = 1 else: raise RuntimeError('{} is not a recognized backup mode'.format(mode)) (backup_mode, stub) = (backup_mode, stub) request = medusa_pb2.BackupRequest(name=name, mode=backup_mode) return stub.Backup(request) except grpc.RpcError as e: logging.error('Failed sync backup for name: {} and mode: {} due to error: {}'.format(name, mode, e)) return None
cassandra-medusa
positive
def test_long_delay(self): for i in range(1, 6): <DeepExtract> self.time.return_value += 1 self.rr.add(0.5 + i) return self.rr.values </DeepExtract> nt.assert_equal(self._add_after(10, 3600.0 * 15), [2.5, 10.0])
def test_long_delay(self): for i in range(1, 6): self.time.return_value += 1 self.rr.add(0.5 + i) return self.rr.values nt.assert_equal(self._add_after(10, 3600.0 * 15), [2.5, 10.0])
appmetrics
positive
def get_memory_data_test(self): <DeepExtract> data_collection = system_model.data_collection data_collection.remove() network_collection = interfaces_model.get_data_collection(server_id=self.server['_id']) network_collection.remove() disk_collection = volumes_model.get_data_collection(server_id=self.server['_id']) disk_collection.remove() </DeepExtract> collection = system_model.data_collection for i in range(0, 30): memory_dict = {'time': i, 'server_id': self.server_id, 'memory': {'used_percent': 15, 'swap_used_mb': 0, 'total_mb': 166, 'free_mb': 4.44, 'used_mb': 66.55}} collection.insert(memory_dict) result = system_model.get_data_after(timestamp=10, enddate=20, server=self.server, check='memory') total = result[1]['data'] eq_(len(total), 11) for entry in total: assert entry['x'] >= 10 assert entry['x'] <= 20 assert entry['y'] == 166 assert type(entry['y']) is float used = result[0]['data'] eq_(len(used), 11) for entry in used: assert entry['x'] >= 10 assert entry['x'] <= 20 assert entry['y'] == 66.55 assert type(entry['y']) is float keys = [system_model.metric_tuple('total_mb', 'Total memory'), system_model.metric_tuple('used_mb', 'Used memory')] result = collection.find({'server_id': self.server_id, 'time': {'$gte': int(10), '$lte': int(20)}}).sort('time', system_model.asc) charts = system_model.generate_charts(check='memory', keys=keys, result=result) eq_(len(charts), 2) data = charts[0]['data'] eq_(len(data), 11) for entry in data: assert entry['x'] >= 10 assert entry['x'] <= 20 all_servers = server_model.get_all() result = system_model.get_global_data_after(timestamp=10, enddate=20, filtered_servers=all_servers, check='memory') used_percent = result[0]['data'] eq_(len(used_percent), 11) assert result[0]['name'] == self.server['name'] for entry in used_percent: assert entry['x'] >= 10 assert entry['x'] <= 20 assert entry['y'] == 15.0 assert type(entry['y']) is float <DeepExtract> data_collection = system_model.data_collection data_collection.remove() network_collection = interfaces_model.get_data_collection(server_id=self.server['_id']) network_collection.remove() disk_collection = volumes_model.get_data_collection(server_id=self.server['_id']) disk_collection.remove() </DeepExtract>
def get_memory_data_test(self): data_collection = system_model.data_collection data_collection.remove() network_collection = interfaces_model.get_data_collection(server_id=self.server['_id']) network_collection.remove() disk_collection = volumes_model.get_data_collection(server_id=self.server['_id']) disk_collection.remove() collection = system_model.data_collection for i in range(0, 30): memory_dict = {'time': i, 'server_id': self.server_id, 'memory': {'used_percent': 15, 'swap_used_mb': 0, 'total_mb': 166, 'free_mb': 4.44, 'used_mb': 66.55}} collection.insert(memory_dict) result = system_model.get_data_after(timestamp=10, enddate=20, server=self.server, check='memory') total = result[1]['data'] eq_(len(total), 11) for entry in total: assert entry['x'] >= 10 assert entry['x'] <= 20 assert entry['y'] == 166 assert type(entry['y']) is float used = result[0]['data'] eq_(len(used), 11) for entry in used: assert entry['x'] >= 10 assert entry['x'] <= 20 assert entry['y'] == 66.55 assert type(entry['y']) is float keys = [system_model.metric_tuple('total_mb', 'Total memory'), system_model.metric_tuple('used_mb', 'Used memory')] result = collection.find({'server_id': self.server_id, 'time': {'$gte': int(10), '$lte': int(20)}}).sort('time', system_model.asc) charts = system_model.generate_charts(check='memory', keys=keys, result=result) eq_(len(charts), 2) data = charts[0]['data'] eq_(len(data), 11) for entry in data: assert entry['x'] >= 10 assert entry['x'] <= 20 all_servers = server_model.get_all() result = system_model.get_global_data_after(timestamp=10, enddate=20, filtered_servers=all_servers, check='memory') used_percent = result[0]['data'] eq_(len(used_percent), 11) assert result[0]['name'] == self.server['name'] for entry in used_percent: assert entry['x'] >= 10 assert entry['x'] <= 20 assert entry['y'] == 15.0 assert type(entry['y']) is float data_collection = system_model.data_collection data_collection.remove() network_collection = interfaces_model.get_data_collection(server_id=self.server['_id']) network_collection.remove() disk_collection = volumes_model.get_data_collection(server_id=self.server['_id']) disk_collection.remove()
amon
positive
def reflection_matrix(point, normal): """Return matrix to mirror at plane defined by point and normal vector. >>> v0 = numpy.random.random(4) - 0.5 >>> v0[3] = 1. >>> v1 = numpy.random.random(3) - 0.5 >>> R = reflection_matrix(v0, v1) >>> numpy.allclose(2, numpy.trace(R)) True >>> numpy.allclose(v0, numpy.dot(R, v0)) True >>> v2 = v0.copy() >>> v2[:3] += v1 >>> v3 = v0.copy() >>> v2[:3] -= v1 >>> numpy.allclose(v2, numpy.dot(R, v3)) True """ <DeepExtract> if out is None: normal[:3] = numpy.array(normal[:3], dtype=numpy.float64, copy=True) if normal[:3].ndim == 1: normal[:3] /= math.sqrt(numpy.dot(normal[:3], normal[:3])) normal = normal[:3] else: if out is not normal[:3]: out[:] = numpy.array(normal[:3], copy=False) normal[:3] = out length = numpy.atleast_1d(numpy.sum(normal[:3] * normal[:3], axis)) numpy.sqrt(length, length) if axis is not None: length = numpy.expand_dims(length, axis) normal[:3] /= length if out is None: normal = normal[:3] </DeepExtract> M = numpy.identity(4) M[:3, :3] -= 2.0 * numpy.outer(normal, normal) M[:3, 3] = 2.0 * numpy.dot(point[:3], normal) * normal return M
def reflection_matrix(point, normal): """Return matrix to mirror at plane defined by point and normal vector. >>> v0 = numpy.random.random(4) - 0.5 >>> v0[3] = 1. >>> v1 = numpy.random.random(3) - 0.5 >>> R = reflection_matrix(v0, v1) >>> numpy.allclose(2, numpy.trace(R)) True >>> numpy.allclose(v0, numpy.dot(R, v0)) True >>> v2 = v0.copy() >>> v2[:3] += v1 >>> v3 = v0.copy() >>> v2[:3] -= v1 >>> numpy.allclose(v2, numpy.dot(R, v3)) True """ if out is None: normal[:3] = numpy.array(normal[:3], dtype=numpy.float64, copy=True) if normal[:3].ndim == 1: normal[:3] /= math.sqrt(numpy.dot(normal[:3], normal[:3])) normal = normal[:3] else: if out is not normal[:3]: out[:] = numpy.array(normal[:3], copy=False) normal[:3] = out length = numpy.atleast_1d(numpy.sum(normal[:3] * normal[:3], axis)) numpy.sqrt(length, length) if axis is not None: length = numpy.expand_dims(length, axis) normal[:3] /= length if out is None: normal = normal[:3] M = numpy.identity(4) M[:3, :3] -= 2.0 * numpy.outer(normal, normal) M[:3, 3] = 2.0 * numpy.dot(point[:3], normal) * normal return M
CAPTRA
positive
def run(dataset, config): log.info(f'\n**** AutoGluon TimeSeries [v{__version__}] ****\n') timestamp_column = dataset.timestamp_column id_column = dataset.id_column prediction_length = dataset.forecast_range_in_steps <DeepExtract> metrics_mapping = dict(mape='MAPE', smape='sMAPE', mase='MASE', mse='MSE', rmse='RMSE') eval_metric = metrics_mapping[config.metric] if config.metric in metrics_mapping else None if eval_metric is None: log.warning('Performance metric %s not supported.', config.metric) eval_metric = eval_metric </DeepExtract> label = dataset.target.name time_limit = config.max_runtime_seconds training_params = {k: v for (k, v) in config.framework_params.items() if not k.startswith('_')} <DeepExtract> train_df = pd.read_csv(dataset.train.path, parse_dates=[timestamp_column]) train_data = TimeSeriesDataFrame.from_data_frame(train_df, id_column=id_column, timestamp_column=timestamp_column) test_df = pd.read_csv(dataset.test.path, parse_dates=[timestamp_column]) test_data = TimeSeriesDataFrame.from_data_frame(test_df, id_column=id_column, timestamp_column=timestamp_column) (train_data, test_data) = (train_data, test_data) </DeepExtract> test_data_past = test_data.copy().slice_by_timestep(slice(None, -prediction_length)) predictor_path = tempfile.mkdtemp() + os.sep with Timer() as training: predictor = TimeSeriesPredictor(target=label, path=predictor_path, prediction_length=prediction_length, eval_metric=eval_metric) predictor.fit(train_data=train_data, time_limit=time_limit, **training_params) with Timer() as predict: predictions = predictor.predict(test_data_past) log.info(predictions) predictions_only = predictions['mean'].values test_data_future = test_data.copy().slice_by_timestep(slice(-prediction_length, None)) truth_only = test_data_future[label].values log.info(predictions_only) log.info(truth_only) leaderboard = predictor.leaderboard(test_data, silent=True) with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 1000): log.info(leaderboard) num_models_trained = len(leaderboard) <DeepExtract> artifacts = config.framework_params.get('_save_artifacts', ['leaderboard']) try: if 'leaderboard' in artifacts: leaderboard_dir = output_subdir('leaderboard', config) save_pd.save(path=os.path.join(leaderboard_dir, 'leaderboard.csv'), df=leaderboard) if 'info' in artifacts: ag_info = predictor.info() info_dir = output_subdir('info', config) save_pkl.save(path=os.path.join(info_dir, 'info.pkl'), object=ag_info) if 'models' in artifacts: shutil.rmtree(os.path.join(predictor.path, 'utils'), ignore_errors=True) models_dir = output_subdir('models', config) zip_path(predictor.path, os.path.join(models_dir, 'models.zip')) except Exception: log.warning('Error when saving artifacts.', exc_info=True) </DeepExtract> shutil.rmtree(predictor.path, ignore_errors=True) quantiles = predictions.drop(columns=['mean']).reset_index(drop=True) period_length = 1 (item_ids, inverse_item_ids) = np.unique(test_data.reset_index()['item_id'].squeeze().to_numpy(), return_index=False, return_inverse=True) y_past = [test_data[label].squeeze().to_numpy()[inverse_item_ids == i][:-prediction_length] for i in range(len(item_ids))] y_past_period_error = [np.abs(y_past_item[period_length:] - y_past_item[:-period_length]).mean() for y_past_item in y_past] y_past_period_error_rep = np.repeat(y_past_period_error, prediction_length) optional_columns = quantiles optional_columns = optional_columns.assign(y_past_period_error=y_past_period_error_rep) return 
result(output_file=config.output_predictions_file, predictions=predictions_only, truth=truth_only, probabilities=None, probabilities_labels=None, target_is_encoded=False, models_count=num_models_trained, training_duration=training.duration, predict_duration=predict.duration, optional_columns=optional_columns)
def run(dataset, config):
    log.info(f'\n**** AutoGluon TimeSeries [v{__version__}] ****\n')
    timestamp_column = dataset.timestamp_column
    id_column = dataset.id_column
    prediction_length = dataset.forecast_range_in_steps
    metrics_mapping = dict(mape='MAPE', smape='sMAPE', mase='MASE', mse='MSE', rmse='RMSE')
    eval_metric = metrics_mapping[config.metric] if config.metric in metrics_mapping else None
    if eval_metric is None:
        log.warning('Performance metric %s not supported.', config.metric)
    eval_metric = eval_metric
    label = dataset.target.name
    time_limit = config.max_runtime_seconds
    training_params = {k: v for (k, v) in config.framework_params.items() if not k.startswith('_')}
    train_df = pd.read_csv(dataset.train.path, parse_dates=[timestamp_column])
    train_data = TimeSeriesDataFrame.from_data_frame(train_df, id_column=id_column, timestamp_column=timestamp_column)
    test_df = pd.read_csv(dataset.test.path, parse_dates=[timestamp_column])
    test_data = TimeSeriesDataFrame.from_data_frame(test_df, id_column=id_column, timestamp_column=timestamp_column)
    (train_data, test_data) = (train_data, test_data)
    test_data_past = test_data.copy().slice_by_timestep(slice(None, -prediction_length))
    predictor_path = tempfile.mkdtemp() + os.sep
    with Timer() as training:
        predictor = TimeSeriesPredictor(target=label, path=predictor_path, prediction_length=prediction_length, eval_metric=eval_metric)
        predictor.fit(train_data=train_data, time_limit=time_limit, **training_params)
    with Timer() as predict:
        predictions = predictor.predict(test_data_past)
    log.info(predictions)
    predictions_only = predictions['mean'].values
    test_data_future = test_data.copy().slice_by_timestep(slice(-prediction_length, None))
    truth_only = test_data_future[label].values
    log.info(predictions_only)
    log.info(truth_only)
    leaderboard = predictor.leaderboard(test_data, silent=True)
    with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 1000):
        log.info(leaderboard)
    num_models_trained = len(leaderboard)
    artifacts = config.framework_params.get('_save_artifacts', ['leaderboard'])
    try:
        if 'leaderboard' in artifacts:
            leaderboard_dir = output_subdir('leaderboard', config)
            save_pd.save(path=os.path.join(leaderboard_dir, 'leaderboard.csv'), df=leaderboard)
        if 'info' in artifacts:
            ag_info = predictor.info()
            info_dir = output_subdir('info', config)
            save_pkl.save(path=os.path.join(info_dir, 'info.pkl'), object=ag_info)
        if 'models' in artifacts:
            shutil.rmtree(os.path.join(predictor.path, 'utils'), ignore_errors=True)
            models_dir = output_subdir('models', config)
            zip_path(predictor.path, os.path.join(models_dir, 'models.zip'))
    except Exception:
        log.warning('Error when saving artifacts.', exc_info=True)
    shutil.rmtree(predictor.path, ignore_errors=True)
    quantiles = predictions.drop(columns=['mean']).reset_index(drop=True)
    period_length = 1
    (item_ids, inverse_item_ids) = np.unique(test_data.reset_index()['item_id'].squeeze().to_numpy(), return_index=False, return_inverse=True)
    y_past = [test_data[label].squeeze().to_numpy()[inverse_item_ids == i][:-prediction_length] for i in range(len(item_ids))]
    y_past_period_error = [np.abs(y_past_item[period_length:] - y_past_item[:-period_length]).mean() for y_past_item in y_past]
    y_past_period_error_rep = np.repeat(y_past_period_error, prediction_length)
    optional_columns = quantiles
    optional_columns = optional_columns.assign(y_past_period_error=y_past_period_error_rep)
    return result(output_file=config.output_predictions_file, predictions=predictions_only, truth=truth_only, probabilities=None,
                  probabilities_labels=None, target_is_encoded=False, models_count=num_models_trained, training_duration=training.duration, predict_duration=predict.duration, optional_columns=optional_columns)
automlbenchmark
positive
def preprocess_for_train(image, height, width, bbox, fast_mode=True, scope=None):
    """Distort one image for training a network.

    Distorting images provides a useful technique for augmenting the data
    set during training in order to make the network invariant to aspects
    of the image that do not effect the label.

    Additionally it would create image_summaries to display the different
    transformations applied to the image.

    Args:
      image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
        [0, 1], otherwise it would converted to tf.float32 assuming that the range
        is [0, MAX], where MAX is largest positive representable number for
        int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
      height: integer
      width: integer
      bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
        where each coordinate is [0, 1) and the coordinates are arranged
        as [ymin, xmin, ymax, xmax].
      fast_mode: Optional boolean, if True avoids slower transformations (i.e.
        bi-cubic resizing, random_hue or random_contrast).
      scope: Optional scope for name_scope.
    Returns:
      3-D float Tensor of distorted image used for training with range [-1, 1].
    """
    with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
        if bbox is None:
            bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
        if image.dtype != tf.float32:
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox)
        tf.summary.image('image_with_bounding_boxes', image_with_box)
        <DeepExtract>
        with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
            sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(tf.shape(image), bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True)
            (bbox_begin, bbox_size, distort_bbox) = sample_distorted_bounding_box
            cropped_image = tf.slice(image, bbox_begin, bbox_size)
            (distorted_image, distorted_bbox) = (cropped_image, distort_bbox)
        </DeepExtract>
        distorted_image.set_shape([None, None, 3])
        image_with_distorted_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), distorted_bbox)
        tf.summary.image('images_with_distorted_bounding_box', image_with_distorted_box)
        num_resize_cases = 1 if fast_mode else 4
        <DeepExtract>
        sel = tf.random_uniform([], maxval=num_resize_cases, dtype=tf.int32)
        distorted_image = control_flow_ops.merge([lambda x, method: tf.image.resize_images(x, [height, width], method=method)(control_flow_ops.switch(distorted_image, tf.equal(sel, case))[1], case) for case in range(num_resize_cases)])[0]
        </DeepExtract>
        tf.summary.image('cropped_resized_image', tf.expand_dims(distorted_image, 0))
        distorted_image = tf.image.random_flip_left_right(distorted_image)
        <DeepExtract>
        sel = tf.random_uniform([], maxval=4, dtype=tf.int32)
        distorted_image = control_flow_ops.merge([lambda x, ordering: distort_color(x, ordering, fast_mode)(control_flow_ops.switch(distorted_image, tf.equal(sel, case))[1], case) for case in range(4)])[0]
        </DeepExtract>
        tf.summary.image('final_distorted_image', tf.expand_dims(distorted_image, 0))
        distorted_image = tf.subtract(distorted_image, 0.5)
        distorted_image = tf.multiply(distorted_image, 2.0)
        return distorted_image
def preprocess_for_train(image, height, width, bbox, fast_mode=True, scope=None):
    """Distort one image for training a network.

    Distorting images provides a useful technique for augmenting the data
    set during training in order to make the network invariant to aspects
    of the image that do not effect the label.

    Additionally it would create image_summaries to display the different
    transformations applied to the image.

    Args:
      image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
        [0, 1], otherwise it would converted to tf.float32 assuming that the range
        is [0, MAX], where MAX is largest positive representable number for
        int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
      height: integer
      width: integer
      bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
        where each coordinate is [0, 1) and the coordinates are arranged
        as [ymin, xmin, ymax, xmax].
      fast_mode: Optional boolean, if True avoids slower transformations (i.e.
        bi-cubic resizing, random_hue or random_contrast).
      scope: Optional scope for name_scope.
    Returns:
      3-D float Tensor of distorted image used for training with range [-1, 1].
    """
    with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
        if bbox is None:
            bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
        if image.dtype != tf.float32:
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox)
        tf.summary.image('image_with_bounding_boxes', image_with_box)
        with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
            sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(tf.shape(image), bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True)
            (bbox_begin, bbox_size, distort_bbox) = sample_distorted_bounding_box
            cropped_image = tf.slice(image, bbox_begin, bbox_size)
            (distorted_image, distorted_bbox) = (cropped_image, distort_bbox)
        distorted_image.set_shape([None, None, 3])
        image_with_distorted_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), distorted_bbox)
        tf.summary.image('images_with_distorted_bounding_box', image_with_distorted_box)
        num_resize_cases = 1 if fast_mode else 4
        sel = tf.random_uniform([], maxval=num_resize_cases, dtype=tf.int32)
        distorted_image = control_flow_ops.merge([lambda x, method: tf.image.resize_images(x, [height, width], method=method)(control_flow_ops.switch(distorted_image, tf.equal(sel, case))[1], case) for case in range(num_resize_cases)])[0]
        tf.summary.image('cropped_resized_image', tf.expand_dims(distorted_image, 0))
        distorted_image = tf.image.random_flip_left_right(distorted_image)
        sel = tf.random_uniform([], maxval=4, dtype=tf.int32)
        distorted_image = control_flow_ops.merge([lambda x, ordering: distort_color(x, ordering, fast_mode)(control_flow_ops.switch(distorted_image, tf.equal(sel, case))[1], case) for case in range(4)])[0]
        tf.summary.image('final_distorted_image', tf.expand_dims(distorted_image, 0))
        distorted_image = tf.subtract(distorted_image, 0.5)
        distorted_image = tf.multiply(distorted_image, 2.0)
        return distorted_image
caad_18
positive
def model_fn(self, features, labels, mode, params):
    """TPUEstimator compatible model function."""
    del labels
    is_training = mode == tf.estimator.ModeKeys.TRAIN
    data_shape = features.get_shape().as_list()[1:]
    <DeepExtract>
    (z_mean, z_logvar) = architectures.make_gaussian_encoder(features, is_training=is_training)
    </DeepExtract>
    z_sampled = self.sample_from_latent_distribution(z_mean, z_logvar)
    <DeepExtract>
    z_shuffle = []
    for i in range(z_sampled.get_shape()[1]):
        z_shuffle.append(tf.random_shuffle(z_sampled[:, i]))
    shuffled = tf.stack(z_shuffle, 1, name='latent_shuffled')
    z_shuffle = shuffled
    </DeepExtract>
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        (logits_z, probs_z) = architectures.make_discriminator(z_sampled, is_training=is_training)
        (_, probs_z_shuffle) = architectures.make_discriminator(z_shuffle, is_training=is_training)
    <DeepExtract>
    reconstructions = architectures.make_decoder(z_sampled, data_shape, is_training=is_training)
    </DeepExtract>
    per_sample_loss = losses.make_reconstruction_loss(features, reconstructions)
    reconstruction_loss = tf.reduce_mean(per_sample_loss)
    <DeepExtract>
    kl_loss = tf.reduce_mean(0.5 * tf.reduce_sum(tf.square(z_mean) + tf.exp(z_logvar) - z_logvar - 1, [1]), name='kl_loss')
    </DeepExtract>
    standard_vae_loss = tf.add(reconstruction_loss, kl_loss, name='VAE_loss')
    tc_loss_per_sample = logits_z[:, 0] - logits_z[:, 1]
    tc_loss = tf.reduce_mean(tc_loss_per_sample, axis=0)
    regularizer = kl_loss + self.gamma * tc_loss
    factor_vae_loss = tf.add(standard_vae_loss, self.gamma * tc_loss, name='factor_VAE_loss')
    discr_loss = tf.add(0.5 * tf.reduce_mean(tf.log(probs_z[:, 0])), 0.5 * tf.reduce_mean(tf.log(probs_z_shuffle[:, 1])), name='discriminator_loss')
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer_vae = optimizers.make_vae_optimizer()
        optimizer_discriminator = optimizers.make_discriminator_optimizer()
        all_variables = tf.trainable_variables()
        encoder_vars = [var for var in all_variables if 'encoder' in var.name]
        decoder_vars = [var for var in all_variables if 'decoder' in var.name]
        discriminator_vars = [var for var in all_variables if 'discriminator' in var.name]
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        train_op_vae = optimizer_vae.minimize(loss=factor_vae_loss, global_step=tf.train.get_global_step(), var_list=encoder_vars + decoder_vars)
        train_op_discr = optimizer_discriminator.minimize(loss=-discr_loss, global_step=tf.train.get_global_step(), var_list=discriminator_vars)
        train_op = tf.group(train_op_vae, train_op_discr, update_ops)
        tf.summary.scalar('reconstruction_loss', reconstruction_loss)
        logging_hook = tf.train.LoggingTensorHook({'loss': factor_vae_loss, 'reconstruction_loss': reconstruction_loss}, every_n_iter=50)
        return contrib_tpu.TPUEstimatorSpec(mode=mode, loss=factor_vae_loss, train_op=train_op, training_hooks=[logging_hook])
    elif mode == tf.estimator.ModeKeys.EVAL:
        return contrib_tpu.TPUEstimatorSpec(mode=mode, loss=factor_vae_loss, eval_metrics=(make_metric_fn('reconstruction_loss', 'regularizer', 'kl_loss'), [reconstruction_loss, regularizer, kl_loss]))
    else:
        raise NotImplementedError('Eval mode not supported.')
def model_fn(self, features, labels, mode, params):
    """TPUEstimator compatible model function."""
    del labels
    is_training = mode == tf.estimator.ModeKeys.TRAIN
    data_shape = features.get_shape().as_list()[1:]
    (z_mean, z_logvar) = architectures.make_gaussian_encoder(features, is_training=is_training)
    z_sampled = self.sample_from_latent_distribution(z_mean, z_logvar)
    z_shuffle = []
    for i in range(z_sampled.get_shape()[1]):
        z_shuffle.append(tf.random_shuffle(z_sampled[:, i]))
    shuffled = tf.stack(z_shuffle, 1, name='latent_shuffled')
    z_shuffle = shuffled
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        (logits_z, probs_z) = architectures.make_discriminator(z_sampled, is_training=is_training)
        (_, probs_z_shuffle) = architectures.make_discriminator(z_shuffle, is_training=is_training)
    reconstructions = architectures.make_decoder(z_sampled, data_shape, is_training=is_training)
    per_sample_loss = losses.make_reconstruction_loss(features, reconstructions)
    reconstruction_loss = tf.reduce_mean(per_sample_loss)
    kl_loss = tf.reduce_mean(0.5 * tf.reduce_sum(tf.square(z_mean) + tf.exp(z_logvar) - z_logvar - 1, [1]), name='kl_loss')
    standard_vae_loss = tf.add(reconstruction_loss, kl_loss, name='VAE_loss')
    tc_loss_per_sample = logits_z[:, 0] - logits_z[:, 1]
    tc_loss = tf.reduce_mean(tc_loss_per_sample, axis=0)
    regularizer = kl_loss + self.gamma * tc_loss
    factor_vae_loss = tf.add(standard_vae_loss, self.gamma * tc_loss, name='factor_VAE_loss')
    discr_loss = tf.add(0.5 * tf.reduce_mean(tf.log(probs_z[:, 0])), 0.5 * tf.reduce_mean(tf.log(probs_z_shuffle[:, 1])), name='discriminator_loss')
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer_vae = optimizers.make_vae_optimizer()
        optimizer_discriminator = optimizers.make_discriminator_optimizer()
        all_variables = tf.trainable_variables()
        encoder_vars = [var for var in all_variables if 'encoder' in var.name]
        decoder_vars = [var for var in all_variables if 'decoder' in var.name]
        discriminator_vars = [var for var in all_variables if 'discriminator' in var.name]
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        train_op_vae = optimizer_vae.minimize(loss=factor_vae_loss, global_step=tf.train.get_global_step(), var_list=encoder_vars + decoder_vars)
        train_op_discr = optimizer_discriminator.minimize(loss=-discr_loss, global_step=tf.train.get_global_step(), var_list=discriminator_vars)
        train_op = tf.group(train_op_vae, train_op_discr, update_ops)
        tf.summary.scalar('reconstruction_loss', reconstruction_loss)
        logging_hook = tf.train.LoggingTensorHook({'loss': factor_vae_loss, 'reconstruction_loss': reconstruction_loss}, every_n_iter=50)
        return contrib_tpu.TPUEstimatorSpec(mode=mode, loss=factor_vae_loss, train_op=train_op, training_hooks=[logging_hook])
    elif mode == tf.estimator.ModeKeys.EVAL:
        return contrib_tpu.TPUEstimatorSpec(mode=mode, loss=factor_vae_loss, eval_metrics=(make_metric_fn('reconstruction_loss', 'regularizer', 'kl_loss'), [reconstruction_loss, regularizer, kl_loss]))
    else:
        raise NotImplementedError('Eval mode not supported.')
disentanglement_lib
positive
def check_order_values(app_configs, **kwargs):
    errors = []
    preferred_orders = settings.COMMENT_ORDER_BY
    <DeepExtract>
    allowed_orders = ['reaction__likes', 'reaction__dislikes', 'posted']
    allowed_orders.extend(list(map(lambda a: '-' + a, allowed_orders)))
    allowed_orders.append('?')
    allowed_orders = allowed_orders
    </DeepExtract>
    for preferred_order in preferred_orders:
        if preferred_order not in allowed_orders:
            errors.append(Error(f"'{preferred_order}' is not a valid value for COMMENT_ORDER_BY.", hint=f'Please choose one among {allowed_orders}.', id='comment.E002'))
    return errors
def check_order_values(app_configs, **kwargs):
    errors = []
    preferred_orders = settings.COMMENT_ORDER_BY
    allowed_orders = ['reaction__likes', 'reaction__dislikes', 'posted']
    allowed_orders.extend(list(map(lambda a: '-' + a, allowed_orders)))
    allowed_orders.append('?')
    allowed_orders = allowed_orders
    for preferred_order in preferred_orders:
        if preferred_order not in allowed_orders:
            errors.append(Error(f"'{preferred_order}' is not a valid value for COMMENT_ORDER_BY.", hint=f'Please choose one among {allowed_orders}.', id='comment.E002'))
    return errors
Comment
positive
@typing_action
def commands(update, context):
    chat = update.effective_chat
    user = update.effective_user
    conn = connected(context.bot, update, chat, user.id, need_admin=True)
    if conn:
        chat = dispatcher.bot.getChat(conn)
        chat_id = conn
    else:
        if update.effective_message.chat.type == 'private':
            send_message(update.effective_message, 'This command is meant to use in group not in PM')
            return ''
        chat = update.effective_chat
        chat_id = update.effective_chat.id
    <DeepExtract>
    disabled = sql.get_all_disabled(chat.id)
    if not disabled:
        text = 'No commands are disabled!'
    result = ''.join((' - `{}`\n'.format(escape_markdown(cmd)) for cmd in disabled))
    text = 'The following commands are currently restricted:\n{}'.format(result)
    </DeepExtract>
    send_message(update.effective_message, text, parse_mode=ParseMode.MARKDOWN)
@typing_action
def commands(update, context):
    chat = update.effective_chat
    user = update.effective_user
    conn = connected(context.bot, update, chat, user.id, need_admin=True)
    if conn:
        chat = dispatcher.bot.getChat(conn)
        chat_id = conn
    else:
        if update.effective_message.chat.type == 'private':
            send_message(update.effective_message, 'This command is meant to use in group not in PM')
            return ''
        chat = update.effective_chat
        chat_id = update.effective_chat.id
    disabled = sql.get_all_disabled(chat.id)
    if not disabled:
        text = 'No commands are disabled!'
    result = ''.join((' - `{}`\n'.format(escape_markdown(cmd)) for cmd in disabled))
    text = 'The following commands are currently restricted:\n{}'.format(result)
    send_message(update.effective_message, text, parse_mode=ParseMode.MARKDOWN)
EnterpriseALRobot
positive
def getNumPages(self):
    """
    Calculates the number of pages in this PDF file.

    :return: number of pages
    :rtype: int
    :raises PdfReadError: if file is encrypted and restrictions prevent
        this action.
    """
    if self.isEncrypted:
        try:
            self._override_encryption = True
            <DeepExtract>
            self._override_encryption = True
            try:
                return self._decrypt('')
            finally:
                self._override_encryption = False
            </DeepExtract>
            return self.trailer['/Root']['/Pages']['/Count']
        except:
            raise utils.PdfReadError('File has not been decrypted')
        finally:
            self._override_encryption = False
    else:
        if self.flattenedPages == None:
            <DeepExtract>
            inheritablePageAttributes = (NameObject('/Resources'), NameObject('/MediaBox'), NameObject('/CropBox'), NameObject('/Rotate'))
            if inherit == None:
                inherit = dict()
            if pages == None:
                self.flattenedPages = []
                catalog = self.trailer['/Root'].getObject()
                pages = catalog['/Pages'].getObject()
            t = '/Pages'
            if '/Type' in pages:
                t = pages['/Type']
            if t == '/Pages':
                for attr in inheritablePageAttributes:
                    if attr in pages:
                        inherit[attr] = pages[attr]
                for page in pages['/Kids']:
                    addt = {}
                    if isinstance(page, IndirectObject):
                        addt['indirectRef'] = page
                    self._flatten(page.getObject(), inherit, **addt)
            elif t == '/Page':
                for (attr, value) in list(inherit.items()):
                    if attr not in pages:
                        pages[attr] = value
                pageObj = PageObject(self, indirectRef)
                pageObj.update(pages)
                self.flattenedPages.append(pageObj)
            </DeepExtract>
        return len(self.flattenedPages)
def getNumPages(self):
    """
    Calculates the number of pages in this PDF file.

    :return: number of pages
    :rtype: int
    :raises PdfReadError: if file is encrypted and restrictions prevent
        this action.
    """
    if self.isEncrypted:
        try:
            self._override_encryption = True
            self._override_encryption = True
            try:
                return self._decrypt('')
            finally:
                self._override_encryption = False
            return self.trailer['/Root']['/Pages']['/Count']
        except:
            raise utils.PdfReadError('File has not been decrypted')
        finally:
            self._override_encryption = False
    else:
        if self.flattenedPages == None:
            inheritablePageAttributes = (NameObject('/Resources'), NameObject('/MediaBox'), NameObject('/CropBox'), NameObject('/Rotate'))
            if inherit == None:
                inherit = dict()
            if pages == None:
                self.flattenedPages = []
                catalog = self.trailer['/Root'].getObject()
                pages = catalog['/Pages'].getObject()
            t = '/Pages'
            if '/Type' in pages:
                t = pages['/Type']
            if t == '/Pages':
                for attr in inheritablePageAttributes:
                    if attr in pages:
                        inherit[attr] = pages[attr]
                for page in pages['/Kids']:
                    addt = {}
                    if isinstance(page, IndirectObject):
                        addt['indirectRef'] = page
                    self._flatten(page.getObject(), inherit, **addt)
            elif t == '/Page':
                for (attr, value) in list(inherit.items()):
                    if attr not in pages:
                        pages[attr] = value
                pageObj = PageObject(self, indirectRef)
                pageObj.update(pages)
                self.flattenedPages.append(pageObj)
        return len(self.flattenedPages)
endesive
positive