Columns:
before: string, lengths 0-955k
after: string, lengths 0-877k
repo: string, lengths 1-74
type: string, 1 distinct value
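Each row below pairs a 'before' snippet, in which helper code that was inlined into the caller is wrapped in <DeepExtract> ... </DeepExtract> markers, with an 'after' snippet where those markers are dropped, plus the source 'repo' name and a 'type' label. As a minimal sketch of how rows with this schema might be inspected (the dataset path and split name below are placeholders, not taken from this dump), one could iterate the four columns like this:

```python
# Minimal sketch, assuming the rows are published as a Hugging Face dataset.
# "someuser/deepextract-pairs" and split="train" are placeholder identifiers.
from datasets import load_dataset

ds = load_dataset("someuser/deepextract-pairs", split="train")

for row in ds.select(range(3)):
    # The four columns described in the header above.
    print(row["repo"], row["type"])
    print("before:", len(row["before"]), "chars,",
          row["before"].count("<DeepExtract>"), "marked block(s)")
    print("after: ", len(row["after"]), "chars")
```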
def test_create_ca_conf_no_ap_no_iam_with_client_source(tmpdir): current_time = mount_efs.get_utc_now() <DeepExtract> tls_dict = mount_efs.tls_paths_dictionary(MOUNT_NAME, str(tmpdir)) mount_efs.create_required_directory({}, tls_dict['mount_dir']) tls_dict['certificate_path'] = os.path.join(tls_dict['mount_dir'], 'config.conf') tls_dict['private_key'] = os.path.join(tls_dict['mount_dir'], 'privateKey.pem') tls_dict['public_key'] = os.path.join(tls_dict['mount_dir'], 'publicKey.pem') if False: with open(tls_dict['public_key'], 'w') as f: f.write(PUBLIC_KEY_BODY) credentials = CREDENTIALS if False else None ap_id = AP_ID if False else None True = CLIENT_INFO if True else None full_config_body = mount_efs.create_ca_conf(tls_dict['certificate_path'], COMMON_NAME, tls_dict['mount_dir'], tls_dict['private_key'], current_time, REGION, FS_ID, credentials, ap_id, True) assert os.path.exists(tls_dict['certificate_path']) (tls_dict, full_config_body) = (tls_dict, full_config_body) </DeepExtract> ca_extension_body = '[ v3_ca ]\nsubjectKeyIdentifier = hash\n1.3.6.1.4.1.4843.7.3 = ASN1:UTF8String:%s\n1.3.6.1.4.1.4843.7.4 = ASN1:SEQUENCE:efs_client_info' % FS_ID efs_client_auth_body = '' efs_client_info_body = mount_efs.efs_client_info_builder(CLIENT_INFO) matching_config_body = mount_efs.CA_CONFIG_BODY % (tls_dict['mount_dir'], tls_dict['private_key'], COMMON_NAME, ca_extension_body, efs_client_auth_body, efs_client_info_body) assert full_config_body == matching_config_body
def test_create_ca_conf_no_ap_no_iam_with_client_source(tmpdir): current_time = mount_efs.get_utc_now() tls_dict = mount_efs.tls_paths_dictionary(MOUNT_NAME, str(tmpdir)) mount_efs.create_required_directory({}, tls_dict['mount_dir']) tls_dict['certificate_path'] = os.path.join(tls_dict['mount_dir'], 'config.conf') tls_dict['private_key'] = os.path.join(tls_dict['mount_dir'], 'privateKey.pem') tls_dict['public_key'] = os.path.join(tls_dict['mount_dir'], 'publicKey.pem') if False: with open(tls_dict['public_key'], 'w') as f: f.write(PUBLIC_KEY_BODY) credentials = CREDENTIALS if False else None ap_id = AP_ID if False else None True = CLIENT_INFO if True else None full_config_body = mount_efs.create_ca_conf(tls_dict['certificate_path'], COMMON_NAME, tls_dict['mount_dir'], tls_dict['private_key'], current_time, REGION, FS_ID, credentials, ap_id, True) assert os.path.exists(tls_dict['certificate_path']) (tls_dict, full_config_body) = (tls_dict, full_config_body) ca_extension_body = '[ v3_ca ]\nsubjectKeyIdentifier = hash\n1.3.6.1.4.1.4843.7.3 = ASN1:UTF8String:%s\n1.3.6.1.4.1.4843.7.4 = ASN1:SEQUENCE:efs_client_info' % FS_ID efs_client_auth_body = '' efs_client_info_body = mount_efs.efs_client_info_builder(CLIENT_INFO) matching_config_body = mount_efs.CA_CONFIG_BODY % (tls_dict['mount_dir'], tls_dict['private_key'], COMMON_NAME, ca_extension_body, efs_client_auth_body, efs_client_info_body) assert full_config_body == matching_config_body
efs-utils
positive
def __init__(self, in_channels): super().__init__() self.need_features = True self.in_channels = in_channels self.in_channels_rrdb = 320 self.kernel_hidden = 1 self.affine_eps = 0.0001 self.n_hidden_layers = 1 self.hidden_channels = 64 self.channels_for_nn = self.in_channels // 2 self.channels_for_co = self.in_channels - self.channels_for_nn if self.channels_for_nn is None: self.channels_for_nn = self.in_channels // 2 <DeepExtract> layers = [Conv2d(self.channels_for_nn, self.hidden_channels), nn.ReLU(inplace=False)] for _ in range(self.n_hidden_layers): layers.append(Conv2d(self.hidden_channels, self.hidden_channels, kernel_size=[self.kernel_hidden, self.kernel_hidden])) layers.append(nn.ReLU(inplace=False)) layers.append(Conv2dZeros(self.hidden_channels, self.channels_for_co * 2)) self.fAffine = nn.Sequential(*layers) </DeepExtract> <DeepExtract> layers = [Conv2d(self.in_channels_rrdb, self.hidden_channels), nn.ReLU(inplace=False)] for _ in range(self.n_hidden_layers): layers.append(Conv2d(self.hidden_channels, self.hidden_channels, kernel_size=[self.kernel_hidden, self.kernel_hidden])) layers.append(nn.ReLU(inplace=False)) layers.append(Conv2dZeros(self.hidden_channels, self.in_channels * 2)) self.fFeatures = nn.Sequential(*layers) </DeepExtract>
def __init__(self, in_channels): super().__init__() self.need_features = True self.in_channels = in_channels self.in_channels_rrdb = 320 self.kernel_hidden = 1 self.affine_eps = 0.0001 self.n_hidden_layers = 1 self.hidden_channels = 64 self.channels_for_nn = self.in_channels // 2 self.channels_for_co = self.in_channels - self.channels_for_nn if self.channels_for_nn is None: self.channels_for_nn = self.in_channels // 2 layers = [Conv2d(self.channels_for_nn, self.hidden_channels), nn.ReLU(inplace=False)] for _ in range(self.n_hidden_layers): layers.append(Conv2d(self.hidden_channels, self.hidden_channels, kernel_size=[self.kernel_hidden, self.kernel_hidden])) layers.append(nn.ReLU(inplace=False)) layers.append(Conv2dZeros(self.hidden_channels, self.channels_for_co * 2)) self.fAffine = nn.Sequential(*layers) layers = [Conv2d(self.in_channels_rrdb, self.hidden_channels), nn.ReLU(inplace=False)] for _ in range(self.n_hidden_layers): layers.append(Conv2d(self.hidden_channels, self.hidden_channels, kernel_size=[self.kernel_hidden, self.kernel_hidden])) layers.append(nn.ReLU(inplace=False)) layers.append(Conv2dZeros(self.hidden_channels, self.in_channels * 2)) self.fFeatures = nn.Sequential(*layers) </DeepExtract>
DeFlow
positive
def __init__(self, block_module, stages, num_groups=1, width_per_group=64, stride_in_1x1=True, stride_init=None, res2_out_channels=256, dilation=1, dcn_config={}): super(ResNetHead, self).__init__() stage2_relative_factor = 2 ** (stages[0].index - 1) stage2_bottleneck_channels = num_groups * width_per_group out_channels = res2_out_channels * stage2_relative_factor in_channels = out_channels // 2 bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor block_module = _TRANSFORMATION_MODULES[block_module] self.stages = [] stride = stride_init for stage in stages: name = 'layer' + str(stage.index) if not stride: stride = int(stage.index > 1) + 1 <DeepExtract> blocks = [] stride = stride if dw_config is not None: dw_dilation = dw_config.get('dilation', ((1, 4, 8, 12), (1, 4, 8, 12), (1, 4, 8, 12), (1, 2, 4, 8))) dw_config['dilation'] = dw_dilation[num_groups - 1] dw_block = None if dw_config is not None: dw_domain = dw_config.get('domain', 'block') assert dw_domain in ['stage', 'block'] if dw_domain == 'stage': dw_group = dw_config.get('group', 1) dw_kernel = dw_config.get('kernel', 3) dw_dilation = dw_config.get('dilation', (1, 4, 8, 12)) dw_shuffle = dw_config.get('shuffle', False) dw_deform = dw_config.get('deform', 'none') dw_block = DynamicWeightsCat11(channels=out_channels, group=dw_group, kernel=dw_kernel, dilation=dw_dilation, shuffle=dw_shuffle, deform=dw_deform) dw_config = None for idx in range(stage.block_count): blocks.append((str(idx), block_module(in_channels, bottleneck_channels, out_channels, stride_in_1x1, stride_in_1x1, stride, dilation=dilation, dcn_config=dcn_config, dw_config=dw_config))) stride = 1 in_channels = out_channels if dw_block is not None: blocks.append(('DW', dw_block)) module = nn.Sequential(OrderedDict(blocks)) </DeepExtract> stride = None self.add_module(name, module) self.stages.append(name) self.out_channels = out_channels
def __init__(self, block_module, stages, num_groups=1, width_per_group=64, stride_in_1x1=True, stride_init=None, res2_out_channels=256, dilation=1, dcn_config={}): super(ResNetHead, self).__init__() stage2_relative_factor = 2 ** (stages[0].index - 1) stage2_bottleneck_channels = num_groups * width_per_group out_channels = res2_out_channels * stage2_relative_factor in_channels = out_channels // 2 bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor block_module = _TRANSFORMATION_MODULES[block_module] self.stages = [] stride = stride_init for stage in stages: name = 'layer' + str(stage.index) if not stride: stride = int(stage.index > 1) + 1 blocks = [] stride = stride if dw_config is not None: dw_dilation = dw_config.get('dilation', ((1, 4, 8, 12), (1, 4, 8, 12), (1, 4, 8, 12), (1, 2, 4, 8))) dw_config['dilation'] = dw_dilation[num_groups - 1] dw_block = None if dw_config is not None: dw_domain = dw_config.get('domain', 'block') assert dw_domain in ['stage', 'block'] if dw_domain == 'stage': dw_group = dw_config.get('group', 1) dw_kernel = dw_config.get('kernel', 3) dw_dilation = dw_config.get('dilation', (1, 4, 8, 12)) dw_shuffle = dw_config.get('shuffle', False) dw_deform = dw_config.get('deform', 'none') dw_block = DynamicWeightsCat11(channels=out_channels, group=dw_group, kernel=dw_kernel, dilation=dw_dilation, shuffle=dw_shuffle, deform=dw_deform) dw_config = None for idx in range(stage.block_count): blocks.append((str(idx), block_module(in_channels, bottleneck_channels, out_channels, stride_in_1x1, stride_in_1x1, stride, dilation=dilation, dcn_config=dcn_config, dw_config=dw_config))) stride = 1 in_channels = out_channels if dw_block is not None: blocks.append(('DW', dw_block)) module = nn.Sequential(OrderedDict(blocks)) stride = None self.add_module(name, module) self.stages.append(name) self.out_channels = out_channels
dgmn
positive
def _parse(type, token, skip_component=None): def set_and_return(token, type, component, dateobj, skip_date_order=False): if not skip_date_order: self.auto_order.append(component) setattr(self, '_token_%s' % component, (token, type)) return [(component, getattr(dateobj, component))] def parse_number(token, skip_component=None): type = 0 for (component, directives) in self.ordered_num_directives.items(): if skip_component == component: continue for directive in directives: try: <DeepExtract> do = strptime(token, directive) </DeepExtract> prev_value = getattr(self, component, None) if not prev_value: return set_and_return(token, type, component, do) else: try: (prev_token, prev_type) = getattr(self, '_token_%s' % component) if prev_type == type: <DeepExtract> do = strptime(prev_token, directive) </DeepExtract> except ValueError: self.unset_tokens.append((prev_token, prev_type, component)) return set_and_return(token, type, component, do) except ValueError: pass else: raise ValueError('Unable to parse: %s' % token) def parse_alpha(token, skip_component=None): type = 1 for (component, directives) in self.alpha_directives.items(): if skip_component == component: continue for directive in directives: try: <DeepExtract> do = strptime(token, directive) </DeepExtract> prev_value = getattr(self, component, None) if not prev_value: return set_and_return(token, type, component, do, skip_date_order=True) elif component == 'month': index = self.auto_order.index('month') self.auto_order[index] = 'day' setattr(self, '_token_day', self._token_month) setattr(self, '_token_month', (token, type)) return [(component, getattr(do, component)), ('day', prev_value)] except: pass else: raise ValueError('Unable to parse: %s' % token) handlers = {0: parse_number, 1: parse_alpha} return handlers[type](token, skip_component)
def _parse(type, token, skip_component=None): def set_and_return(token, type, component, dateobj, skip_date_order=False): if not skip_date_order: self.auto_order.append(component) setattr(self, '_token_%s' % component, (token, type)) return [(component, getattr(dateobj, component))] def parse_number(token, skip_component=None): type = 0 for (component, directives) in self.ordered_num_directives.items(): if skip_component == component: continue for directive in directives: try: do = strptime(token, directive) prev_value = getattr(self, component, None) if not prev_value: return set_and_return(token, type, component, do) else: try: (prev_token, prev_type) = getattr(self, '_token_%s' % component) if prev_type == type: do = strptime(prev_token, directive) except ValueError: self.unset_tokens.append((prev_token, prev_type, component)) return set_and_return(token, type, component, do) except ValueError: pass else: raise ValueError('Unable to parse: %s' % token) def parse_alpha(token, skip_component=None): type = 1 for (component, directives) in self.alpha_directives.items(): if skip_component == component: continue for directive in directives: try: do = strptime(token, directive) prev_value = getattr(self, component, None) if not prev_value: return set_and_return(token, type, component, do, skip_date_order=True) elif component == 'month': index = self.auto_order.index('month') self.auto_order[index] = 'day' setattr(self, '_token_day', self._token_month) setattr(self, '_token_month', (token, type)) return [(component, getattr(do, component)), ('day', prev_value)] except: pass else: raise ValueError('Unable to parse: %s' % token) handlers = {0: parse_number, 1: parse_alpha} return handlers[type](token, skip_component)
dateparser
positive
def BTToDLL(root): if root is None: return root <DeepExtract> if root is None: root = root if root.left: left = BTToDLLUtil(root.left) while left.right: left = left.right left.right = root root.left = left if root.right: right = BTToDLLUtil(root.right) while right.left: right = right.left right.left = root root.right = right root = root </DeepExtract> while root.left: root = root.left return root
def BTToDLL(root): if root is None: return root if root is None: root = root if root.left: left = BTToDLLUtil(root.left) while left.right: left = left.right left.right = root root.left = left if root.right: right = BTToDLLUtil(root.right) while right.left: right = right.left right.left = root root.right = right root = root while root.left: root = root.left return root
DSA
positive
def download_attachment(self, uuid, attachment_name, download_path): """save attachment from memory buffer into the save_path""" <DeepExtract> external_path = self._make_external_filepath(PurePosixPath(self.database, '/'.join(subfold(uuid.hex, self.spec['subfolding'])), uuid.hex).with_suffix('.' + attachment_name)) </DeepExtract> <DeepExtract> if self.spec['protocol'] == 's3': self.s3.fget(external_path, download_path) elif self.spec['protocol'] == 'file': safe_copy(external_path, download_path) else: assert False </DeepExtract>
def download_attachment(self, uuid, attachment_name, download_path): """save attachment from memory buffer into the save_path""" external_path = self._make_external_filepath(PurePosixPath(self.database, '/'.join(subfold(uuid.hex, self.spec['subfolding'])), uuid.hex).with_suffix('.' + attachment_name)) if self.spec['protocol'] == 's3': self.s3.fget(external_path, download_path) elif self.spec['protocol'] == 'file': safe_copy(external_path, download_path) else: assert False </DeepExtract>
datajoint-python
positive
def KDistance(root, k): nodes = [] <DeepExtract> if root is None: return if k == 0: nodes.append(root.data) else: KDistanceUtil(root.left, k - 1, nodes) KDistanceUtil(root.right, k - 1, nodes) </DeepExtract> return nodes
def KDistance(root, k): nodes = [] if root is None: return if k == 0: nodes.append(root.data) else: KDistanceUtil(root.left, k - 1, nodes) KDistanceUtil(root.right, k - 1, nodes) return nodes
Competitive-Programming
positive
@self.app.route('/doLogin', method='POST') @self.app.route('/<project>/doLogin', method='POST') def do_login(project=None): try: username = html.escape(self._parse_parameter(request.forms, 'username')) <DeepExtract> if not 'password' in request.forms: raise ValueMissingException('password') password = request.forms.get('password') </DeepExtract> sessionToken = self.middleware.decryptSessionToken(username, request) if sessionToken is not None: sessionToken = html.escape(sessionToken) (sessionToken, _, expires) = self.middleware.login(username, password, sessionToken) response.set_cookie('username', username, path='/') self.middleware.encryptSessionToken(username, response) return {'expires': expires.strftime('%H:%M:%S')} except Exception as e: abort(403, str(e))
@self.app.route('/doLogin', method='POST') @self.app.route('/<project>/doLogin', method='POST') def do_login(project=None): try: username = html.escape(self._parse_parameter(request.forms, 'username')) if not 'password' in request.forms: raise ValueMissingException('password') password = request.forms.get('password') sessionToken = self.middleware.decryptSessionToken(username, request) if sessionToken is not None: sessionToken = html.escape(sessionToken) (sessionToken, _, expires) = self.middleware.login(username, password, sessionToken) response.set_cookie('username', username, path='/') self.middleware.encryptSessionToken(username, response) return {'expires': expires.strftime('%H:%M:%S')} except Exception as e: abort(403, str(e))
aerial_wildlife_detection
positive
def get_conn_parent_categoryCtx(parse_dict, DocID, sent_index, conn_indices): <DeepExtract> conn_name = ' '.join([parse_dict[DocID]['sentences'][sent_index]['words'][word_token][0] for word_token in conn_indices]) conn_name = conn_name </DeepExtract> parse_tree = parse_dict[DocID]['sentences'][sent_index]['parsetree'].strip() syntax_tree = Syntax_tree(parse_tree) if syntax_tree.tree == None: parent_categoryCtx = 'NONE_TREE' else: parent_category_node = syntax_tree.get_parent_category_node_by_token_indices(conn_indices) <DeepExtract> if parent_category_node == None: parent_categoryCtx = 'None' Ctx = [] Ctx.append(parent_category_node.name) if parent_category_node.up == None: Ctx.append('NULL') else: Ctx.append(parent_category_node.up.name) for child in parent_category_node.get_children(): Ctx.append(child.name) parent_categoryCtx = '-'.join(Ctx) </DeepExtract> conn_parent_categoryCtx = '%s|%s' % (conn_name, parent_categoryCtx) return conn_parent_categoryCtx
def get_conn_parent_categoryCtx(parse_dict, DocID, sent_index, conn_indices): conn_name = ' '.join([parse_dict[DocID]['sentences'][sent_index]['words'][word_token][0] for word_token in conn_indices]) conn_name = conn_name parse_tree = parse_dict[DocID]['sentences'][sent_index]['parsetree'].strip() syntax_tree = Syntax_tree(parse_tree) if syntax_tree.tree == None: parent_categoryCtx = 'NONE_TREE' else: parent_category_node = syntax_tree.get_parent_category_node_by_token_indices(conn_indices) if parent_category_node == None: parent_categoryCtx = 'None' Ctx = [] Ctx.append(parent_category_node.name) if parent_category_node.up == None: Ctx.append('NULL') else: Ctx.append(parent_category_node.up.name) for child in parent_category_node.get_children(): Ctx.append(child.name) parent_categoryCtx = '-'.join(Ctx) conn_parent_categoryCtx = '%s|%s' % (conn_name, parent_categoryCtx) return conn_parent_categoryCtx
conll2015_discourse
positive
def center_crop(img, crop_height, crop_width): (height, width) = img.shape[:2] if height < crop_height or width < crop_width: raise ValueError('Requested crop size ({crop_height}, {crop_width}) is larger than the image size ({height}, {width})'.format(crop_height=crop_height, crop_width=crop_width, height=height, width=width)) <DeepExtract> y1 = (height - crop_height) // 2 y2 = y1 + crop_height x1 = (width - crop_width) // 2 x2 = x1 + crop_width (x1, y1, x2, y2) = (x1, y1, x2, y2) </DeepExtract> img = img[y1:y2, x1:x2] return img
def center_crop(img, crop_height, crop_width): (height, width) = img.shape[:2] if height < crop_height or width < crop_width: raise ValueError('Requested crop size ({crop_height}, {crop_width}) is larger than the image size ({height}, {width})'.format(crop_height=crop_height, crop_width=crop_width, height=height, width=width)) y1 = (height - crop_height) // 2 y2 = y1 + crop_height x1 = (width - crop_width) // 2 x2 = x1 + crop_width (x1, y1, x2, y2) = (x1, y1, x2, y2) img = img[y1:y2, x1:x2] return img
DenseMatching
positive
def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): env = self.state.document.settings.env name = None if self.arguments: m = re.match('^(.*\\s+)?(.*?)(\\(.*)?', self.arguments[0]) name = m.group(2).strip() if not name: name = self.arguments[0] lines = list(self.content) <DeepExtract> cfg = dict(use_plots=env.app.config.numpydoc_use_plots, show_class_members=env.app.config.numpydoc_show_class_members, class_members_toctree=env.app.config.numpydoc_class_members_toctree) if objtype == 'module': title_re = re.compile(sixu('^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'), re.I | re.S) lines[:] = title_re.sub(sixu(''), sixu('\n').join(lines)).split(sixu('\n')) else: doc = get_doc_object(None, objtype, sixu('\n').join(lines), config=cfg) if sys.version_info[0] >= 3: doc = str(doc) else: doc = unicode(doc) lines[:] = doc.split(sixu('\n')) if env.app.config.numpydoc_edit_link and hasattr(None, '__name__') and None.__name__: if hasattr(None, '__module__'): v = dict(full_name=sixu('%s.%s') % (None.__module__, None.__name__)) else: v = dict(full_name=None.__name__) lines += [sixu(''), sixu('.. htmlonly::'), sixu('')] lines += [sixu(' %s') % x for x in (env.app.config.numpydoc_edit_link % v).split('\n')] references = [] for line in lines: line = line.strip() m = re.match(sixu('^.. \\[([a-z0-9_.-])\\]'), line, re.I) if m: references.append(m.group(1)) references.sort(key=lambda x: -len(x)) if references: for (i, line) in enumerate(lines): for r in references: if re.match(sixu('^\\d+$'), r): new_r = sixu('R%d') % (reference_offset[0] + int(r)) else: new_r = sixu('%s%d') % (r, reference_offset[0]) lines[i] = lines[i].replace(sixu('[%s]_') % r, sixu('[%s]_') % new_r) lines[i] = lines[i].replace(sixu('.. [%s]') % r, sixu('.. [%s]') % new_r) reference_offset[0] += len(references) </DeepExtract> self.content = ViewList(lines, self.content.parent) return base_directive.run(self) return directive
def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): env = self.state.document.settings.env name = None if self.arguments: m = re.match('^(.*\\s+)?(.*?)(\\(.*)?', self.arguments[0]) name = m.group(2).strip() if not name: name = self.arguments[0] lines = list(self.content) cfg = dict(use_plots=env.app.config.numpydoc_use_plots, show_class_members=env.app.config.numpydoc_show_class_members, class_members_toctree=env.app.config.numpydoc_class_members_toctree) if objtype == 'module': title_re = re.compile(sixu('^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'), re.I | re.S) lines[:] = title_re.sub(sixu(''), sixu('\n').join(lines)).split(sixu('\n')) else: doc = get_doc_object(None, objtype, sixu('\n').join(lines), config=cfg) if sys.version_info[0] >= 3: doc = str(doc) else: doc = unicode(doc) lines[:] = doc.split(sixu('\n')) if env.app.config.numpydoc_edit_link and hasattr(None, '__name__') and None.__name__: if hasattr(None, '__module__'): v = dict(full_name=sixu('%s.%s') % (None.__module__, None.__name__)) else: v = dict(full_name=None.__name__) lines += [sixu(''), sixu('.. htmlonly::'), sixu('')] lines += [sixu(' %s') % x for x in (env.app.config.numpydoc_edit_link % v).split('\n')] references = [] for line in lines: line = line.strip() m = re.match(sixu('^.. \\[([a-z0-9_.-])\\]'), line, re.I) if m: references.append(m.group(1)) references.sort(key=lambda x: -len(x)) if references: for (i, line) in enumerate(lines): for r in references: if re.match(sixu('^\\d+$'), r): new_r = sixu('R%d') % (reference_offset[0] + int(r)) else: new_r = sixu('%s%d') % (r, reference_offset[0]) lines[i] = lines[i].replace(sixu('[%s]_') % r, sixu('[%s]_') % new_r) lines[i] = lines[i].replace(sixu('.. [%s]') % r, sixu('.. [%s]') % new_r) reference_offset[0] += len(references) self.content = ViewList(lines, self.content.parent) return base_directive.run(self) return directive
biom-format
positive
def get(self, request, *args, **kwargs): <DeepExtract> kwargs.update({'orderable': False}) return super().get_table(**kwargs) </DeepExtract> from django.http import HttpResponse return HttpResponse()
def get(self, request, *args, **kwargs): kwargs.update({'orderable': False}) return super().get_table(**kwargs) from django.http import HttpResponse return HttpResponse()
django-tables2
positive
def exists(self): """ Find out if the query contains any hits, with as little effort as possible """ if self.is_cached: return len(self._cache) > 0 <DeepExtract> if not isinstance(self.q, (type(None), Q)): raise ValueError("self.q value '%s' must be None or a Q instance" % self.q) if not isinstance(self.only_fields, (type(None), tuple)): raise ValueError("self.only_fields value '%s' must be None or a tuple" % self.only_fields) if not isinstance(self.order_fields, (type(None), tuple)): raise ValueError("self.order_fields value '%s' must be None or a tuple" % self.order_fields) if self.return_format not in self.RETURN_TYPES: raise ValueError("self.return_value '%s' must be one of %s" % (self.return_format, self.RETURN_TYPES)) new_qs = self.__class__(self.folder_collection, request_type=self.request_type) new_qs.q = None if self.q is None else deepcopy(self.q) new_qs.only_fields = self.only_fields new_qs.order_fields = None if self.order_fields is None else deepcopy(self.order_fields) new_qs.return_format = self.return_format new_qs.calendar_view = self.calendar_view new_qs.page_size = self.page_size new_qs.max_items = self.max_items new_qs._depth = self._depth new_qs = new_qs </DeepExtract> new_qs.max_items = 1 return new_qs.count(page_size=1) > 0
def exists(self): """ Find out if the query contains any hits, with as little effort as possible """ if self.is_cached: return len(self._cache) > 0 if not isinstance(self.q, (type(None), Q)): raise ValueError("self.q value '%s' must be None or a Q instance" % self.q) if not isinstance(self.only_fields, (type(None), tuple)): raise ValueError("self.only_fields value '%s' must be None or a tuple" % self.only_fields) if not isinstance(self.order_fields, (type(None), tuple)): raise ValueError("self.order_fields value '%s' must be None or a tuple" % self.order_fields) if self.return_format not in self.RETURN_TYPES: raise ValueError("self.return_value '%s' must be one of %s" % (self.return_format, self.RETURN_TYPES)) new_qs = self.__class__(self.folder_collection, request_type=self.request_type) new_qs.q = None if self.q is None else deepcopy(self.q) new_qs.only_fields = self.only_fields new_qs.order_fields = None if self.order_fields is None else deepcopy(self.order_fields) new_qs.return_format = self.return_format new_qs.calendar_view = self.calendar_view new_qs.page_size = self.page_size new_qs.max_items = self.max_items new_qs._depth = self._depth new_qs = new_qs new_qs.max_items = 1 return new_qs.count(page_size=1) > 0
exchangelib
positive
def do_sample(self, x): self.reset() g_for_surgery = nx.DiGraph(self.g) <DeepExtract> for xi in x.keys(): g_for_surgery.remove_edges_from([(parent, child) for (parent, child) in g_for_surgery.in_edges(xi)]) g_for_surgery.nodes()[xi]['parent_names'] = [] g_modified = g_for_surgery </DeepExtract> <DeepExtract> if not self.keep_original_treatment: for (k, v) in x.items(): self._df[k] = v self._df = self._df </DeepExtract> <DeepExtract> if nx.is_directed_acyclic_graph(g_modified): with pm.Model() as model: g_modified = self.apply_data_types(g_modified, self._variable_types) g_modified = self.apply_parents(g_modified) g_modified = self.apply_parameters(g_modified, self._df, initialization_trace=self.fit_trace) g_modified = self.build_bayesian_network(g_modified, self._df) trace = pm.sample_prior_predictive(1) else: raise Exception('Graph is not a DAG!') (g_modified, trace) = (g_modified, trace) </DeepExtract> for col in self._df: if col in trace and col not in self._treatment_names: self._df[col] = trace[col] return self._df.copy()
def do_sample(self, x): self.reset() g_for_surgery = nx.DiGraph(self.g) for xi in x.keys(): g_for_surgery.remove_edges_from([(parent, child) for (parent, child) in g_for_surgery.in_edges(xi)]) g_for_surgery.nodes()[xi]['parent_names'] = [] g_modified = g_for_surgery if not self.keep_original_treatment: for (k, v) in x.items(): self._df[k] = v self._df = self._df if nx.is_directed_acyclic_graph(g_modified): with pm.Model() as model: g_modified = self.apply_data_types(g_modified, self._variable_types) g_modified = self.apply_parents(g_modified) g_modified = self.apply_parameters(g_modified, self._df, initialization_trace=self.fit_trace) g_modified = self.build_bayesian_network(g_modified, self._df) trace = pm.sample_prior_predictive(1) else: raise Exception('Graph is not a DAG!') (g_modified, trace) = (g_modified, trace) for col in self._df: if col in trace and col not in self._treatment_names: self._df[col] = trace[col] return self._df.copy()
dowhy
positive
def run(self): if self.use_estimate_all: <DeepExtract> self.model.eval() test_loss = 0 test_loss_improvement = 0 n_test = len(self.loader.dataset) s = 'Title, Loss:' for target in self.sources: s += ' ({})'.format(target) s += ', loss improvement:' for target in self.sources: s += ' ({})'.format(target) print(s, flush=True) with torch.no_grad(): for (idx, (mixture, sources, samples, name)) in enumerate(self.loader): '\n mixture: (batch_size, 1, n_mics, n_bins, n_frames)\n sources: (batch_size, n_sources, n_mics, n_bins, n_frames)\n samples <int>: Length in time domain\n name <str>: Artist and title of song\n ' if self.use_cuda: mixture = mixture.cuda() sources = sources.cuda() (batch_size, n_sources, n_mics, n_bins, n_frames) = sources.size() mixture_amplitude = torch.abs(mixture) sources_amplitude = torch.abs(sources) estimated_sources_amplitude = {target: [] for target in self.sources} for _mixture_amplitude in mixture_amplitude: for target in self.sources: _estimated_sources_amplitude = self.model(_mixture_amplitude, target=target) estimated_sources_amplitude[target].append(_estimated_sources_amplitude) estimated_sources_amplitude = [torch.cat(estimated_sources_amplitude[target], dim=0).unsqueeze(dim=0) for target in self.sources] estimated_sources_amplitude = torch.cat(estimated_sources_amplitude, dim=0) estimated_sources_amplitude = estimated_sources_amplitude.permute(0, 2, 3, 1, 4) estimated_sources_amplitude = estimated_sources_amplitude.reshape(n_sources, n_mics, n_bins, batch_size * n_frames) mixture = mixture.permute(1, 2, 3, 0, 4).reshape(1, n_mics, n_bins, batch_size * n_frames) mixture_amplitude = mixture_amplitude.permute(1, 2, 3, 0, 4).reshape(1, n_mics, n_bins, batch_size * n_frames) sources_amplitude = sources_amplitude.permute(1, 2, 3, 0, 4).reshape(n_sources, n_mics, n_bins, batch_size * n_frames) loss_mixture = self.criterion(mixture_amplitude, sources_amplitude, batch_mean=False) loss = self.criterion(estimated_sources_amplitude, sources_amplitude, batch_mean=False) loss_improvement = loss_mixture - loss mixture = mixture.cpu() estimated_sources_amplitude = estimated_sources_amplitude.cpu() estimated_sources = self.apply_multichannel_wiener_filter(mixture, estimated_sources_amplitude=estimated_sources_amplitude) estimated_sources_channels = estimated_sources.size()[:-2] estimated_sources = estimated_sources.view(-1, *estimated_sources.size()[-2:]) estimated_sources = istft(estimated_sources, self.n_fft, hop_length=self.hop_length, window=self.window, normalized=self.normalize, return_complex=False) estimated_sources = estimated_sources.view(*estimated_sources_channels, -1) track_dir = os.path.join(self.estimates_dir, name) os.makedirs(track_dir, exist_ok=True) for (source_idx, target) in enumerate(self.sources): estimated_path = os.path.join(track_dir, '{}.wav'.format(target)) estimated_source = estimated_sources[source_idx, :, :samples] signal = estimated_source.unsqueeze(dim=0) if estimated_source.dim() == 1 else estimated_source torchaudio.save(estimated_path, signal, sample_rate=self.sample_rate, bits_per_sample=BITS_PER_SAMPLE_MUSDB18) s = '{},'.format(name) for (idx, target) in enumerate(self.sources): s += ' {:.3f}'.format(loss[idx].item()) s += ', loss improvement:' for (idx, target) in enumerate(self.sources): s += ' {:.3f}'.format(loss_improvement[idx].item()) print(s, flush=True) test_loss += loss test_loss_improvement += loss_improvement test_loss /= n_test test_loss_improvement /= n_test s = 'Loss:' for (idx, target) in enumerate(self.sources): s += ' ({}) {:.3f}'.format(target, test_loss[idx].item()) s += ', loss improvement:' for (idx, target) in enumerate(self.sources): s += ' ({}) {:.3f}'.format(target, test_loss_improvement[idx].item()) print(s, flush=True) </DeepExtract> if self.use_evaluate_all: <DeepExtract> mus = musdb.DB(root=self.musdb18_root, subsets='test', is_wav=True) results = museval.EvalStore(frames_agg='median', tracks_agg='median') for track in mus.tracks: name = track.name estimates = {} estimated_accompaniment = 0 for target in self.sources: estimated_path = os.path.join(self.estimates_dir, name, '{}.wav'.format(target)) (estimated, _) = torchaudio.load(estimated_path) estimated = estimated.numpy().transpose(1, 0) estimates[target] = estimated if target != 'vocals': estimated_accompaniment += estimated estimates['accompaniment'] = estimated_accompaniment scores = museval.eval_mus_track(track, estimates, output_dir=self.json_dir) results.add_track(scores) print(name) print(scores, flush=True) print(results) </DeepExtract>
def run(self): if self.use_estimate_all: self.model.eval() test_loss = 0 test_loss_improvement = 0 n_test = len(self.loader.dataset) s = 'Title, Loss:' for target in self.sources: s += ' ({})'.format(target) s += ', loss improvement:' for target in self.sources: s += ' ({})'.format(target) print(s, flush=True) with torch.no_grad(): for (idx, (mixture, sources, samples, name)) in enumerate(self.loader): '\n mixture: (batch_size, 1, n_mics, n_bins, n_frames)\n sources: (batch_size, n_sources, n_mics, n_bins, n_frames)\n samples <int>: Length in time domain\n name <str>: Artist and title of song\n ' if self.use_cuda: mixture = mixture.cuda() sources = sources.cuda() (batch_size, n_sources, n_mics, n_bins, n_frames) = sources.size() mixture_amplitude = torch.abs(mixture) sources_amplitude = torch.abs(sources) estimated_sources_amplitude = {target: [] for target in self.sources} for _mixture_amplitude in mixture_amplitude: for target in self.sources: _estimated_sources_amplitude = self.model(_mixture_amplitude, target=target) estimated_sources_amplitude[target].append(_estimated_sources_amplitude) estimated_sources_amplitude = [torch.cat(estimated_sources_amplitude[target], dim=0).unsqueeze(dim=0) for target in self.sources] estimated_sources_amplitude = torch.cat(estimated_sources_amplitude, dim=0) estimated_sources_amplitude = estimated_sources_amplitude.permute(0, 2, 3, 1, 4) estimated_sources_amplitude = estimated_sources_amplitude.reshape(n_sources, n_mics, n_bins, batch_size * n_frames) mixture = mixture.permute(1, 2, 3, 0, 4).reshape(1, n_mics, n_bins, batch_size * n_frames) mixture_amplitude = mixture_amplitude.permute(1, 2, 3, 0, 4).reshape(1, n_mics, n_bins, batch_size * n_frames) sources_amplitude = sources_amplitude.permute(1, 2, 3, 0, 4).reshape(n_sources, n_mics, n_bins, batch_size * n_frames) loss_mixture = self.criterion(mixture_amplitude, sources_amplitude, batch_mean=False) loss = self.criterion(estimated_sources_amplitude, sources_amplitude, batch_mean=False) loss_improvement = loss_mixture - loss mixture = mixture.cpu() estimated_sources_amplitude = estimated_sources_amplitude.cpu() estimated_sources = self.apply_multichannel_wiener_filter(mixture, estimated_sources_amplitude=estimated_sources_amplitude) estimated_sources_channels = estimated_sources.size()[:-2] estimated_sources = estimated_sources.view(-1, *estimated_sources.size()[-2:]) estimated_sources = istft(estimated_sources, self.n_fft, hop_length=self.hop_length, window=self.window, normalized=self.normalize, return_complex=False) estimated_sources = estimated_sources.view(*estimated_sources_channels, -1) track_dir = os.path.join(self.estimates_dir, name) os.makedirs(track_dir, exist_ok=True) for (source_idx, target) in enumerate(self.sources): estimated_path = os.path.join(track_dir, '{}.wav'.format(target)) estimated_source = estimated_sources[source_idx, :, :samples] signal = estimated_source.unsqueeze(dim=0) if estimated_source.dim() == 1 else estimated_source torchaudio.save(estimated_path, signal, sample_rate=self.sample_rate, bits_per_sample=BITS_PER_SAMPLE_MUSDB18) s = '{},'.format(name) for (idx, target) in enumerate(self.sources): s += ' {:.3f}'.format(loss[idx].item()) s += ', loss improvement:' for (idx, target) in enumerate(self.sources): s += ' {:.3f}'.format(loss_improvement[idx].item()) print(s, flush=True) test_loss += loss test_loss_improvement += loss_improvement test_loss /= n_test test_loss_improvement /= n_test s = 'Loss:' for (idx, target) in enumerate(self.sources): s += ' ({}) {:.3f}'.format(target, test_loss[idx].item()) s += ', loss improvement:' for (idx, target) in enumerate(self.sources): s += ' ({}) {:.3f}'.format(target, test_loss_improvement[idx].item()) print(s, flush=True) if self.use_evaluate_all: mus = musdb.DB(root=self.musdb18_root, subsets='test', is_wav=True) results = museval.EvalStore(frames_agg='median', tracks_agg='median') for track in mus.tracks: name = track.name estimates = {} estimated_accompaniment = 0 for target in self.sources: estimated_path = os.path.join(self.estimates_dir, name, '{}.wav'.format(target)) (estimated, _) = torchaudio.load(estimated_path) estimated = estimated.numpy().transpose(1, 0) estimates[target] = estimated if target != 'vocals': estimated_accompaniment += estimated estimates['accompaniment'] = estimated_accompaniment scores = museval.eval_mus_track(track, estimates, output_dir=self.json_dir) results.add_track(scores) print(name) print(scores, flush=True) print(results) </DeepExtract>
DNN-based_source_separation
positive
def double_press(dot, col, row): event_double_pressed = Event() dot.when_double_pressed = lambda : event_double_pressed.set() <DeepExtract> sleep(mbd.double_press_time + 0.1) mbd.mock_blue_dot_pressed(col, row, 0, 0) mbd.mock_blue_dot_released(col, row, 0, 0) sleep(mbd.double_press_time + 0.1) mbd.mock_blue_dot_pressed(col, row, 0, 0) mbd.mock_blue_dot_released(col, row, 0, 0) </DeepExtract> assert not event_double_pressed.is_set() <DeepExtract> sleep(mbd.double_press_time + 0.1) mbd.mock_blue_dot_pressed(col, row, 0, 0) mbd.mock_blue_dot_released(col, row, 0, 0) mbd.mock_blue_dot_pressed(col, row, 0, 0) mbd.mock_blue_dot_released(col, row, 0, 0) </DeepExtract> assert event_double_pressed.is_set() <DeepExtract> delayed_thread = Thread(target=_delayed_function, args=(lambda : simulate_double_press(col, row), 0.2)) delayed_thread.start() </DeepExtract> assert dot.wait_for_double_press(1) <DeepExtract> delayed_thread = Thread(target=_delayed_function, args=(lambda : simulate_failed_double_press(col, row), 0.2)) delayed_thread.start() </DeepExtract> assert not dot.wait_for_double_press(1)
def double_press(dot, col, row): event_double_pressed = Event() dot.when_double_pressed = lambda : event_double_pressed.set() sleep(mbd.double_press_time + 0.1) mbd.mock_blue_dot_pressed(col, row, 0, 0) mbd.mock_blue_dot_released(col, row, 0, 0) sleep(mbd.double_press_time + 0.1) mbd.mock_blue_dot_pressed(col, row, 0, 0) mbd.mock_blue_dot_released(col, row, 0, 0) assert not event_double_pressed.is_set() sleep(mbd.double_press_time + 0.1) mbd.mock_blue_dot_pressed(col, row, 0, 0) mbd.mock_blue_dot_released(col, row, 0, 0) mbd.mock_blue_dot_pressed(col, row, 0, 0) mbd.mock_blue_dot_released(col, row, 0, 0) assert event_double_pressed.is_set() delayed_thread = Thread(target=_delayed_function, args=(lambda : simulate_double_press(col, row), 0.2)) delayed_thread.start() assert dot.wait_for_double_press(1) delayed_thread = Thread(target=_delayed_function, args=(lambda : simulate_failed_double_press(col, row), 0.2)) delayed_thread.start() assert not dot.wait_for_double_press(1)
BlueDot
positive
def eval_adv(self, sem_in, syn_ref): sem_ret = self.encode_to_hidden(sem_in) <DeepExtract> hidden = sem_ret['hidden'] batch_size = hidden.size(1) hidden = hidden.permute(1, 0, 2).contiguous() if self.hidden_factor > 1: hidden = hidden.view(batch_size, self.hidden_size * self.hidden_factor) else: hidden = hidden.squeeze() mean = self.hidden2mean(hidden) logv = self.hidden2logv(hidden) if self.training: std = torch.exp(0.5 * logv) z = to_var(torch.randn([batch_size, self.latent_size])) z = z * std + mean else: z = mean sem_ret['latent'] = z sem_ret['mean'] = mean sem_ret['logv'] = logv sem_ret = sem_ret </DeepExtract> syn_ret = self.encode_to_hidden(syn_ref, need_sort=True) <DeepExtract> hidden = syn_ret['hidden'] batch_size = hidden.size(1) hidden = hidden.permute(1, 0, 2).contiguous() if self.hidden_factor > 1: hidden = hidden.view(batch_size, self.hidden_size * self.hidden_factor) else: hidden = hidden.squeeze() mean = self.hidden2mean(hidden) logv = self.hidden2logv(hidden) if self.training: std = torch.exp(0.5 * logv) z = to_var(torch.randn([batch_size, self.latent_size])) z = z * std + mean else: z = mean syn_ret['latent'] = z syn_ret['mean'] = mean syn_ret['logv'] = logv syn_ret = syn_ret </DeepExtract> <DeepExtract> z = sem_ret['latent'] batch_size = z.size(0) hidden = self.latent2hidden(z) if self.hidden_factor > 1: hidden = hidden.view(batch_size, self.hidden_factor, self.hidden_size) hidden = hidden.permute(1, 0, 2) else: hidden = hidden.unsqueeze(0) sem_ret['decode_init'] = hidden sem_ret = sem_ret </DeepExtract> <DeepExtract> z = syn_ret['latent'] batch_size = z.size(0) hidden = self.latent2hidden(z) if self.hidden_factor > 1: hidden = hidden.view(batch_size, self.hidden_factor, self.hidden_size) hidden = hidden.permute(1, 0, 2) else: hidden = hidden.unsqueeze(0) syn_ret['decode_init'] = hidden syn_ret = syn_ret </DeepExtract> ret = dict() ret['latent'] = (sem_ret['latent'] + syn_ret['latent']) * 0.5 <DeepExtract> z = ret['latent'] batch_size = z.size(0) hidden = self.latent2hidden(z) if self.hidden_factor > 1: hidden = hidden.view(batch_size, self.hidden_factor, self.hidden_size) hidden = hidden.permute(1, 0, 2) else: hidden = hidden.unsqueeze(0) ret['decode_init'] = hidden ret = ret </DeepExtract> ret['res'] = self.decode_to_sentence(ret=ret) return ret
def eval_adv(self, sem_in, syn_ref): sem_ret = self.encode_to_hidden(sem_in) hidden = sem_ret['hidden'] batch_size = hidden.size(1) hidden = hidden.permute(1, 0, 2).contiguous() if self.hidden_factor > 1: hidden = hidden.view(batch_size, self.hidden_size * self.hidden_factor) else: hidden = hidden.squeeze() mean = self.hidden2mean(hidden) logv = self.hidden2logv(hidden) if self.training: std = torch.exp(0.5 * logv) z = to_var(torch.randn([batch_size, self.latent_size])) z = z * std + mean else: z = mean sem_ret['latent'] = z sem_ret['mean'] = mean sem_ret['logv'] = logv sem_ret = sem_ret syn_ret = self.encode_to_hidden(syn_ref, need_sort=True) hidden = syn_ret['hidden'] batch_size = hidden.size(1) hidden = hidden.permute(1, 0, 2).contiguous() if self.hidden_factor > 1: hidden = hidden.view(batch_size, self.hidden_size * self.hidden_factor) else: hidden = hidden.squeeze() mean = self.hidden2mean(hidden) logv = self.hidden2logv(hidden) if self.training: std = torch.exp(0.5 * logv) z = to_var(torch.randn([batch_size, self.latent_size])) z = z * std + mean else: z = mean syn_ret['latent'] = z syn_ret['mean'] = mean syn_ret['logv'] = logv syn_ret = syn_ret z = sem_ret['latent'] batch_size = z.size(0) hidden = self.latent2hidden(z) if self.hidden_factor > 1: hidden = hidden.view(batch_size, self.hidden_factor, self.hidden_size) hidden = hidden.permute(1, 0, 2) else: hidden = hidden.unsqueeze(0) sem_ret['decode_init'] = hidden sem_ret = sem_ret z = syn_ret['latent'] batch_size = z.size(0) hidden = self.latent2hidden(z) if self.hidden_factor > 1: hidden = hidden.view(batch_size, self.hidden_factor, self.hidden_size) hidden = hidden.permute(1, 0, 2) else: hidden = hidden.unsqueeze(0) syn_ret['decode_init'] = hidden syn_ret = syn_ret ret = dict() ret['latent'] = (sem_ret['latent'] + syn_ret['latent']) * 0.5 z = ret['latent'] batch_size = z.size(0) hidden = self.latent2hidden(z) if self.hidden_factor > 1: hidden = hidden.view(batch_size, self.hidden_factor, self.hidden_size) hidden = hidden.permute(1, 0, 2) else: hidden = hidden.unsqueeze(0) ret['decode_init'] = hidden ret = ret ret['res'] = self.decode_to_sentence(ret=ret) return ret
DSS-VAE
positive
def track_branches(branches=None, directory=None): """ Tracks all specified branches. :param branches: a list of branches that are to be tracked if not already tracked. If this is set to None then all remote branches will be tracked. :param directory: directory in which to run all commands :raises: subprocess.CalledProcessError if git command fails """ if type(branches) == str: branches = [branches] debug('track_branches(' + str(branches) + ', ' + str(directory) + ')') if branches == []: return <DeepExtract> cmd = 'git branch --no-color' output = check_output(cmd, shell=True, cwd=directory) output = output.splitlines() for token in output: if token.strip().startswith('*'): token = token[2:] if token == '(no branch)': current_branch = None current_branch = token current_branch = None </DeepExtract> try: <DeepExtract> cmd = 'git branch --no-color' if not True: cmd += ' -a' out = check_output(cmd, shell=True, cwd=directory) branches = [] for line in out.splitlines(): if line.count('HEAD -> ') > 0: continue if line.count('(no branch)') > 0: continue line = line.strip('*').strip() branches.append(line) local_branches = branches </DeepExtract> <DeepExtract> cmd = 'git branch --no-color' if not False: cmd += ' -a' out = check_output(cmd, shell=True, cwd=directory) branches = [] for line in out.splitlines(): if line.count('HEAD -> ') > 0: continue if line.count('(no branch)') > 0: continue line = line.strip('*').strip() branches.append(line) all_branches = branches </DeepExtract> untracked_branches = [] for branch in all_branches: if branch.startswith('remotes/'): if branch.count('/') >= 2: branch = '/'.join(branch.split('/')[2:]) if branch not in local_branches: untracked_branches.append(branch) if branches is not None: branches_to_track = [] for untracked in untracked_branches: if untracked in branches: branches_to_track.append(untracked) else: branches_to_track = untracked_branches debug('Tracking branches: ' + str(branches_to_track)) for branch in branches_to_track: <DeepExtract> def checkout_summarize(fail_msg, branch, directory): branch = '(no branch)' if branch is None else branch directory = os.getcwd() if directory is None else directory error("Failed to checkout to '{0}'".format(str(branch)) + ' because the working directory {0}'.format(str(fail_msg))) debug(" Working directory: '{0}'".format(str(directory))) debug(" Working branch: '{0}'".format(str(branch))) debug(" Has local changes: '{0}'".format(str(changes))) debug(" Has untrakced files: '{0}'".format(str(untracked))) pdb_hook() if not bloom.util._quiet and show_git_status: info('\n++ git status:\n', use_prefix=False) os.system('git status') return False debug('Checking out to ' + str(branch)) if branch == get_current_branch(directory): debug('Requested checkout reference is the same as the current branch') return True fail_msg = '' git_root = get_root(directory) if git_root is not None: changes = has_changes(directory) untracked = has_untracked_files(directory) branch = get_current_branch(directory) or 'could not determine branch' else: fail_msg = 'is not a git repository' if fail_msg == '' and changes: fail_msg = 'has local changes' if fail_msg == '' and untracked: fail_msg = 'has untracked files' try: if not changes and (not untracked): execute_command('git checkout "{0}"'.format(str(branch)), cwd=directory) except CalledProcessError as err: fail_msg = 'CalledProcessError: ' + str(err) if raise_exc: checkout_summarize(fail_msg, branch, directory) raise if fail_msg != '': return checkout_summarize(fail_msg, branch, directory) else: return True </DeepExtract> finally: if current_branch: <DeepExtract> def checkout_summarize(fail_msg, branch, directory): branch = '(no branch)' if branch is None else branch directory = os.getcwd() if directory is None else directory error("Failed to checkout to '{0}'".format(str(current_branch)) + ' because the working directory {0}'.format(str(fail_msg))) debug(" Working directory: '{0}'".format(str(directory))) debug(" Working branch: '{0}'".format(str(branch))) debug(" Has local changes: '{0}'".format(str(changes))) debug(" Has untrakced files: '{0}'".format(str(untracked))) pdb_hook() if not bloom.util._quiet and show_git_status: info('\n++ git status:\n', use_prefix=False) os.system('git status') return False debug('Checking out to ' + str(current_branch)) if current_branch == get_current_branch(directory): debug('Requested checkout reference is the same as the current branch') return True fail_msg = '' git_root = get_root(directory) if git_root is not None: changes = has_changes(directory) untracked = has_untracked_files(directory) branch = get_current_branch(directory) or 'could not determine branch' else: fail_msg = 'is not a git repository' if fail_msg == '' and changes: fail_msg = 'has local changes' if fail_msg == '' and untracked: fail_msg = 'has untracked files' try: if not changes and (not untracked): execute_command('git checkout "{0}"'.format(str(current_branch)), cwd=directory) except CalledProcessError as err: fail_msg = 'CalledProcessError: ' + str(err) if raise_exc: checkout_summarize(fail_msg, branch, directory) raise if fail_msg != '': return checkout_summarize(fail_msg, branch, directory) else: return True </DeepExtract>
def track_branches(branches=None, directory=None): """ Tracks all specified branches. :param branches: a list of branches that are to be tracked if not already tracked. If this is set to None then all remote branches will be tracked. :param directory: directory in which to run all commands :raises: subprocess.CalledProcessError if git command fails """ if type(branches) == str: branches = [branches] debug('track_branches(' + str(branches) + ', ' + str(directory) + ')') if branches == []: return cmd = 'git branch --no-color' output = check_output(cmd, shell=True, cwd=directory) output = output.splitlines() for token in output: if token.strip().startswith('*'): token = token[2:] if token == '(no branch)': current_branch = None current_branch = token current_branch = None try: cmd = 'git branch --no-color' if not True: cmd += ' -a' out = check_output(cmd, shell=True, cwd=directory) branches = [] for line in out.splitlines(): if line.count('HEAD -> ') > 0: continue if line.count('(no branch)') > 0: continue line = line.strip('*').strip() branches.append(line) local_branches = branches cmd = 'git branch --no-color' if not False: cmd += ' -a' out = check_output(cmd, shell=True, cwd=directory) branches = [] for line in out.splitlines(): if line.count('HEAD -> ') > 0: continue if line.count('(no branch)') > 0: continue line = line.strip('*').strip() branches.append(line) all_branches = branches untracked_branches = [] for branch in all_branches: if branch.startswith('remotes/'): if branch.count('/') >= 2: branch = '/'.join(branch.split('/')[2:]) if branch not in local_branches: untracked_branches.append(branch) if branches is not None: branches_to_track = [] for untracked in untracked_branches: if untracked in branches: branches_to_track.append(untracked) else: branches_to_track = untracked_branches debug('Tracking branches: ' + str(branches_to_track)) for branch in branches_to_track: def checkout_summarize(fail_msg, branch, directory): branch = '(no branch)' if branch is None else branch directory = os.getcwd() if directory is None else directory error("Failed to checkout to '{0}'".format(str(branch)) + ' because the working directory {0}'.format(str(fail_msg))) debug(" Working directory: '{0}'".format(str(directory))) debug(" Working branch: '{0}'".format(str(branch))) debug(" Has local changes: '{0}'".format(str(changes))) debug(" Has untrakced files: '{0}'".format(str(untracked))) pdb_hook() if not bloom.util._quiet and show_git_status: info('\n++ git status:\n', use_prefix=False) os.system('git status') return False debug('Checking out to ' + str(branch)) if branch == get_current_branch(directory): debug('Requested checkout reference is the same as the current branch') return True fail_msg = '' git_root = get_root(directory) if git_root is not None: changes = has_changes(directory) untracked = has_untracked_files(directory) branch = get_current_branch(directory) or 'could not determine branch' else: fail_msg = 'is not a git repository' if fail_msg == '' and changes: fail_msg = 'has local changes' if fail_msg == '' and untracked: fail_msg = 'has untracked files' try: if not changes and (not untracked): execute_command('git checkout "{0}"'.format(str(branch)), cwd=directory) except CalledProcessError as err: fail_msg = 'CalledProcessError: ' + str(err) if raise_exc: checkout_summarize(fail_msg, branch, directory) raise if fail_msg != '': return checkout_summarize(fail_msg, branch, directory) else: return True finally: if current_branch: def checkout_summarize(fail_msg, branch, directory): branch = '(no branch)' if branch is None else branch directory = os.getcwd() if directory is None else directory error("Failed to checkout to '{0}'".format(str(current_branch)) + ' because the working directory {0}'.format(str(fail_msg))) debug(" Working directory: '{0}'".format(str(directory))) debug(" Working branch: '{0}'".format(str(branch))) debug(" Has local changes: '{0}'".format(str(changes))) debug(" Has untrakced files: '{0}'".format(str(untracked))) pdb_hook() if not bloom.util._quiet and show_git_status: info('\n++ git status:\n', use_prefix=False) os.system('git status') return False debug('Checking out to ' + str(current_branch)) if current_branch == get_current_branch(directory): debug('Requested checkout reference is the same as the current branch') return True fail_msg = '' git_root = get_root(directory) if git_root is not None: changes = has_changes(directory) untracked = has_untracked_files(directory) branch = get_current_branch(directory) or 'could not determine branch' else: fail_msg = 'is not a git repository' if fail_msg == '' and changes: fail_msg = 'has local changes' if fail_msg == '' and untracked: fail_msg = 'has untracked files' try: if not changes and (not untracked): execute_command('git checkout "{0}"'.format(str(current_branch)), cwd=directory) except CalledProcessError as err: fail_msg = 'CalledProcessError: ' + str(err) if raise_exc: checkout_summarize(fail_msg, branch, directory) raise if fail_msg != '': return checkout_summarize(fail_msg, branch, directory) else: return True </DeepExtract>
bloom
positive
def update(self): <DeepExtract> lockflags = fcntl.LOCK_SH if self.base_set: lockflags |= fcntl.LOCK_NB try: f = open(self.path, 'r') try: fcntl.flock(f.fileno(), lockflags) ufp = cPickle.load(f) except ImportError: try: f.seek(0) data = f.read() data = data.replace('feedparser\n', 'feedparser_builtin\n', 1) ufp = cPickle.loads(data) except: ufp = 0 except: ufp = 0 finally: fcntl.flock(f.fileno(), fcntl.LOCK_UN) f.close() except: ufp = 0 ufp = ufp </DeepExtract> if not ufp: return 0 if not self.base_set: self.base_set = 1 if 'feed' in ufp and 'title' in ufp['feed']: replace = lambda x: x or ufp['feed']['title'] else: replace = lambda x: x or self.URL self.tags = [replace(x) for x in self.tags] <DeepExtract> newlist = [] for entry in ufp['entries']: if entry in self and entry not in newlist: centry = self[self.index(entry)] if not centry.updated and centry['canto_state'] != entry['canto_state']: centry['canto_state'] = entry['canto_state'] newlist.append(centry) continue nentry = {} nentry['id'] = entry['id'] nentry['feed'] = self.URL nentry['canto_state'] = entry['canto_state'] if 'title' not in entry: nentry['title'] = '' else: nentry['title'] = entry['title'] if 'title_detail' in entry: nentry['title_detail'] = entry['title_detail'] for pc in self.cfg.precache: if pc in entry: nentry[pc] = entry[pc] else: nentry[pc] = None if 'link' in entry: nentry['link'] = entry['link'] elif 'href' in entry: nentry['link'] = entry['href'] updated = STORY_SAVED if self.tags[0] != nentry['canto_state'][0]: nentry['canto_state'][0] = self.tags[0] updated = STORY_UPDATED for tag in self.tags[1:]: if tag not in nentry['canto_state']: nentry['canto_state'].append(tag) updated = STORY_UPDATED if nentry not in newlist: newlist.append(story.Story(nentry, self.path, updated)) del self[:] for item in newlist: if not self.filter or self.filter(self, item): list.append(self, item) </DeepExtract> <DeepExtract> if ufp == None: ufp = self.get_ufp() if not ufp: return changed = self.changed() if not changed: return for entry in changed: if entry not in ufp['entries']: continue old = ufp['entries'][ufp['entries'].index(entry)] if old['canto_state'] != entry['canto_state']: if entry.updated: if self.cfg.state_change_hook: add = [t for t in entry['canto_state'] if t not in old['canto_state']] rem = [t for t in old['canto_state'] if t not in entry['canto_state']] self.cfg.state_change_hook(self, entry, add, rem) old['canto_state'] = entry['canto_state'] else: entry['canto_state'] = old['canto_state'] f = open(self.path, 'r+') try: fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB) f.seek(0, 0) f.truncate() cPickle.dump(ufp, f) f.flush() for x in changed: x.updated = STORY_SAVED except: return 0 finally: fcntl.flock(f.fileno(), fcntl.LOCK_UN) f.close() del ufp return 1 </DeepExtract> return 1
def update(self): lockflags = fcntl.LOCK_SH if self.base_set: lockflags |= fcntl.LOCK_NB try: f = open(self.path, 'r') try: fcntl.flock(f.fileno(), lockflags) ufp = cPickle.load(f) except ImportError: try: f.seek(0) data = f.read() data = data.replace('feedparser\n', 'feedparser_builtin\n', 1) ufp = cPickle.loads(data) except: ufp = 0 except: ufp = 0 finally: fcntl.flock(f.fileno(), fcntl.LOCK_UN) f.close() except: ufp = 0 ufp = ufp if not ufp: return 0 if not self.base_set: self.base_set = 1 if 'feed' in ufp and 'title' in ufp['feed']: replace = lambda x: x or ufp['feed']['title'] else: replace = lambda x: x or self.URL self.tags = [replace(x) for x in self.tags] newlist = [] for entry in ufp['entries']: if entry in self and entry not in newlist: centry = self[self.index(entry)] if not centry.updated and centry['canto_state'] != entry['canto_state']: centry['canto_state'] = entry['canto_state'] newlist.append(centry) continue nentry = {} nentry['id'] = entry['id'] nentry['feed'] = self.URL nentry['canto_state'] = entry['canto_state'] if 'title' not in entry: nentry['title'] = '' else: nentry['title'] = entry['title'] if 'title_detail' in entry: nentry['title_detail'] = entry['title_detail'] for pc in self.cfg.precache: if pc in entry: nentry[pc] = entry[pc] else: nentry[pc] = None if 'link' in entry: nentry['link'] = entry['link'] elif 'href' in entry: nentry['link'] = entry['href'] updated = STORY_SAVED if self.tags[0] != nentry['canto_state'][0]: nentry['canto_state'][0] = self.tags[0] updated = STORY_UPDATED for tag in self.tags[1:]: if tag not in nentry['canto_state']: nentry['canto_state'].append(tag) updated = STORY_UPDATED if nentry not in newlist: newlist.append(story.Story(nentry, self.path, updated)) del self[:] for item in newlist: if not self.filter or self.filter(self, item): list.append(self, item) if ufp == None: ufp = self.get_ufp() if not ufp: return changed = self.changed() if not changed: return for entry in changed: if entry not in ufp['entries']: continue old = ufp['entries'][ufp['entries'].index(entry)] if old['canto_state'] != entry['canto_state']: if entry.updated: if self.cfg.state_change_hook: add = [t for t in entry['canto_state'] if t not in old['canto_state']] rem = [t for t in old['canto_state'] if t not in entry['canto_state']] self.cfg.state_change_hook(self, entry, add, rem) old['canto_state'] = entry['canto_state'] else: entry['canto_state'] = old['canto_state'] f = open(self.path, 'r+') try: fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB) f.seek(0, 0) f.truncate() cPickle.dump(ufp, f) f.flush() for x in changed: x.updated = STORY_SAVED except: return 0 finally: fcntl.flock(f.fileno(), fcntl.LOCK_UN) f.close() del ufp return 1 return 1
Canto
positive
def execute(self, context): <DeepExtract> tree = context.space_data.node_tree if tree.nodes.active: while tree.nodes.active != context.active_node: tree = tree.nodes.active.node_tree (nodes, links) = (tree.nodes, tree.links) </DeepExtract> selected = context.selected_nodes bpy.ops.node.duplicate_move_keep_inputs() new_nodes = context.selected_nodes bpy.ops.node.select_all(action='DESELECT') for node in selected: node.select = True bpy.ops.node.delete_reconnect() for new_node in new_nodes: new_node.select = True bpy.ops.transform.translate('INVOKE_DEFAULT') return {'FINISHED'}
def execute(self, context): tree = context.space_data.node_tree if tree.nodes.active: while tree.nodes.active != context.active_node: tree = tree.nodes.active.node_tree (nodes, links) = (tree.nodes, tree.links) selected = context.selected_nodes bpy.ops.node.duplicate_move_keep_inputs() new_nodes = context.selected_nodes bpy.ops.node.select_all(action='DESELECT') for node in selected: node.select = True bpy.ops.node.delete_reconnect() for new_node in new_nodes: new_node.select = True bpy.ops.transform.translate('INVOKE_DEFAULT') return {'FINISHED'}
blender-architecture-scripts
positive
def clear(self): <DeepExtract> not_running = [] extension.pre_cluster_stop(self) for node in list(self.nodes.values()): if not node.stop(wait=wait, signal_event=signal_event, **kwargs): not_running.append(node) extension.post_cluster_stop(self) return not_running </DeepExtract> for node in list(self.nodes.values()): node.clear()
def clear(self): not_running = [] extension.pre_cluster_stop(self) for node in list(self.nodes.values()): if not node.stop(wait=wait, signal_event=signal_event, **kwargs): not_running.append(node) extension.post_cluster_stop(self) return not_running for node in list(self.nodes.values()): node.clear()
ccm
positive
def recursive_update(d, u): """ Recursive updates d with values from u Args: d (dict): dict to update u (dict): updates to propagate """ for (k, v) in u.items(): if k in d: if isinstance(v, dict) and isinstance(d[k], dict): <DeepExtract> for (k, v) in v.items(): if k in d[k]: if isinstance(v, dict) and isinstance(d[k][k], dict): recursive_update(d[k][k], v) else: d[k][k] = v else: d[k][k] = v </DeepExtract> else: d[k] = v else: d[k] = v
def recursive_update(d, u): """ Recursive updates d with values from u Args: d (dict): dict to update u (dict): updates to propagate """ for (k, v) in u.items(): if k in d: if isinstance(v, dict) and isinstance(d[k], dict): for (k, v) in v.items(): if k in d[k]: if isinstance(v, dict) and isinstance(d[k][k], dict): recursive_update(d[k][k], v) else: d[k][k] = v else: d[k][k] = v else: d[k] = v else: d[k] = v
atomate
positive
def create(self, name, cluster_type, connection_settings, autoscale=True): <DeepExtract> if not self.has_permissions('clusters.add_cluster', obj): self.raise_no_permissions('clusters.add_cluster') </DeepExtract> try: obj = models.CMCluster.objects.create(name=name, cluster_type=cluster_type, connection_settings=connection_settings, autoscale=autoscale) <DeepExtract> cluster = resources.Cluster(self, obj) cluster.nodes = CMClusterNodeService(self.context, cluster) cluster.autoscalers = CMClusterAutoScalerService(self.context, cluster) cluster = cluster </DeepExtract> return cluster except IntegrityError as e: raise exceptions.CMDuplicateNameException('A cluster with name: %s already exists' % name)
def create(self, name, cluster_type, connection_settings, autoscale=True): if not self.has_permissions('clusters.add_cluster', obj): self.raise_no_permissions('clusters.add_cluster') try: obj = models.CMCluster.objects.create(name=name, cluster_type=cluster_type, connection_settings=connection_settings, autoscale=autoscale) cluster = resources.Cluster(self, obj) cluster.nodes = CMClusterNodeService(self.context, cluster) cluster.autoscalers = CMClusterAutoScalerService(self.context, cluster) cluster = cluster return cluster except IntegrityError as e: raise exceptions.CMDuplicateNameException('A cluster with name: %s already exists' % name)
cloudman
positive
def continuous_mountain_car_arg_parser(): """ Create an argparse.ArgumentParser for run_mujoco.py. """ <DeepExtract> import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) </DeepExtract> parser.add_argument('--env', help='environment ID', type=str, default='MountainCarContinuous-v0') parser.add_argument('--seed', help='RNG seed', type=int, default=0) parser.add_argument('--num-timesteps', type=int, default=int(10000000.0)) return parser
def continuous_mountain_car_arg_parser(): """ Create an argparse.ArgumentParser for run_mujoco.py. """ import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--env', help='environment ID', type=str, default='MountainCarContinuous-v0') parser.add_argument('--seed', help='RNG seed', type=int, default=0) parser.add_argument('--num-timesteps', type=int, default=int(10000000.0)) return parser
deepdrive
positive
def decode_pinsrw(state): <DeepExtract> if state.op_prefix: state.op_prefix = False type = 1 elif state.rep == 'repne': state.rep = None type = 2 elif state.rep == 'repe': state.rep = None type = 3 else: type = 0 </DeepExtract> <DeepExtract> if len(state.opcode) < 1: state.invalid = True state.insufficient_length = True state.opcode = '' rm = 204 val = ord(state.opcode[0]) rm = val </DeepExtract> mod_field = rm >> 6 & 3 entry = SSETable[state.result.operation] if mod_field == 3: op_entry = entry[0][type] else: op_entry = entry[1][type] state.result.operation = op_entry[0] <DeepExtract> reg = decode_rm(state, get_operand_for_sse_entry_type(state, op_entry[2], 1), get_reg_list_for_sse_entry_type(state, op_entry[2]), get_size_for_sse_entry_type(state, op_entry[2])) if get_operand_for_sse_entry_type(state, op_entry[1], 0) != None: if state.rex_reg: reg_offset = 8 else: reg_offset = 0 get_operand_for_sse_entry_type(state, op_entry[1], 0).size = get_size_for_sse_entry_type(state, op_entry[1]) get_operand_for_sse_entry_type(state, op_entry[1], 0).operand = get_reg_list_for_sse_entry_type(state, op_entry[1])[reg + reg_offset] </DeepExtract> if state.flags & DEC_FLAG_INC_OPERATION_FOR_64: <DeepExtract> if op_entry[1] == 'gpr_32_or_64' and state.final_op_size == 8: state.result.operation = state.result.operation[1] elif op_entry[1] == 'gpr_32_or_64': state.result.operation = state.result.operation[0] </DeepExtract> <DeepExtract> if op_entry[2] == 'gpr_32_or_64' and state.final_op_size == 8: state.result.operation = state.result.operation[1] elif op_entry[2] == 'gpr_32_or_64': state.result.operation = state.result.operation[0] </DeepExtract> <DeepExtract> state.result.operands[2].operand = 'imm' state.result.operands[2].size = 1 state.result.operands[2].immediate = read8(state) </DeepExtract> if state.operand1.operand == 'mem': state.operand1.size = 2
def decode_pinsrw(state): if state.op_prefix: state.op_prefix = False type = 1 elif state.rep == 'repne': state.rep = None type = 2 elif state.rep == 'repe': state.rep = None type = 3 else: type = 0 if len(state.opcode) < 1: state.invalid = True state.insufficient_length = True state.opcode = '' rm = 204 val = ord(state.opcode[0]) rm = val mod_field = rm >> 6 & 3 entry = SSETable[state.result.operation] if mod_field == 3: op_entry = entry[0][type] else: op_entry = entry[1][type] state.result.operation = op_entry[0] reg = decode_rm(state, get_operand_for_sse_entry_type(state, op_entry[2], 1), get_reg_list_for_sse_entry_type(state, op_entry[2]), get_size_for_sse_entry_type(state, op_entry[2])) if get_operand_for_sse_entry_type(state, op_entry[1], 0) != None: if state.rex_reg: reg_offset = 8 else: reg_offset = 0 get_operand_for_sse_entry_type(state, op_entry[1], 0).size = get_size_for_sse_entry_type(state, op_entry[1]) get_operand_for_sse_entry_type(state, op_entry[1], 0).operand = get_reg_list_for_sse_entry_type(state, op_entry[1])[reg + reg_offset] if state.flags & DEC_FLAG_INC_OPERATION_FOR_64: if op_entry[1] == 'gpr_32_or_64' and state.final_op_size == 8: state.result.operation = state.result.operation[1] elif op_entry[1] == 'gpr_32_or_64': state.result.operation = state.result.operation[0] if op_entry[2] == 'gpr_32_or_64' and state.final_op_size == 8: state.result.operation = state.result.operation[1] elif op_entry[2] == 'gpr_32_or_64': state.result.operation = state.result.operation[0] state.result.operands[2].operand = 'imm' state.result.operands[2].size = 1 state.result.operands[2].immediate = read8(state) if state.operand1.operand == 'mem': state.operand1.size = 2
deprecated-binaryninja-python
positive
def _convert_ohms(self, val_str): <DeepExtract> match_p10 = re.search('(e|E)([-+]?[0-9]+)', val_str) p10 = 0 if match_p10 is None else int(match_p10.groups()[1]) match_sig_figs = re.search('\\.([0-9]*[1-9])', val_str) explicit_sig_figs = 0 if match_sig_figs is None else len(match_sig_figs.groups(1)[0]) decimal_sig_figs = explicit_sig_figs - p10 </DeepExtract> num = float(val_str) if num < 0.001 or decimal_sig_figs > 6: return ('{:.3f}'.format(num * 1000000.0), 'µOhms') elif num < 1 or decimal_sig_figs > 3: return ('{:.3f}'.format(num * 1000.0), 'mOhms') elif num < 1000.0 or decimal_sig_figs > 0: return ('{:.3f}'.format(num), 'Ohms') elif num < 1000000.0 or decimal_sig_figs > -3: return ('{:.3f}'.format(num * 0.001), 'kOhms') else: return ('{:.3f}'.format(num * 1e-06), 'MOhms')
def _convert_ohms(self, val_str): match_p10 = re.search('(e|E)([-+]?[0-9]+)', val_str) p10 = 0 if match_p10 is None else int(match_p10.groups()[1]) match_sig_figs = re.search('\\.([0-9]*[1-9])', val_str) explicit_sig_figs = 0 if match_sig_figs is None else len(match_sig_figs.groups(1)[0]) decimal_sig_figs = explicit_sig_figs - p10 num = float(val_str) if num < 0.001 or decimal_sig_figs > 6: return ('{:.3f}'.format(num * 1000000.0), 'µOhms') elif num < 1 or decimal_sig_figs > 3: return ('{:.3f}'.format(num * 1000.0), 'mOhms') elif num < 1000.0 or decimal_sig_figs > 0: return ('{:.3f}'.format(num), 'Ohms') elif num < 1000000.0 or decimal_sig_figs > -3: return ('{:.3f}'.format(num * 0.001), 'kOhms') else: return ('{:.3f}'.format(num * 1e-06), 'MOhms')
beep
positive
def stop(self): self.companion.controller.set_mode() <DeepExtract> ret = True </DeepExtract> return ret
def stop(self): self.companion.controller.set_mode() ret = True return ret
bootloader_instrumentation_suite
positive
def test_match_range_range(): rule = textwrap.dedent('\n rule:\n meta:\n name: test rule\n features:\n - count(number(100)): (2, 3)\n ') r = capa.rules.Rule.from_yaml(rule) <DeepExtract> (features1, matches1) = capa.engine.match([r], {capa.features.insn.Number(100): {1, 2}}, 0) ruleset = capa.rules.RuleSet([r]) (features2, matches2) = ruleset.match(scope, {capa.features.insn.Number(100): {1, 2}}, 0) for (feature, locations) in features1.items(): assert feature in features2 assert locations == features2[feature] for (rulename, results) in matches1.items(): assert rulename in matches2 assert len(results) == len(matches2[rulename]) (_, matches) = (features1, matches1) </DeepExtract> assert 'test rule' in matches <DeepExtract> (features1, matches1) = capa.engine.match([r], {capa.features.insn.Number(100): {1, 2, 3}}, 0) ruleset = capa.rules.RuleSet([r]) (features2, matches2) = ruleset.match(scope, {capa.features.insn.Number(100): {1, 2, 3}}, 0) for (feature, locations) in features1.items(): assert feature in features2 assert locations == features2[feature] for (rulename, results) in matches1.items(): assert rulename in matches2 assert len(results) == len(matches2[rulename]) (_, matches) = (features1, matches1) </DeepExtract> assert 'test rule' in matches <DeepExtract> (features1, matches1) = capa.engine.match([r], {capa.features.insn.Number(100): {1}}, 0) ruleset = capa.rules.RuleSet([r]) (features2, matches2) = ruleset.match(scope, {capa.features.insn.Number(100): {1}}, 0) for (feature, locations) in features1.items(): assert feature in features2 assert locations == features2[feature] for (rulename, results) in matches1.items(): assert rulename in matches2 assert len(results) == len(matches2[rulename]) (_, matches) = (features1, matches1) </DeepExtract> assert 'test rule' not in matches <DeepExtract> (features1, matches1) = capa.engine.match([r], {capa.features.insn.Number(100): {1, 2, 3, 4}}, 0) ruleset = capa.rules.RuleSet([r]) (features2, matches2) = ruleset.match(scope, {capa.features.insn.Number(100): {1, 2, 3, 4}}, 0) for (feature, locations) in features1.items(): assert feature in features2 assert locations == features2[feature] for (rulename, results) in matches1.items(): assert rulename in matches2 assert len(results) == len(matches2[rulename]) (_, matches) = (features1, matches1) </DeepExtract> assert 'test rule' not in matches
def test_match_range_range(): rule = textwrap.dedent('\n rule:\n meta:\n name: test rule\n features:\n - count(number(100)): (2, 3)\n ') r = capa.rules.Rule.from_yaml(rule) (features1, matches1) = capa.engine.match([r], {capa.features.insn.Number(100): {1, 2}}, 0) ruleset = capa.rules.RuleSet([r]) (features2, matches2) = ruleset.match(scope, {capa.features.insn.Number(100): {1, 2}}, 0) for (feature, locations) in features1.items(): assert feature in features2 assert locations == features2[feature] for (rulename, results) in matches1.items(): assert rulename in matches2 assert len(results) == len(matches2[rulename]) (_, matches) = (features1, matches1) assert 'test rule' in matches (features1, matches1) = capa.engine.match([r], {capa.features.insn.Number(100): {1, 2, 3}}, 0) ruleset = capa.rules.RuleSet([r]) (features2, matches2) = ruleset.match(scope, {capa.features.insn.Number(100): {1, 2, 3}}, 0) for (feature, locations) in features1.items(): assert feature in features2 assert locations == features2[feature] for (rulename, results) in matches1.items(): assert rulename in matches2 assert len(results) == len(matches2[rulename]) (_, matches) = (features1, matches1) assert 'test rule' in matches (features1, matches1) = capa.engine.match([r], {capa.features.insn.Number(100): {1}}, 0) ruleset = capa.rules.RuleSet([r]) (features2, matches2) = ruleset.match(scope, {capa.features.insn.Number(100): {1}}, 0) for (feature, locations) in features1.items(): assert feature in features2 assert locations == features2[feature] for (rulename, results) in matches1.items(): assert rulename in matches2 assert len(results) == len(matches2[rulename]) (_, matches) = (features1, matches1) assert 'test rule' not in matches (features1, matches1) = capa.engine.match([r], {capa.features.insn.Number(100): {1, 2, 3, 4}}, 0) ruleset = capa.rules.RuleSet([r]) (features2, matches2) = ruleset.match(scope, {capa.features.insn.Number(100): {1, 2, 3, 4}}, 0) for (feature, locations) in features1.items(): assert feature in features2 assert locations == features2[feature] for (rulename, results) in matches1.items(): assert rulename in matches2 assert len(results) == len(matches2[rulename]) (_, matches) = (features1, matches1) assert 'test rule' not in matches
capa
positive
@pytest.mark.parametrize('docstring, kwargs', [(None, None), (None, {}), ('my docstring', {}), (None, {'cached': True}), (None, {'annotation_based': True}), ('my docstring', {'annotation_based': True, 'cached': True})]) def test_initializer(self, docstring, kwargs): def func(): pass func.__doc__ = docstring <DeepExtract> if kwargs is not None: queryable_property = queryable_property(**kwargs) prop = queryable_property(func) </DeepExtract> assert prop.__doc__ == (docstring or None) kwargs = kwargs or {} assert prop.cached is (kwargs.get('cached') or False) annotation_based = kwargs.get('annotation_based', False) assert isinstance(prop, AnnotationGetterMixin) is annotation_based if annotation_based: assert prop.get_value == six.create_bound_method(six.get_unbound_function(AnnotationGetterMixin.get_value), prop) assert prop.get_annotation is func else: assert prop.get_value is func assert prop.get_annotation is None
@pytest.mark.parametrize('docstring, kwargs', [(None, None), (None, {}), ('my docstring', {}), (None, {'cached': True}), (None, {'annotation_based': True}), ('my docstring', {'annotation_based': True, 'cached': True})]) def test_initializer(self, docstring, kwargs): def func(): pass func.__doc__ = docstring if kwargs is not None: queryable_property = queryable_property(**kwargs) prop = queryable_property(func) assert prop.__doc__ == (docstring or None) kwargs = kwargs or {} assert prop.cached is (kwargs.get('cached') or False) annotation_based = kwargs.get('annotation_based', False) assert isinstance(prop, AnnotationGetterMixin) is annotation_based if annotation_based: assert prop.get_value == six.create_bound_method(six.get_unbound_function(AnnotationGetterMixin.get_value), prop) assert prop.get_annotation is func else: assert prop.get_value is func assert prop.get_annotation is None
django-queryable-properties
positive
def pre_save(self, model_instance, add): value = super().pre_save(model_instance, add) <DeepExtract> excerpt = [] default_excerpt = [] paras_seen = 0 for line in value.content.splitlines(): if not line.strip(): paras_seen += 1 if paras_seen < SPLIT_DEFAULT_PARAGRAPHS: default_excerpt.append(line) if line.strip() == SPLIT_MARKER: excerpt = '\n'.join(excerpt) excerpt.append(line) excerpt = '\n'.join(default_excerpt) </DeepExtract> setattr(model_instance, _excerpt_field_name(self.attname), excerpt) return value.content
def pre_save(self, model_instance, add): value = super().pre_save(model_instance, add) excerpt = [] default_excerpt = [] paras_seen = 0 for line in value.content.splitlines(): if not line.strip(): paras_seen += 1 if paras_seen < SPLIT_DEFAULT_PARAGRAPHS: default_excerpt.append(line) if line.strip() == SPLIT_MARKER: excerpt = '\n'.join(excerpt) excerpt.append(line) excerpt = '\n'.join(default_excerpt) setattr(model_instance, _excerpt_field_name(self.attname), excerpt) return value.content
django-model-utils
positive
def compute_result(self, *args): d_a = args[0] d_b = args[1] n = args[2] sc1 = extend_to_32_bits((d_a >> 16 == 32768) & (d_b & 65535 == 32768) & (n == 1).cast_to(Type.int_32)) sc0 = extend_to_32_bits((d_a & 65535 == 32768) & (d_b >> 16 == 32768) & (n == 1).cast_to(Type.int_32)) mul_res1 = 2147483647 & sc1 | extract_16s(d_a, 1) * extract_16s(d_b, 0) << n.value & (sc1 ^ 4294967295) mul_res0 = 2147483647 & sc0 | extract_16s(d_a, 0) * extract_16s(d_b, 1) << n.value & (sc0 ^ 4294967295) e_d_0 = self.get('d{0}'.format(self.data['d']), Type.int_32) e_d_1 = self.get('d{0}'.format(self.data['d'] + 1), Type.int_32) result_w1 = (e_d_1 - mul_res1).cast_to(Type.int_64) result_w0 = (e_d_0 + mul_res0).cast_to(Type.int_64) result_w1_ssov = ssov32(result_w1, self.max_pos, self.max_neg) result_w0_ssov = ssov32(result_w0, self.max_pos, self.max_neg) self.put(result_w0_ssov, 'd{0}'.format(self.data['c'])) self.put(result_w1_ssov, 'd{0}'.format(self.data['c'] + 1)) c = 0 ov_w0 = overflow_64(result_w0).cast_to(Type.int_32) ov_w1 = overflow_64(result_w1).cast_to(Type.int_32) v = ov_w1 | ov_w0 aov_w0 = advanced_overflow_64(result_w0).cast_to(Type.int_32) aov_w1 = advanced_overflow_64(result_w1).cast_to(Type.int_32) av = aov_w1 | aov_w0 <DeepExtract> psw = self.get('psw', Type.int_32) </DeepExtract> cond_sv = v == 0 cond_sav = av == 0 sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1) sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1) psw = set_usb(psw, c, v, sv, av, sav) self.put(psw, 'psw')
def compute_result(self, *args): d_a = args[0] d_b = args[1] n = args[2] sc1 = extend_to_32_bits((d_a >> 16 == 32768) & (d_b & 65535 == 32768) & (n == 1).cast_to(Type.int_32)) sc0 = extend_to_32_bits((d_a & 65535 == 32768) & (d_b >> 16 == 32768) & (n == 1).cast_to(Type.int_32)) mul_res1 = 2147483647 & sc1 | extract_16s(d_a, 1) * extract_16s(d_b, 0) << n.value & (sc1 ^ 4294967295) mul_res0 = 2147483647 & sc0 | extract_16s(d_a, 0) * extract_16s(d_b, 1) << n.value & (sc0 ^ 4294967295) e_d_0 = self.get('d{0}'.format(self.data['d']), Type.int_32) e_d_1 = self.get('d{0}'.format(self.data['d'] + 1), Type.int_32) result_w1 = (e_d_1 - mul_res1).cast_to(Type.int_64) result_w0 = (e_d_0 + mul_res0).cast_to(Type.int_64) result_w1_ssov = ssov32(result_w1, self.max_pos, self.max_neg) result_w0_ssov = ssov32(result_w0, self.max_pos, self.max_neg) self.put(result_w0_ssov, 'd{0}'.format(self.data['c'])) self.put(result_w1_ssov, 'd{0}'.format(self.data['c'] + 1)) c = 0 ov_w0 = overflow_64(result_w0).cast_to(Type.int_32) ov_w1 = overflow_64(result_w1).cast_to(Type.int_32) v = ov_w1 | ov_w0 aov_w0 = advanced_overflow_64(result_w0).cast_to(Type.int_32) aov_w1 = advanced_overflow_64(result_w1).cast_to(Type.int_32) av = aov_w1 | aov_w0 psw = self.get('psw', Type.int_32) cond_sv = v == 0 cond_sav = av == 0 sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1) sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1) psw = set_usb(psw, c, v, sv, av, sav) self.put(psw, 'psw')
angr-platforms
positive
def __call__(self, *inputs, **kwargs): if not self.ctx_list: return self.module(*inputs, **kwargs) <DeepExtract> def split_map(obj): if isinstance(obj, NDArray): (inputs, kwargs) = split_and_load(obj, self.ctx_list, batch_axis, even_split=False) if isinstance(obj, tuple) and len(obj) > 0: (inputs, kwargs) = list(zip(*map(split_map, obj))) if isinstance(obj, list) and len(obj) > 0: (inputs, kwargs) = list(map(list, zip(*map(split_map, obj)))) if isinstance(obj, dict) and len(obj) > 0: (inputs, kwargs) = list(map(type(obj), zip(*map(split_map, obj.items())))) (inputs, kwargs) = [obj for targets in self.ctx_list] inputs = split_map(inputs) if inputs else [] kwargs = split_map(kwargs) if kwargs else [] if len(inputs) < len(kwargs): inputs.extend([() for _ in range(len(kwargs) - len(inputs))]) elif len(kwargs) < len(inputs): kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))]) inputs = tuple(inputs) kwargs = tuple(kwargs) (inputs, kwargs) = (inputs, kwargs) </DeepExtract> assert len(inputs) == len(self.ctx_list) if len(self.ctx_list) == 1: return tuple([tuple_map(self.module(*inputs[0], **kwargs[0]))]) return parallel_apply(self.module, inputs, kwargs, self.sync)
def __call__(self, *inputs, **kwargs): if not self.ctx_list: return self.module(*inputs, **kwargs) def split_map(obj): if isinstance(obj, NDArray): (inputs, kwargs) = split_and_load(obj, self.ctx_list, batch_axis, even_split=False) if isinstance(obj, tuple) and len(obj) > 0: (inputs, kwargs) = list(zip(*map(split_map, obj))) if isinstance(obj, list) and len(obj) > 0: (inputs, kwargs) = list(map(list, zip(*map(split_map, obj)))) if isinstance(obj, dict) and len(obj) > 0: (inputs, kwargs) = list(map(type(obj), zip(*map(split_map, obj.items())))) (inputs, kwargs) = [obj for targets in self.ctx_list] inputs = split_map(inputs) if inputs else [] kwargs = split_map(kwargs) if kwargs else [] if len(inputs) < len(kwargs): inputs.extend([() for _ in range(len(kwargs) - len(inputs))]) elif len(kwargs) < len(inputs): kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))]) inputs = tuple(inputs) kwargs = tuple(kwargs) (inputs, kwargs) = (inputs, kwargs) assert len(inputs) == len(self.ctx_list) if len(self.ctx_list) == 1: return tuple([tuple_map(self.module(*inputs[0], **kwargs[0]))]) return parallel_apply(self.module, inputs, kwargs, self.sync)
cascade_rcnn_gluon
positive
def _generate_linear_classification_data(num_samples: int, noise_std: float): <DeepExtract> num_features = int(np.random.choice(20, 1) + 1) coefficients = np.random.uniform(-5, 5, num_features) X = np.random.normal(0, 1, (num_samples, num_features)) y = np.sum(coefficients * X, axis=1) + np.random.normal(0, noise_std, num_samples) (X, y, _) = (X, y, coefficients) </DeepExtract> y = y > np.median(y) return (X, y.astype(str))
def _generate_linear_classification_data(num_samples: int, noise_std: float): num_features = int(np.random.choice(20, 1) + 1) coefficients = np.random.uniform(-5, 5, num_features) X = np.random.normal(0, 1, (num_samples, num_features)) y = np.sum(coefficients * X, axis=1) + np.random.normal(0, noise_std, num_samples) (X, y, _) = (X, y, coefficients) y = y > np.median(y) return (X, y.astype(str))
dowhy
positive
def run(self): """Configures glibc for AppRun 3""" self._glibc_module_files = self.context.app_dir.find(file_matching_patterns.glibc) if self._glibc_module_files: self._module_dir.mkdir(parents=True, exist_ok=True) <DeepExtract> for file in self.context.app_dir.files.values(): if file.interpreter and (not self._is_file_in_a_module(file)) and (not file.path.is_symlink()): binary = lief.parse(file.path.__str__()) self._patch_binary_interpreter_path(binary) </DeepExtract> self.context.app_dir.move_files(self._glibc_module_files, self._module_dir) <DeepExtract> sys_root = pathlib.Path('/') for binary_interpreter in self.context.app_dir.binary_interpreters: binary_interpreter_path = self.context.app_dir.path / binary_interpreter.__str__().strip('/') binary_interpreter_path.parent.mkdir(parents=True, exist_ok=True) binary_interpreter_path.symlink_to(sys_root / binary_interpreter) </DeepExtract> <DeepExtract> for interpreter_path in self.context.app_dir.script_interpreters: rel_path = pathlib.Path(interpreter_path.strip('/')) expected_path = self.context.app_dir.path / rel_path if expected_path.exists(): mirror_path = self._module_dir / rel_path rel_mirror_path = os.path.relpath(expected_path, mirror_path.parent) mirror_path.parent.mkdir(parents=True, exist_ok=True) logging.info('Linking script interpreter: %s', interpreter_path) mirror_path.symlink_to(rel_mirror_path) else: logging.warning('Script interpreter not found in AppDir: %s', interpreter_path) </DeepExtract> <DeepExtract> library_paths = set() for module_file in self._glibc_module_files: if module_file.soname: library_paths.add(module_file.path.parent.__str__()) library_paths = library_paths </DeepExtract> <DeepExtract> glibc_check_binary_path = self.context.binaries_resolver.resolve_check_glibc_binary(self.context.main_arch) glibc_check_binary_target_path = self._module_dir / 'check' glibc_check_binary_target_path.parent.mkdir(parents=True, exist_ok=True) shutil.copy(glibc_check_binary_path, glibc_check_binary_target_path) os.chmod(glibc_check_binary_target_path, 493) </DeepExtract> <DeepExtract> library_paths = [replace_app_dir_in_path(self.context.app_dir.path, path) for path in library_paths] runtime_dir = '$APPDIR/' + self._module_dir.relative_to(self.context.app_dir.path).__str__() config = {'version': '1.0', 'check': {'required_glibc': self._find_bundled_glibc_version()}, 'module': {'runtime_dir': runtime_dir, 'library_paths': library_paths}} glibc_module_config_path = self._module_dir / 'config' apprun_utils.write_config_file(config, glibc_module_config_path) </DeepExtract>
def run(self): """Configures glibc for AppRun 3""" self._glibc_module_files = self.context.app_dir.find(file_matching_patterns.glibc) if self._glibc_module_files: self._module_dir.mkdir(parents=True, exist_ok=True) for file in self.context.app_dir.files.values(): if file.interpreter and (not self._is_file_in_a_module(file)) and (not file.path.is_symlink()): binary = lief.parse(file.path.__str__()) self._patch_binary_interpreter_path(binary) self.context.app_dir.move_files(self._glibc_module_files, self._module_dir) sys_root = pathlib.Path('/') for binary_interpreter in self.context.app_dir.binary_interpreters: binary_interpreter_path = self.context.app_dir.path / binary_interpreter.__str__().strip('/') binary_interpreter_path.parent.mkdir(parents=True, exist_ok=True) binary_interpreter_path.symlink_to(sys_root / binary_interpreter) for interpreter_path in self.context.app_dir.script_interpreters: rel_path = pathlib.Path(interpreter_path.strip('/')) expected_path = self.context.app_dir.path / rel_path if expected_path.exists(): mirror_path = self._module_dir / rel_path rel_mirror_path = os.path.relpath(expected_path, mirror_path.parent) mirror_path.parent.mkdir(parents=True, exist_ok=True) logging.info('Linking script interpreter: %s', interpreter_path) mirror_path.symlink_to(rel_mirror_path) else: logging.warning('Script interpreter not found in AppDir: %s', interpreter_path) library_paths = set() for module_file in self._glibc_module_files: if module_file.soname: library_paths.add(module_file.path.parent.__str__()) library_paths = library_paths glibc_check_binary_path = self.context.binaries_resolver.resolve_check_glibc_binary(self.context.main_arch) glibc_check_binary_target_path = self._module_dir / 'check' glibc_check_binary_target_path.parent.mkdir(parents=True, exist_ok=True) shutil.copy(glibc_check_binary_path, glibc_check_binary_target_path) os.chmod(glibc_check_binary_target_path, 493) library_paths = [replace_app_dir_in_path(self.context.app_dir.path, path) for path in library_paths] runtime_dir = '$APPDIR/' + self._module_dir.relative_to(self.context.app_dir.path).__str__() config = {'version': '1.0', 'check': {'required_glibc': self._find_bundled_glibc_version()}, 'module': {'runtime_dir': runtime_dir, 'library_paths': library_paths}} glibc_module_config_path = self._module_dir / 'config' apprun_utils.write_config_file(config, glibc_module_config_path) </DeepExtract>
appimage-builder
positive
def generate_rows(self, dataset_schema=None, dataset_partitioning=None, partition_id=None, records_limit=-1): from_date = datetime.datetime.strptime(self.from_date, '%Y-%m-%d') to_date = datetime.datetime.strptime(self.to_date, '%Y-%m-%d') if to_date < from_date: raise ValueError('The end date must occur after the start date') list_datetimes = [from_date + datetime.timedelta(days=x) for x in range((to_date - from_date).days + 1)] logger.info('Forecast.io plugin - List of dates: %s' % ', '.join([d.strftime('%d/%m/%Y') for d in list_datetimes])) <DeepExtract> if self.cache_file: logger.info('Forecast.io plugin - Loading cache (%s)' % self.cache_file) with open(self.cache_file, 'r') as f: self.cache_data = json.load(f) f.close() </DeepExtract> for day in list_datetimes: <DeepExtract> if not day: result = None day_key = day.strftime('%Y-%m-%dT00:00:00') if day_key in self.cache_data.keys(): logger.info('Forecast.io plugin - Already in cache for %s' % day_key) result = self.cache_data.get(day_key) else: logger.info('Forecast.io plugin - Request for %s' % day_key) if self.api_limit > -1 and self.api_calls >= self.api_limit: logger.info('Forecast.io plugin - Limit reached, no call for %s (cur=%d lim=%d)' % (day_key, self.api_calls, self.api_limit)) result = {'result': 'Limit reached. No call.'} headers = {'Accept-Encoding': 'gzip', 'Accept': 'application/json'} params = {'lang': 'en', 'units': 'si', 'exclude': 'currently,minutely'} r = requests.get(url='https://api.darksky.net/forecast/%s/%s,%s,%s' % (self.api_key, self.latitude, self.longitude, day_key), params=params, headers=headers) if r.status_code != 200: logger.info('Forecast.io plugin - Error in request (status code: %s)' % r.status_code) logger.info('Forecast.io plugin - Response: %s' % r.text) r.raise_for_status() sys.exit() result = r.json() self.api_calls = r.headers.get('X-Forecast-API-Calls') logger.info('Forecast.io plugin - X-Forecast-API-Calls: %s' % self.api_calls) if self.api_calls is not None: self.api_calls = int(self.api_calls) else: self.api_calls = -1 if self.cache_file and day < datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0): logger.info('Forecast.io plugin - Adding to cache: %s' % day_key) self.cache_data[day_key] = result sleep(0.1) result = result </DeepExtract> yield {'day': day.strftime('%Y-%m-%d'), 'day_date': day.strftime('%Y-%m-%dT00:00:00.000Z'), 'daily_data': json.dumps(result['daily']['data'][0]) if 'daily' in result and 'data' in result['daily'] else '', 'hourly_data': json.dumps(result.get('hourly', '')) if 'hourly' in result else '', 'full_json': json.dumps(result)} <DeepExtract> if self.cache_file: logger.info('Forecast.io plugin - Saving cache (%s)' % self.cache_file) with open(self.cache_file, 'w') as f: json.dump(self.cache_data, f) f.close() </DeepExtract>
def generate_rows(self, dataset_schema=None, dataset_partitioning=None, partition_id=None, records_limit=-1): from_date = datetime.datetime.strptime(self.from_date, '%Y-%m-%d') to_date = datetime.datetime.strptime(self.to_date, '%Y-%m-%d') if to_date < from_date: raise ValueError('The end date must occur after the start date') list_datetimes = [from_date + datetime.timedelta(days=x) for x in range((to_date - from_date).days + 1)] logger.info('Forecast.io plugin - List of dates: %s' % ', '.join([d.strftime('%d/%m/%Y') for d in list_datetimes])) if self.cache_file: logger.info('Forecast.io plugin - Loading cache (%s)' % self.cache_file) with open(self.cache_file, 'r') as f: self.cache_data = json.load(f) f.close() for day in list_datetimes: if not day: result = None day_key = day.strftime('%Y-%m-%dT00:00:00') if day_key in self.cache_data.keys(): logger.info('Forecast.io plugin - Already in cache for %s' % day_key) result = self.cache_data.get(day_key) else: logger.info('Forecast.io plugin - Request for %s' % day_key) if self.api_limit > -1 and self.api_calls >= self.api_limit: logger.info('Forecast.io plugin - Limit reached, no call for %s (cur=%d lim=%d)' % (day_key, self.api_calls, self.api_limit)) result = {'result': 'Limit reached. No call.'} headers = {'Accept-Encoding': 'gzip', 'Accept': 'application/json'} params = {'lang': 'en', 'units': 'si', 'exclude': 'currently,minutely'} r = requests.get(url='https://api.darksky.net/forecast/%s/%s,%s,%s' % (self.api_key, self.latitude, self.longitude, day_key), params=params, headers=headers) if r.status_code != 200: logger.info('Forecast.io plugin - Error in request (status code: %s)' % r.status_code) logger.info('Forecast.io plugin - Response: %s' % r.text) r.raise_for_status() sys.exit() result = r.json() self.api_calls = r.headers.get('X-Forecast-API-Calls') logger.info('Forecast.io plugin - X-Forecast-API-Calls: %s' % self.api_calls) if self.api_calls is not None: self.api_calls = int(self.api_calls) else: self.api_calls = -1 if self.cache_file and day < datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0): logger.info('Forecast.io plugin - Adding to cache: %s' % day_key) self.cache_data[day_key] = result sleep(0.1) result = result yield {'day': day.strftime('%Y-%m-%d'), 'day_date': day.strftime('%Y-%m-%dT00:00:00.000Z'), 'daily_data': json.dumps(result['daily']['data'][0]) if 'daily' in result and 'data' in result['daily'] else '', 'hourly_data': json.dumps(result.get('hourly', '')) if 'hourly' in result else '', 'full_json': json.dumps(result)} if self.cache_file: logger.info('Forecast.io plugin - Saving cache (%s)' % self.cache_file) with open(self.cache_file, 'w') as f: json.dump(self.cache_data, f) f.close() </DeepExtract>
dataiku-contrib
positive
def test_bool(self): <DeepExtract> with self.create_table('a {}'.format('Bool')): data = [(np.array(range(self.n)) % 2).astype(bool)] self.client.execute('INSERT INTO test (a) VALUES', data, columnar=True) query = 'SELECT * FROM test' inserted = self.emit_cli(query) self.assertEqual(inserted, '\n'.join((str(x).lower() for x in data[0])) + '\n') rv = self.client.execute(query, columnar=True) </DeepExtract> <DeepExtract> data = (np.array(range(self.n)) % 2).astype(bool) self.assertArraysEqual(rv[0], data) self.assertEqual(rv[0].dtype, np.bool_) </DeepExtract>
def test_bool(self): with self.create_table('a {}'.format('Bool')): data = [(np.array(range(self.n)) % 2).astype(bool)] self.client.execute('INSERT INTO test (a) VALUES', data, columnar=True) query = 'SELECT * FROM test' inserted = self.emit_cli(query) self.assertEqual(inserted, '\n'.join((str(x).lower() for x in data[0])) + '\n') rv = self.client.execute(query, columnar=True) data = (np.array(range(self.n)) % 2).astype(bool) self.assertArraysEqual(rv[0], data) self.assertEqual(rv[0].dtype, np.bool_) </DeepExtract>
clickhouse-driver
positive
def acquire_cidr(self, num=1): <DeepExtract> if int(num) > len(self.pool): [status, result] = [False, 'No enough IPs: %s' % self.info] result = [] for i in range(0, int(num)): result.append(self.pool.pop()) [status, result] = [True, result] </DeepExtract> if not status: return [status, result] return [True, list(map(lambda x: x + '/' + self.info.split('/')[1], result))]
def acquire_cidr(self, num=1): if int(num) > len(self.pool): [status, result] = [False, 'No enough IPs: %s' % self.info] result = [] for i in range(0, int(num)): result.append(self.pool.pop()) [status, result] = [True, result] if not status: return [status, result] return [True, list(map(lambda x: x + '/' + self.info.split('/')[1], result))]
docklet
positive
def main(fname, sessions, label, do_plot=True): """ usual main function """ sign = 'neg' if 'neg' in sessions[0] else 'pos' for ses in sessions: if sign not in ses: raise ValueError('Different signs: {}'.format(sessions)) basedir = os.path.dirname(fname) sorting_dir = os.path.join(basedir, label) logfname = os.path.join(basedir, 'log.txt') logfid = open(logfname, 'a') if not os.path.isdir(sorting_dir): os.mkdir(sorting_dir) logfid.write('created {}\n'.format(sorting_dir)) outfname = os.path.join(sorting_dir, 'sort_cat.h5') if not options['OverwriteGroups']: if os.path.exists(outfname): print(outfname + ' exists already, skipping!') return None <DeepExtract> basedir = os.path.dirname(fname) sort_man = SortingManager(fname) ses_mans = {} for ses in sessions: ses_mans[ses] = SessionManager(os.path.join(basedir, ses)) print('Starting read from {}'.format(fname)) (sorted_index, sorted_info, artifacts) = read_all_info(ses_mans) write_sorting_file(outfname, sorted_index, sorted_info, artifacts) for (ses, man) in ses_mans.items(): print('Closed {}'.format(ses)) del man sort_man = sort_man </DeepExtract> fid = tables.open_file(outfname, 'r+') fid.set_node_attr('/', 'sign', sign) spk_idx = fid.root.index[:] all_spikes = sort_man.get_data_by_name_and_index('spikes', spk_idx, sign) del sort_man <DeepExtract> classes = fid.root.classes[:] print('classes: {}, all_spikes: {}'.format(classes.shape, all_spikes.shape)) (ids, mean_array, stds) = get_means(classes, all_spikes) if not len(ids): (classes, matches) = (fid.root.classes[:], fid.root.matches[:]) unmatched_idx = (classes == CLID_UNMATCHED).nonzero()[0] blocksize = 50 * 1000 n_unmatched = unmatched_idx.shape[0] starts = np.arange(0, n_unmatched, blocksize) if not len(starts): starts = np.array([0]) stops = np.array([n_unmatched]) else: stops = starts + blocksize stops[-1] = n_unmatched for (start, stop) in zip(starts, stops): this_idx = unmatched_idx[start:stop] print('Calculating match for {} spikes'.format(all_spikes[this_idx].shape[0])) all_dists = distances_euclidean(all_spikes[this_idx], mean_array) print('all_dists: {}'.format(all_dists.shape)) all_dists[all_dists > options['SecondMatchFactor'] * stds] = np.inf minimizers_idx = all_dists.argmin(1) minimizers = ids[minimizers_idx] minima = all_dists.min(1) minimizers[minima >= options['SecondMatchMaxDist'] * all_spikes.shape[1]] = 0 fid.root.classes[this_idx] = minimizers fid.root.matches[this_idx] = SPIKE_MATCHED_2 fid.root.distance[this_idx] = minima fid.flush() (classes, matches) = (fid.root.classes[:], fid.root.matches[:]) </DeepExtract> if options['RecheckArtifacts']: clids = np.unique(classes) rows_clids = len(clids) if 0 in clids: artifacts = np.zeros((rows_clids, 2), 'int64') artifacts[:, 0] = clids else: artifacts = np.zeros((rows_clids + 1, 2), 'int64') artifacts[1:, 0] = clids invert = True if sign == 'neg' else False (_, art_ids) = find_artifacts(all_spikes, classes, clids, invert) for aid in art_ids: artifacts[artifacts[:, 0] == aid, 1] = 1 fid.root.artifacts[:] = artifacts fid.flush() if do_plot: <DeepExtract> if not os.path.isdir(sorting_dir): os.mkdir(sorting_dir) clids = np.unique(classes) fig = mpl.figure(figsize=options['figsize']) fig.add_axes([0, 0, 1, 1]) xax = np.arange(all_spikes.shape[1]) xlim = (0, all_spikes.shape[1] - 1) (_, means, _) = get_means(classes, all_spikes) if not len(means): ylim = options['ylim'] else: max_of_means = np.abs(means).max() if max_of_means < options['ylim'][1]: ylim = (-1.5 * max_of_means, 1.5 * max_of_means) else: ylim = options['ylim'] for (clnum, clid) in enumerate(clids): cl_idx = classes == clid outname = os.path.join(sorting_dir, 'class_{:03d}.png'.format(clid)) print('Plotting {}/{}, {}, {} spikes'.format(clnum + 1, len(clids), sorting_dir, cl_idx.sum())) plot_class(fig, xax, xlim, ylim, all_spikes[cl_idx], matches[cl_idx], outname) mpl.close(fig) </DeepExtract> logfid.write('Plotted classes in {}\n'.format(sorting_dir)) print(fid.filename, fid.root.artifacts[:]) fid.close() logfid.close() return outfname
def main(fname, sessions, label, do_plot=True): """ usual main function """ sign = 'neg' if 'neg' in sessions[0] else 'pos' for ses in sessions: if sign not in ses: raise ValueError('Different signs: {}'.format(sessions)) basedir = os.path.dirname(fname) sorting_dir = os.path.join(basedir, label) logfname = os.path.join(basedir, 'log.txt') logfid = open(logfname, 'a') if not os.path.isdir(sorting_dir): os.mkdir(sorting_dir) logfid.write('created {}\n'.format(sorting_dir)) outfname = os.path.join(sorting_dir, 'sort_cat.h5') if not options['OverwriteGroups']: if os.path.exists(outfname): print(outfname + ' exists already, skipping!') return None basedir = os.path.dirname(fname) sort_man = SortingManager(fname) ses_mans = {} for ses in sessions: ses_mans[ses] = SessionManager(os.path.join(basedir, ses)) print('Starting read from {}'.format(fname)) (sorted_index, sorted_info, artifacts) = read_all_info(ses_mans) write_sorting_file(outfname, sorted_index, sorted_info, artifacts) for (ses, man) in ses_mans.items(): print('Closed {}'.format(ses)) del man sort_man = sort_man fid = tables.open_file(outfname, 'r+') fid.set_node_attr('/', 'sign', sign) spk_idx = fid.root.index[:] all_spikes = sort_man.get_data_by_name_and_index('spikes', spk_idx, sign) del sort_man classes = fid.root.classes[:] print('classes: {}, all_spikes: {}'.format(classes.shape, all_spikes.shape)) (ids, mean_array, stds) = get_means(classes, all_spikes) if not len(ids): (classes, matches) = (fid.root.classes[:], fid.root.matches[:]) unmatched_idx = (classes == CLID_UNMATCHED).nonzero()[0] blocksize = 50 * 1000 n_unmatched = unmatched_idx.shape[0] starts = np.arange(0, n_unmatched, blocksize) if not len(starts): starts = np.array([0]) stops = np.array([n_unmatched]) else: stops = starts + blocksize stops[-1] = n_unmatched for (start, stop) in zip(starts, stops): this_idx = unmatched_idx[start:stop] print('Calculating match for {} spikes'.format(all_spikes[this_idx].shape[0])) all_dists = distances_euclidean(all_spikes[this_idx], mean_array) print('all_dists: {}'.format(all_dists.shape)) all_dists[all_dists > options['SecondMatchFactor'] * stds] = np.inf minimizers_idx = all_dists.argmin(1) minimizers = ids[minimizers_idx] minima = all_dists.min(1) minimizers[minima >= options['SecondMatchMaxDist'] * all_spikes.shape[1]] = 0 fid.root.classes[this_idx] = minimizers fid.root.matches[this_idx] = SPIKE_MATCHED_2 fid.root.distance[this_idx] = minima fid.flush() (classes, matches) = (fid.root.classes[:], fid.root.matches[:]) if options['RecheckArtifacts']: clids = np.unique(classes) rows_clids = len(clids) if 0 in clids: artifacts = np.zeros((rows_clids, 2), 'int64') artifacts[:, 0] = clids else: artifacts = np.zeros((rows_clids + 1, 2), 'int64') artifacts[1:, 0] = clids invert = True if sign == 'neg' else False (_, art_ids) = find_artifacts(all_spikes, classes, clids, invert) for aid in art_ids: artifacts[artifacts[:, 0] == aid, 1] = 1 fid.root.artifacts[:] = artifacts fid.flush() if do_plot: if not os.path.isdir(sorting_dir): os.mkdir(sorting_dir) clids = np.unique(classes) fig = mpl.figure(figsize=options['figsize']) fig.add_axes([0, 0, 1, 1]) xax = np.arange(all_spikes.shape[1]) xlim = (0, all_spikes.shape[1] - 1) (_, means, _) = get_means(classes, all_spikes) if not len(means): ylim = options['ylim'] else: max_of_means = np.abs(means).max() if max_of_means < options['ylim'][1]: ylim = (-1.5 * max_of_means, 1.5 * max_of_means) else: ylim = options['ylim'] for (clnum, clid) in enumerate(clids): cl_idx = classes == clid outname = os.path.join(sorting_dir, 'class_{:03d}.png'.format(clid)) print('Plotting {}/{}, {}, {} spikes'.format(clnum + 1, len(clids), sorting_dir, cl_idx.sum())) plot_class(fig, xax, xlim, ylim, all_spikes[cl_idx], matches[cl_idx], outname) mpl.close(fig) logfid.write('Plotted classes in {}\n'.format(sorting_dir)) print(fid.filename, fid.root.artifacts[:]) fid.close() logfid.close() return outfname
combinato
positive
def __init__(self, options: Namespace): """Construct app run based on already loaded configuration.""" <DeepExtract> tags = set() for tag in options.skip_list: tags.update(str(tag).split(',')) options.skip_list = sorted(set(tags)) </DeepExtract> <DeepExtract> tags = set() for tag in options.warn_list: tags.update(str(tag).split(',')) options.warn_list = sorted(set(tags)) </DeepExtract> self.options = options <DeepExtract> r: type[formatters.BaseFormatter[Any]] = formatters.Formatter if options.format == 'quiet': r = formatters.QuietFormatter elif options.format in ('json', 'codeclimate'): r = formatters.CodeclimateJSONFormatter elif options.format == 'sarif': r = formatters.SarifFormatter elif options.parseable or options.format == 'pep8': r = formatters.ParseableFormatter formatter_factory = r </DeepExtract> self.formatter = formatter_factory(options.cwd, options.display_relative_path) self.runtime = Runtime(isolated=True, require_module=True)
def __init__(self, options: Namespace): """Construct app run based on already loaded configuration.""" tags = set() for tag in options.skip_list: tags.update(str(tag).split(',')) options.skip_list = sorted(set(tags)) tags = set() for tag in options.warn_list: tags.update(str(tag).split(',')) options.warn_list = sorted(set(tags)) self.options = options r: type[formatters.BaseFormatter[Any]] = formatters.Formatter if options.format == 'quiet': r = formatters.QuietFormatter elif options.format in ('json', 'codeclimate'): r = formatters.CodeclimateJSONFormatter elif options.format == 'sarif': r = formatters.SarifFormatter elif options.parseable or options.format == 'pep8': r = formatters.ParseableFormatter formatter_factory = r self.formatter = formatter_factory(options.cwd, options.display_relative_path) self.runtime = Runtime(isolated=True, require_module=True)
ansible-lint
positive
def test_get_subnet_config_should_set_proper_values_to_model(): <DeepExtract> cluster_model = dict_to_objdict({'kind': 'epiphany-cluster', 'provider': 'aws', 'specification': {'name': 'TestCluster', 'prefix': 'prefix', 'cloud': {'vnet_address_pool': address_pool, 'network': {'use_network_security_groups': True}, 'default_os_image': 'default', 'use_public_ips': True}}}) cluster_model = cluster_model </DeepExtract> component_value = dict_to_objdict({'address_pool': '10.20.0.0/24', 'availability_zone': 'eu-westa'}) builder = InfrastructureBuilder([cluster_model]) actual = builder.get_subnet(component_value, 'component', 'my-test-vpc', 1) assert actual.specification.name == 'prefix-testcluster-component-subnet-1' assert actual.specification.vpc_name == 'my-test-vpc' assert actual.specification.cidr_block == '10.20.0.0/24' assert actual.specification.availability_zone == 'eu-westa'
def test_get_subnet_config_should_set_proper_values_to_model(): cluster_model = dict_to_objdict({'kind': 'epiphany-cluster', 'provider': 'aws', 'specification': {'name': 'TestCluster', 'prefix': 'prefix', 'cloud': {'vnet_address_pool': address_pool, 'network': {'use_network_security_groups': True}, 'default_os_image': 'default', 'use_public_ips': True}}}) cluster_model = cluster_model component_value = dict_to_objdict({'address_pool': '10.20.0.0/24', 'availability_zone': 'eu-westa'}) builder = InfrastructureBuilder([cluster_model]) actual = builder.get_subnet(component_value, 'component', 'my-test-vpc', 1) assert actual.specification.name == 'prefix-testcluster-component-subnet-1' assert actual.specification.vpc_name == 'my-test-vpc' assert actual.specification.cidr_block == '10.20.0.0/24' assert actual.specification.availability_zone == 'eu-westa'
epiphany
positive
def test_400_if_invalid_filter_choice(self): <DeepExtract> return self.client.login(username=self.username, password=self.password) </DeepExtract> url = self.url + '?sample_field=xpto&search=algo' response = self.client.get(url) assert 400 == response.status_code self.assertTemplateUsed(response, 'core/dataset-detail.html') assert 'sample_field' in response.context['filter_form'].errors assert 'algo' == response.context['search_term']
def test_400_if_invalid_filter_choice(self): return self.client.login(username=self.username, password=self.password) url = self.url + '?sample_field=xpto&search=algo' response = self.client.get(url) assert 400 == response.status_code self.assertTemplateUsed(response, 'core/dataset-detail.html') assert 'sample_field' in response.context['filter_form'].errors assert 'algo' == response.context['search_term']
brasil.io
positive
def step1(g): visited = set() <DeepExtract> q = [0] while q: v = q[0] q = q[1:] if v in visited: continue visited.add(v) for v2 in g[v]: q += [v2] </DeepExtract> return len(visited)
def step1(g): visited = set() q = [0] while q: v = q[0] q = q[1:] if v in visited: continue visited.add(v) for v2 in g[v]: q += [v2] return len(visited)
advent_of_code_2017
positive
def train(): """Train a en->fr translation model using WMT data.""" from_train = None to_train = None from_dev = None to_dev = None if FLAGS.from_train_data and FLAGS.to_train_data: from_train_data = FLAGS.from_train_data to_train_data = FLAGS.to_train_data from_dev_data = from_train_data to_dev_data = to_train_data if FLAGS.from_dev_data and FLAGS.to_dev_data: from_dev_data = FLAGS.from_dev_data to_dev_data = FLAGS.to_dev_data (from_train, to_train, from_dev, to_dev, _, _) = data_utils.prepare_data(FLAGS.data_dir, from_train_data, to_train_data, from_dev_data, to_dev_data, FLAGS.from_vocab_size, FLAGS.to_vocab_size) else: print('Preparing WMT data in %s' % FLAGS.data_dir) (from_train, to_train, from_dev, to_dev, _, _) = data_utils.prepare_wmt_data(FLAGS.data_dir, FLAGS.from_vocab_size, FLAGS.to_vocab_size) with tf.Session() as sess: print('Creating %d layers of %d units.' % (FLAGS.num_layers, FLAGS.size)) <DeepExtract> dtype = tf.float16 if FLAGS.use_fp16 else tf.float32 model = seq2seq_model.Seq2SeqModel(FLAGS.from_vocab_size, FLAGS.to_vocab_size, _buckets, FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size, FLAGS.learning_rate, FLAGS.learning_rate_decay_factor, forward_only=False, dtype=dtype) ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir) if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path): print('Reading model parameters from %s' % ckpt.model_checkpoint_path) model.saver.restore(sess, ckpt.model_checkpoint_path) else: print('Created model with fresh parameters.') sess.run(tf.global_variables_initializer()) model = model </DeepExtract> print('Reading development and training data (limit: %d).' % FLAGS.max_train_data_size) <DeepExtract> data_set = [[] for _ in _buckets] mycount = 0 with tf.gfile.GFile(from_dev, mode='r') as source_file: with tf.gfile.GFile(to_dev, mode='r') as target_file: (source, target) = (source_file.readline(), target_file.readline()) counter = 0 while source and target and (not max_size or counter < max_size): counter += 1 if counter % 100000 == 0: print(' reading data line %d' % counter) sys.stdout.flush() source_ids = [int(x) for x in source.split()] target_ids = [int(x) for x in target.split()] target_ids.append(data_utils.EOS_ID) for (bucket_id, (source_size, target_size)) in enumerate(_buckets): if len(source_ids) < source_size and len(target_ids) < target_size: data_set[bucket_id].append([source_ids, target_ids]) mycount = mycount + 1 break (source, target) = (source_file.readline(), target_file.readline()) print(mycount) dev_set = data_set </DeepExtract> <DeepExtract> data_set = [[] for _ in _buckets] mycount = 0 with tf.gfile.GFile(from_train, mode='r') as source_file: with tf.gfile.GFile(to_train, mode='r') as target_file: (source, target) = (source_file.readline(), target_file.readline()) counter = 0 while source and target and (not FLAGS.max_train_data_size or counter < FLAGS.max_train_data_size): counter += 1 if counter % 100000 == 0: print(' reading data line %d' % counter) sys.stdout.flush() source_ids = [int(x) for x in source.split()] target_ids = [int(x) for x in target.split()] target_ids.append(data_utils.EOS_ID) for (bucket_id, (source_size, target_size)) in enumerate(_buckets): if len(source_ids) < source_size and len(target_ids) < target_size: data_set[bucket_id].append([source_ids, target_ids]) mycount = mycount + 1 break (source, target) = (source_file.readline(), target_file.readline()) print(mycount) train_set = data_set </DeepExtract> train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))] train_total_size = float(sum(train_bucket_sizes)) train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size for i in xrange(len(train_bucket_sizes))] (step_time, loss) = (0.0, 0.0) current_step = 0 previous_losses = [] while current_step < FLAGS.num_train_step: random_number_01 = np.random.random_sample() bucket_id = min([i for i in xrange(len(train_buckets_scale)) if train_buckets_scale[i] > random_number_01]) start_time = time.time() (encoder_inputs, decoder_inputs, target_weights) = model.get_batch(train_set, bucket_id) (_, step_loss, _, enc_init_states, enc_all_outputs) = model.step(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, False, 1) step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint loss += step_loss / FLAGS.steps_per_checkpoint current_step += 1 if current_step % FLAGS.steps_per_checkpoint == 0: first_layer = np.array(enc_init_states[0]) mat_first_layer = np.matrix(first_layer) with open('first_layer_states.txt', 'wb') as f: for line in mat_first_layer: np.savetxt(f, line, fmt='%.2f') second_layer = np.array(enc_init_states[1]) mat_second_layer = np.matrix(second_layer) with open('second_layer_states.txt', 'wb') as f: for line in mat_second_layer: np.savetxt(f, line, fmt='%.2f') perplexity = math.exp(float(loss)) if loss < 300 else float('inf') print('global step %d learning rate %.4f step-time %.5f perplexity %.5f' % (model.global_step.eval(), model.learning_rate.eval(), step_time, perplexity)) if len(previous_losses) > 2 and loss > max(previous_losses[-3:]): sess.run(model.learning_rate_decay_op) previous_losses.append(loss) checkpoint_path = os.path.join(FLAGS.train_dir, 'translate.ckpt') model.saver.save(sess, checkpoint_path, global_step=model.global_step) (step_time, loss) = (0.0, 0.0) for bucket_id in xrange(len(_buckets)): if len(dev_set[bucket_id]) == 0: print(' eval: empty bucket %d' % bucket_id) continue (encoder_inputs, decoder_inputs, target_weights) = model.get_batch(dev_set, bucket_id) (_, eval_loss, _, _, _) = model.step(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, False, 0) eval_ppx = math.exp(float(eval_loss)) if eval_loss < 300 else float('inf') print(' eval: bucket %d perplexity %.5f' % (bucket_id, eval_ppx)) sys.stdout.flush() en_vocab_path = os.path.join(FLAGS.data_dir, 'vocab%d.from' % FLAGS.from_vocab_size) fr_vocab_path = os.path.join(FLAGS.data_dir, 'vocab%d.to' % FLAGS.to_vocab_size) (en_vocab, _) = data_utils.initialize_vocabulary(en_vocab_path) (_, rev_fr_vocab) = data_utils.initialize_vocabulary(fr_vocab_path) max_iter = 100 count = 0 model.batch_size = 1 with gfile.GFile(FLAGS.from_train_data, mode='rb') as f: for sentence in f: count = count + 1 if max_iter < count: break print(sentence) token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), en_vocab) bucket_id = len(_buckets) - 1 for (i, bucket) in enumerate(_buckets): if bucket[0] >= len(token_ids): bucket_id = i break else: logging.warning('Sentence truncated: %s', sentence) (encoder_inputs, decoder_inputs, target_weights) = model.get_batch({bucket_id: [(token_ids, [])]}, bucket_id) (_, _, output_logits, enc_all_state, _) = model.step(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, False, 0) quit()
def train(): """Train a en->fr translation model using WMT data.""" from_train = None to_train = None from_dev = None to_dev = None if FLAGS.from_train_data and FLAGS.to_train_data: from_train_data = FLAGS.from_train_data to_train_data = FLAGS.to_train_data from_dev_data = from_train_data to_dev_data = to_train_data if FLAGS.from_dev_data and FLAGS.to_dev_data: from_dev_data = FLAGS.from_dev_data to_dev_data = FLAGS.to_dev_data (from_train, to_train, from_dev, to_dev, _, _) = data_utils.prepare_data(FLAGS.data_dir, from_train_data, to_train_data, from_dev_data, to_dev_data, FLAGS.from_vocab_size, FLAGS.to_vocab_size) else: print('Preparing WMT data in %s' % FLAGS.data_dir) (from_train, to_train, from_dev, to_dev, _, _) = data_utils.prepare_wmt_data(FLAGS.data_dir, FLAGS.from_vocab_size, FLAGS.to_vocab_size) with tf.Session() as sess: print('Creating %d layers of %d units.' % (FLAGS.num_layers, FLAGS.size)) dtype = tf.float16 if FLAGS.use_fp16 else tf.float32 model = seq2seq_model.Seq2SeqModel(FLAGS.from_vocab_size, FLAGS.to_vocab_size, _buckets, FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size, FLAGS.learning_rate, FLAGS.learning_rate_decay_factor, forward_only=False, dtype=dtype) ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir) if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path): print('Reading model parameters from %s' % ckpt.model_checkpoint_path) model.saver.restore(sess, ckpt.model_checkpoint_path) else: print('Created model with fresh parameters.') sess.run(tf.global_variables_initializer()) model = model print('Reading development and training data (limit: %d).' % FLAGS.max_train_data_size) data_set = [[] for _ in _buckets] mycount = 0 with tf.gfile.GFile(from_dev, mode='r') as source_file: with tf.gfile.GFile(to_dev, mode='r') as target_file: (source, target) = (source_file.readline(), target_file.readline()) counter = 0 while source and target and (not max_size or counter < max_size): counter += 1 if counter % 100000 == 0: print(' reading data line %d' % counter) sys.stdout.flush() source_ids = [int(x) for x in source.split()] target_ids = [int(x) for x in target.split()] target_ids.append(data_utils.EOS_ID) for (bucket_id, (source_size, target_size)) in enumerate(_buckets): if len(source_ids) < source_size and len(target_ids) < target_size: data_set[bucket_id].append([source_ids, target_ids]) mycount = mycount + 1 break (source, target) = (source_file.readline(), target_file.readline()) print(mycount) dev_set = data_set data_set = [[] for _ in _buckets] mycount = 0 with tf.gfile.GFile(from_train, mode='r') as source_file: with tf.gfile.GFile(to_train, mode='r') as target_file: (source, target) = (source_file.readline(), target_file.readline()) counter = 0 while source and target and (not FLAGS.max_train_data_size or counter < FLAGS.max_train_data_size): counter += 1 if counter % 100000 == 0: print(' reading data line %d' % counter) sys.stdout.flush() source_ids = [int(x) for x in source.split()] target_ids = [int(x) for x in target.split()] target_ids.append(data_utils.EOS_ID) for (bucket_id, (source_size, target_size)) in enumerate(_buckets): if len(source_ids) < source_size and len(target_ids) < target_size: data_set[bucket_id].append([source_ids, target_ids]) mycount = mycount + 1 break (source, target) = (source_file.readline(), target_file.readline()) print(mycount) train_set = data_set train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))] train_total_size = float(sum(train_bucket_sizes)) train_buckets_scale = 
[sum(train_bucket_sizes[:i + 1]) / train_total_size for i in xrange(len(train_bucket_sizes))] (step_time, loss) = (0.0, 0.0) current_step = 0 previous_losses = [] while current_step < FLAGS.num_train_step: random_number_01 = np.random.random_sample() bucket_id = min([i for i in xrange(len(train_buckets_scale)) if train_buckets_scale[i] > random_number_01]) start_time = time.time() (encoder_inputs, decoder_inputs, target_weights) = model.get_batch(train_set, bucket_id) (_, step_loss, _, enc_init_states, enc_all_outputs) = model.step(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, False, 1) step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint loss += step_loss / FLAGS.steps_per_checkpoint current_step += 1 if current_step % FLAGS.steps_per_checkpoint == 0: first_layer = np.array(enc_init_states[0]) mat_first_layer = np.matrix(first_layer) with open('first_layer_states.txt', 'wb') as f: for line in mat_first_layer: np.savetxt(f, line, fmt='%.2f') second_layer = np.array(enc_init_states[1]) mat_second_layer = np.matrix(second_layer) with open('second_layer_states.txt', 'wb') as f: for line in mat_second_layer: np.savetxt(f, line, fmt='%.2f') perplexity = math.exp(float(loss)) if loss < 300 else float('inf') print('global step %d learning rate %.4f step-time %.5f perplexity %.5f' % (model.global_step.eval(), model.learning_rate.eval(), step_time, perplexity)) if len(previous_losses) > 2 and loss > max(previous_losses[-3:]): sess.run(model.learning_rate_decay_op) previous_losses.append(loss) checkpoint_path = os.path.join(FLAGS.train_dir, 'translate.ckpt') model.saver.save(sess, checkpoint_path, global_step=model.global_step) (step_time, loss) = (0.0, 0.0) for bucket_id in xrange(len(_buckets)): if len(dev_set[bucket_id]) == 0: print(' eval: empty bucket %d' % bucket_id) continue (encoder_inputs, decoder_inputs, target_weights) = model.get_batch(dev_set, bucket_id) (_, eval_loss, _, _, _) = model.step(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, False, 0) eval_ppx = math.exp(float(eval_loss)) if eval_loss < 300 else float('inf') print(' eval: bucket %d perplexity %.5f' % (bucket_id, eval_ppx)) sys.stdout.flush() en_vocab_path = os.path.join(FLAGS.data_dir, 'vocab%d.from' % FLAGS.from_vocab_size) fr_vocab_path = os.path.join(FLAGS.data_dir, 'vocab%d.to' % FLAGS.to_vocab_size) (en_vocab, _) = data_utils.initialize_vocabulary(en_vocab_path) (_, rev_fr_vocab) = data_utils.initialize_vocabulary(fr_vocab_path) max_iter = 100 count = 0 model.batch_size = 1 with gfile.GFile(FLAGS.from_train_data, mode='rb') as f: for sentence in f: count = count + 1 if max_iter < count: break print(sentence) token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), en_vocab) bucket_id = len(_buckets) - 1 for (i, bucket) in enumerate(_buckets): if bucket[0] >= len(token_ids): bucket_id = i break else: logging.warning('Sentence truncated: %s', sentence) (encoder_inputs, decoder_inputs, target_weights) = model.get_batch({bucket_id: [(token_ids, [])]}, bucket_id) (_, _, output_logits, enc_all_state, _) = model.step(sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, False, 0) quit()
DeepAffinity
positive
def test_single_ant(self): module.debug = False class test_empty_object(module.ant_colony): def __init__(self): pass def _get_distance(self, start, end): pass def _init_matrix(self, size, value=None): pass def _init_ants(self, count, start=0): pass def _populate_ant_updated_pheromone_map(self, ant): pass def mainloop(self): pass test_object = test_empty_object() def _init_matrix(size, value=None): """ setup a matrix NxN (where n = size) used in both self.distance_matrix and self.pheromone_map as they require identical matrixes besides which value to initialize to """ ret = [] for row in range(size): ret.append([value for x in range(size)]) return ret class mock_ant: def get_route(self): return [0, 1, 2] def get_distance_traveled(self): return float(2) <DeepExtract> ret = [] for row in range(3): ret.append([1 for x in range(3)]) test_object.pheromone_map = ret </DeepExtract> <DeepExtract> ret = [] for row in range(3): ret.append([0.5 for x in range(3)]) test_object.ant_updated_pheromone_map = ret </DeepExtract> test_object.pheromone_evaporation_coefficient = 0.99 test_object.pheromone_constant = 1 test_object.ants = [mock_ant()] test_object.first_pass = False test_object._update_pheromone_map() self.assertEqual(test_object.pheromone_map[0][1], 0.51) self.assertEqual(test_object.pheromone_map[1][2], 0.51)
def test_single_ant(self): module.debug = False class test_empty_object(module.ant_colony): def __init__(self): pass def _get_distance(self, start, end): pass def _init_matrix(self, size, value=None): pass def _init_ants(self, count, start=0): pass def _populate_ant_updated_pheromone_map(self, ant): pass def mainloop(self): pass test_object = test_empty_object() def _init_matrix(size, value=None): """ setup a matrix NxN (where n = size) used in both self.distance_matrix and self.pheromone_map as they require identical matrixes besides which value to initialize to """ ret = [] for row in range(size): ret.append([value for x in range(size)]) return ret class mock_ant: def get_route(self): return [0, 1, 2] def get_distance_traveled(self): return float(2) ret = [] for row in range(3): ret.append([1 for x in range(3)]) test_object.pheromone_map = ret ret = [] for row in range(3): ret.append([0.5 for x in range(3)]) test_object.ant_updated_pheromone_map = ret test_object.pheromone_evaporation_coefficient = 0.99 test_object.pheromone_constant = 1 test_object.ants = [mock_ant()] test_object.first_pass = False test_object._update_pheromone_map() self.assertEqual(test_object.pheromone_map[0][1], 0.51) self.assertEqual(test_object.pheromone_map[1][2], 0.51)
ant-colony-optimization
positive
def test_has_dependencies_remaining_with_not_set_dependencies(self): <DeepExtract> dependency_task = dict(task_reference=dependency_task_reference, dependencies_by_reference=[], QUEUE_STATUS=NOT_SET) task_to_run = dict(task_reference=task_to_run_reference, dependencies_by_reference=[dependency_task_reference]) all_tasks = dict() all_tasks[task_to_run_reference] = task_to_run all_tasks[dependency_task_reference] = dependency_task (task_to_run, all_tasks, _) = (task_to_run, all_tasks, list(all_tasks.keys())) </DeepExtract> expected_is_currently_blocked = True expected_is_permanently_blocked = False (actual_is_currently_blocked, actual_is_permanently_blocked) = self.sut.has_dependencies_remaining(task_to_run, all_tasks) self.assertEqual(expected_is_currently_blocked, actual_is_currently_blocked) self.assertEqual(expected_is_permanently_blocked, actual_is_permanently_blocked)
def test_has_dependencies_remaining_with_not_set_dependencies(self): dependency_task = dict(task_reference=dependency_task_reference, dependencies_by_reference=[], QUEUE_STATUS=NOT_SET) task_to_run = dict(task_reference=task_to_run_reference, dependencies_by_reference=[dependency_task_reference]) all_tasks = dict() all_tasks[task_to_run_reference] = task_to_run all_tasks[dependency_task_reference] = dependency_task (task_to_run, all_tasks, _) = (task_to_run, all_tasks, list(all_tasks.keys())) expected_is_currently_blocked = True expected_is_permanently_blocked = False (actual_is_currently_blocked, actual_is_permanently_blocked) = self.sut.has_dependencies_remaining(task_to_run, all_tasks) self.assertEqual(expected_is_currently_blocked, actual_is_currently_blocked) self.assertEqual(expected_is_permanently_blocked, actual_is_permanently_blocked)
aws-service-catalog-puppet
positive
def update(self, _id, _data=None, _keys=None, **kwds): """ Updates an edge in the database and returns it. :param _id: The edge ID. :type _id: int or str :param _data: Optional property data dict. :type _data: dict :param kwds: Optional property data keyword pairs. :type kwds: dict :rtype: Response """ <DeepExtract> data = {} if _data is None else _data data.update(kwds) data = data </DeepExtract> return self.client.update_edge(_id, data, keys=_keys)
def update(self, _id, _data=None, _keys=None, **kwds): """ Updates an edge in the database and returns it. :param _id: The edge ID. :type _id: int or str :param _data: Optional property data dict. :type _data: dict :param kwds: Optional property data keyword pairs. :type kwds: dict :rtype: Response """ data = {} if _data is None else _data data.update(kwds) data = data return self.client.update_edge(_id, data, keys=_keys)
bulbs
positive
def make_shape_group(self, appearance, transform, num_shapes, num_subobjects): <DeepExtract> if appearance['fill'][0] == 'none': fill = None elif appearance['fill'][0] == 'solid': fill = torch.cat([appearance['fill'][1], appearance['opacity'] * appearance['fill-opacity']]).to(self.device) elif appearance['fill'][0] == 'url': fill = appearance['fill'][1].getGrad(appearance['opacity'] * appearance['fill-opacity'], transform) else: raise ValueError("Unknown paint value type '{}'".format(appearance['fill'][0])) </DeepExtract> <DeepExtract> if appearance['stroke'][0] == 'none': stroke = None elif appearance['stroke'][0] == 'solid': stroke = torch.cat([appearance['stroke'][1], appearance['opacity'] * appearance['stroke-opacity']]).to(self.device) elif appearance['stroke'][0] == 'url': stroke = appearance['stroke'][1].getGrad(appearance['opacity'] * appearance['stroke-opacity'], transform) else: raise ValueError("Unknown paint value type '{}'".format(appearance['stroke'][0])) </DeepExtract> sg = pydiffvg.ShapeGroup(shape_ids=torch.tensor(range(num_shapes, num_shapes + num_subobjects)), fill_color=fill, use_even_odd_rule=appearance['fill-rule'] == 'evenodd', stroke_color=stroke, shape_to_canvas=transform, id=self.id) return sg
def make_shape_group(self, appearance, transform, num_shapes, num_subobjects): if appearance['fill'][0] == 'none': fill = None elif appearance['fill'][0] == 'solid': fill = torch.cat([appearance['fill'][1], appearance['opacity'] * appearance['fill-opacity']]).to(self.device) elif appearance['fill'][0] == 'url': fill = appearance['fill'][1].getGrad(appearance['opacity'] * appearance['fill-opacity'], transform) else: raise ValueError("Unknown paint value type '{}'".format(appearance['fill'][0])) if appearance['stroke'][0] == 'none': stroke = None elif appearance['stroke'][0] == 'solid': stroke = torch.cat([appearance['stroke'][1], appearance['opacity'] * appearance['stroke-opacity']]).to(self.device) elif appearance['stroke'][0] == 'url': stroke = appearance['stroke'][1].getGrad(appearance['opacity'] * appearance['stroke-opacity'], transform) else: raise ValueError("Unknown paint value type '{}'".format(appearance['stroke'][0])) sg = pydiffvg.ShapeGroup(shape_ids=torch.tensor(range(num_shapes, num_shapes + num_subobjects)), fill_color=fill, use_even_odd_rule=appearance['fill-rule'] == 'evenodd', stroke_color=stroke, shape_to_canvas=transform, id=self.id) return sg
diffvg
positive
def open_folder(folder: str): """ This will open a folder even if it doesn't exist. It'll create one if it doesn't exist. """ <DeepExtract> target_path = os.path.join(ROOT_DIR, folder) create_folder_if_needed(target_path) target_path = target_path </DeepExtract> <DeepExtract> if platform.system() == 'Windows': os.startfile(target_path) elif platform.system() == 'Darwin': subprocess.Popen(['open', target_path]) else: subprocess.Popen(['xdg-open', target_path]) </DeepExtract>
def open_folder(folder: str): """ This will open a folder even if it doesn't exist. It'll create one if it doesn't exist. """ target_path = os.path.join(ROOT_DIR, folder) create_folder_if_needed(target_path) target_path = target_path if platform.system() == 'Windows': os.startfile(target_path) elif platform.system() == 'Darwin': subprocess.Popen(['open', target_path]) else: subprocess.Popen(['xdg-open', target_path])
algobot
positive
@timing def _count_files_to_scan(self) -> int: <DeepExtract> paths_to_scan = self._get_configured_storage_paths() files_in_media_folders = self._walk_paths_to_scan(paths_to_scan) files_to_scan = self._filter_unchanged_files(files_in_media_folders) files_to_scan = files_to_scan </DeepExtract> try: return max(1, len(list(files_to_scan))) except StopIteration as e: reporter.exception('importer', e, '_count_files_to_scan raised a stop iteration.') return 1
@timing def _count_files_to_scan(self) -> int: paths_to_scan = self._get_configured_storage_paths() files_in_media_folders = self._walk_paths_to_scan(paths_to_scan) files_to_scan = self._filter_unchanged_files(files_in_media_folders) files_to_scan = files_to_scan try: return max(1, len(list(files_to_scan))) except StopIteration as e: reporter.exception('importer', e, '_count_files_to_scan raised a stop iteration.') return 1
cozy
positive
def split_to_patches(self, patch_size, workers=0, paths_per_worker=10): origin = (float(self.view_x), float(self.view_y)) (patch_w, patch_h) = patch_size patches_row_n = int(np.ceil(self.view_height.as_pixels() / patch_h)) patches_col_n = int(np.ceil(self.view_width.as_pixels() / patch_w)) patches_n = (patches_row_n, patches_col_n) vector_patch_origin_xs = np.array([patch_w * j for j in range(patches_col_n)]) vector_patch_origin_ys = np.array([patch_h * i for i in range(patches_row_n)]) vector_patch_origins = np.stack((vector_patch_origin_xs[None].repeat(patches_row_n, axis=0), vector_patch_origin_ys[:, None].repeat(patches_col_n, axis=1)), axis=-1) patch_size_pixels = (units.Pixels(patch_w), units.Pixels(patch_h)) vector_patches = np.array([[self.__class__([], origin=(units.Pixels(coord) for coord in vector_patch_origins[i, j]), size=patch_size_pixels, view_size=patch_size_pixels) for j in range(patches_col_n)] for i in range(patches_row_n)]) split_path_to_patches = lambda path: path.split_to_patches(origin=origin, patch_size=patch_size, patches_n=patches_n) def distribute_path_in_patches(iS, jS, paths): for idx in range(len(iS)): i = iS[idx] j = jS[idx] path_in_patch = paths[idx] vector_patches[i, j].paths.append(path_in_patch.translated(-vector_patch_origins[i, j])) if isinstance(workers, int) and workers == 0: for path in self.paths: <DeepExtract> for idx in range(len(*split_path_to_patches(path))): i = *split_path_to_patches(path)[idx] j = jS[idx] path_in_patch = paths[idx] vector_patches[i, j].paths.append(path_in_patch.translated(-vector_patch_origins[i, j])) </DeepExtract> else: if isinstance(workers, int): from pathos.multiprocessing import cpu_count, ProcessingPool as Pool if workers == -1: batches_n = int(np.ceil(len(self.paths) / paths_per_worker)) optimal_workers = cpu_count() - 1 workers = min(optimal_workers, batches_n) workers = Pool(workers) close_workers = True else: close_workers = False for splits in workers.uimap(split_path_to_patches, self.paths, chunksize=paths_per_worker): <DeepExtract> for idx in range(len(*splits)): i = *splits[idx] j = jS[idx] path_in_patch = paths[idx] vector_patches[i, j].paths.append(path_in_patch.translated(-vector_patch_origins[i, j])) </DeepExtract> if close_workers: workers.close() workers.join() workers.clear() return vector_patches
def split_to_patches(self, patch_size, workers=0, paths_per_worker=10): origin = (float(self.view_x), float(self.view_y)) (patch_w, patch_h) = patch_size patches_row_n = int(np.ceil(self.view_height.as_pixels() / patch_h)) patches_col_n = int(np.ceil(self.view_width.as_pixels() / patch_w)) patches_n = (patches_row_n, patches_col_n) vector_patch_origin_xs = np.array([patch_w * j for j in range(patches_col_n)]) vector_patch_origin_ys = np.array([patch_h * i for i in range(patches_row_n)]) vector_patch_origins = np.stack((vector_patch_origin_xs[None].repeat(patches_row_n, axis=0), vector_patch_origin_ys[:, None].repeat(patches_col_n, axis=1)), axis=-1) patch_size_pixels = (units.Pixels(patch_w), units.Pixels(patch_h)) vector_patches = np.array([[self.__class__([], origin=(units.Pixels(coord) for coord in vector_patch_origins[i, j]), size=patch_size_pixels, view_size=patch_size_pixels) for j in range(patches_col_n)] for i in range(patches_row_n)]) split_path_to_patches = lambda path: path.split_to_patches(origin=origin, patch_size=patch_size, patches_n=patches_n) def distribute_path_in_patches(iS, jS, paths): for idx in range(len(iS)): i = iS[idx] j = jS[idx] path_in_patch = paths[idx] vector_patches[i, j].paths.append(path_in_patch.translated(-vector_patch_origins[i, j])) if isinstance(workers, int) and workers == 0: for path in self.paths: for idx in range(len(*split_path_to_patches(path))): i = *split_path_to_patches(path)[idx] j = jS[idx] path_in_patch = paths[idx] vector_patches[i, j].paths.append(path_in_patch.translated(-vector_patch_origins[i, j])) else: if isinstance(workers, int): from pathos.multiprocessing import cpu_count, ProcessingPool as Pool if workers == -1: batches_n = int(np.ceil(len(self.paths) / paths_per_worker)) optimal_workers = cpu_count() - 1 workers = min(optimal_workers, batches_n) workers = Pool(workers) close_workers = True else: close_workers = False for splits in workers.uimap(split_path_to_patches, self.paths, chunksize=paths_per_worker): for idx in range(len(*splits)): i = *splits[idx] j = jS[idx] path_in_patch = paths[idx] vector_patches[i, j].paths.append(path_in_patch.translated(-vector_patch_origins[i, j])) if close_workers: workers.close() workers.join() workers.clear() return vector_patches
Deep-Vectorization-of-Technical-Drawings
positive
def _test(self): <DeepExtract> if True: outputs_losses = self.outputs_losses_train else: outputs_losses = self.outputs_losses_test if backend_name == 'tensorflow.compat.v1': feed_dict = self.net.feed_dict(True, self.train_state.X_train, self.train_state.y_train, self.train_state.train_aux_vars) (self.train_state.y_pred_train, self.train_state.loss_train) = self.sess.run(outputs_losses, feed_dict=feed_dict) if backend_name == 'tensorflow': outs = outputs_losses(self.train_state.X_train, self.train_state.y_train, self.train_state.train_aux_vars) elif backend_name == 'pytorch': self.net.requires_grad_(requires_grad=False) outs = outputs_losses(self.train_state.X_train, self.train_state.y_train) self.net.requires_grad_() elif backend_name == 'jax': outs = outputs_losses(self.params, self.train_state.X_train, self.train_state.y_train) elif backend_name == 'paddle': outs = outputs_losses(self.train_state.X_train, self.train_state.y_train, self.train_state.train_aux_vars) (self.train_state.y_pred_train, self.train_state.loss_train) = (utils.to_numpy(outs[0]), utils.to_numpy(outs[1])) </DeepExtract> <DeepExtract> if False: outputs_losses = self.outputs_losses_train else: outputs_losses = self.outputs_losses_test if backend_name == 'tensorflow.compat.v1': feed_dict = self.net.feed_dict(False, self.train_state.X_test, self.train_state.y_test, self.train_state.test_aux_vars) (self.train_state.y_pred_test, self.train_state.loss_test) = self.sess.run(outputs_losses, feed_dict=feed_dict) if backend_name == 'tensorflow': outs = outputs_losses(self.train_state.X_test, self.train_state.y_test, self.train_state.test_aux_vars) elif backend_name == 'pytorch': self.net.requires_grad_(requires_grad=False) outs = outputs_losses(self.train_state.X_test, self.train_state.y_test) self.net.requires_grad_() elif backend_name == 'jax': outs = outputs_losses(self.params, self.train_state.X_test, self.train_state.y_test) elif backend_name == 'paddle': outs = outputs_losses(self.train_state.X_test, self.train_state.y_test, self.train_state.test_aux_vars) (self.train_state.y_pred_test, self.train_state.loss_test) = (utils.to_numpy(outs[0]), utils.to_numpy(outs[1])) </DeepExtract> if isinstance(self.train_state.y_test, (list, tuple)): self.train_state.metrics_test = [m(self.train_state.y_test[i], self.train_state.y_pred_test[i]) for m in self.metrics for i in range(len(self.train_state.y_test))] else: self.train_state.metrics_test = [m(self.train_state.y_test, self.train_state.y_pred_test) for m in self.metrics] self.train_state.update_best() self.losshistory.append(self.train_state.step, self.train_state.loss_train, self.train_state.loss_test, self.train_state.metrics_test) if np.isnan(self.train_state.loss_train).any() or np.isnan(self.train_state.loss_test).any(): self.stop_training = True display.training_display(self.train_state)
def _test(self): if True: outputs_losses = self.outputs_losses_train else: outputs_losses = self.outputs_losses_test if backend_name == 'tensorflow.compat.v1': feed_dict = self.net.feed_dict(True, self.train_state.X_train, self.train_state.y_train, self.train_state.train_aux_vars) (self.train_state.y_pred_train, self.train_state.loss_train) = self.sess.run(outputs_losses, feed_dict=feed_dict) if backend_name == 'tensorflow': outs = outputs_losses(self.train_state.X_train, self.train_state.y_train, self.train_state.train_aux_vars) elif backend_name == 'pytorch': self.net.requires_grad_(requires_grad=False) outs = outputs_losses(self.train_state.X_train, self.train_state.y_train) self.net.requires_grad_() elif backend_name == 'jax': outs = outputs_losses(self.params, self.train_state.X_train, self.train_state.y_train) elif backend_name == 'paddle': outs = outputs_losses(self.train_state.X_train, self.train_state.y_train, self.train_state.train_aux_vars) (self.train_state.y_pred_train, self.train_state.loss_train) = (utils.to_numpy(outs[0]), utils.to_numpy(outs[1])) if False: outputs_losses = self.outputs_losses_train else: outputs_losses = self.outputs_losses_test if backend_name == 'tensorflow.compat.v1': feed_dict = self.net.feed_dict(False, self.train_state.X_test, self.train_state.y_test, self.train_state.test_aux_vars) (self.train_state.y_pred_test, self.train_state.loss_test) = self.sess.run(outputs_losses, feed_dict=feed_dict) if backend_name == 'tensorflow': outs = outputs_losses(self.train_state.X_test, self.train_state.y_test, self.train_state.test_aux_vars) elif backend_name == 'pytorch': self.net.requires_grad_(requires_grad=False) outs = outputs_losses(self.train_state.X_test, self.train_state.y_test) self.net.requires_grad_() elif backend_name == 'jax': outs = outputs_losses(self.params, self.train_state.X_test, self.train_state.y_test) elif backend_name == 'paddle': outs = outputs_losses(self.train_state.X_test, self.train_state.y_test, self.train_state.test_aux_vars) (self.train_state.y_pred_test, self.train_state.loss_test) = (utils.to_numpy(outs[0]), utils.to_numpy(outs[1])) if isinstance(self.train_state.y_test, (list, tuple)): self.train_state.metrics_test = [m(self.train_state.y_test[i], self.train_state.y_pred_test[i]) for m in self.metrics for i in range(len(self.train_state.y_test))] else: self.train_state.metrics_test = [m(self.train_state.y_test, self.train_state.y_pred_test) for m in self.metrics] self.train_state.update_best() self.losshistory.append(self.train_state.step, self.train_state.loss_train, self.train_state.loss_test, self.train_state.metrics_test) if np.isnan(self.train_state.loss_train).any() or np.isnan(self.train_state.loss_test).any(): self.stop_training = True display.training_display(self.train_state)
deepxde
positive
def duplicate(diff): object = bpy.context.view_layer.objects.active bpy.ops.object.select_all(action='DESELECT') bpy.context.view_layer.objects.active = object bpy.context.view_layer.objects.active.select_set(True) if int(diff) <= 50: for num in range(diff): bpy.ops.object.duplicate(linked=True, mode='DUMMY') else: lista = [] lista.append(bpy.context.view_layer.objects.active) for obj in bpy.data.objects: if bpy.context.view_layer.objects.active != obj and obj.select_get(): lista.append(obj) total = int(str(numpy.log2(diff)).split('.')[0]) i = 0 while i < total: <DeepExtract> for obj in lista: obj.select_set(True) bpy.ops.object.duplicate(linked=True, mode='DUMMY') for obj in bpy.data.objects: if obj.select_get(): lista.append(obj) for obj in bpy.data.objects: if obj.select_get(): lista.append(obj) for obj in lista: obj.select_set(True) </DeepExtract> i += 1 difference = int(diff - pow(2, int(str(numpy.log2(diff)).split('.')[0]))) + 1 <DeepExtract> object = bpy.context.view_layer.objects.active bpy.ops.object.select_all(action='DESELECT') bpy.context.view_layer.objects.active = object bpy.context.view_layer.objects.active.select_set(True) if int(difference) <= 50: for num in range(difference): bpy.ops.object.duplicate(linked=True, mode='DUMMY') else: lista = [] lista.append(bpy.context.view_layer.objects.active) for obj in bpy.data.objects: if bpy.context.view_layer.objects.active != obj and obj.select_get(): lista.append(obj) total = int(str(numpy.log2(difference)).split('.')[0]) i = 0 while i < total: create_duplicate(lista) i += 1 difference = int(difference - pow(2, int(str(numpy.log2(difference)).split('.')[0]))) + 1 duplicate(difference) </DeepExtract>
def duplicate(diff): object = bpy.context.view_layer.objects.active bpy.ops.object.select_all(action='DESELECT') bpy.context.view_layer.objects.active = object bpy.context.view_layer.objects.active.select_set(True) if int(diff) <= 50: for num in range(diff): bpy.ops.object.duplicate(linked=True, mode='DUMMY') else: lista = [] lista.append(bpy.context.view_layer.objects.active) for obj in bpy.data.objects: if bpy.context.view_layer.objects.active != obj and obj.select_get(): lista.append(obj) total = int(str(numpy.log2(diff)).split('.')[0]) i = 0 while i < total: for obj in lista: obj.select_set(True) bpy.ops.object.duplicate(linked=True, mode='DUMMY') for obj in bpy.data.objects: if obj.select_get(): lista.append(obj) for obj in bpy.data.objects: if obj.select_get(): lista.append(obj) for obj in lista: obj.select_set(True) i += 1 difference = int(diff - pow(2, int(str(numpy.log2(diff)).split('.')[0]))) + 1 object = bpy.context.view_layer.objects.active bpy.ops.object.select_all(action='DESELECT') bpy.context.view_layer.objects.active = object bpy.context.view_layer.objects.active.select_set(True) if int(difference) <= 50: for num in range(difference): bpy.ops.object.duplicate(linked=True, mode='DUMMY') else: lista = [] lista.append(bpy.context.view_layer.objects.active) for obj in bpy.data.objects: if bpy.context.view_layer.objects.active != obj and obj.select_get(): lista.append(obj) total = int(str(numpy.log2(difference)).split('.')[0]) i = 0 while i < total: create_duplicate(lista) i += 1 difference = int(difference - pow(2, int(str(numpy.log2(difference)).split('.')[0]))) + 1 duplicate(difference)
BioBlender21
positive
def test_create_ca_conf_with_iam_no_accesspoint(tmpdir): current_time = mount_efs.get_utc_now() <DeepExtract> tls_dict = mount_efs.tls_paths_dictionary(MOUNT_NAME, str(tmpdir)) mount_efs.create_required_directory({}, tls_dict['mount_dir']) tls_dict['certificate_path'] = os.path.join(tls_dict['mount_dir'], 'config.conf') tls_dict['private_key'] = os.path.join(tls_dict['mount_dir'], 'privateKey.pem') tls_dict['public_key'] = os.path.join(tls_dict['mount_dir'], 'publicKey.pem') if True: with open(tls_dict['public_key'], 'w') as f: f.write(PUBLIC_KEY_BODY) credentials = CREDENTIALS if True else None ap_id = AP_ID if False else None True = CLIENT_INFO if True else None full_config_body = mount_efs.create_ca_conf(tls_dict['certificate_path'], COMMON_NAME, tls_dict['mount_dir'], tls_dict['private_key'], current_time, REGION, FS_ID, credentials, ap_id, True) assert os.path.exists(tls_dict['certificate_path']) (tls_dict, full_config_body) = (tls_dict, full_config_body) </DeepExtract> ca_extension_body = '[ v3_ca ]\nsubjectKeyIdentifier = hash\n1.3.6.1.4.1.4843.7.2 = ASN1:SEQUENCE:efs_client_auth\n1.3.6.1.4.1.4843.7.3 = ASN1:UTF8String:%s\n1.3.6.1.4.1.4843.7.4 = ASN1:SEQUENCE:efs_client_info' % FS_ID efs_client_auth_body = mount_efs.efs_client_auth_builder(tls_dict['public_key'], CREDENTIALS['AccessKeyId'], CREDENTIALS['SecretAccessKey'], current_time, REGION, FS_ID, CREDENTIALS['Token']) efs_client_info_body = mount_efs.efs_client_info_builder(CLIENT_INFO) matching_config_body = mount_efs.CA_CONFIG_BODY % (tls_dict['mount_dir'], tls_dict['private_key'], COMMON_NAME, ca_extension_body, efs_client_auth_body, efs_client_info_body) assert full_config_body == matching_config_body
def test_create_ca_conf_with_iam_no_accesspoint(tmpdir): current_time = mount_efs.get_utc_now() tls_dict = mount_efs.tls_paths_dictionary(MOUNT_NAME, str(tmpdir)) mount_efs.create_required_directory({}, tls_dict['mount_dir']) tls_dict['certificate_path'] = os.path.join(tls_dict['mount_dir'], 'config.conf') tls_dict['private_key'] = os.path.join(tls_dict['mount_dir'], 'privateKey.pem') tls_dict['public_key'] = os.path.join(tls_dict['mount_dir'], 'publicKey.pem') if True: with open(tls_dict['public_key'], 'w') as f: f.write(PUBLIC_KEY_BODY) credentials = CREDENTIALS if True else None ap_id = AP_ID if False else None True = CLIENT_INFO if True else None full_config_body = mount_efs.create_ca_conf(tls_dict['certificate_path'], COMMON_NAME, tls_dict['mount_dir'], tls_dict['private_key'], current_time, REGION, FS_ID, credentials, ap_id, True) assert os.path.exists(tls_dict['certificate_path']) (tls_dict, full_config_body) = (tls_dict, full_config_body) ca_extension_body = '[ v3_ca ]\nsubjectKeyIdentifier = hash\n1.3.6.1.4.1.4843.7.2 = ASN1:SEQUENCE:efs_client_auth\n1.3.6.1.4.1.4843.7.3 = ASN1:UTF8String:%s\n1.3.6.1.4.1.4843.7.4 = ASN1:SEQUENCE:efs_client_info' % FS_ID efs_client_auth_body = mount_efs.efs_client_auth_builder(tls_dict['public_key'], CREDENTIALS['AccessKeyId'], CREDENTIALS['SecretAccessKey'], current_time, REGION, FS_ID, CREDENTIALS['Token']) efs_client_info_body = mount_efs.efs_client_info_builder(CLIENT_INFO) matching_config_body = mount_efs.CA_CONFIG_BODY % (tls_dict['mount_dir'], tls_dict['private_key'], COMMON_NAME, ca_extension_body, efs_client_auth_body, efs_client_info_body) assert full_config_body == matching_config_body
efs-utils
positive
@bp.route('/audit/datasets-metadata') def datasets_metadata_page(): store = _model.STORE all_products = {p.name for p in store.index.products.get_all()} summarised_products = set(store.list_complete_products()) unsummarised_product_names = all_products - summarised_products extra = {} if 'timings' in flask.request.args: <DeepExtract> extra['product_timings_iter'] = sorted(list(product_timings()), key=lambda a: a.time_seconds or 0, reverse=True) </DeepExtract> return utils.render('audit-metadata-issues.html', products_all=all_products, products_summarised=summarised_products, products_missing=unsummarised_product_names, spatial_quality_stats=list(store.get_quality_stats()), **extra)
@bp.route('/audit/datasets-metadata') def datasets_metadata_page(): store = _model.STORE all_products = {p.name for p in store.index.products.get_all()} summarised_products = set(store.list_complete_products()) unsummarised_product_names = all_products - summarised_products extra = {} if 'timings' in flask.request.args: extra['product_timings_iter'] = sorted(list(product_timings()), key=lambda a: a.time_seconds or 0, reverse=True) return utils.render('audit-metadata-issues.html', products_all=all_products, products_summarised=summarised_products, products_missing=unsummarised_product_names, spatial_quality_stats=list(store.get_quality_stats()), **extra)
datacube-explorer
positive
def partition(self): V = self.sV.copy() <DeepExtract> R = filter(lambda v: len(v.e_in()) == 0, self.sV) </DeepExtract> for r in R: V.remove(r) parts = [] while len(R) > 0: v = R.pop(0) p = Poset([v]) l = v.N(+1) while len(l) > 0: x = l.pop(0) if x in p: continue if all([y in p for y in x.N(-1)]): p.add(x) if x in R: R.remove(x) else: V.remove(x) l.extend(x.N(+1)) elif x in V: V.remove(x) R.append(x) parts.append(list(p)) return parts
def partition(self): V = self.sV.copy() R = filter(lambda v: len(v.e_in()) == 0, self.sV) for r in R: V.remove(r) parts = [] while len(R) > 0: v = R.pop(0) p = Poset([v]) l = v.N(+1) while len(l) > 0: x = l.pop(0) if x in p: continue if all([y in p for y in x.N(-1)]): p.add(x) if x in R: R.remove(x) else: V.remove(x) l.extend(x.N(+1)) elif x in V: V.remove(x) R.append(x) parts.append(list(p)) return parts
CodeAtlasSublime
positive
def dalton_model(nstates=3, omin=-5, omax=5, sigma_min=0.5, sigma_max=2.0, lifetime_max=100, lifetime_min=10, reversible=True, output='gaussian'): """ Construct a test multistate model with regular spaced emission means (linearly interpolated between omin and omax) and variable emission widths (linearly interpolated between sigma_min and sigma_max). Parameters ---------- nstates : int, optional, default = 3 number of hidden states omin : float, optional, default = -5 mean position of the first state. omax : float, optional, default = 5 mean position of the last state. sigma_min : float, optional, default = 0.5 The width of the observed gaussian distribution for the first state sigma_max : float, optional, default = 2.0 The width of the observed gaussian distribution for the last state lifetime_max : float, optional, default = 100 maximum lifetime of any state lifetime_min : float, optional, default = 10 minimum lifetime of any state reversible : bool, optional, default=True If True, the row-stochastic transition matrix will be reversible. output_model_type : str, optional, default='gaussian' Output model to use, one of ['gaussian', 'discrete'] Returns ------- model : HMM The synthetic HMM model. Examples -------- Generate default model. >>> model = dalton_model() Generate model with specified number of states. >>> model = dalton_model(nstates=5) Generate non-reversible model. >>> model = dalton_model(reversible=False) Generate a discrete output model. >>> model = dalton_model(output='discrete') """ means = np.linspace(omin, omax, num=nstates) sigmas = np.linspace(sigma_min, sigma_max, num=nstates) if output == 'gaussian': output_model = GaussianOutputModel(nstates, means=means, sigmas=sigmas) elif output == 'discrete': B = np.zeros([nstates, nstates], dtype=np.float64) for i in range(nstates): for j in range(nstates): B[i, j] = np.exp(-0.5 * (means[i] - means[j]) / (sigmas[i] * sigmas[j])) B[i, :] /= B[i, :].sum() output_model = DiscreteOutputModel(B) else: raise Exception("output_model_type = '%s' unknown, must be one of ['gaussian', 'discrete']" % output) <DeepExtract> ltmax = math.log(lifetime_max) ltmin = math.log(lifetime_min) lt = np.linspace(ltmin, ltmax, num=nstates) diag = 1.0 - 1.0 / np.exp(lt) X = np.random.random((nstates, nstates)) if reversible: X += X.T T = X / np.sum(X, axis=1)[:, None] for i in range(nstates): T[i, i] = 0 T[i, :] *= (1.0 - diag[i]) / np.sum(T[i, :]) T[i, i] = 1.0 - np.sum(T[i, :]) Tij = T </DeepExtract> import msmtools.analysis as msmana Pi = msmana.stationary_distribution(Tij) from bhmm import HMM model = HMM(Pi, Tij, output_model) return model
def dalton_model(nstates=3, omin=-5, omax=5, sigma_min=0.5, sigma_max=2.0, lifetime_max=100, lifetime_min=10, reversible=True, output='gaussian'): """ Construct a test multistate model with regular spaced emission means (linearly interpolated between omin and omax) and variable emission widths (linearly interpolated between sigma_min and sigma_max). Parameters ---------- nstates : int, optional, default = 3 number of hidden states omin : float, optional, default = -5 mean position of the first state. omax : float, optional, default = 5 mean position of the last state. sigma_min : float, optional, default = 0.5 The width of the observed gaussian distribution for the first state sigma_max : float, optional, default = 2.0 The width of the observed gaussian distribution for the last state lifetime_max : float, optional, default = 100 maximum lifetime of any state lifetime_min : float, optional, default = 10 minimum lifetime of any state reversible : bool, optional, default=True If True, the row-stochastic transition matrix will be reversible. output_model_type : str, optional, default='gaussian' Output model to use, one of ['gaussian', 'discrete'] Returns ------- model : HMM The synthetic HMM model. Examples -------- Generate default model. >>> model = dalton_model() Generate model with specified number of states. >>> model = dalton_model(nstates=5) Generate non-reversible model. >>> model = dalton_model(reversible=False) Generate a discrete output model. >>> model = dalton_model(output='discrete') """ means = np.linspace(omin, omax, num=nstates) sigmas = np.linspace(sigma_min, sigma_max, num=nstates) if output == 'gaussian': output_model = GaussianOutputModel(nstates, means=means, sigmas=sigmas) elif output == 'discrete': B = np.zeros([nstates, nstates], dtype=np.float64) for i in range(nstates): for j in range(nstates): B[i, j] = np.exp(-0.5 * (means[i] - means[j]) / (sigmas[i] * sigmas[j])) B[i, :] /= B[i, :].sum() output_model = DiscreteOutputModel(B) else: raise Exception("output_model_type = '%s' unknown, must be one of ['gaussian', 'discrete']" % output) ltmax = math.log(lifetime_max) ltmin = math.log(lifetime_min) lt = np.linspace(ltmin, ltmax, num=nstates) diag = 1.0 - 1.0 / np.exp(lt) X = np.random.random((nstates, nstates)) if reversible: X += X.T T = X / np.sum(X, axis=1)[:, None] for i in range(nstates): T[i, i] = 0 T[i, :] *= (1.0 - diag[i]) / np.sum(T[i, :]) T[i, i] = 1.0 - np.sum(T[i, :]) Tij = T import msmtools.analysis as msmana Pi = msmana.stationary_distribution(Tij) from bhmm import HMM model = HMM(Pi, Tij, output_model) return model
bhmm
positive
def runSelectedValidators(self, validatorInstList, verifyMode, verbose=True, stopIfFoundBlock=False, publishLog=None, *args): """ Run the code for each active validator instance. verifyMode = True for verify = False for fix """ validationResultData = {} logText = '' if publishLog: logText = '\nPublisher' logText += '\nScene: ' + publishLog['Scene'] logText += '\nPublished: ' + publishLog['Published'] logText += '\nExported: ' + publishLog['ExportPath'] logText += '\nComment: ' + publishLog['Comment'] + '\n' if validatorInstList: progressAmount = 0 maxProcess = len(validatorInstList) cmds.progressWindow(title='dpValidator', progress=progressAmount, status='dpValidator: 0%', isInterruptable=False) for (v, validatorInst) in enumerate(validatorInstList): if validatorInst.active: progressAmount += 1 cmds.progressWindow(edit=True, maxValue=maxProcess, progress=progressAmount, status=validatorInst.guideModuleName + ': ' + repr(progressAmount)) validatorInst.verbose = False validationResultData[validatorInst.guideModuleName] = validatorInst.runValidator(verifyMode) validatorInst.verbose = True if stopIfFoundBlock: if True in validatorInst.foundIssueList: if False in validatorInst.resultOkList: return (validationResultData, True, v) if validationResultData: dataList = list(validationResultData.keys()) dataList.sort() for (i, dataItem) in enumerate(dataList): logText += validationResultData[dataItem]['logText'] if i != len(dataList) - 1: logText += '\n' heightSize = len(dataList) else: logText += '\n' + self.langDic[self.langName]['i207_notMarked'] heightSize = 2 thisTime = str(time.asctime(time.localtime(time.time()))) logText = thisTime + '\n' + logText if verbose: <DeepExtract> self.info_title = 'i019_log' self.info_description = 'v000_validator' self.info_text = logText self.info_winWidth = 250 self.info_winHeight = 150 + heightSize * 13 self.info_align = 'left' if cmds.window('dpInfoWindow', query=True, exists=True): cmds.deleteUI('dpInfoWindow', window=True) dpInfoWin = cmds.window('dpInfoWindow', title='dpAutoRig - v' + DPAR_VERSION_PY3 + ' - ' + self.langDic[self.langName]['i013_info'] + ' - ' + self.langDic[self.langName][self.info_title], iconName='dpInfo', widthHeight=(self.info_winWidth, self.info_winHeight), menuBar=False, sizeable=True, minimizeButton=False, maximizeButton=False) infoColumnLayout = cmds.columnLayout('infoColumnLayout', adjustableColumn=True, columnOffset=['both', 20], parent=dpInfoWin) cmds.separator(style='none', height=10, parent=infoColumnLayout) infoLayout = cmds.scrollLayout('infoLayout', parent=infoColumnLayout) if self.info_description: infoDesc = cmds.text(self.langDic[self.langName][self.info_description], align=self.info_align, parent=infoLayout) if self.info_text: infoText = cmds.text(self.info_text, align=self.info_align, parent=infoLayout) cmds.showWindow(dpInfoWin) </DeepExtract> print('\n-------------\n' + self.langDic[self.langName]['v000_validator'] + '\n' + logText) if publishLog: validationResultData['Publisher'] = publishLog if not dpUtils.exportLogDicToJson(validationResultData, subFolder=self.dpData + '/' + self.dpLog): print(self.langDic[self.langName]['i201_saveScene']) cmds.progressWindow(endProgress=True) return (validationResultData, False, 0)
def runSelectedValidators(self, validatorInstList, verifyMode, verbose=True, stopIfFoundBlock=False, publishLog=None, *args): """ Run the code for each active validator instance. verifyMode = True for verify = False for fix """ validationResultData = {} logText = '' if publishLog: logText = '\nPublisher' logText += '\nScene: ' + publishLog['Scene'] logText += '\nPublished: ' + publishLog['Published'] logText += '\nExported: ' + publishLog['ExportPath'] logText += '\nComment: ' + publishLog['Comment'] + '\n' if validatorInstList: progressAmount = 0 maxProcess = len(validatorInstList) cmds.progressWindow(title='dpValidator', progress=progressAmount, status='dpValidator: 0%', isInterruptable=False) for (v, validatorInst) in enumerate(validatorInstList): if validatorInst.active: progressAmount += 1 cmds.progressWindow(edit=True, maxValue=maxProcess, progress=progressAmount, status=validatorInst.guideModuleName + ': ' + repr(progressAmount)) validatorInst.verbose = False validationResultData[validatorInst.guideModuleName] = validatorInst.runValidator(verifyMode) validatorInst.verbose = True if stopIfFoundBlock: if True in validatorInst.foundIssueList: if False in validatorInst.resultOkList: return (validationResultData, True, v) if validationResultData: dataList = list(validationResultData.keys()) dataList.sort() for (i, dataItem) in enumerate(dataList): logText += validationResultData[dataItem]['logText'] if i != len(dataList) - 1: logText += '\n' heightSize = len(dataList) else: logText += '\n' + self.langDic[self.langName]['i207_notMarked'] heightSize = 2 thisTime = str(time.asctime(time.localtime(time.time()))) logText = thisTime + '\n' + logText if verbose: self.info_title = 'i019_log' self.info_description = 'v000_validator' self.info_text = logText self.info_winWidth = 250 self.info_winHeight = 150 + heightSize * 13 self.info_align = 'left' if cmds.window('dpInfoWindow', query=True, exists=True): cmds.deleteUI('dpInfoWindow', window=True) dpInfoWin = cmds.window('dpInfoWindow', title='dpAutoRig - v' + DPAR_VERSION_PY3 + ' - ' + self.langDic[self.langName]['i013_info'] + ' - ' + self.langDic[self.langName][self.info_title], iconName='dpInfo', widthHeight=(self.info_winWidth, self.info_winHeight), menuBar=False, sizeable=True, minimizeButton=False, maximizeButton=False) infoColumnLayout = cmds.columnLayout('infoColumnLayout', adjustableColumn=True, columnOffset=['both', 20], parent=dpInfoWin) cmds.separator(style='none', height=10, parent=infoColumnLayout) infoLayout = cmds.scrollLayout('infoLayout', parent=infoColumnLayout) if self.info_description: infoDesc = cmds.text(self.langDic[self.langName][self.info_description], align=self.info_align, parent=infoLayout) if self.info_text: infoText = cmds.text(self.info_text, align=self.info_align, parent=infoLayout) cmds.showWindow(dpInfoWin) print('\n-------------\n' + self.langDic[self.langName]['v000_validator'] + '\n' + logText) if publishLog: validationResultData['Publisher'] = publishLog if not dpUtils.exportLogDicToJson(validationResultData, subFolder=self.dpData + '/' + self.dpLog): print(self.langDic[self.langName]['i201_saveScene']) cmds.progressWindow(endProgress=True) return (validationResultData, False, 0)
dpAutoRigSystem
positive
def xy2TemperatureAndTint(x, y): """ Convert xy coordinates of a color point to Temperature and Tint. The conversion is based on the Robertson's method of interpolation in the uv space. Tint is a shift in the uv space. :param x: :type x: float :param y: :type y: float :return: Temperature and Tint :rtype: 2-uple of float """ <DeepExtract> d = 1.5 - x + 6.0 * y (u, v) = (2.0 * x / d, 3.0 * y / d) (u, v) = (u, v) </DeepExtract> (last_dt, last_dv, last_du) = (0.0, 0.0, 0.0) (temp, tint) = (0, 0) for index in range(31): (du, dv) = (1.0, uvtTable[index][3]) n = np.sqrt(1.0 + dv * dv) (du, dv) = (du / n, dv / n) (uu, vv) = (u - uvtTable[index][1], v - uvtTable[index][2]) dt = -uu * dv + vv * du if dt <= 0 or index == 30: if index == 0: raise ValueError('xy2TemperatureAndTint : Temp should not be infinity') if index == 30: if dt > 0: raise ValueError('xy2TemperatureAndTint : Temp should be >= 1667 K') dt = -dt w = dt / (last_dt + dt) temp = 10 ** 6 / (w * uvtTable[index - 1][0] + (1.0 - w) * uvtTable[index][0]) (du, dv) = (du * (1.0 - w) + last_du * w, dv * (1.0 - w) + last_dv * w) n = np.sqrt(du * du + dv * dv) (du, dv) = (du / n, dv / n) tint = u * du + v * dv break (last_dt, last_du, last_dv) = (dt, du, dv) return (temp, tint)
def xy2TemperatureAndTint(x, y): """ Convert xy coordinates of a color point to Temperature and Tint. The conversion is based on the Robertson's method of interpolation in the uv space. Tint is a shift in the uv space. :param x: :type x: float :param y: :type y: float :return: Temperature and Tint :rtype: 2-uple of float """ d = 1.5 - x + 6.0 * y (u, v) = (2.0 * x / d, 3.0 * y / d) (u, v) = (u, v) (last_dt, last_dv, last_du) = (0.0, 0.0, 0.0) (temp, tint) = (0, 0) for index in range(31): (du, dv) = (1.0, uvtTable[index][3]) n = np.sqrt(1.0 + dv * dv) (du, dv) = (du / n, dv / n) (uu, vv) = (u - uvtTable[index][1], v - uvtTable[index][2]) dt = -uu * dv + vv * du if dt <= 0 or index == 30: if index == 0: raise ValueError('xy2TemperatureAndTint : Temp should not be infinity') if index == 30: if dt > 0: raise ValueError('xy2TemperatureAndTint : Temp should be >= 1667 K') dt = -dt w = dt / (last_dt + dt) temp = 10 ** 6 / (w * uvtTable[index - 1][0] + (1.0 - w) * uvtTable[index][0]) (du, dv) = (du * (1.0 - w) + last_du * w, dv * (1.0 - w) + last_dv * w) n = np.sqrt(du * du + dv * dv) (du, dv) = (du / n, dv / n) tint = u * du + v * dv break (last_dt, last_du, last_dv) = (dt, du, dv) return (temp, tint)
bLUe_PYSIDE2
positive
def rename_variation(self): new = clean_name(self.input_text.value) self.db.set_variation(new, self.select_physician.value, self.select_physician_roi.value, self.select_variation.value) <DeepExtract> self.select_variation.options = self.db.get_variations(self.select_physician.value, self.select_physician_roi.value) self.select_variation.value = self.select_variation.options[0] </DeepExtract> self.select_variation.value = new
def rename_variation(self): new = clean_name(self.input_text.value) self.db.set_variation(new, self.select_physician.value, self.select_physician_roi.value, self.select_variation.value) self.select_variation.options = self.db.get_variations(self.select_physician.value, self.select_physician_roi.value) self.select_variation.value = self.select_variation.options[0] self.select_variation.value = new
DVH-Analytics-Bokeh
positive
@click.command(context_settings=CONTEXT_SETTINGS, short_help='Uninstall a library on a cluster.') @click.option('--cluster-id', required=True, type=ClusterIdClickType(), help=ClusterIdClickType.help) @click.option('--all', is_flag=True, cls=OneOfOption, one_of=UNINSTALL_OPTIONS, default=False, help='If set, uninstall all libraries.') @click.option('--jar', cls=OneOfOption, one_of=UNINSTALL_OPTIONS, help=JAR_HELP) @click.option('--egg', cls=OneOfOption, one_of=UNINSTALL_OPTIONS, help=EGG_HELP) @click.option('--whl', cls=OneOfOption, one_of=UNINSTALL_OPTIONS, help=WHEEL_HELP) @click.option('--maven-coordinates', cls=OneOfOption, one_of=UNINSTALL_OPTIONS, help=MAVEN_COORDINATES_HELP) @click.option('--maven-repo', help=MAVEN_REPO_HELP) @click.option('--maven-exclusion', multiple=True, help=MAVEN_EXCLUSION_HELP) @click.option('--pypi-package', cls=OneOfOption, one_of=UNINSTALL_OPTIONS, help=PYPI_PACKAGE_HELP) @click.option('--pypi-repo', help=PYPI_REPO_HELP) @click.option('--cran-package', cls=OneOfOption, one_of=UNINSTALL_OPTIONS, help=CRAN_PACKAGE_HELP) @click.option('--cran-repo', help=CRAN_REPO_HELP) @debug_option @profile_option @eat_exceptions @provide_api_client def uninstall_cli(api_client, cluster_id, all, jar, egg, whl, maven_coordinates, maven_repo, maven_exclusion, pypi_package, pypi_repo, cran_package, cran_repo): """ Mark libraries on a cluster to be uninstalled. Libraries which are marked to be uninstalled will stay attached until the cluster is restarted. (see `databricks clusters restart -h`). """ if all: libraries_api = LibrariesApi(api_client) library_statuses = libraries_api.cluster_status(cluster_id).get('library_statuses', []) libraries = [l_status['library'] for l_status in library_statuses] if len(libraries) == 0: return libraries_api.uninstall_libraries(cluster_id, libraries) <DeepExtract> click.echo(click.style('WARNING: Uninstalling libraries requires a cluster restart.', fg='red')) click.echo('databricks clusters restart --cluster-id {}'.format(cluster_id)) </DeepExtract> return <DeepExtract> maven_exclusion = list(maven_exclusion) if jar is not None: library = {'jar': jar} elif egg is not None: library = {'egg': egg} elif whl is not None: library = {'whl': whl} elif maven_coordinates is not None: maven_library = {'maven': {'coordinates': maven_coordinates}} if maven_repo is not None: maven_library['maven']['repo'] = maven_repo if len(maven_exclusion) > 0: maven_library['maven']['exclusions'] = maven_exclusion library = maven_library elif pypi_package is not None: pypi_library = {'pypi': {'package': pypi_package}} if pypi_repo is not None: pypi_library['pypi']['repo'] = pypi_repo library = pypi_library elif cran_package is not None: cran_library = {'cran': {'package': cran_package}} if cran_repo is not None: cran_library['cran']['repo'] = cran_repo library = cran_library raise AssertionError('Code not reached.') </DeepExtract> LibrariesApi(api_client).uninstall_libraries(cluster_id, [library]) <DeepExtract> click.echo(click.style('WARNING: Uninstalling libraries requires a cluster restart.', fg='red')) click.echo('databricks clusters restart --cluster-id {}'.format(cluster_id)) </DeepExtract>
@click.command(context_settings=CONTEXT_SETTINGS, short_help='Uninstall a library on a cluster.') @click.option('--cluster-id', required=True, type=ClusterIdClickType(), help=ClusterIdClickType.help) @click.option('--all', is_flag=True, cls=OneOfOption, one_of=UNINSTALL_OPTIONS, default=False, help='If set, uninstall all libraries.') @click.option('--jar', cls=OneOfOption, one_of=UNINSTALL_OPTIONS, help=JAR_HELP) @click.option('--egg', cls=OneOfOption, one_of=UNINSTALL_OPTIONS, help=EGG_HELP) @click.option('--whl', cls=OneOfOption, one_of=UNINSTALL_OPTIONS, help=WHEEL_HELP) @click.option('--maven-coordinates', cls=OneOfOption, one_of=UNINSTALL_OPTIONS, help=MAVEN_COORDINATES_HELP) @click.option('--maven-repo', help=MAVEN_REPO_HELP) @click.option('--maven-exclusion', multiple=True, help=MAVEN_EXCLUSION_HELP) @click.option('--pypi-package', cls=OneOfOption, one_of=UNINSTALL_OPTIONS, help=PYPI_PACKAGE_HELP) @click.option('--pypi-repo', help=PYPI_REPO_HELP) @click.option('--cran-package', cls=OneOfOption, one_of=UNINSTALL_OPTIONS, help=CRAN_PACKAGE_HELP) @click.option('--cran-repo', help=CRAN_REPO_HELP) @debug_option @profile_option @eat_exceptions @provide_api_client def uninstall_cli(api_client, cluster_id, all, jar, egg, whl, maven_coordinates, maven_repo, maven_exclusion, pypi_package, pypi_repo, cran_package, cran_repo): """ Mark libraries on a cluster to be uninstalled. Libraries which are marked to be uninstalled will stay attached until the cluster is restarted. (see `databricks clusters restart -h`). """ if all: libraries_api = LibrariesApi(api_client) library_statuses = libraries_api.cluster_status(cluster_id).get('library_statuses', []) libraries = [l_status['library'] for l_status in library_statuses] if len(libraries) == 0: return libraries_api.uninstall_libraries(cluster_id, libraries) click.echo(click.style('WARNING: Uninstalling libraries requires a cluster restart.', fg='red')) click.echo('databricks clusters restart --cluster-id {}'.format(cluster_id)) return maven_exclusion = list(maven_exclusion) if jar is not None: library = {'jar': jar} elif egg is not None: library = {'egg': egg} elif whl is not None: library = {'whl': whl} elif maven_coordinates is not None: maven_library = {'maven': {'coordinates': maven_coordinates}} if maven_repo is not None: maven_library['maven']['repo'] = maven_repo if len(maven_exclusion) > 0: maven_library['maven']['exclusions'] = maven_exclusion library = maven_library elif pypi_package is not None: pypi_library = {'pypi': {'package': pypi_package}} if pypi_repo is not None: pypi_library['pypi']['repo'] = pypi_repo library = pypi_library elif cran_package is not None: cran_library = {'cran': {'package': cran_package}} if cran_repo is not None: cran_library['cran']['repo'] = cran_repo library = cran_library raise AssertionError('Code not reached.') LibrariesApi(api_client).uninstall_libraries(cluster_id, [library]) click.echo(click.style('WARNING: Uninstalling libraries requires a cluster restart.', fg='red')) click.echo('databricks clusters restart --cluster-id {}'.format(cluster_id))
databricks-cli
positive
def SmallParsimony(l): def is_ripe(v): for child in Adj[v]: if not Tag[child]: return False return True def find_ripe(Nodes): Ripe = [] Unripe = [] for v in Nodes: if is_ripe(v): Ripe.append(v) else: Unripe.append(v) return (Ripe, Unripe) def delta(i, j): return 0 if i == j else 1 def get_distance(v, k): def best_alignment(child): return min([s[child][i] + delta(i, k) for i in range(len(Alphabet))]) return sum([best_alignment(child) for child in Adj[v]]) def backtrack(root, s): def dfs(node, k, parent_score): def match(i, j, child_scores): return parent_score == child_scores[0][i] + child_scores[1][j] if len(Adj[node]) == 0: return children = Adj[node] child_scores_delta = [[s[child][i] + delta(i, k) for i in range(len(Alphabet))] for child in children] child_scores_raw = [[s[child][i] for i in range(len(Alphabet))] for child in children] candidates = [(i, j, child_scores_raw) for i in range(len(Alphabet)) for j in range(len(Alphabet)) if match(i, j, child_scores_delta)] selection = candidates[randrange(len(candidates))] scores_children = [selection[2][i][selection[i]] for i in range(len(children))] for i in range(len(children)): ks[children[i]] = selection[i] for i in range(len(children)): <DeepExtract> def match(i, j, child_scores): return scores_children[i] == child_scores[0][i] + child_scores[1][j] if len(Adj[children[i]]) == 0: return children = Adj[children[i]] child_scores_delta = [[s[child][i] + delta(i, ks[children[i]]) for i in range(len(Alphabet))] for child in children] child_scores_raw = [[s[child][i] for i in range(len(Alphabet))] for child in children] candidates = [(i, j, child_scores_raw) for i in range(len(Alphabet)) for j in range(len(Alphabet)) if match(i, j, child_scores_delta)] selection = candidates[randrange(len(candidates))] scores_children = [selection[2][i][selection[i]] for i in range(len(children))] for i in range(len(children)): ks[children[i]] = selection[i] for i in range(len(children)): dfs(children[i], ks[children[i]], scores_children[i]) </DeepExtract> ks = {} index = np.argmin(s[root]) score = s[root][index] ks[root] = index <DeepExtract> def match(i, j, child_scores): return score == child_scores[0][i] + child_scores[1][j] if len(Adj[root]) == 0: return children = Adj[root] child_scores_delta = [[s[child][i] + delta(i, index) for i in range(len(Alphabet))] for child in children] child_scores_raw = [[s[child][i] for i in range(len(Alphabet))] for child in children] candidates = [(i, j, child_scores_raw) for i in range(len(Alphabet)) for j in range(len(Alphabet)) if match(i, j, child_scores_delta)] selection = candidates[randrange(len(candidates))] scores_children = [selection[2][i][selection[i]] for i in range(len(children))] for i in range(len(children)): ks[children[i]] = selection[i] for i in range(len(children)): dfs(children[i], ks[children[i]], scores_children[i]) </DeepExtract> return (score, ks) s = {} Tag = {} ToBeProcessed = [] for v in Adj.keys(): if v in Leaves: char = Leaves[v][l] s[v] = [0 if Alphabet[k] == char else float('inf') for k in range(len(Alphabet))] Tag[v] = True else: Tag[v] = False ToBeProcessed.append(v) <DeepExtract> Ripe = [] Unripe = [] for v in ToBeProcessed: if is_ripe(v): Ripe.append(v) else: Unripe.append(v) (Ripe, ToBeProcessed) = (Ripe, Unripe) </DeepExtract> while len(Ripe) > 0: for v in Ripe: s[v] = [get_distance(v, k) for k in range(len(Alphabet))] Tag[v] = True <DeepExtract> Ripe = [] Unripe = [] for v in ToBeProcessed: if is_ripe(v): Ripe.append(v) else: Unripe.append(v) (Ripe, ToBeProcessed) = (Ripe, Unripe) </DeepExtract> assert len(ToBeProcessed) == 0, 'If there are no ripe nodes, ToBeProcessed should be exhausted' return backtrack(v, s)
def SmallParsimony(l): def is_ripe(v): for child in Adj[v]: if not Tag[child]: return False return True def find_ripe(Nodes): Ripe = [] Unripe = [] for v in Nodes: if is_ripe(v): Ripe.append(v) else: Unripe.append(v) return (Ripe, Unripe) def delta(i, j): return 0 if i == j else 1 def get_distance(v, k): def best_alignment(child): return min([s[child][i] + delta(i, k) for i in range(len(Alphabet))]) return sum([best_alignment(child) for child in Adj[v]]) def backtrack(root, s): def dfs(node, k, parent_score): def match(i, j, child_scores): return parent_score == child_scores[0][i] + child_scores[1][j] if len(Adj[node]) == 0: return children = Adj[node] child_scores_delta = [[s[child][i] + delta(i, k) for i in range(len(Alphabet))] for child in children] child_scores_raw = [[s[child][i] for i in range(len(Alphabet))] for child in children] candidates = [(i, j, child_scores_raw) for i in range(len(Alphabet)) for j in range(len(Alphabet)) if match(i, j, child_scores_delta)] selection = candidates[randrange(len(candidates))] scores_children = [selection[2][i][selection[i]] for i in range(len(children))] for i in range(len(children)): ks[children[i]] = selection[i] for i in range(len(children)): def match(i, j, child_scores): return scores_children[i] == child_scores[0][i] + child_scores[1][j] if len(Adj[children[i]]) == 0: return children = Adj[children[i]] child_scores_delta = [[s[child][i] + delta(i, ks[children[i]]) for i in range(len(Alphabet))] for child in children] child_scores_raw = [[s[child][i] for i in range(len(Alphabet))] for child in children] candidates = [(i, j, child_scores_raw) for i in range(len(Alphabet)) for j in range(len(Alphabet)) if match(i, j, child_scores_delta)] selection = candidates[randrange(len(candidates))] scores_children = [selection[2][i][selection[i]] for i in range(len(children))] for i in range(len(children)): ks[children[i]] = selection[i] for i in range(len(children)): dfs(children[i], ks[children[i]], scores_children[i]) ks = {} index = np.argmin(s[root]) score = s[root][index] ks[root] = index def match(i, j, child_scores): return score == child_scores[0][i] + child_scores[1][j] if len(Adj[root]) == 0: return children = Adj[root] child_scores_delta = [[s[child][i] + delta(i, index) for i in range(len(Alphabet))] for child in children] child_scores_raw = [[s[child][i] for i in range(len(Alphabet))] for child in children] candidates = [(i, j, child_scores_raw) for i in range(len(Alphabet)) for j in range(len(Alphabet)) if match(i, j, child_scores_delta)] selection = candidates[randrange(len(candidates))] scores_children = [selection[2][i][selection[i]] for i in range(len(children))] for i in range(len(children)): ks[children[i]] = selection[i] for i in range(len(children)): dfs(children[i], ks[children[i]], scores_children[i]) return (score, ks) s = {} Tag = {} ToBeProcessed = [] for v in Adj.keys(): if v in Leaves: char = Leaves[v][l] s[v] = [0 if Alphabet[k] == char else float('inf') for k in range(len(Alphabet))] Tag[v] = True else: Tag[v] = False ToBeProcessed.append(v) Ripe = [] Unripe = [] for v in ToBeProcessed: if is_ripe(v): Ripe.append(v) else: Unripe.append(v) (Ripe, ToBeProcessed) = (Ripe, Unripe) while len(Ripe) > 0: for v in Ripe: s[v] = [get_distance(v, k) for k in range(len(Alphabet))] Tag[v] = True Ripe = [] Unripe = [] for v in ToBeProcessed: if is_ripe(v): Ripe.append(v) else: Unripe.append(v) (Ripe, ToBeProcessed) = (Ripe, Unripe) assert len(ToBeProcessed) == 0, 'If there are no ripe nodes, ToBeProcessed should be exhausted' return backtrack(v, s)
bioinformatics
positive
def write(self): """Adds resource to API gateway""" paths_with_api = self._get_resource_meta_paths(self.api_gateway_name, API_GATEWAY_TYPE) if not paths_with_api: message = f"Api gateway '{self.api_gateway_name}' was not found" _LOG.error(message) raise ValueError(message) USER_LOG.info(f"Adding resource '{self.resource_path}' to api '{self.api_gateway_name}'...") <DeepExtract> for path in paths_with_api: deployment_resources = json.loads(_read_content_from_file(path)) if self.resource_path in deployment_resources[self.api_gateway_name]['resources']: _LOG.info(f"Found resource '{self.resource_path}' in api '{self.api_gateway_name}' in file '{path}'") path_with_resources = (path, deployment_resources) </DeepExtract> if not path_with_resources: path_with_api = paths_with_api[0] deployment_resources = json.loads(_read_content_from_file(path_with_api)) else: (path_with_api, deployment_resources) = path_with_resources message = f"Resource '{self.resource_path}' was found in api gateway '{self.api_gateway_name}' in file '{path_with_api}'" _LOG.warning(f'Found duplicate while generating meta. {message}') if not click.confirm(f'{message} Overwrite?'): USER_LOG.warning(f"Skipping resource '{self.resource_path}'") raise RuntimeError USER_LOG.info(f"Adding resource '{self.resource_path}' to api '{self.api_gateway_name}'...") <DeepExtract> if self._dict.get('integration_type') == 'lambda': self.validate_integration_lambda_existence() elif 'lamdba_name' in self._dict: self._dict.pop('lambda_name') deployment_resources[self.api_gateway_name]['resources'][self.resource_path] = super()._resolve_configuration() </DeepExtract> _write_content_to_file(path_with_api, json.dumps(deployment_resources, indent=2))
def write(self): """Adds resource to API gateway""" paths_with_api = self._get_resource_meta_paths(self.api_gateway_name, API_GATEWAY_TYPE) if not paths_with_api: message = f"Api gateway '{self.api_gateway_name}' was not found" _LOG.error(message) raise ValueError(message) USER_LOG.info(f"Adding resource '{self.resource_path}' to api '{self.api_gateway_name}'...") for path in paths_with_api: deployment_resources = json.loads(_read_content_from_file(path)) if self.resource_path in deployment_resources[self.api_gateway_name]['resources']: _LOG.info(f"Found resource '{self.resource_path}' in api '{self.api_gateway_name}' in file '{path}'") path_with_resources = (path, deployment_resources) if not path_with_resources: path_with_api = paths_with_api[0] deployment_resources = json.loads(_read_content_from_file(path_with_api)) else: (path_with_api, deployment_resources) = path_with_resources message = f"Resource '{self.resource_path}' was found in api gateway '{self.api_gateway_name}' in file '{path_with_api}'" _LOG.warning(f'Found duplicate while generating meta. {message}') if not click.confirm(f'{message} Overwrite?'): USER_LOG.warning(f"Skipping resource '{self.resource_path}'") raise RuntimeError USER_LOG.info(f"Adding resource '{self.resource_path}' to api '{self.api_gateway_name}'...") if self._dict.get('integration_type') == 'lambda': self.validate_integration_lambda_existence() elif 'lamdba_name' in self._dict: self._dict.pop('lambda_name') deployment_resources[self.api_gateway_name]['resources'][self.resource_path] = super()._resolve_configuration() _write_content_to_file(path_with_api, json.dumps(deployment_resources, indent=2))
aws-syndicate
positive
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode): """Loads a data file into a list of `InputBatch`s.""" label_map = {label: i for (i, label) in enumerate(label_list)} premise_2_tokenzed = {} hypothesis_2_tokenzed = {} list_2_tokenizedID = {} features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info('Writing example %d of %d' % (ex_index, len(examples))) tokens_a = premise_2_tokenzed.get(example.text_a) if tokens_a is None: tokens_a = tokenizer.tokenize(example.text_a) premise_2_tokenzed[example.text_a] = tokens_a tokens_b = premise_2_tokenzed.get(example.text_b) if tokens_b is None: tokens_b = tokenizer.tokenize(example.text_b) hypothesis_2_tokenzed[example.text_b] = tokens_b <DeepExtract> while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_seq_length - 3: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() </DeepExtract> tokens_A = ['[CLS]'] + tokens_a + ['[SEP]'] segment_ids_A = [0] * len(tokens_A) tokens_B = tokens_b + ['[SEP]'] segment_ids_B = [1] * (len(tokens_b) + 1) tokens = tokens_A + tokens_B segment_ids = segment_ids_A + segment_ids_B input_ids_A = list_2_tokenizedID.get(' '.join(tokens_A)) if input_ids_A is None: input_ids_A = tokenizer.convert_tokens_to_ids(tokens_A) list_2_tokenizedID[' '.join(tokens_A)] = input_ids_A input_ids_B = list_2_tokenizedID.get(' '.join(tokens_B)) if input_ids_B is None: input_ids_B = tokenizer.convert_tokens_to_ids(tokens_B) list_2_tokenizedID[' '.join(tokens_B)] = input_ids_B input_ids = input_ids_A + input_ids_B input_mask = [1] * len(input_ids) padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if output_mode == 'classification': label_id = label_map[example.label] elif output_mode == 'regression': label_id = float(example.label) else: raise KeyError(output_mode) if ex_index < 5: logger.info('*** Example ***') logger.info('guid: %s' % example.guid) logger.info('tokens: %s' % ' '.join([str(x) for x in tokens])) logger.info('input_ids: %s' % ' '.join([str(x) for x in input_ids])) logger.info('input_mask: %s' % ' '.join([str(x) for x in input_mask])) logger.info('segment_ids: %s' % ' '.join([str(x) for x in segment_ids])) logger.info('label: %s (id = %d)' % (example.label, label_id)) features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)) return features
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode): """Loads a data file into a list of `InputBatch`s.""" label_map = {label: i for (i, label) in enumerate(label_list)} premise_2_tokenzed = {} hypothesis_2_tokenzed = {} list_2_tokenizedID = {} features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info('Writing example %d of %d' % (ex_index, len(examples))) tokens_a = premise_2_tokenzed.get(example.text_a) if tokens_a is None: tokens_a = tokenizer.tokenize(example.text_a) premise_2_tokenzed[example.text_a] = tokens_a tokens_b = premise_2_tokenzed.get(example.text_b) if tokens_b is None: tokens_b = tokenizer.tokenize(example.text_b) hypothesis_2_tokenzed[example.text_b] = tokens_b while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_seq_length - 3: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() tokens_A = ['[CLS]'] + tokens_a + ['[SEP]'] segment_ids_A = [0] * len(tokens_A) tokens_B = tokens_b + ['[SEP]'] segment_ids_B = [1] * (len(tokens_b) + 1) tokens = tokens_A + tokens_B segment_ids = segment_ids_A + segment_ids_B input_ids_A = list_2_tokenizedID.get(' '.join(tokens_A)) if input_ids_A is None: input_ids_A = tokenizer.convert_tokens_to_ids(tokens_A) list_2_tokenizedID[' '.join(tokens_A)] = input_ids_A input_ids_B = list_2_tokenizedID.get(' '.join(tokens_B)) if input_ids_B is None: input_ids_B = tokenizer.convert_tokens_to_ids(tokens_B) list_2_tokenizedID[' '.join(tokens_B)] = input_ids_B input_ids = input_ids_A + input_ids_B input_mask = [1] * len(input_ids) padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if output_mode == 'classification': label_id = label_map[example.label] elif output_mode == 'regression': label_id = float(example.label) else: raise KeyError(output_mode) if ex_index < 5: logger.info('*** Example ***') logger.info('guid: %s' % example.guid) logger.info('tokens: %s' % ' '.join([str(x) for x in tokens])) logger.info('input_ids: %s' % ' '.join([str(x) for x in input_ids])) logger.info('input_mask: %s' % ' '.join([str(x) for x in input_mask])) logger.info('segment_ids: %s' % ' '.join([str(x) for x in segment_ids])) logger.info('label: %s (id = %d)' % (example.label, label_id)) features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)) return features
BenchmarkingZeroShot
positive
@property def tar(self): if self._tar is None: <DeepExtract> with tarfile.open(mode='w', fileobj=file) as tar: for pfile in self.pfiles: tar.add(pfile.fullpath, arcname=pfile.relativepath) file.seek(0) self._tar = file return file </DeepExtract> return self._tar
@property def tar(self): if self._tar is None: with tarfile.open(mode='w', fileobj=file) as tar: for pfile in self.pfiles: tar.add(pfile.fullpath, arcname=pfile.relativepath) file.seek(0) self._tar = file return file return self._tar
anaconda-client
positive
def add(self, meta): """ Add a resource to this pool. The resource is loaded and returned when ``load_pool()`` is called. :param meta: The resource description """ <DeepExtract> if inspect.isclass(type(meta)): if issubclass(meta.__class__, ResourceDescription): return raise ValueError('Resource loader got type {}, not a resource description'.format(type(meta))) </DeepExtract> <DeepExtract> meta.loader_cls = self.get_loader(meta, raise_on_error=True) </DeepExtract> self._resources.append(meta)
def add(self, meta): """ Add a resource to this pool. The resource is loaded and returned when ``load_pool()`` is called. :param meta: The resource description """ if inspect.isclass(type(meta)): if issubclass(meta.__class__, ResourceDescription): return raise ValueError('Resource loader got type {}, not a resource description'.format(type(meta))) meta.loader_cls = self.get_loader(meta, raise_on_error=True) self._resources.append(meta)
demosys-py
positive
def __iter__(self): self.loader = iter(self.original_loader) <DeepExtract> try: self.container = next(self.loader) self.stop_iteration = False except StopIteration: if self.infinite_loader: self.loader = iter(self.original_loader) return self._preload() self.stop_iteration = True return with torch.cuda.stream(self.stream): for (key, item) in self.container.items(): self.container[key] = item.cuda(non_blocking=True).float() self.container['img'] = self.container['img'] * 2 / 255 - 1 self.container['condition'] = self.container['img'] * self.container['mask'] </DeepExtract> return self
def __iter__(self): self.loader = iter(self.original_loader) try: self.container = next(self.loader) self.stop_iteration = False except StopIteration: if self.infinite_loader: self.loader = iter(self.original_loader) return self._preload() self.stop_iteration = True return with torch.cuda.stream(self.stream): for (key, item) in self.container.items(): self.container[key] = item.cuda(non_blocking=True).float() self.container['img'] = self.container['img'] * 2 / 255 - 1 self.container['condition'] = self.container['img'] * self.container['mask'] return self
DeepPrivacy
positive
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True) def quadrilateral_intersection(pts1, pts2, int_pts): num_of_inter = 0 for i in range(4): if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2): int_pts[num_of_inter * 2] = pts1[2 * i] int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1] num_of_inter += 1 if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1): int_pts[num_of_inter * 2] = pts2[2 * i] int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1] num_of_inter += 1 temp_pts = cuda.local.array((2,), dtype=numba.float32) for i in range(4): for j in range(4): <DeepExtract> A = cuda.local.array((2,), dtype=numba.float32) B = cuda.local.array((2,), dtype=numba.float32) C = cuda.local.array((2,), dtype=numba.float32) D = cuda.local.array((2,), dtype=numba.float32) A[0] = pts1[2 * i] A[1] = pts1[2 * i + 1] B[0] = pts1[2 * ((i + 1) % 4)] B[1] = pts1[2 * ((i + 1) % 4) + 1] C[0] = pts2[2 * j] C[1] = pts2[2 * j + 1] D[0] = pts2[2 * ((j + 1) % 4)] D[1] = pts2[2 * ((j + 1) % 4) + 1] BA0 = B[0] - A[0] BA1 = B[1] - A[1] DA0 = D[0] - A[0] CA0 = C[0] - A[0] DA1 = D[1] - A[1] CA1 = C[1] - A[1] acd = DA1 * CA0 > CA1 * DA0 bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0]) if acd != bcd: abc = CA1 * BA0 > BA1 * CA0 abd = DA1 * BA0 > BA1 * DA0 if abc != abd: DC0 = D[0] - C[0] DC1 = D[1] - C[1] ABBA = A[0] * B[1] - B[0] * A[1] CDDC = C[0] * D[1] - D[0] * C[1] DH = BA1 * DC0 - BA0 * DC1 Dx = ABBA * DC0 - BA0 * CDDC Dy = ABBA * DC1 - BA1 * CDDC temp_pts[0] = Dx / DH temp_pts[1] = Dy / DH has_pts = True has_pts = False </DeepExtract> if has_pts: int_pts[num_of_inter * 2] = temp_pts[0] int_pts[num_of_inter * 2 + 1] = temp_pts[1] num_of_inter += 1 return num_of_inter
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True) def quadrilateral_intersection(pts1, pts2, int_pts): num_of_inter = 0 for i in range(4): if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2): int_pts[num_of_inter * 2] = pts1[2 * i] int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1] num_of_inter += 1 if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1): int_pts[num_of_inter * 2] = pts2[2 * i] int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1] num_of_inter += 1 temp_pts = cuda.local.array((2,), dtype=numba.float32) for i in range(4): for j in range(4): A = cuda.local.array((2,), dtype=numba.float32) B = cuda.local.array((2,), dtype=numba.float32) C = cuda.local.array((2,), dtype=numba.float32) D = cuda.local.array((2,), dtype=numba.float32) A[0] = pts1[2 * i] A[1] = pts1[2 * i + 1] B[0] = pts1[2 * ((i + 1) % 4)] B[1] = pts1[2 * ((i + 1) % 4) + 1] C[0] = pts2[2 * j] C[1] = pts2[2 * j + 1] D[0] = pts2[2 * ((j + 1) % 4)] D[1] = pts2[2 * ((j + 1) % 4) + 1] BA0 = B[0] - A[0] BA1 = B[1] - A[1] DA0 = D[0] - A[0] CA0 = C[0] - A[0] DA1 = D[1] - A[1] CA1 = C[1] - A[1] acd = DA1 * CA0 > CA1 * DA0 bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0]) if acd != bcd: abc = CA1 * BA0 > BA1 * CA0 abd = DA1 * BA0 > BA1 * DA0 if abc != abd: DC0 = D[0] - C[0] DC1 = D[1] - C[1] ABBA = A[0] * B[1] - B[0] * A[1] CDDC = C[0] * D[1] - D[0] * C[1] DH = BA1 * DC0 - BA0 * DC1 Dx = ABBA * DC0 - BA0 * CDDC Dy = ABBA * DC1 - BA1 * CDDC temp_pts[0] = Dx / DH temp_pts[1] = Dy / DH has_pts = True has_pts = False if has_pts: int_pts[num_of_inter * 2] = temp_pts[0] int_pts[num_of_inter * 2 + 1] = temp_pts[1] num_of_inter += 1 return num_of_inter
ebms_3dod
positive
@login_required def interruption_spent_time(request, board_id=None): <DeepExtract> if board_id is None: board = None board = get_user_boards(request.user).get(id=board_id) board = board </DeepExtract> return interruptions.interruption_spent_time(request.user, board)
@login_required def interruption_spent_time(request, board_id=None): if board_id is None: board = None board = get_user_boards(request.user).get(id=board_id) board = board return interruptions.interruption_spent_time(request.user, board)
djanban
positive
def get_viewport_profile(request_envelope): """Utility method, to get viewport profile. The viewport profile is calculated using the shape, current pixel width and height, along with the dpi. If there is no `viewport` value in `request_envelope.context`, then an `ViewportProfile.UNKNOWN_VIEWPORT_PROFILE` is returned. :param request_envelope: The alexa request envelope object :type request_envelope: ask_sdk_model.request_envelope.RequestEnvelope :return: Calculated Viewport Profile enum :rtype: ViewportProfile """ viewport_state = request_envelope.context.viewport if viewport_state: shape = viewport_state.shape current_pixel_width = int(viewport_state.current_pixel_width) current_pixel_height = int(viewport_state.current_pixel_height) dpi = int(viewport_state.dpi) <DeepExtract> if current_pixel_width > current_pixel_height: orientation = Orientation.LANDSCAPE elif current_pixel_width < current_pixel_height: orientation = Orientation.PORTRAIT else: orientation = Orientation.EQUAL </DeepExtract> <DeepExtract> if dpi in range(0, 121): dpi_group = Density.XLOW elif dpi in range(121, 161): dpi_group = Density.LOW elif dpi in range(161, 241): dpi_group = Density.MEDIUM elif dpi in range(241, 321): dpi_group = Density.HIGH elif dpi in range(321, 481): dpi_group = Density.XHIGH elif dpi >= 481: dpi_group = Density.XXHIGH raise AskSdkException('Unknown dpi group value: {}'.format(dpi)) </DeepExtract> <DeepExtract> if current_pixel_width in range(0, 600): pixel_width_size_group = Size.XSMALL elif current_pixel_width in range(600, 960): pixel_width_size_group = Size.SMALL elif current_pixel_width in range(960, 1280): pixel_width_size_group = Size.MEDIUM elif current_pixel_width in range(1280, 1920): pixel_width_size_group = Size.LARGE elif current_pixel_width >= 1920: pixel_width_size_group = Size.XLARGE raise AskSdkException('Unknown size group value: {}'.format(current_pixel_width)) </DeepExtract> <DeepExtract> if current_pixel_height in range(0, 600): pixel_height_size_group = Size.XSMALL elif current_pixel_height in range(600, 960): pixel_height_size_group = Size.SMALL elif current_pixel_height in range(960, 1280): pixel_height_size_group = Size.MEDIUM elif current_pixel_height in range(1280, 1920): pixel_height_size_group = Size.LARGE elif current_pixel_height >= 1920: pixel_height_size_group = Size.XLARGE raise AskSdkException('Unknown size group value: {}'.format(current_pixel_height)) </DeepExtract> if shape is Shape.ROUND and orientation is Orientation.EQUAL and (dpi_group is Density.LOW) and (pixel_width_size_group is Size.XSMALL) and (pixel_height_size_group is Size.XSMALL): return ViewportProfile.HUB_ROUND_SMALL elif shape is Shape.RECTANGLE and orientation is Orientation.LANDSCAPE and (dpi_group is Density.LOW) and (pixel_width_size_group <= Size.MEDIUM) and (pixel_height_size_group <= Size.XSMALL): return ViewportProfile.HUB_LANDSCAPE_SMALL elif shape is Shape.RECTANGLE and orientation is Orientation.LANDSCAPE and (dpi_group is Density.LOW) and (pixel_width_size_group <= Size.MEDIUM) and (pixel_height_size_group <= Size.SMALL): return ViewportProfile.HUB_LANDSCAPE_MEDIUM elif shape is Shape.RECTANGLE and orientation is Orientation.LANDSCAPE and (dpi_group is Density.LOW) and (pixel_width_size_group >= Size.LARGE) and (pixel_height_size_group >= Size.SMALL): return ViewportProfile.HUB_LANDSCAPE_LARGE elif shape is Shape.RECTANGLE and orientation is Orientation.LANDSCAPE and (dpi_group is Density.MEDIUM) and (pixel_width_size_group >= Size.MEDIUM) and (pixel_height_size_group >= Size.SMALL): return ViewportProfile.MOBILE_LANDSCAPE_MEDIUM elif shape is Shape.RECTANGLE and orientation is Orientation.PORTRAIT and (dpi_group is Density.MEDIUM) and (pixel_width_size_group >= Size.SMALL) and (pixel_height_size_group >= Size.MEDIUM): return ViewportProfile.MOBILE_PORTRAIT_MEDIUM elif shape is Shape.RECTANGLE and orientation is Orientation.LANDSCAPE and (dpi_group is Density.MEDIUM) and (pixel_width_size_group >= Size.SMALL) and (pixel_height_size_group >= Size.XSMALL): return ViewportProfile.MOBILE_LANDSCAPE_SMALL elif shape is Shape.RECTANGLE and orientation is Orientation.PORTRAIT and (dpi_group is Density.MEDIUM) and (pixel_width_size_group >= Size.XSMALL) and (pixel_height_size_group >= Size.SMALL): return ViewportProfile.MOBILE_PORTRAIT_SMALL elif shape is Shape.RECTANGLE and orientation is Orientation.LANDSCAPE and (dpi_group >= Density.HIGH) and (pixel_width_size_group >= Size.XLARGE) and (pixel_height_size_group >= Size.MEDIUM): return ViewportProfile.TV_LANDSCAPE_XLARGE elif shape is Shape.RECTANGLE and orientation is Orientation.PORTRAIT and (dpi_group >= Density.HIGH) and (pixel_width_size_group is Size.XSMALL) and (pixel_height_size_group is Size.XLARGE): return ViewportProfile.TV_PORTRAIT_MEDIUM elif shape is Shape.RECTANGLE and orientation is Orientation.LANDSCAPE and (dpi_group >= Density.HIGH) and (pixel_width_size_group is Size.MEDIUM) and (pixel_height_size_group is Size.SMALL): return ViewportProfile.TV_LANDSCAPE_MEDIUM return ViewportProfile.UNKNOWN_VIEWPORT_PROFILE
def get_viewport_profile(request_envelope): """Utility method, to get viewport profile. The viewport profile is calculated using the shape, current pixel width and height, along with the dpi. If there is no `viewport` value in `request_envelope.context`, then an `ViewportProfile.UNKNOWN_VIEWPORT_PROFILE` is returned. :param request_envelope: The alexa request envelope object :type request_envelope: ask_sdk_model.request_envelope.RequestEnvelope :return: Calculated Viewport Profile enum :rtype: ViewportProfile """ viewport_state = request_envelope.context.viewport if viewport_state: shape = viewport_state.shape current_pixel_width = int(viewport_state.current_pixel_width) current_pixel_height = int(viewport_state.current_pixel_height) dpi = int(viewport_state.dpi) if current_pixel_width > current_pixel_height: orientation = Orientation.LANDSCAPE elif current_pixel_width < current_pixel_height: orientation = Orientation.PORTRAIT else: orientation = Orientation.EQUAL if dpi in range(0, 121): dpi_group = Density.XLOW elif dpi in range(121, 161): dpi_group = Density.LOW elif dpi in range(161, 241): dpi_group = Density.MEDIUM elif dpi in range(241, 321): dpi_group = Density.HIGH elif dpi in range(321, 481): dpi_group = Density.XHIGH elif dpi >= 481: dpi_group = Density.XXHIGH raise AskSdkException('Unknown dpi group value: {}'.format(dpi)) if current_pixel_width in range(0, 600): pixel_width_size_group = Size.XSMALL elif current_pixel_width in range(600, 960): pixel_width_size_group = Size.SMALL elif current_pixel_width in range(960, 1280): pixel_width_size_group = Size.MEDIUM elif current_pixel_width in range(1280, 1920): pixel_width_size_group = Size.LARGE elif current_pixel_width >= 1920: pixel_width_size_group = Size.XLARGE raise AskSdkException('Unknown size group value: {}'.format(current_pixel_width)) if current_pixel_height in range(0, 600): pixel_height_size_group = Size.XSMALL elif current_pixel_height in range(600, 960): pixel_height_size_group = Size.SMALL elif current_pixel_height in range(960, 1280): pixel_height_size_group = Size.MEDIUM elif current_pixel_height in range(1280, 1920): pixel_height_size_group = Size.LARGE elif current_pixel_height >= 1920: pixel_height_size_group = Size.XLARGE raise AskSdkException('Unknown size group value: {}'.format(current_pixel_height)) if shape is Shape.ROUND and orientation is Orientation.EQUAL and (dpi_group is Density.LOW) and (pixel_width_size_group is Size.XSMALL) and (pixel_height_size_group is Size.XSMALL): return ViewportProfile.HUB_ROUND_SMALL elif shape is Shape.RECTANGLE and orientation is Orientation.LANDSCAPE and (dpi_group is Density.LOW) and (pixel_width_size_group <= Size.MEDIUM) and (pixel_height_size_group <= Size.XSMALL): return ViewportProfile.HUB_LANDSCAPE_SMALL elif shape is Shape.RECTANGLE and orientation is Orientation.LANDSCAPE and (dpi_group is Density.LOW) and (pixel_width_size_group <= Size.MEDIUM) and (pixel_height_size_group <= Size.SMALL): return ViewportProfile.HUB_LANDSCAPE_MEDIUM elif shape is Shape.RECTANGLE and orientation is Orientation.LANDSCAPE and (dpi_group is Density.LOW) and (pixel_width_size_group >= Size.LARGE) and (pixel_height_size_group >= Size.SMALL): return ViewportProfile.HUB_LANDSCAPE_LARGE elif shape is Shape.RECTANGLE and orientation is Orientation.LANDSCAPE and (dpi_group is Density.MEDIUM) and (pixel_width_size_group >= Size.MEDIUM) and (pixel_height_size_group >= Size.SMALL): return ViewportProfile.MOBILE_LANDSCAPE_MEDIUM elif shape is Shape.RECTANGLE and orientation is Orientation.PORTRAIT and (dpi_group is Density.MEDIUM) and (pixel_width_size_group >= Size.SMALL) and (pixel_height_size_group >= Size.MEDIUM): return ViewportProfile.MOBILE_PORTRAIT_MEDIUM elif shape is Shape.RECTANGLE and orientation is Orientation.LANDSCAPE and (dpi_group is Density.MEDIUM) and (pixel_width_size_group >= Size.SMALL) and (pixel_height_size_group >= Size.XSMALL): return ViewportProfile.MOBILE_LANDSCAPE_SMALL elif shape is Shape.RECTANGLE and orientation is Orientation.PORTRAIT and (dpi_group is Density.MEDIUM) and (pixel_width_size_group >= Size.XSMALL) and (pixel_height_size_group >= Size.SMALL): return ViewportProfile.MOBILE_PORTRAIT_SMALL elif shape is Shape.RECTANGLE and orientation is Orientation.LANDSCAPE and (dpi_group >= Density.HIGH) and (pixel_width_size_group >= Size.XLARGE) and (pixel_height_size_group >= Size.MEDIUM): return ViewportProfile.TV_LANDSCAPE_XLARGE elif shape is Shape.RECTANGLE and orientation is Orientation.PORTRAIT and (dpi_group >= Density.HIGH) and (pixel_width_size_group is Size.XSMALL) and (pixel_height_size_group is Size.XLARGE): return ViewportProfile.TV_PORTRAIT_MEDIUM elif shape is Shape.RECTANGLE and orientation is Orientation.LANDSCAPE and (dpi_group >= Density.HIGH) and (pixel_width_size_group is Size.MEDIUM) and (pixel_height_size_group is Size.SMALL): return ViewportProfile.TV_LANDSCAPE_MEDIUM return ViewportProfile.UNKNOWN_VIEWPORT_PROFILE
alexa-skills-kit-sdk-for-python
positive
def resample(self, image, flow, normalize=True): (b, c, h, w) = image.size() if not hasattr(self, 'grid') or self.grid.size() != flow.size(): <DeepExtract> hor = torch.linspace(-1.0, 1.0, w) hor.requires_grad = False hor = hor.view(1, 1, 1, w) hor = hor.expand(b, 1, h, w) ver = torch.linspace(-1.0, 1.0, h) ver.requires_grad = False ver = ver.view(1, 1, h, 1) ver = ver.expand(b, 1, h, w) t_grid = torch.cat([hor, ver], 1) t_grid.requires_grad = False if flow.dtype == torch.float16: t_grid = t_grid.half() self.grid = t_grid.cuda(flow.get_device()) </DeepExtract> if normalize: flow = torch.cat([flow[:, 0:1, :, :] / ((w - 1.0) / 2.0), flow[:, 1:2, :, :] / ((h - 1.0) / 2.0)], dim=1) final_grid = (self.grid + flow).permute(0, 2, 3, 1).cuda(image.get_device()) <DeepExtract> if self.opt.fp16: output = torch.nn.functional.grid_sample(image.float(), final_grid.float(), mode='bilinear', padding_mode='border').half() else: output = torch.nn.functional.grid_sample(image, final_grid, mode='bilinear', padding_mode='border') </DeepExtract> return output
def resample(self, image, flow, normalize=True): (b, c, h, w) = image.size() if not hasattr(self, 'grid') or self.grid.size() != flow.size(): hor = torch.linspace(-1.0, 1.0, w) hor.requires_grad = False hor = hor.view(1, 1, 1, w) hor = hor.expand(b, 1, h, w) ver = torch.linspace(-1.0, 1.0, h) ver.requires_grad = False ver = ver.view(1, 1, h, 1) ver = ver.expand(b, 1, h, w) t_grid = torch.cat([hor, ver], 1) t_grid.requires_grad = False if flow.dtype == torch.float16: t_grid = t_grid.half() self.grid = t_grid.cuda(flow.get_device()) if normalize: flow = torch.cat([flow[:, 0:1, :, :] / ((w - 1.0) / 2.0), flow[:, 1:2, :, :] / ((h - 1.0) / 2.0)], dim=1) final_grid = (self.grid + flow).permute(0, 2, 3, 1).cuda(image.get_device()) if self.opt.fp16: output = torch.nn.functional.grid_sample(image.float(), final_grid.float(), mode='bilinear', padding_mode='border').half() else: output = torch.nn.functional.grid_sample(image, final_grid, mode='bilinear', padding_mode='border') return output
C2F-FWN
positive
def __init__(self, num_classes=20): super(RetinaNet, self).__init__() self.fpn = FPN50() self.num_classes = num_classes <DeepExtract> layers = [] for _ in range(4): layers.append(nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)) layers.append(nn.ReLU(True)) layers.append(nn.Conv2d(256, self.num_anchors * 4, kernel_size=3, stride=1, padding=1)) self.loc_head = nn.Sequential(*layers) </DeepExtract> <DeepExtract> layers = [] for _ in range(4): layers.append(nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)) layers.append(nn.ReLU(True)) layers.append(nn.Conv2d(256, self.num_anchors * self.num_classes, kernel_size=3, stride=1, padding=1)) self.cls_head = nn.Sequential(*layers) </DeepExtract>
def __init__(self, num_classes=20): super(RetinaNet, self).__init__() self.fpn = FPN50() self.num_classes = num_classes layers = [] for _ in range(4): layers.append(nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)) layers.append(nn.ReLU(True)) layers.append(nn.Conv2d(256, self.num_anchors * 4, kernel_size=3, stride=1, padding=1)) self.loc_head = nn.Sequential(*layers) layers = [] for _ in range(4): layers.append(nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)) layers.append(nn.ReLU(True)) layers.append(nn.Conv2d(256, self.num_anchors * self.num_classes, kernel_size=3, stride=1, padding=1)) self.cls_head = nn.Sequential(*layers)
Detection-PyTorch-Notebook
positive
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) def loss(self, cls_scores, bbox_preds, centernesses, gt_bboxes, gt_labels, img_metas, cfg, gt_bboxes_ignore=None): assert len(cls_scores) == len(bbox_preds) == len(centernesses) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] <DeepExtract> mlvl_points = [] for i in range(len(featmap_sizes)): mlvl_points.append(self.get_points_single(featmap_sizes[i], self.strides[i], bbox_preds[0].dtype, bbox_preds[0].device)) all_level_points = mlvl_points </DeepExtract> <DeepExtract> assert len(all_level_points) == len(self.regress_ranges) num_levels = len(all_level_points) expanded_regress_ranges = [all_level_points[i].new_tensor(self.regress_ranges[i])[None].expand_as(all_level_points[i]) for i in range(num_levels)] concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) concat_points = torch.cat(all_level_points, dim=0) (labels_list, bbox_targets_list) = multi_apply(self.fcos_target_single, gt_bboxes, gt_labels, points=concat_points, regress_ranges=concat_regress_ranges) num_points = [center.size(0) for center in all_level_points] labels_list = [labels.split(num_points, 0) for labels in labels_list] bbox_targets_list = [bbox_targets.split(num_points, 0) for bbox_targets in bbox_targets_list] concat_lvl_labels = [] concat_lvl_bbox_targets = [] for i in range(num_levels): concat_lvl_labels.append(torch.cat([labels[i] for labels in labels_list])) concat_lvl_bbox_targets.append(torch.cat([bbox_targets[i] for bbox_targets in bbox_targets_list])) (labels, bbox_targets) = (concat_lvl_labels, concat_lvl_bbox_targets) </DeepExtract> num_imgs = cls_scores[0].size(0) flatten_cls_scores = [cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores] flatten_bbox_preds = [bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds] flatten_centerness = [centerness.permute(0, 2, 3, 1).reshape(-1) for centerness in centernesses] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_centerness = torch.cat(flatten_centerness) flatten_labels = torch.cat(labels) flatten_bbox_targets = torch.cat(bbox_targets) flatten_points = torch.cat([points.repeat(num_imgs, 1) for points in all_level_points]) pos_inds = flatten_labels.nonzero().reshape(-1) num_pos = len(pos_inds) loss_cls = self.loss_cls(flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs) pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_centerness = flatten_centerness[pos_inds] if num_pos > 0: pos_bbox_targets = flatten_bbox_targets[pos_inds] <DeepExtract> left_right = pos_bbox_targets[:, [0, 2]] top_bottom = pos_bbox_targets[:, [1, 3]] centerness_targets = left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0] * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) pos_centerness_targets = torch.sqrt(centerness_targets) </DeepExtract> pos_points = flatten_points[pos_inds] pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds) pos_decoded_target_preds = distance2bbox(pos_points, pos_bbox_targets) loss_bbox = self.loss_bbox(pos_decoded_bbox_preds, pos_decoded_target_preds, weight=pos_centerness_targets, avg_factor=pos_centerness_targets.sum()) loss_centerness = self.loss_centerness(pos_centerness, pos_centerness_targets) else: loss_bbox = pos_bbox_preds.sum() loss_centerness = pos_centerness.sum() return dict(loss_cls=loss_cls, loss_bbox=loss_bbox, loss_centerness=loss_centerness)
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) def loss(self, cls_scores, bbox_preds, centernesses, gt_bboxes, gt_labels, img_metas, cfg, gt_bboxes_ignore=None): assert len(cls_scores) == len(bbox_preds) == len(centernesses) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] mlvl_points = [] for i in range(len(featmap_sizes)): mlvl_points.append(self.get_points_single(featmap_sizes[i], self.strides[i], bbox_preds[0].dtype, bbox_preds[0].device)) all_level_points = mlvl_points assert len(all_level_points) == len(self.regress_ranges) num_levels = len(all_level_points) expanded_regress_ranges = [all_level_points[i].new_tensor(self.regress_ranges[i])[None].expand_as(all_level_points[i]) for i in range(num_levels)] concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) concat_points = torch.cat(all_level_points, dim=0) (labels_list, bbox_targets_list) = multi_apply(self.fcos_target_single, gt_bboxes, gt_labels, points=concat_points, regress_ranges=concat_regress_ranges) num_points = [center.size(0) for center in all_level_points] labels_list = [labels.split(num_points, 0) for labels in labels_list] bbox_targets_list = [bbox_targets.split(num_points, 0) for bbox_targets in bbox_targets_list] concat_lvl_labels = [] concat_lvl_bbox_targets = [] for i in range(num_levels): concat_lvl_labels.append(torch.cat([labels[i] for labels in labels_list])) concat_lvl_bbox_targets.append(torch.cat([bbox_targets[i] for bbox_targets in bbox_targets_list])) (labels, bbox_targets) = (concat_lvl_labels, concat_lvl_bbox_targets) num_imgs = cls_scores[0].size(0) flatten_cls_scores = [cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores] flatten_bbox_preds = [bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds] flatten_centerness = [centerness.permute(0, 2, 3, 1).reshape(-1) for centerness in centernesses] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_centerness = torch.cat(flatten_centerness) flatten_labels = torch.cat(labels) flatten_bbox_targets = torch.cat(bbox_targets) flatten_points = torch.cat([points.repeat(num_imgs, 1) for points in all_level_points]) pos_inds = flatten_labels.nonzero().reshape(-1) num_pos = len(pos_inds) loss_cls = self.loss_cls(flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs) pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_centerness = flatten_centerness[pos_inds] if num_pos > 0: pos_bbox_targets = flatten_bbox_targets[pos_inds] left_right = pos_bbox_targets[:, [0, 2]] top_bottom = pos_bbox_targets[:, [1, 3]] centerness_targets = left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0] * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) pos_centerness_targets = torch.sqrt(centerness_targets) pos_points = flatten_points[pos_inds] pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds) pos_decoded_target_preds = distance2bbox(pos_points, pos_bbox_targets) loss_bbox = self.loss_bbox(pos_decoded_bbox_preds, pos_decoded_target_preds, weight=pos_centerness_targets, avg_factor=pos_centerness_targets.sum()) loss_centerness = self.loss_centerness(pos_centerness, pos_centerness_targets) else: loss_bbox = pos_bbox_preds.sum() loss_centerness = pos_centerness.sum() return dict(loss_cls=loss_cls, loss_bbox=loss_bbox, loss_centerness=loss_centerness)
ACSL
positive
def putData(self, D, response=True): """ putData(D) -- writes samples that must be given as a NUMPY array, samples x channels. The type of the samples (D) and the number of channels must match the corresponding quantities in the FieldTrip buffer. """ if not isinstance(D, numpy.ndarray) or len(D.shape) != 2: raise ValueError('Data must be given as a NUMPY array (samples x channels)') nSamp = D.shape[0] nChan = D.shape[1] <DeepExtract> if isinstance(D, str): (dataType, dataBuf) = (0, D) if isinstance(D, numpy.ndarray): dt = D.dtype if not dt.isnative or dt.num < 1 or dt.num >= len(dataType): (dataType, dataBuf) = (DATATYPE_UNKNOWN, None) ft = dataType[dt.num] if ft == -1: (dataType, dataBuf) = (DATATYPE_UNKNOWN, None) if D.flags['C_CONTIGUOUS']: (dataType, dataBuf) = (ft, D.tostring()) AC = D.copy('C') (dataType, dataBuf) = (ft, AC.tostring()) if isinstance(D, int): (dataType, dataBuf) = (DATATYPE_INT32, struct.pack('i', D)) if isinstance(D, float): (dataType, dataBuf) = (DATATYPE_FLOAT64, struct.pack('d', D)) (dataType, dataBuf) = (DATATYPE_UNKNOWN, None) </DeepExtract> dataBufSize = len(dataBuf) if response: command = PUT_DAT else: command = PUT_DAT_NORESPONSE request = struct.pack('HHI', VERSION, command, 16 + dataBufSize) dataDef = struct.pack('IIII', nChan, nSamp, dataType, dataBufSize) <DeepExtract> if not self.isConnected: raise IOError('Not connected to FieldTrip buffer') N = len(request + dataDef + dataBuf) nw = self.sock.send(request + dataDef + dataBuf) while nw < N: nw += self.sock.send(request + dataDef + dataBuf[nw:]) </DeepExtract> if response: <DeepExtract> resp_hdr = self.sock.recv(8) while len(resp_hdr) < 8: resp_hdr += self.sock.recv(8 - len(resp_hdr)) (version, command, bufsize) = struct.unpack('HHI', resp_hdr) if version != VERSION: self.disconnect() raise IOError('Bad response from buffer server - disconnecting') if bufsize > 0: payload = self.sock.recv(bufsize) while len(payload) < bufsize: payload += self.sock.recv(bufsize - len(payload)) else: payload = None (status, bufsize, resp_buf) = (command, bufsize, payload) </DeepExtract> if status != PUT_OK: raise IOError('Samples could not be written.')
def putData(self, D, response=True): """ putData(D) -- writes samples that must be given as a NUMPY array, samples x channels. The type of the samples (D) and the number of channels must match the corresponding quantities in the FieldTrip buffer. """ if not isinstance(D, numpy.ndarray) or len(D.shape) != 2: raise ValueError('Data must be given as a NUMPY array (samples x channels)') nSamp = D.shape[0] nChan = D.shape[1] if isinstance(D, str): (dataType, dataBuf) = (0, D) if isinstance(D, numpy.ndarray): dt = D.dtype if not dt.isnative or dt.num < 1 or dt.num >= len(dataType): (dataType, dataBuf) = (DATATYPE_UNKNOWN, None) ft = dataType[dt.num] if ft == -1: (dataType, dataBuf) = (DATATYPE_UNKNOWN, None) if D.flags['C_CONTIGUOUS']: (dataType, dataBuf) = (ft, D.tostring()) AC = D.copy('C') (dataType, dataBuf) = (ft, AC.tostring()) if isinstance(D, int): (dataType, dataBuf) = (DATATYPE_INT32, struct.pack('i', D)) if isinstance(D, float): (dataType, dataBuf) = (DATATYPE_FLOAT64, struct.pack('d', D)) (dataType, dataBuf) = (DATATYPE_UNKNOWN, None) dataBufSize = len(dataBuf) if response: command = PUT_DAT else: command = PUT_DAT_NORESPONSE request = struct.pack('HHI', VERSION, command, 16 + dataBufSize) dataDef = struct.pack('IIII', nChan, nSamp, dataType, dataBufSize) if not self.isConnected: raise IOError('Not connected to FieldTrip buffer') N = len(request + dataDef + dataBuf) nw = self.sock.send(request + dataDef + dataBuf) while nw < N: nw += self.sock.send(request + dataDef + dataBuf[nw:]) if response: resp_hdr = self.sock.recv(8) while len(resp_hdr) < 8: resp_hdr += self.sock.recv(8 - len(resp_hdr)) (version, command, bufsize) = struct.unpack('HHI', resp_hdr) if version != VERSION: self.disconnect() raise IOError('Bad response from buffer server - disconnecting') if bufsize > 0: payload = self.sock.recv(bufsize) while len(payload) < bufsize: payload += self.sock.recv(bufsize - len(payload)) else: payload = None (status, bufsize, resp_buf) = (command, bufsize, payload) if status != PUT_OK: raise IOError('Samples could not be written.')
eegsynth
positive
def conv2d(inputs, num_output_channels, kernel_size, scope, stride=[1, 1], padding='SAME', data_format='NHWC', use_xavier=True, stddev=0.001, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None): """ 2D convolution with non-linear operation. Args: inputs: 4-D tensor variable BxHxWxC num_output_channels: int kernel_size: a list of 2 ints scope: string stride: a list of 2 ints padding: 'SAME' or 'VALID' data_format: 'NHWC' or 'NCHW' use_xavier: bool, use xavier_initializer if true stddev: float, stddev for truncated_normal init weight_decay: float activation_fn: function bn: bool, whether to use batch norm bn_decay: float or float tensor variable in [0,1] is_training: bool Tensor variable Returns: Variable tensor """ with tf.variable_scope(scope) as sc: (kernel_h, kernel_w) = kernel_size assert data_format == 'NHWC' or data_format == 'NCHW' if data_format == 'NHWC': num_in_channels = inputs.get_shape()[-1].value elif data_format == 'NCHW': num_in_channels = inputs.get_shape()[1].value kernel_shape = [kernel_h, kernel_w, num_in_channels, num_output_channels] <DeepExtract> if use_xavier: initializer = tf.contrib.layers.xavier_initializer() else: initializer = tf.truncated_normal_initializer(stddev=stddev) var = _variable_on_cpu('weights', kernel_shape, initializer) if weight_decay is not None: weight_decay = tf.multiply(tf.nn.l2_loss(var), weight_decay, name='weight_loss') tf.add_to_collection('losses', weight_decay) kernel = var </DeepExtract> (stride_h, stride_w) = stride outputs = tf.nn.conv2d(inputs, kernel, [1, stride_h, stride_w, 1], padding=padding, data_format=data_format) <DeepExtract> with tf.device('/cpu:0'): dtype = tf.float16 if use_fp16 else tf.float32 var = tf.get_variable('biases', [num_output_channels], initializer=tf.constant_initializer(0.0), dtype=dtype) biases = var </DeepExtract> outputs = tf.nn.bias_add(outputs, biases, data_format=data_format) if bn: <DeepExtract> outputs = batch_norm_template(outputs, is_training, 'bn', [0, 1, 2], bn_decay, data_format) </DeepExtract> if activation_fn is not None: outputs = activation_fn(outputs) return outputs
def conv2d(inputs, num_output_channels, kernel_size, scope, stride=[1, 1], padding='SAME', data_format='NHWC', use_xavier=True, stddev=0.001, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None): """ 2D convolution with non-linear operation. Args: inputs: 4-D tensor variable BxHxWxC num_output_channels: int kernel_size: a list of 2 ints scope: string stride: a list of 2 ints padding: 'SAME' or 'VALID' data_format: 'NHWC' or 'NCHW' use_xavier: bool, use xavier_initializer if true stddev: float, stddev for truncated_normal init weight_decay: float activation_fn: function bn: bool, whether to use batch norm bn_decay: float or float tensor variable in [0,1] is_training: bool Tensor variable Returns: Variable tensor """ with tf.variable_scope(scope) as sc: (kernel_h, kernel_w) = kernel_size assert data_format == 'NHWC' or data_format == 'NCHW' if data_format == 'NHWC': num_in_channels = inputs.get_shape()[-1].value elif data_format == 'NCHW': num_in_channels = inputs.get_shape()[1].value kernel_shape = [kernel_h, kernel_w, num_in_channels, num_output_channels] if use_xavier: initializer = tf.contrib.layers.xavier_initializer() else: initializer = tf.truncated_normal_initializer(stddev=stddev) var = _variable_on_cpu('weights', kernel_shape, initializer) if weight_decay is not None: weight_decay = tf.multiply(tf.nn.l2_loss(var), weight_decay, name='weight_loss') tf.add_to_collection('losses', weight_decay) kernel = var (stride_h, stride_w) = stride outputs = tf.nn.conv2d(inputs, kernel, [1, stride_h, stride_w, 1], padding=padding, data_format=data_format) with tf.device('/cpu:0'): dtype = tf.float16 if use_fp16 else tf.float32 var = tf.get_variable('biases', [num_output_channels], initializer=tf.constant_initializer(0.0), dtype=dtype) biases = var outputs = tf.nn.bias_add(outputs, biases, data_format=data_format) if bn: outputs = batch_norm_template(outputs, is_training, 'bn', [0, 1, 2], bn_decay, data_format) if activation_fn is not None: outputs = activation_fn(outputs) return outputs
Danesfield
positive
def string_sublength(args): <DeepExtract> params = {} index = 1 for var in ('s', 'i', 'len'): value = args.get(var) if value is None: value = args.get(str(index)) if value is None: value = '' else: index += 1 params[var] = value params = params </DeepExtract> s = params.get('s', '') i = int(params.get('i', 1) or 1) - 1 len = int(params.get('len', 1) or 1) return s[i:i + len]
def string_sublength(args): params = {} index = 1 for var in ('s', 'i', 'len'): value = args.get(var) if value is None: value = args.get(str(index)) if value is None: value = '' else: index += 1 params[var] = value params = params s = params.get('s', '') i = int(params.get('i', 1) or 1) - 1 len = int(params.get('len', 1) or 1) return s[i:i + len]
DistillBERT
positive
def masked_cross_entropy(logits, target, length): """ Args: logits: A Variable containing a FloatTensor of size (batch, max_len, num_classes) which contains the unnormalized probability for each class. target: A Variable containing a LongTensor of size (batch, max_len) which contains the index of the true class for each corresponding step. length: A Variable containing a LongTensor of size (batch,) which contains the length of each data in a batch. Returns: loss: An average loss value masked by the length. """ if USE_CUDA: length = Variable(torch.LongTensor(length)).cuda() else: length = Variable(torch.LongTensor(length)) logits_flat = logits.view(-1, logits.size(-1)) log_probs_flat = functional.log_softmax(logits_flat, dim=1) target_flat = target.view(-1, 1) losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat) losses = losses_flat.view(*target.size()) <DeepExtract> if target.size(1) is None: target.size(1) = length.data.max() batch_size = length.size(0) seq_range = torch.arange(0, target.size(1)).long() seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, target.size(1)) seq_range_expand = Variable(seq_range_expand) if length.is_cuda: seq_range_expand = seq_range_expand.cuda() seq_length_expand = length.unsqueeze(1).expand_as(seq_range_expand) mask = seq_range_expand < seq_length_expand </DeepExtract> losses = losses * mask.float() loss = losses.sum() / length.float().sum() return loss
def masked_cross_entropy(logits, target, length): """ Args: logits: A Variable containing a FloatTensor of size (batch, max_len, num_classes) which contains the unnormalized probability for each class. target: A Variable containing a LongTensor of size (batch, max_len) which contains the index of the true class for each corresponding step. length: A Variable containing a LongTensor of size (batch,) which contains the length of each data in a batch. Returns: loss: An average loss value masked by the length. """ if USE_CUDA: length = Variable(torch.LongTensor(length)).cuda() else: length = Variable(torch.LongTensor(length)) logits_flat = logits.view(-1, logits.size(-1)) log_probs_flat = functional.log_softmax(logits_flat, dim=1) target_flat = target.view(-1, 1) losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat) losses = losses_flat.view(*target.size()) if target.size(1) is None: target.size(1) = length.data.max() batch_size = length.size(0) seq_range = torch.arange(0, target.size(1)).long() seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, target.size(1)) seq_range_expand = Variable(seq_range_expand) if length.is_cuda: seq_range_expand = seq_range_expand.cuda() seq_length_expand = length.unsqueeze(1).expand_as(seq_range_expand) mask = seq_range_expand < seq_length_expand losses = losses * mask.float() loss = losses.sum() / length.float().sum() return loss
CrossWOZ
positive
def set_attributes(self, model_1, obj_1, obj_2, overwrite=True): """ For all the attributes in obj_1 and obj_2, set the non-list attributes in obj_1 to those from obj_2 when they aren't None. If overwrite=False, attributes in object_1 which aren't empty aren't overwritten. Precondition: obj_1 and obj_2 are of the same class """ for attr in obj_2.traits(): class_name = str(type(obj_2.traits()[attr])).strip("<>'").split('.')[-1] if class_name == 'List': phase_order = {'A': 0, 'B': 1, 'C': 2, 'N': 3} list_1 = getattr(obj_1, attr) list_2 = getattr(obj_2, attr) if list_1 is None or len(list_1) == 0: result_list = [] for element in list_2: result_list.append(self.copy(model_1, element)) setattr(obj_1, attr, result_list) continue elif list_2 is None or len(list_2) == 0: continue has_phases = True for i in range(len(list_1)): if not (hasattr(list_1[0], 'phase') and list_1[0].phase is not None): has_phases = False for i in range(len(list_2)): if not (hasattr(list_2[0], 'phase') and list_2[0].phase is not None): has_phases = False if has_phases and len(list_1) > 0 and (len(list_2) > 0): list_1.sort(key=lambda x: phase_order[x.phase]) list_2.sort(key=lambda x: phase_order[x.phase]) list_1_phase = phase_order[list_1[0].phase] list_2_phase = phase_order[list_2[0].phase] list_1_idx = 0 list_2_idx = 0 while list_1_idx < len(list_1) and list_2_idx < len(list_2): if list_1_idx < len(list_1): list_1_phase = phase_order[list_1[list_1_idx].phase] else: list_1_phase = 1000000 if list_2_idx < len(list_2): list_2_phase = phase_order[list_2[list_2_idx].phase] else: list_2_phase = 1000001 if list_1_phase == list_2_phase: <DeepExtract> for attr in list_2[list_2_idx].traits(): class_name = str(type(list_2[list_2_idx].traits()[attr])).strip("<>'").split('.')[-1] if class_name == 'List': phase_order = {'A': 0, 'B': 1, 'C': 2, 'N': 3} list_1 = getattr(list_1[list_1_idx], attr) list_2 = getattr(list_2[list_2_idx], attr) if list_1 is None or len(list_1) == 0: result_list = [] for element in list_2: result_list.append(self.copy(model_1, element)) setattr(list_1[list_1_idx], attr, result_list) continue elif list_2 is None or len(list_2) == 0: continue has_phases = True for i in range(len(list_1)): if not (hasattr(list_1[0], 'phase') and list_1[0].phase is not None): has_phases = False for i in range(len(list_2)): if not (hasattr(list_2[0], 'phase') and list_2[0].phase is not None): has_phases = False if has_phases and len(list_1) > 0 and (len(list_2) > 0): list_1.sort(key=lambda x: phase_order[x.phase]) list_2.sort(key=lambda x: phase_order[x.phase]) list_1_phase = phase_order[list_1[0].phase] list_2_phase = phase_order[list_2[0].phase] list_1_idx = 0 list_2_idx = 0 while list_1_idx < len(list_1) and list_2_idx < len(list_2): if list_1_idx < len(list_1): list_1_phase = phase_order[list_1[list_1_idx].phase] else: list_1_phase = 1000000 if list_2_idx < len(list_2): list_2_phase = phase_order[list_2[list_2_idx].phase] else: list_2_phase = 1000001 if list_1_phase == list_2_phase: self.set_attributes(model_1, list_1[list_1_idx], list_2[list_2_idx], overwrite) list_1_idx = list_1_idx + 1 list_2_idx = list_2_idx + 1 elif list_1_phase < list_2_phase: list_1_idx = list_1_idx + 1 else: getattr(list_1[list_1_idx], attr).append(list_2[list_2_idx]) list_2_idx = list_2_idx + 1 elif len(list_1) == len(list_2): for i in range(len(list_1)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) elif len(list_1) > len(list_2): for i in range(len(list_2)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: for i in range(len(list_2)): if i < len(list_1): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: getattr(list_1[list_1_idx], attr).append(list_2[i]) else: value = getattr(list_2[list_2_idx], attr) if value is not None: if getattr(list_1[list_1_idx], attr) is not None and overwrite == False: continue setattr(list_1[list_1_idx], attr, value) </DeepExtract> list_1_idx = list_1_idx + 1 list_2_idx = list_2_idx + 1 elif list_1_phase < list_2_phase: list_1_idx = list_1_idx + 1 else: getattr(obj_1, attr).append(list_2[list_2_idx]) list_2_idx = list_2_idx + 1 elif len(list_1) == len(list_2): for i in range(len(list_1)): <DeepExtract> for attr in list_2[i].traits(): class_name = str(type(list_2[i].traits()[attr])).strip("<>'").split('.')[-1] if class_name == 'List': phase_order = {'A': 0, 'B': 1, 'C': 2, 'N': 3} list_1 = getattr(list_1[i], attr) list_2 = getattr(list_2[i], attr) if list_1 is None or len(list_1) == 0: result_list = [] for element in list_2: result_list.append(self.copy(model_1, element)) setattr(list_1[i], attr, result_list) continue elif list_2 is None or len(list_2) == 0: continue has_phases = True for i in range(len(list_1)): if not (hasattr(list_1[0], 'phase') and list_1[0].phase is not None): has_phases = False for i in range(len(list_2)): if not (hasattr(list_2[0], 'phase') and list_2[0].phase is not None): has_phases = False if has_phases and len(list_1) > 0 and (len(list_2) > 0): list_1.sort(key=lambda x: phase_order[x.phase]) list_2.sort(key=lambda x: phase_order[x.phase]) list_1_phase = phase_order[list_1[0].phase] list_2_phase = phase_order[list_2[0].phase] list_1_idx = 0 list_2_idx = 0 while list_1_idx < len(list_1) and list_2_idx < len(list_2): if list_1_idx < len(list_1): list_1_phase = phase_order[list_1[list_1_idx].phase] else: list_1_phase = 1000000 if list_2_idx < len(list_2): list_2_phase = phase_order[list_2[list_2_idx].phase] else: list_2_phase = 1000001 if list_1_phase == list_2_phase: self.set_attributes(model_1, list_1[list_1_idx], list_2[list_2_idx], overwrite) list_1_idx = list_1_idx + 1 list_2_idx = list_2_idx + 1 elif list_1_phase < list_2_phase: list_1_idx = list_1_idx + 1 else: getattr(list_1[i], attr).append(list_2[list_2_idx]) list_2_idx = list_2_idx + 1 elif len(list_1) == len(list_2): for i in range(len(list_1)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) elif len(list_1) > len(list_2): for i in range(len(list_2)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: for i in range(len(list_2)): if i < len(list_1): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: getattr(list_1[i], attr).append(list_2[i]) else: value = getattr(list_2[i], attr) if value is not None: if getattr(list_1[i], attr) is not None and overwrite == False: continue setattr(list_1[i], attr, value) </DeepExtract> elif len(list_1) > len(list_2): for i in range(len(list_2)): <DeepExtract> for attr in list_2[i].traits(): class_name = str(type(list_2[i].traits()[attr])).strip("<>'").split('.')[-1] if class_name == 'List': phase_order = {'A': 0, 'B': 1, 'C': 2, 'N': 3} list_1 = getattr(list_1[i], attr) list_2 = getattr(list_2[i], attr) if list_1 is None or len(list_1) == 0: result_list = [] for element in list_2: result_list.append(self.copy(model_1, element)) setattr(list_1[i], attr, result_list) continue elif list_2 is None or len(list_2) == 0: continue has_phases = True for i in range(len(list_1)): if not (hasattr(list_1[0], 'phase') and list_1[0].phase is not None): has_phases = False for i in range(len(list_2)): if not (hasattr(list_2[0], 'phase') and list_2[0].phase is not None): has_phases = False if has_phases and len(list_1) > 0 and (len(list_2) > 0): list_1.sort(key=lambda x: phase_order[x.phase]) list_2.sort(key=lambda x: phase_order[x.phase]) list_1_phase = phase_order[list_1[0].phase] list_2_phase = phase_order[list_2[0].phase] list_1_idx = 0 list_2_idx = 0 while list_1_idx < len(list_1) and list_2_idx < len(list_2): if list_1_idx < len(list_1): list_1_phase = phase_order[list_1[list_1_idx].phase] else: list_1_phase = 1000000 if list_2_idx < len(list_2): list_2_phase = phase_order[list_2[list_2_idx].phase] else: list_2_phase = 1000001 if list_1_phase == list_2_phase: self.set_attributes(model_1, list_1[list_1_idx], list_2[list_2_idx], overwrite) list_1_idx = list_1_idx + 1 list_2_idx = list_2_idx + 1 elif list_1_phase < list_2_phase: list_1_idx = list_1_idx + 1 else: getattr(list_1[i], attr).append(list_2[list_2_idx]) list_2_idx = list_2_idx + 1 elif len(list_1) == len(list_2): for i in range(len(list_1)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) elif len(list_1) > len(list_2): for i in range(len(list_2)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: for i in range(len(list_2)): if i < len(list_1): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: getattr(list_1[i], attr).append(list_2[i]) else: value = getattr(list_2[i], attr) if value is not None: if getattr(list_1[i], attr) is not None and overwrite == False: continue setattr(list_1[i], attr, value) </DeepExtract> else: for i in range(len(list_2)): if i < len(list_1): <DeepExtract> for attr in list_2[i].traits(): class_name = str(type(list_2[i].traits()[attr])).strip("<>'").split('.')[-1] if class_name == 'List': phase_order = {'A': 0, 'B': 1, 'C': 2, 'N': 3} list_1 = getattr(list_1[i], attr) list_2 = getattr(list_2[i], attr) if list_1 is None or len(list_1) == 0: result_list = [] for element in list_2: result_list.append(self.copy(model_1, element)) setattr(list_1[i], attr, result_list) continue elif list_2 is None or len(list_2) == 0: continue has_phases = True for i in range(len(list_1)): if not (hasattr(list_1[0], 'phase') and list_1[0].phase is not None): has_phases = False for i in range(len(list_2)): if not (hasattr(list_2[0], 'phase') and list_2[0].phase is not None): has_phases = False if has_phases and len(list_1) > 0 and (len(list_2) > 0): list_1.sort(key=lambda x: phase_order[x.phase]) list_2.sort(key=lambda x: phase_order[x.phase]) list_1_phase = phase_order[list_1[0].phase] list_2_phase = phase_order[list_2[0].phase] list_1_idx = 0 list_2_idx = 0 while list_1_idx < len(list_1) and list_2_idx < len(list_2): if list_1_idx < len(list_1): list_1_phase = phase_order[list_1[list_1_idx].phase] else: list_1_phase = 1000000 if list_2_idx < len(list_2): list_2_phase = phase_order[list_2[list_2_idx].phase] else: list_2_phase = 1000001 if list_1_phase == list_2_phase: self.set_attributes(model_1, list_1[list_1_idx], list_2[list_2_idx], overwrite) list_1_idx = list_1_idx + 1 list_2_idx = list_2_idx + 1 elif list_1_phase < list_2_phase: list_1_idx = list_1_idx + 1 else: getattr(list_1[i], attr).append(list_2[list_2_idx]) list_2_idx = list_2_idx + 1 elif len(list_1) == len(list_2): for i in range(len(list_1)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) elif len(list_1) > len(list_2): for i in range(len(list_2)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: for i in range(len(list_2)): if i < len(list_1): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: getattr(list_1[i], attr).append(list_2[i]) else: value = getattr(list_2[i], attr) if value is not None: if getattr(list_1[i], attr) is not None and overwrite == False: continue setattr(list_1[i], attr, value) </DeepExtract> else: getattr(obj_1, attr).append(list_2[i]) else: value = getattr(obj_2, attr) if value is not None: if getattr(obj_1, attr) is not None and overwrite == False: continue setattr(obj_1, attr, value)
def set_attributes(self, model_1, obj_1, obj_2, overwrite=True): """ For all the attributes in obj_1 and obj_2, set the non-list attributes in obj_1 to those from obj_2 when they aren't None. If overwrite=False, attributes in object_1 which aren't empty aren't overwritten. Precondition: obj_1 and obj_2 are of the same class """ for attr in obj_2.traits(): class_name = str(type(obj_2.traits()[attr])).strip("<>'").split('.')[-1] if class_name == 'List': phase_order = {'A': 0, 'B': 1, 'C': 2, 'N': 3} list_1 = getattr(obj_1, attr) list_2 = getattr(obj_2, attr) if list_1 is None or len(list_1) == 0: result_list = [] for element in list_2: result_list.append(self.copy(model_1, element)) setattr(obj_1, attr, result_list) continue elif list_2 is None or len(list_2) == 0: continue has_phases = True for i in range(len(list_1)): if not (hasattr(list_1[0], 'phase') and list_1[0].phase is not None): has_phases = False for i in range(len(list_2)): if not (hasattr(list_2[0], 'phase') and list_2[0].phase is not None): has_phases = False if has_phases and len(list_1) > 0 and (len(list_2) > 0): list_1.sort(key=lambda x: phase_order[x.phase]) list_2.sort(key=lambda x: phase_order[x.phase]) list_1_phase = phase_order[list_1[0].phase] list_2_phase = phase_order[list_2[0].phase] list_1_idx = 0 list_2_idx = 0 while list_1_idx < len(list_1) and list_2_idx < len(list_2): if list_1_idx < len(list_1): list_1_phase = phase_order[list_1[list_1_idx].phase] else: list_1_phase = 1000000 if list_2_idx < len(list_2): list_2_phase = phase_order[list_2[list_2_idx].phase] else: list_2_phase = 1000001 if list_1_phase == list_2_phase: for attr in list_2[list_2_idx].traits(): class_name = str(type(list_2[list_2_idx].traits()[attr])).strip("<>'").split('.')[-1] if class_name == 'List': phase_order = {'A': 0, 'B': 1, 'C': 2, 'N': 3} list_1 = getattr(list_1[list_1_idx], attr) list_2 = getattr(list_2[list_2_idx], attr) if list_1 is None or len(list_1) == 0: result_list = [] for element in list_2: result_list.append(self.copy(model_1, element)) setattr(list_1[list_1_idx], attr, result_list) continue elif list_2 is None or len(list_2) == 0: continue has_phases = True for i in range(len(list_1)): if not (hasattr(list_1[0], 'phase') and list_1[0].phase is not None): has_phases = False for i in range(len(list_2)): if not (hasattr(list_2[0], 'phase') and list_2[0].phase is not None): has_phases = False if has_phases and len(list_1) > 0 and (len(list_2) > 0): list_1.sort(key=lambda x: phase_order[x.phase]) list_2.sort(key=lambda x: phase_order[x.phase]) list_1_phase = phase_order[list_1[0].phase] list_2_phase = phase_order[list_2[0].phase] list_1_idx = 0 list_2_idx = 0 while list_1_idx < len(list_1) and list_2_idx < len(list_2): if list_1_idx < len(list_1): list_1_phase = phase_order[list_1[list_1_idx].phase] else: list_1_phase = 1000000 if list_2_idx < len(list_2): list_2_phase = phase_order[list_2[list_2_idx].phase] else: list_2_phase = 1000001 if list_1_phase == list_2_phase: self.set_attributes(model_1, list_1[list_1_idx], list_2[list_2_idx], overwrite) list_1_idx = list_1_idx + 1 list_2_idx = list_2_idx + 1 elif list_1_phase < list_2_phase: list_1_idx = list_1_idx + 1 else: getattr(list_1[list_1_idx], attr).append(list_2[list_2_idx]) list_2_idx = list_2_idx + 1 elif len(list_1) == len(list_2): for i in range(len(list_1)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) elif len(list_1) > len(list_2): for i in range(len(list_2)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: for i in range(len(list_2)): if i < len(list_1): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: getattr(list_1[list_1_idx], attr).append(list_2[i]) else: value = getattr(list_2[list_2_idx], attr) if value is not None: if getattr(list_1[list_1_idx], attr) is not None and overwrite == False: continue setattr(list_1[list_1_idx], attr, value) list_1_idx = list_1_idx + 1 list_2_idx = list_2_idx + 1 elif list_1_phase < list_2_phase: list_1_idx = list_1_idx + 1 else: getattr(obj_1, attr).append(list_2[list_2_idx]) list_2_idx = list_2_idx + 1 elif len(list_1) == len(list_2): for i in range(len(list_1)): for attr in list_2[i].traits(): class_name = str(type(list_2[i].traits()[attr])).strip("<>'").split('.')[-1] if class_name == 'List': phase_order = {'A': 0, 'B': 1, 'C': 2, 'N': 3} list_1 = getattr(list_1[i], attr) list_2 = getattr(list_2[i], attr) if list_1 is None or len(list_1) == 0: result_list = [] for element in list_2: result_list.append(self.copy(model_1, element)) setattr(list_1[i], attr, result_list) continue elif list_2 is None or len(list_2) == 0: continue has_phases = True for i in range(len(list_1)): if not (hasattr(list_1[0], 'phase') and list_1[0].phase is not None): has_phases = False for i in range(len(list_2)): if not (hasattr(list_2[0], 'phase') and list_2[0].phase is not None): has_phases = False if has_phases and len(list_1) > 0 and (len(list_2) > 0): list_1.sort(key=lambda x: phase_order[x.phase]) list_2.sort(key=lambda x: phase_order[x.phase]) list_1_phase = phase_order[list_1[0].phase] list_2_phase = phase_order[list_2[0].phase] list_1_idx = 0 list_2_idx = 0 while list_1_idx < len(list_1) and list_2_idx < len(list_2): if list_1_idx < len(list_1): list_1_phase = phase_order[list_1[list_1_idx].phase] else: list_1_phase = 1000000 if list_2_idx < len(list_2): list_2_phase = phase_order[list_2[list_2_idx].phase] else: list_2_phase = 1000001 if list_1_phase == list_2_phase: self.set_attributes(model_1, list_1[list_1_idx], list_2[list_2_idx], overwrite) list_1_idx = list_1_idx + 1 list_2_idx = list_2_idx + 1 elif list_1_phase < list_2_phase: list_1_idx = list_1_idx + 1 else: getattr(list_1[i], attr).append(list_2[list_2_idx]) list_2_idx = list_2_idx + 1 elif len(list_1) == len(list_2): for i in range(len(list_1)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) elif len(list_1) > len(list_2): for i in range(len(list_2)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: for i in range(len(list_2)): if i < len(list_1): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: getattr(list_1[i], attr).append(list_2[i]) else: value = getattr(list_2[i], attr) if value is not None: if getattr(list_1[i], attr) is not None and overwrite == False: continue setattr(list_1[i], attr, value) elif len(list_1) > len(list_2): for i in range(len(list_2)): for attr in list_2[i].traits(): class_name = str(type(list_2[i].traits()[attr])).strip("<>'").split('.')[-1] if class_name == 'List': phase_order = {'A': 0, 'B': 1, 'C': 2, 'N': 3} list_1 = getattr(list_1[i], attr) list_2 = getattr(list_2[i], attr) if list_1 is None or len(list_1) == 0: result_list = [] for element in list_2: result_list.append(self.copy(model_1, element)) setattr(list_1[i], attr, result_list) continue elif list_2 is None or len(list_2) == 0: continue has_phases = True for i in range(len(list_1)): if not (hasattr(list_1[0], 'phase') and list_1[0].phase is not None): has_phases = False for i in range(len(list_2)): if not (hasattr(list_2[0], 'phase') and list_2[0].phase is not None): has_phases = False if has_phases and len(list_1) > 0 and (len(list_2) > 0): list_1.sort(key=lambda x: phase_order[x.phase]) list_2.sort(key=lambda x: phase_order[x.phase]) list_1_phase = phase_order[list_1[0].phase] list_2_phase = phase_order[list_2[0].phase] list_1_idx = 0 list_2_idx = 0 while list_1_idx < len(list_1) and list_2_idx < len(list_2): if list_1_idx < len(list_1): list_1_phase = phase_order[list_1[list_1_idx].phase] else: list_1_phase = 1000000 if list_2_idx < len(list_2): list_2_phase = phase_order[list_2[list_2_idx].phase] else: list_2_phase = 1000001 if list_1_phase == list_2_phase: self.set_attributes(model_1, list_1[list_1_idx], list_2[list_2_idx], overwrite) list_1_idx = list_1_idx + 1 list_2_idx = list_2_idx + 1 elif list_1_phase < list_2_phase: list_1_idx = list_1_idx + 1 else: getattr(list_1[i], attr).append(list_2[list_2_idx]) list_2_idx = list_2_idx + 1 elif len(list_1) == len(list_2): for i in range(len(list_1)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) elif len(list_1) > len(list_2): for i in range(len(list_2)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: for i in range(len(list_2)): if i < len(list_1): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: getattr(list_1[i], attr).append(list_2[i]) else: value = getattr(list_2[i], attr) if value is not None: if getattr(list_1[i], attr) is not None and overwrite == False: continue setattr(list_1[i], attr, value) else: for i in range(len(list_2)): if i < len(list_1): for attr in list_2[i].traits(): class_name = str(type(list_2[i].traits()[attr])).strip("<>'").split('.')[-1] if class_name == 'List': phase_order = {'A': 0, 'B': 1, 'C': 2, 'N': 3} list_1 = getattr(list_1[i], attr) list_2 = getattr(list_2[i], attr) if list_1 is None or len(list_1) == 0: result_list = [] for element in list_2: result_list.append(self.copy(model_1, element)) setattr(list_1[i], attr, result_list) continue elif list_2 is None or len(list_2) == 0: continue has_phases = True for i in range(len(list_1)): if not (hasattr(list_1[0], 'phase') and list_1[0].phase is not None): has_phases = False for i in range(len(list_2)): if not (hasattr(list_2[0], 'phase') and list_2[0].phase is not None): has_phases = False if has_phases and len(list_1) > 0 and (len(list_2) > 0): list_1.sort(key=lambda x: phase_order[x.phase]) list_2.sort(key=lambda x: phase_order[x.phase]) list_1_phase = phase_order[list_1[0].phase] list_2_phase = phase_order[list_2[0].phase] list_1_idx = 0 list_2_idx = 0 while list_1_idx < len(list_1) and list_2_idx < len(list_2): if list_1_idx < len(list_1): list_1_phase = phase_order[list_1[list_1_idx].phase] else: list_1_phase = 1000000 if list_2_idx < len(list_2): list_2_phase = phase_order[list_2[list_2_idx].phase] else: list_2_phase = 1000001 if list_1_phase == list_2_phase: self.set_attributes(model_1, list_1[list_1_idx], list_2[list_2_idx], overwrite) list_1_idx = list_1_idx + 1 list_2_idx = list_2_idx + 1 elif list_1_phase < list_2_phase: list_1_idx = list_1_idx + 1 else: getattr(list_1[i], attr).append(list_2[list_2_idx]) list_2_idx = list_2_idx + 1 elif len(list_1) == len(list_2): for i in range(len(list_1)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) elif len(list_1) > len(list_2): for i in range(len(list_2)): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: for i in range(len(list_2)): if i < len(list_1): self.set_attributes(model_1, list_1[i], list_2[i], overwrite) else: getattr(list_1[i], attr).append(list_2[i]) else: value = getattr(list_2[i], attr) if value is not None: if getattr(list_1[i], attr) is not None and overwrite == False: continue setattr(list_1[i], attr, value) else: getattr(obj_1, attr).append(list_2[i]) else: value = getattr(obj_2, attr) if value is not None: if getattr(obj_1, attr) is not None and overwrite == False: continue setattr(obj_1, attr, value)
ditto
positive
def forward(self, top_recur, word_inputs, arc_targets=None, rel_targets=None): <DeepExtract> is_train = autograd.is_training() if is_train: top_recur = nd.Dropout(data=top_recur, axes=[0], p=self.dropout_mlp) (W_dep, b_dep) = (self.mlp_dep_W.data(), self.mlp_dep_b.data()) (W_head, b_head) = (self.mlp_head_W.data(), self.mlp_head_b.data()) (dep, head) = (leaky_relu(nd.dot(top_recur, W_dep.T) + b_dep), leaky_relu(nd.dot(top_recur, W_head.T) + b_head)) if is_train: (dep, head) = (nd.Dropout(data=dep, axes=[0], p=self.dropout_mlp), nd.Dropout(data=head, axes=[0], p=self.dropout_mlp)) (dep, head) = (nd.transpose(dep, axes=[2, 0, 1]), nd.transpose(head, axes=[2, 0, 1])) (dep_arc, dep_rel) = (dep[:self.mlp_arc_size], dep[self.mlp_arc_size:]) (head_arc, head_rel) = (head[:self.mlp_arc_size], head[self.mlp_arc_size:]) (dep_arc, dep_rel, head_arc, head_rel) = (dep_arc, dep_rel, head_arc, head_rel) </DeepExtract> is_train = autograd.is_training() mask = np.greater(word_inputs, self._vocab.ROOT).astype(np.float32) if is_train: (arc_logits, arc_loss) = self.first_biaffine(dep_arc, head_arc, mask, arc_targets) else: arc_logits = self.first_biaffine(dep_arc, head_arc, mask, arc_targets)
def forward(self, top_recur, word_inputs, arc_targets=None, rel_targets=None): is_train = autograd.is_training() if is_train: top_recur = nd.Dropout(data=top_recur, axes=[0], p=self.dropout_mlp) (W_dep, b_dep) = (self.mlp_dep_W.data(), self.mlp_dep_b.data()) (W_head, b_head) = (self.mlp_head_W.data(), self.mlp_head_b.data()) (dep, head) = (leaky_relu(nd.dot(top_recur, W_dep.T) + b_dep), leaky_relu(nd.dot(top_recur, W_head.T) + b_head)) if is_train: (dep, head) = (nd.Dropout(data=dep, axes=[0], p=self.dropout_mlp), nd.Dropout(data=head, axes=[0], p=self.dropout_mlp)) (dep, head) = (nd.transpose(dep, axes=[2, 0, 1]), nd.transpose(head, axes=[2, 0, 1])) (dep_arc, dep_rel) = (dep[:self.mlp_arc_size], dep[self.mlp_arc_size:]) (head_arc, head_rel) = (head[:self.mlp_arc_size], head[self.mlp_arc_size:]) (dep_arc, dep_rel, head_arc, head_rel) = (dep_arc, dep_rel, head_arc, head_rel) is_train = autograd.is_training() mask = np.greater(word_inputs, self._vocab.ROOT).astype(np.float32) if is_train: (arc_logits, arc_loss) = self.first_biaffine(dep_arc, head_arc, mask, arc_targets) else: arc_logits = self.first_biaffine(dep_arc, head_arc, mask, arc_targets)
elit
positive
def __len__(self): if not hasattr(self, '_batches'): <DeepExtract> dataset_size = len(self.group_ids) sampled_ids = torch.as_tensor(list(self.sampler)) order = torch.full((dataset_size,), -1, dtype=torch.int64) order[sampled_ids] = torch.arange(len(sampled_ids)) mask = order >= 0 clusters = [(self.group_ids == i) & mask for i in self.groups] relative_order = [order[cluster] for cluster in clusters] permutation_ids = [s[s.sort()[1]] for s in relative_order] permuted_clusters = [sampled_ids[idx] for idx in permutation_ids] splits = [c.split(self.batch_size) for c in permuted_clusters] merged = tuple(itertools.chain.from_iterable(splits)) first_element_of_batch = [t[0].item() for t in merged if t.numel() > 0] inv_sampled_ids_map = {v: k for (k, v) in enumerate(sampled_ids.tolist())} first_index_of_batch = torch.as_tensor([inv_sampled_ids_map[s] for s in first_element_of_batch]) permutation_order = first_index_of_batch.sort(0)[1].tolist() batches = [merged[i].tolist() for i in permutation_order] if self.drop_uneven: kept = [] for batch in batches: if len(batch) == self.batch_size: kept.append(batch) batches = kept self._batches = batches </DeepExtract> self._can_reuse_batches = True return len(self._batches)
def __len__(self): if not hasattr(self, '_batches'): dataset_size = len(self.group_ids) sampled_ids = torch.as_tensor(list(self.sampler)) order = torch.full((dataset_size,), -1, dtype=torch.int64) order[sampled_ids] = torch.arange(len(sampled_ids)) mask = order >= 0 clusters = [(self.group_ids == i) & mask for i in self.groups] relative_order = [order[cluster] for cluster in clusters] permutation_ids = [s[s.sort()[1]] for s in relative_order] permuted_clusters = [sampled_ids[idx] for idx in permutation_ids] splits = [c.split(self.batch_size) for c in permuted_clusters] merged = tuple(itertools.chain.from_iterable(splits)) first_element_of_batch = [t[0].item() for t in merged if t.numel() > 0] inv_sampled_ids_map = {v: k for (k, v) in enumerate(sampled_ids.tolist())} first_index_of_batch = torch.as_tensor([inv_sampled_ids_map[s] for s in first_element_of_batch]) permutation_order = first_index_of_batch.sort(0)[1].tolist() batches = [merged[i].tolist() for i in permutation_order] if self.drop_uneven: kept = [] for batch in batches: if len(batch) == self.batch_size: kept.append(batch) batches = kept self._batches = batches self._can_reuse_batches = True return len(self._batches)
Box_Discretization_Network
positive
def _load_instruction(address, thumb): <DeepExtract> r = (self.count,) + self.key self.count = 0 self.key = (None, None, None) return r </DeepExtract> <DeepExtract> avail = self.local_data_available(address) if address < 2097152 and avail < 8: self.flush() self.log_prefetch(address) avail = self.fetch_local_data(address, size=256, max_round_trips=1) block_size = avail </DeepExtract> assert block_size >= 8 self.local_data.seek(address) data = self.local_data.read(block_size) lines = disassembly_lines(disassemble_string(data, address, thumb=thumb)) <DeepExtract> for i in range(len(lines) - 1): instr = lines[i] instr.next_address = lines[i + 1].address addr = thumb | lines[i].address & ~1 instr.hle = self.patch_hle.get(addr) if addr not in self.instructions: self.instructions[addr] = instr </DeepExtract>
def _load_instruction(address, thumb): r = (self.count,) + self.key self.count = 0 self.key = (None, None, None) return r avail = self.local_data_available(address) if address < 2097152 and avail < 8: self.flush() self.log_prefetch(address) avail = self.fetch_local_data(address, size=256, max_round_trips=1) block_size = avail assert block_size >= 8 self.local_data.seek(address) data = self.local_data.read(block_size) lines = disassembly_lines(disassemble_string(data, address, thumb=thumb)) for i in range(len(lines) - 1): instr = lines[i] instr.next_address = lines[i + 1].address addr = thumb | lines[i].address & ~1 instr.hle = self.patch_hle.get(addr) if addr not in self.instructions: self.instructions[addr] = instr
coastermelt
positive
def eval(self): for model_count in range(2): <DeepExtract> scores_for_ground_truths = [] for ground_truth in self.ground_truth: score = self.exact_match_score(self.answer_text[model_count], ground_truth) scores_for_ground_truths.append(score) self.em[model_count] = max(scores_for_ground_truths) </DeepExtract> <DeepExtract> scores_for_ground_truths = [] for ground_truth in self.ground_truth: score = self.f1_score(self.answer_text[model_count], ground_truth) scores_for_ground_truths.append(score) self.f1[model_count] = max(scores_for_ground_truths) </DeepExtract>
def eval(self): for model_count in range(2): scores_for_ground_truths = [] for ground_truth in self.ground_truth: score = self.exact_match_score(self.answer_text[model_count], ground_truth) scores_for_ground_truths.append(score) self.em[model_count] = max(scores_for_ground_truths) scores_for_ground_truths = [] for ground_truth in self.ground_truth: score = self.f1_score(self.answer_text[model_count], ground_truth) scores_for_ground_truths.append(score) self.f1[model_count] = max(scores_for_ground_truths)
bi-att-flow
positive
def test_error(self): class TestFunction(WrapperFunction): def _error(self, err, *args, **kw): ErrorTest.err = err ErrorTest.args = self._resolve_args(*args, **kw) @TestFunction def foo(a, b=0): raise Exception() with self.assertRaises(Exception): <DeepExtract> return 1 </DeepExtract> self.assertIsInstance(self.err, Exception) self.assertEqual({'a': 1, 'b': 2}, self.args)
def test_error(self): class TestFunction(WrapperFunction): def _error(self, err, *args, **kw): ErrorTest.err = err ErrorTest.args = self._resolve_args(*args, **kw) @TestFunction def foo(a, b=0): raise Exception() with self.assertRaises(Exception): return 1 self.assertIsInstance(self.err, Exception) self.assertEqual({'a': 1, 'b': 2}, self.args)
decorated
positive
def when(self, *_predicates: Predicate[Weight] | Predicate[NeutralWeight], qualified_by: Optional[object | list[object]]=None) -> Callable[[T], T]: def register(__impl: T) -> T: prepared = self.__prepare(__impl) <DeepExtract> weights: list[Weight] = list() distinct_predicates: dict[Type[Predicate[Any]], Predicate[Any]] = dict() for condition in create_conditions(*_predicates, qualified_by=qualified_by): if condition is None or condition is False: return if condition is True: continue if isinstance(condition, Predicate): cls = type(condition) previous = distinct_predicates.get(cls) if previous is not None: if not issubclass(cls, MergeablePredicate): raise TypeError(f'Cannot have multiple predicates of type {cls!r} without declaring a merge method! See MergeablePredicate.') cls = cast(Type[MergeablePredicate[Any]], cls) distinct_predicates[cls] = cls.merge(cast(MergeablePredicate[Any], previous), cast(MergeablePredicate[Any], condition)) else: distinct_predicates[cls] = condition elif isinstance(condition, ImplementationWeight): if not isinstance(condition, NeutralWeight): weights.append(condition) else: raise TypeError(f'A condition must either be a predicate, an optional weight or a boolean, not a {type(condition)!r}') self.__catalog[ImplementationsRegistryDependency(self.__interface)].add(identifier=prepared.out, dependency=prepared.dependency, predicates=list(distinct_predicates.values()), weights=weights) </DeepExtract> return prepared.out return register
def when(self, *_predicates: Predicate[Weight] | Predicate[NeutralWeight], qualified_by: Optional[object | list[object]]=None) -> Callable[[T], T]: def register(__impl: T) -> T: prepared = self.__prepare(__impl) weights: list[Weight] = list() distinct_predicates: dict[Type[Predicate[Any]], Predicate[Any]] = dict() for condition in create_conditions(*_predicates, qualified_by=qualified_by): if condition is None or condition is False: return if condition is True: continue if isinstance(condition, Predicate): cls = type(condition) previous = distinct_predicates.get(cls) if previous is not None: if not issubclass(cls, MergeablePredicate): raise TypeError(f'Cannot have multiple predicates of type {cls!r} without declaring a merge method! See MergeablePredicate.') cls = cast(Type[MergeablePredicate[Any]], cls) distinct_predicates[cls] = cls.merge(cast(MergeablePredicate[Any], previous), cast(MergeablePredicate[Any], condition)) else: distinct_predicates[cls] = condition elif isinstance(condition, ImplementationWeight): if not isinstance(condition, NeutralWeight): weights.append(condition) else: raise TypeError(f'A condition must either be a predicate, an optional weight or a boolean, not a {type(condition)!r}') self.__catalog[ImplementationsRegistryDependency(self.__interface)].add(identifier=prepared.out, dependency=prepared.dependency, predicates=list(distinct_predicates.values()), weights=weights) return prepared.out return register
antidote
positive
def _frontline_center(from_cp: ControlPoint, to_cp: ControlPoint) -> typing.Optional[Point]: <DeepExtract> key = str(from_cp.id) + '_' + str(to_cp.id) if key in self.frontline_vector_cache: frontline_vector = self.frontline_vector_cache[key] else: frontline = Conflict.frontline_vector(from_cp, to_cp, self.game.theater) self.frontline_vector_cache[key] = frontline frontline_vector = frontline </DeepExtract> if frontline_vector: return frontline_vector[0].point_from_heading(frontline_vector[1], frontline_vector[2] / 2) else: return None
def _frontline_center(from_cp: ControlPoint, to_cp: ControlPoint) -> typing.Optional[Point]: key = str(from_cp.id) + '_' + str(to_cp.id) if key in self.frontline_vector_cache: frontline_vector = self.frontline_vector_cache[key] else: frontline = Conflict.frontline_vector(from_cp, to_cp, self.game.theater) self.frontline_vector_cache[key] = frontline frontline_vector = frontline if frontline_vector: return frontline_vector[0].point_from_heading(frontline_vector[1], frontline_vector[2] / 2) else: return None
dcs_liberation
positive
def JsonDictFromEntity(entity, entity_id=None, json_dict=None, include=None, exclude=None): """Make a dict suitable for json encoding from a db or ndb model. Args: entity: the model entity to get the values from (db.Model or ndb.Model). entity_id: the id to use for this entity (string). If None the entity key's id or name will be used, or crud_model.NEW_ENTITY_ID if no key is available (the entity has not be saved in datastore). json_dict: a dict to fill in. If None is provided it will be created. include: a list of keys you want added to the dict. exclude: a list of keys to not use (overrides "include" if key is in both). Returns: A dict suitable for json encoding representing the entity parameter. """ if json_dict is None: json_dict = {} if hasattr(entity, 'to_dict'): json_dict.update(entity.to_dict(include=include, exclude=exclude)) else: <DeepExtract> keys = set(entity.properties().keys()) if include: keys = keys.intersection(include) if exclude: keys -= set(exclude) for key in keys: json_dict[key] = getattr(entity, key) </DeepExtract> if hasattr(entity, 'AddToJsonDict'): entity.AddToJsonDict(json_dict) key = None if not exclude or 'id' not in exclude: if entity_id is None: <DeepExtract> try: if callable(entity.key): key = entity.key() elif hasattr(entity, 'key'): key = entity.key except db.NotSavedError: pass key = None </DeepExtract> if key is None: entity_id = NEW_ENTITY_ID else: entity_id = key.id() json_dict['id'] = str(entity_id) if not exclude or 'parent_id' not in exclude: if key and key.parent(): json_dict['parent_id'] = key.parent().id() return json_dict
def JsonDictFromEntity(entity, entity_id=None, json_dict=None, include=None, exclude=None): """Make a dict suitable for json encoding from a db or ndb model. Args: entity: the model entity to get the values from (db.Model or ndb.Model). entity_id: the id to use for this entity (string). If None the entity key's id or name will be used, or crud_model.NEW_ENTITY_ID if no key is available (the entity has not be saved in datastore). json_dict: a dict to fill in. If None is provided it will be created. include: a list of keys you want added to the dict. exclude: a list of keys to not use (overrides "include" if key is in both). Returns: A dict suitable for json encoding representing the entity parameter. """ if json_dict is None: json_dict = {} if hasattr(entity, 'to_dict'): json_dict.update(entity.to_dict(include=include, exclude=exclude)) else: keys = set(entity.properties().keys()) if include: keys = keys.intersection(include) if exclude: keys -= set(exclude) for key in keys: json_dict[key] = getattr(entity, key) if hasattr(entity, 'AddToJsonDict'): entity.AddToJsonDict(json_dict) key = None if not exclude or 'id' not in exclude: if entity_id is None: try: if callable(entity.key): key = entity.key() elif hasattr(entity, 'key'): key = entity.key except db.NotSavedError: pass key = None if key is None: entity_id = NEW_ENTITY_ID else: entity_id = key.id() json_dict['id'] = str(entity_id) if not exclude or 'parent_id' not in exclude: if key and key.parent(): json_dict['parent_id'] = key.parent().id() return json_dict
Data-Pipeline
positive
@property def cam(self): """ Camera object. If :attr:`._cam` hasn't been initialized yet, use :meth:`.init_cam` to do so Returns: Camera object, different for each camera. """ if not self._cam: <DeepExtract> raise Exception('init_cam must be overwritten by camera subclass!!') </DeepExtract> return self._cam
@property def cam(self): """ Camera object. If :attr:`._cam` hasn't been initialized yet, use :meth:`.init_cam` to do so Returns: Camera object, different for each camera. """ if not self._cam: raise Exception('init_cam must be overwritten by camera subclass!!') return self._cam
autopilot
positive
def format_unit(value: str | float | decimal.Decimal, measurement_unit: str, length: Literal['short', 'long', 'narrow']='long', format: str | None=None, locale: Locale | str | None=LC_NUMERIC) -> str: """Format a value of a given unit. Values are formatted according to the locale's usual pluralization rules and number formats. >>> format_unit(12, 'length-meter', locale='ro_RO') u'12 metri' >>> format_unit(15.5, 'length-mile', locale='fi_FI') u'15,5 mailia' >>> format_unit(1200, 'pressure-millimeter-ofhg', locale='nb') u'1\\xa0200 millimeter kvikks\\xf8lv' >>> format_unit(270, 'ton', locale='en') u'270 tons' Number formats may be overridden with the ``format`` parameter. >>> import decimal >>> format_unit(decimal.Decimal("-42.774"), 'temperature-celsius', 'short', format='#.0', locale='fr') u'-42,8\\u202f\\xb0C' The locale's usual pluralization rules are respected. >>> format_unit(1, 'length-meter', locale='ro_RO') u'1 metru' >>> format_unit(0, 'length-mile', locale='cy') u'0 mi' >>> format_unit(1, 'length-mile', locale='cy') u'1 filltir' >>> format_unit(3, 'length-mile', locale='cy') u'3 milltir' >>> format_unit(15, 'length-horse', locale='fi') Traceback (most recent call last): ... UnknownUnitError: length-horse is not a known unit in fi .. versionadded:: 2.2.0 :param value: the value to format. If this is a string, no number formatting will be attempted. :param measurement_unit: the code of a measurement unit. Known units can be found in the CLDR Unit Validity XML file: https://unicode.org/repos/cldr/tags/latest/common/validity/unit.xml :param length: "short", "long" or "narrow" :param format: An optional format, as accepted by `format_decimal`. :param locale: the `Locale` object or locale identifier """ locale = Locale.parse(locale) <DeepExtract> locale = Locale.parse(locale) unit_patterns = locale._data['unit_patterns'] if measurement_unit in unit_patterns: q_unit = measurement_unit for unit_pattern in sorted(unit_patterns, key=len): if unit_pattern.endswith(measurement_unit): q_unit = unit_pattern q_unit = None </DeepExtract> if not q_unit: raise UnknownUnitError(unit=measurement_unit, locale=locale) unit_patterns = locale._data['unit_patterns'][q_unit].get(length, {}) if isinstance(value, str): formatted_value = value plural_form = 'one' else: formatted_value = format_decimal(value, format, locale) plural_form = locale.plural_form(value) if plural_form in unit_patterns: return unit_patterns[plural_form].format(formatted_value) <DeepExtract> locale = Locale.parse(locale) unit = _find_unit_pattern(measurement_unit, locale=locale) if not unit: raise UnknownUnitError(unit=measurement_unit, locale=locale) fallback_name = locale.unit_display_names.get(unit, {}).get(length) </DeepExtract> return f'{formatted_value} {fallback_name or measurement_unit}'
def format_unit(value: str | float | decimal.Decimal, measurement_unit: str, length: Literal['short', 'long', 'narrow']='long', format: str | None=None, locale: Locale | str | None=LC_NUMERIC) -> str: """Format a value of a given unit. Values are formatted according to the locale's usual pluralization rules and number formats. >>> format_unit(12, 'length-meter', locale='ro_RO') u'12 metri' >>> format_unit(15.5, 'length-mile', locale='fi_FI') u'15,5 mailia' >>> format_unit(1200, 'pressure-millimeter-ofhg', locale='nb') u'1\\xa0200 millimeter kvikks\\xf8lv' >>> format_unit(270, 'ton', locale='en') u'270 tons' Number formats may be overridden with the ``format`` parameter. >>> import decimal >>> format_unit(decimal.Decimal("-42.774"), 'temperature-celsius', 'short', format='#.0', locale='fr') u'-42,8\\u202f\\xb0C' The locale's usual pluralization rules are respected. >>> format_unit(1, 'length-meter', locale='ro_RO') u'1 metru' >>> format_unit(0, 'length-mile', locale='cy') u'0 mi' >>> format_unit(1, 'length-mile', locale='cy') u'1 filltir' >>> format_unit(3, 'length-mile', locale='cy') u'3 milltir' >>> format_unit(15, 'length-horse', locale='fi') Traceback (most recent call last): ... UnknownUnitError: length-horse is not a known unit in fi .. versionadded:: 2.2.0 :param value: the value to format. If this is a string, no number formatting will be attempted. :param measurement_unit: the code of a measurement unit. Known units can be found in the CLDR Unit Validity XML file: https://unicode.org/repos/cldr/tags/latest/common/validity/unit.xml :param length: "short", "long" or "narrow" :param format: An optional format, as accepted by `format_decimal`. :param locale: the `Locale` object or locale identifier """ locale = Locale.parse(locale) locale = Locale.parse(locale) unit_patterns = locale._data['unit_patterns'] if measurement_unit in unit_patterns: q_unit = measurement_unit for unit_pattern in sorted(unit_patterns, key=len): if unit_pattern.endswith(measurement_unit): q_unit = unit_pattern q_unit = None if not q_unit: raise UnknownUnitError(unit=measurement_unit, locale=locale) unit_patterns = locale._data['unit_patterns'][q_unit].get(length, {}) if isinstance(value, str): formatted_value = value plural_form = 'one' else: formatted_value = format_decimal(value, format, locale) plural_form = locale.plural_form(value) if plural_form in unit_patterns: return unit_patterns[plural_form].format(formatted_value) locale = Locale.parse(locale) unit = _find_unit_pattern(measurement_unit, locale=locale) if not unit: raise UnknownUnitError(unit=measurement_unit, locale=locale) fallback_name = locale.unit_display_names.get(unit, {}).get(length) return f'{formatted_value} {fallback_name or measurement_unit}'
babel
positive
def delete(self): <DeepExtract> del _pyfs_files[self.basename] </DeepExtract> self._file.close()
def delete(self): del _pyfs_files[self.basename] self._file.close()
bits
positive
def convert_docutils_node(list_item, only_pages=False): if not list_item.children: return None reference = list_item.children[0].children[0] title = reference.astext() url = reference.attributes['refuri'] active = 'current' in list_item.attributes['classes'] if only_pages and '#' in url: return None nav = {} nav['title'] = title nav['url'] = url nav['children'] = [] nav['active'] = active if len(list_item.children) > 1: for child_item in list_item.children[1].children: <DeepExtract> if not child_item.children: child_nav = None reference = child_item.children[0].children[0] title = reference.astext() url = reference.attributes['refuri'] active = 'current' in child_item.attributes['classes'] if only_pages and '#' in url: child_nav = None nav = {} nav['title'] = title nav['url'] = url nav['children'] = [] nav['active'] = active if len(child_item.children) > 1: for child_item in child_item.children[1].children: child_nav = convert_docutils_node(child_item, only_pages=only_pages) if child_nav is not None: nav['children'].append(child_nav) child_nav = nav </DeepExtract> if child_nav is not None: nav['children'].append(child_nav) return nav
def convert_docutils_node(list_item, only_pages=False): if not list_item.children: return None reference = list_item.children[0].children[0] title = reference.astext() url = reference.attributes['refuri'] active = 'current' in list_item.attributes['classes'] if only_pages and '#' in url: return None nav = {} nav['title'] = title nav['url'] = url nav['children'] = [] nav['active'] = active if len(list_item.children) > 1: for child_item in list_item.children[1].children: if not child_item.children: child_nav = None reference = child_item.children[0].children[0] title = reference.astext() url = reference.attributes['refuri'] active = 'current' in child_item.attributes['classes'] if only_pages and '#' in url: child_nav = None nav = {} nav['title'] = title nav['url'] = url nav['children'] = [] nav['active'] = active if len(child_item.children) > 1: for child_item in child_item.children[1].children: child_nav = convert_docutils_node(child_item, only_pages=only_pages) if child_nav is not None: nav['children'].append(child_nav) child_nav = nav if child_nav is not None: nav['children'].append(child_nav) return nav
dataprep
positive
def examine_abi(download_path, name): """Proxy the ABI reporting to the right function.""" download_path = os.path.abspath(download_path) results_dir = os.path.abspath(os.path.join(download_path, 'results')) if not os.path.exists(results_dir): util.print_fatal('Results directory does not exist, aborting') sys.exit(1) if util.binary_in_path('abireport'): <DeepExtract> rpms = set() for item in os.listdir(results_dir): namelen = len(name) if item.find('-extras-', namelen) >= namelen: continue if item.endswith('.rpm') and (not item.endswith('.src.rpm')): rpms.add('{}/{}'.format(results_dir, item)) if len(rpms) == 0: util.print_fatal('No usable rpms found, aborting') sys.exit(1) try: util.call('abireport scan-packages {}'.format(' '.join(rpms)), cwd=download_path) except Exception as e: util.print_fatal('Error invoking abireport: {}'.format(e)) </DeepExtract> else: util.print_warning('abireport is not installed. Using slow scanning') <DeepExtract> old_dir = os.getcwd() rpms = set() for item in os.listdir(results_dir): namelen = len(name) if item.find('-extras-', namelen) >= namelen: continue if item.endswith('.rpm') and (not item.endswith('.src.rpm')): rpms.add(os.path.basename(item)) if len(rpms) == 0: util.print_fatal('No usable rpms found, aborting') sys.exit(1) extract_dir = os.path.abspath(os.path.join(download_path, '__extraction')) purge_tree(extract_dir) try: os.makedirs(extract_dir) except Exception as e: util.print_fatal('Cannot create extraction tree: {}'.format(e)) sys.exit(1) os.chdir(extract_dir) try: for rpm in rpms: cmd = 'rpm2cpio "{}" | cpio -imd 2>/dev/null'.format(os.path.join(results_dir, rpm)) subprocess.check_call(cmd, shell=True) except Exception as e: util.print_fatal('Error extracting RPMS: {}'.format(e)) os.chdir(download_path) collected_files = set() for check_path in valid_dirs: if check_path[0] == '/': check_path = check_path[1:] dirn = os.path.join(extract_dir, check_path) if not os.path.isdir(dirn): continue for file in os.listdir(dirn): f = os.path.basename(file) clean_path = os.path.abspath(os.path.join(dirn, f)) if not is_file_valid(clean_path): continue collected_files.add(clean_path) abi_report = dict() for library in sorted(collected_files): soname = get_soname(library) if not soname: warn = 'Failed to determine soname of: {}'.format(library) util.print_warning(warn) soname = os.path.basename(library) symbols = dump_symbols(library) if symbols and len(symbols) > 0: if soname not in abi_report: abi_report[soname] = set() abi_report[soname].update(symbols) report_file = os.path.join(download_path, 'symbols') if len(abi_report) > 0: report = util.open_auto(report_file, 'w') for soname in sorted(abi_report.keys()): for symbol in sorted(abi_report[soname]): report.write('{}:{}\n'.format(soname, symbol)) report.close() else: truncate_file(report_file) lib_deps = get_all_dependencies(extract_dir) report_file = os.path.join(download_path, 'used_libs') if len(lib_deps) > 0: report = util.open_auto(report_file, 'w') for soname in sorted(lib_deps): report.write('{}\n'.format(soname)) report.close() else: truncate_file(report_file) os.chdir(old_dir) purge_tree(extract_dir) </DeepExtract>
def examine_abi(download_path, name): """Proxy the ABI reporting to the right function.""" download_path = os.path.abspath(download_path) results_dir = os.path.abspath(os.path.join(download_path, 'results')) if not os.path.exists(results_dir): util.print_fatal('Results directory does not exist, aborting') sys.exit(1) if util.binary_in_path('abireport'): rpms = set() for item in os.listdir(results_dir): namelen = len(name) if item.find('-extras-', namelen) >= namelen: continue if item.endswith('.rpm') and (not item.endswith('.src.rpm')): rpms.add('{}/{}'.format(results_dir, item)) if len(rpms) == 0: util.print_fatal('No usable rpms found, aborting') sys.exit(1) try: util.call('abireport scan-packages {}'.format(' '.join(rpms)), cwd=download_path) except Exception as e: util.print_fatal('Error invoking abireport: {}'.format(e)) else: util.print_warning('abireport is not installed. Using slow scanning') old_dir = os.getcwd() rpms = set() for item in os.listdir(results_dir): namelen = len(name) if item.find('-extras-', namelen) >= namelen: continue if item.endswith('.rpm') and (not item.endswith('.src.rpm')): rpms.add(os.path.basename(item)) if len(rpms) == 0: util.print_fatal('No usable rpms found, aborting') sys.exit(1) extract_dir = os.path.abspath(os.path.join(download_path, '__extraction')) purge_tree(extract_dir) try: os.makedirs(extract_dir) except Exception as e: util.print_fatal('Cannot create extraction tree: {}'.format(e)) sys.exit(1) os.chdir(extract_dir) try: for rpm in rpms: cmd = 'rpm2cpio "{}" | cpio -imd 2>/dev/null'.format(os.path.join(results_dir, rpm)) subprocess.check_call(cmd, shell=True) except Exception as e: util.print_fatal('Error extracting RPMS: {}'.format(e)) os.chdir(download_path) collected_files = set() for check_path in valid_dirs: if check_path[0] == '/': check_path = check_path[1:] dirn = os.path.join(extract_dir, check_path) if not os.path.isdir(dirn): continue for file in os.listdir(dirn): f = os.path.basename(file) clean_path = os.path.abspath(os.path.join(dirn, f)) if not is_file_valid(clean_path): continue collected_files.add(clean_path) abi_report = dict() for library in sorted(collected_files): soname = get_soname(library) if not soname: warn = 'Failed to determine soname of: {}'.format(library) util.print_warning(warn) soname = os.path.basename(library) symbols = dump_symbols(library) if symbols and len(symbols) > 0: if soname not in abi_report: abi_report[soname] = set() abi_report[soname].update(symbols) report_file = os.path.join(download_path, 'symbols') if len(abi_report) > 0: report = util.open_auto(report_file, 'w') for soname in sorted(abi_report.keys()): for symbol in sorted(abi_report[soname]): report.write('{}:{}\n'.format(soname, symbol)) report.close() else: truncate_file(report_file) lib_deps = get_all_dependencies(extract_dir) report_file = os.path.join(download_path, 'used_libs') if len(lib_deps) > 0: report = util.open_auto(report_file, 'w') for soname in sorted(lib_deps): report.write('{}\n'.format(soname)) report.close() else: truncate_file(report_file) os.chdir(old_dir) purge_tree(extract_dir)
autospec
positive
def ensemble_lightgbm(forecast_in, forecast_out, pred): forecast_in_copy = forecast_in.copy() <DeepExtract> if pred in ['MS', 'M', 'BM', 'BMS']: forecast_in_copy['month'] = forecast_in_copy.index.month elif pred in ['BH', 'H']: forecast_in_copy['hour'] = forecast_in_copy.index.hour elif pred == 'B': forecast_in_copy['dayofweek'] = forecast_in_copy.index.dayofweek elif pred == 'D': forecast_in_copy['dayofweek'] = forecast_in_copy.index.dayofweek elif pred in ['W', 'W-SUN', 'W-MON', 'W-TUE', 'W-WED', 'W-THU', 'W-FRI', 'W-SAT']: forecast_in_copy['week'] = forecast_in_copy.index.week elif pred in ['Q', 'QS', 'BQ', 'BQS']: forecast_in_copy['quarter'] = forecast_in_copy.index.quarter elif pred in ['T', 'min']: forecast_in_copy['minute'] = forecast_in_copy.index.minute elif pred == 'S': forecast_in_copy['second'] = forecast_in_copy.index.second forecast_in_copy = forecast_in_copy </DeepExtract> forecast_in_copy['mean'] = forecast_in_copy.drop(['Target'], axis=1).mean(axis=1) (forecast_train, forecast_test) = tts(forecast_in_copy, train_size=0.5, shuffle=False, stratify=None) target = 'Target' d_train = lgb.Dataset(forecast_train.drop(columns=[target]), label=forecast_train[target]) params = {'boosting_type': 'gbdt', 'objective': 'regression', 'metric': 'rmsle', 'max_depth': 6, 'learning_rate': 0.1, 'verbose': 0, 'num_threads': 16} model = lgb.train(params, d_train, 100, verbose_eval=1) ensemble_lgb = pd.DataFrame(index=forecast_test.index) ensemble_lgb['ensemble_lgb'] = model.predict(forecast_test.drop(columns=[target])) ensemble_lgb_out = pd.DataFrame(index=forecast_out.index) ensemble_lgb_out['ensemble_lgb'] = model.predict(forecast_out) return (ensemble_lgb, ensemble_lgb_out)
def ensemble_lightgbm(forecast_in, forecast_out, pred): forecast_in_copy = forecast_in.copy() if pred in ['MS', 'M', 'BM', 'BMS']: forecast_in_copy['month'] = forecast_in_copy.index.month elif pred in ['BH', 'H']: forecast_in_copy['hour'] = forecast_in_copy.index.hour elif pred == 'B': forecast_in_copy['dayofweek'] = forecast_in_copy.index.dayofweek elif pred == 'D': forecast_in_copy['dayofweek'] = forecast_in_copy.index.dayofweek elif pred in ['W', 'W-SUN', 'W-MON', 'W-TUE', 'W-WED', 'W-THU', 'W-FRI', 'W-SAT']: forecast_in_copy['week'] = forecast_in_copy.index.week elif pred in ['Q', 'QS', 'BQ', 'BQS']: forecast_in_copy['quarter'] = forecast_in_copy.index.quarter elif pred in ['T', 'min']: forecast_in_copy['minute'] = forecast_in_copy.index.minute elif pred == 'S': forecast_in_copy['second'] = forecast_in_copy.index.second forecast_in_copy = forecast_in_copy forecast_in_copy['mean'] = forecast_in_copy.drop(['Target'], axis=1).mean(axis=1) (forecast_train, forecast_test) = tts(forecast_in_copy, train_size=0.5, shuffle=False, stratify=None) target = 'Target' d_train = lgb.Dataset(forecast_train.drop(columns=[target]), label=forecast_train[target]) params = {'boosting_type': 'gbdt', 'objective': 'regression', 'metric': 'rmsle', 'max_depth': 6, 'learning_rate': 0.1, 'verbose': 0, 'num_threads': 16} model = lgb.train(params, d_train, 100, verbose_eval=1) ensemble_lgb = pd.DataFrame(index=forecast_test.index) ensemble_lgb['ensemble_lgb'] = model.predict(forecast_test.drop(columns=[target])) ensemble_lgb_out = pd.DataFrame(index=forecast_out.index) ensemble_lgb_out['ensemble_lgb'] = model.predict(forecast_out) return (ensemble_lgb, ensemble_lgb_out)
atspy
positive
def sin(ys=None, lengths=None, xs=None, angles=None): """ calculate sin with multiple kinds of parameters """ if not angles is None: return np.sin(angles) if ys is None: raise ValueError('ys must be provided when "angles" is None ') if lengths is None: if xs is None: raise ValueError('xs must be provided when "lengths" is None ') lengths = np.sqrt(xs ** 2 + ys ** 2) if not np.iterable(lengths): sins = ys / lengths if lengths > 0 else 0 else: lengths = np.asarray(lengths) shape = lengths.shape <DeepExtract> ys = np.asarray(ys) dims = len(ys.shape) shape = [np.prod(ys.shape[0:dims + 1 - ndim])] shape.extend(ys.shape[dims + 1 - ndim:dims]) ys = np.reshape(ys, shape) </DeepExtract> <DeepExtract> lengths = np.asarray(lengths) dims = len(lengths.shape) shape = [np.prod(lengths.shape[0:dims + 1 - ndim])] shape.extend(lengths.shape[dims + 1 - ndim:dims]) lengths = np.reshape(lengths, shape) </DeepExtract> sins = [y / length if length > 0 else 0 for (y, length) in zip(ys, lengths)] sins = np.reshape(sins, shape) return sins
def sin(ys=None, lengths=None, xs=None, angles=None): """ calculate sin with multiple kinds of parameters """ if not angles is None: return np.sin(angles) if ys is None: raise ValueError('ys must be provided when "angles" is None ') if lengths is None: if xs is None: raise ValueError('xs must be provided when "lengths" is None ') lengths = np.sqrt(xs ** 2 + ys ** 2) if not np.iterable(lengths): sins = ys / lengths if lengths > 0 else 0 else: lengths = np.asarray(lengths) shape = lengths.shape ys = np.asarray(ys) dims = len(ys.shape) shape = [np.prod(ys.shape[0:dims + 1 - ndim])] shape.extend(ys.shape[dims + 1 - ndim:dims]) ys = np.reshape(ys, shape) lengths = np.asarray(lengths) dims = len(lengths.shape) shape = [np.prod(lengths.shape[0:dims + 1 - ndim])] shape.extend(lengths.shape[dims + 1 - ndim:dims]) lengths = np.reshape(lengths, shape) sins = [y / length if length > 0 else 0 for (y, length) in zip(ys, lengths)] sins = np.reshape(sins, shape) return sins
ContourNet
positive
def run(self, image_or_path_or_tensor, meta={}): (load_time, pre_time, net_time, dec_time, post_time) = (0, 0, 0, 0, 0) (merge_time, track_time, tot_time, display_time) = (0, 0, 0, 0) self.debugger.clear() start_time = time.time() pre_processed = False if isinstance(image_or_path_or_tensor, np.ndarray): image = image_or_path_or_tensor elif type(image_or_path_or_tensor) == type(''): image = cv2.imread(image_or_path_or_tensor) else: image = image_or_path_or_tensor['image'][0].numpy() pre_processed_images = image_or_path_or_tensor pre_processed = True loaded_time = time.time() load_time += loaded_time - start_time detections = [] for scale in self.opt.test_scales: scale_start_time = time.time() if not pre_processed: <DeepExtract> (resized_image, c, s, inp_width, inp_height, height, width) = self._transform_scale(image) trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height]) out_height = inp_height // self.opt.down_ratio out_width = inp_width // self.opt.down_ratio trans_output = get_affine_transform(c, s, 0, [out_width, out_height]) inp_image = cv2.warpAffine(resized_image, trans_input, (inp_width, inp_height), flags=cv2.INTER_LINEAR) inp_image = ((inp_image / 255.0 - self.mean) / self.std).astype(np.float32) images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width) if self.opt.flip_test: images = np.concatenate((images, images[:, :, :, ::-1]), axis=0) images = torch.from_numpy(images) meta = {'calib': np.array(meta['calib'], dtype=np.float32) if 'calib' in meta else self._get_default_calib(width, height)} meta.update({'c': c, 's': s, 'height': height, 'width': width, 'out_height': out_height, 'out_width': out_width, 'inp_height': inp_height, 'inp_width': inp_width, 'trans_input': trans_input, 'trans_output': trans_output}) if 'pre_dets' in meta: meta['pre_dets'] = meta['pre_dets'] if 'cur_dets' in meta: meta['cur_dets'] = meta['cur_dets'] (images, meta) = (images, meta) </DeepExtract> else: images = pre_processed_images['images'][scale][0] meta = pre_processed_images['meta'][scale] meta = {k: v.numpy()[0] for (k, v) in meta.items()} if 'pre_dets' in pre_processed_images['meta']: meta['pre_dets'] = pre_processed_images['meta']['pre_dets'] if 'cur_dets' in pre_processed_images['meta']: meta['cur_dets'] = pre_processed_images['meta']['cur_dets'] images = images.to(self.opt.device, non_blocking=self.opt.non_block_test) (pre_hms, pre_inds) = (None, None) if self.opt.tracking: if self.pre_images is None: print('Initialize tracking!') self.pre_images = images self.tracker.init_track(meta['pre_dets'] if 'pre_dets' in meta else []) if self.opt.pre_hm: <DeepExtract> (trans_input, trans_output) = (meta['trans_input'], meta['trans_output']) (inp_width, inp_height) = (meta['inp_width'], meta['inp_height']) (out_width, out_height) = (meta['out_width'], meta['out_height']) input_hm = np.zeros((1, inp_height, inp_width), dtype=np.float32) output_inds = [] for det in self.tracker.tracks: if det['score'] < self.opt.pre_thresh or det['active'] == 0: continue bbox = self._trans_bbox(det['bbox'], trans_input, inp_width, inp_height) bbox_out = self._trans_bbox(det['bbox'], trans_output, out_width, out_height) (h, w) = (bbox[3] - bbox[1], bbox[2] - bbox[0]) if h > 0 and w > 0: radius = gaussian_radius((math.ceil(h), math.ceil(w))) radius = max(0, int(radius)) ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32) ct_int = ct.astype(np.int32) if not self.opt.zero_pre_hm: draw_umich_gaussian(input_hm[0], ct_int, radius) ct_out = np.array([(bbox_out[0] + bbox_out[2]) / 2, (bbox_out[1] + bbox_out[3]) / 2], dtype=np.int32) output_inds.append(ct_out[1] * out_width + ct_out[0]) if not self.opt.zero_pre_hm: input_hm = input_hm[np.newaxis] if self.opt.flip_test: input_hm = np.concatenate((input_hm, input_hm[:, :, :, ::-1]), axis=0) input_hm = torch.from_numpy(input_hm).to(self.opt.device) output_inds = np.array(output_inds, np.int64).reshape(1, -1) output_inds = torch.from_numpy(output_inds).to(self.opt.device) (pre_hms, pre_inds) = (input_hm, output_inds) </DeepExtract> pre_process_time = time.time() pre_time += pre_process_time - scale_start_time <DeepExtract> with torch.no_grad(): if self.opt.gpus[0] >= 0: torch.cuda.synchronize() output = self.model(images, self.pre_images, pre_hms)[-1] output = self._sigmoid_output(output) output.update({'pre_inds': pre_inds}) if self.opt.flip_test: output = self._flip_output(output) if self.opt.gpus[0] >= 0: torch.cuda.synchronize() forward_time = time.time() dets = generic_decode(output, K=self.opt.K, opt=self.opt) if self.opt.gpus[0] >= 0: torch.cuda.synchronize() for k in dets: dets[k] = dets[k].detach().cpu().numpy() if True: (output, dets, forward_time) = (output, dets, forward_time) else: (output, dets, forward_time) = (output, dets) </DeepExtract> net_time += forward_time - pre_process_time decode_time = time.time() dec_time += decode_time - forward_time <DeepExtract> dets = generic_post_process(self.opt, dets, [meta['c']], [meta['s']], meta['out_height'], meta['out_width'], self.opt.num_classes, [meta['calib']], meta['height'], meta['width']) self.this_calib = meta['calib'] if scale != 1: for i in range(len(dets[0])): for k in ['bbox', 'hps']: if k in dets[0][i]: dets[0][i][k] = (np.array(dets[0][i][k], np.float32) / scale).tolist() result = dets[0] </DeepExtract> post_process_time = time.time() post_time += post_process_time - decode_time detections.append(result) if self.opt.debug >= 2: <DeepExtract> img = images[0].detach().cpu().numpy().transpose(1, 2, 0) img = np.clip((img * self.std + self.mean) * 255.0, 0, 255).astype(np.uint8) pred = self.debugger.gen_colormap(output['hm'][0].detach().cpu().numpy()) self.debugger.add_blend_img(img, pred, 'pred_hm') if 'hm_hp' in output: pred = self.debugger.gen_colormap_hp(output['hm_hp'][0].detach().cpu().numpy()) self.debugger.add_blend_img(img, pred, 'pred_hmhp') if self.pre_images if not self.opt.no_pre_img else None is not None: pre_img = self.pre_images if not self.opt.no_pre_img else None[0].detach().cpu().numpy().transpose(1, 2, 0) pre_img = np.clip((pre_img * self.std + self.mean) * 255.0, 0, 255).astype(np.uint8) self.debugger.add_img(pre_img, 'pre_img') if pre_hms is not None: pre_hm = self.debugger.gen_colormap(pre_hms[0].detach().cpu().numpy()) self.debugger.add_blend_img(pre_img, pre_hm, 'pre_hm') </DeepExtract> <DeepExtract> assert len(self.opt.test_scales) == 1, 'multi_scale not supported!' results = [] for i in range(len(detections[0])): if detections[0][i]['score'] > self.opt.out_thresh: results.append(detections[0][i]) results = results </DeepExtract> if self.opt.gpus[0] >= 0: torch.cuda.synchronize() end_time = time.time() merge_time += end_time - post_process_time if self.opt.tracking: public_det = meta['cur_dets'] if self.opt.public_det else None results = self.tracker.step(results, public_det) self.pre_images = images tracking_time = time.time() track_time += tracking_time - end_time tot_time += tracking_time - start_time if self.opt.debug >= 1: <DeepExtract> self.debugger.add_img(image, img_id='generic') for j in range(len(results)): if results[j]['score'] > self.opt.vis_thresh: if 'active' in results[j] and results[j]['active'] == 0: continue item = results[j] if 'bbox' in item: sc = item['score'] if self.opt.demo == '' or not 'tracking_id' in item else item['tracking_id'] sc = item['tracking_id'] if self.opt.show_track_color else sc self.debugger.add_coco_bbox(item['bbox'], item['class'] - 1, sc, img_id='generic') if 'tracking' in item: self.debugger.add_arrow(item['ct'], item['tracking'], img_id='generic') tracking_id = item['tracking_id'] if 'tracking_id' in item else -1 if 'tracking_id' in item and self.opt.demo == '' and (not self.opt.show_track_color): self.debugger.add_tracking_id(item['ct'], item['tracking_id'], img_id='generic') if item['class'] in [1, 2] and 'hps' in item: self.debugger.add_coco_hp(item['hps'], tracking_id=tracking_id, img_id='generic') if len(results) > 0 and 'dep' in results[0] and ('alpha' in results[0]) and ('dim' in results[0]): self.debugger.add_3d_detection(image if not self.opt.qualitative else cv2.resize(self.debugger.imgs['pred_hm'], (image.shape[1], image.shape[0])), False, results, self.this_calib, vis_thresh=self.opt.vis_thresh, img_id='ddd_pred') self.debugger.add_bird_view(results, vis_thresh=self.opt.vis_thresh, img_id='bird_pred', cnt=self.cnt) if self.opt.show_track_color and self.opt.debug == 4: del self.debugger.imgs['generic'], self.debugger.imgs['bird_pred'] if 'ddd_pred' in self.debugger.imgs: self.debugger.imgs['generic'] = self.debugger.imgs['ddd_pred'] if self.opt.debug == 4: self.debugger.save_all_imgs(self.opt.debug_dir, prefix='{}'.format(self.cnt)) else: self.debugger.show_all_imgs(pause=self.pause) </DeepExtract> self.cnt += 1 show_results_time = time.time() display_time += show_results_time - end_time ret = {'results': results, 'tot': tot_time, 'load': load_time, 'pre': pre_time, 'net': net_time, 'dec': dec_time, 'post': post_time, 'merge': merge_time, 'track': track_time, 'display': display_time} if self.opt.save_video: try: ret.update({'generic': self.debugger.imgs['generic']}) except: pass return ret
def run(self, image_or_path_or_tensor, meta={}): (load_time, pre_time, net_time, dec_time, post_time) = (0, 0, 0, 0, 0) (merge_time, track_time, tot_time, display_time) = (0, 0, 0, 0) self.debugger.clear() start_time = time.time() pre_processed = False if isinstance(image_or_path_or_tensor, np.ndarray): image = image_or_path_or_tensor elif type(image_or_path_or_tensor) == type(''): image = cv2.imread(image_or_path_or_tensor) else: image = image_or_path_or_tensor['image'][0].numpy() pre_processed_images = image_or_path_or_tensor pre_processed = True loaded_time = time.time() load_time += loaded_time - start_time detections = [] for scale in self.opt.test_scales: scale_start_time = time.time() if not pre_processed: (resized_image, c, s, inp_width, inp_height, height, width) = self._transform_scale(image) trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height]) out_height = inp_height // self.opt.down_ratio out_width = inp_width // self.opt.down_ratio trans_output = get_affine_transform(c, s, 0, [out_width, out_height]) inp_image = cv2.warpAffine(resized_image, trans_input, (inp_width, inp_height), flags=cv2.INTER_LINEAR) inp_image = ((inp_image / 255.0 - self.mean) / self.std).astype(np.float32) images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width) if self.opt.flip_test: images = np.concatenate((images, images[:, :, :, ::-1]), axis=0) images = torch.from_numpy(images) meta = {'calib': np.array(meta['calib'], dtype=np.float32) if 'calib' in meta else self._get_default_calib(width, height)} meta.update({'c': c, 's': s, 'height': height, 'width': width, 'out_height': out_height, 'out_width': out_width, 'inp_height': inp_height, 'inp_width': inp_width, 'trans_input': trans_input, 'trans_output': trans_output}) if 'pre_dets' in meta: meta['pre_dets'] = meta['pre_dets'] if 'cur_dets' in meta: meta['cur_dets'] = meta['cur_dets'] (images, meta) = (images, meta) else: images = pre_processed_images['images'][scale][0] meta = pre_processed_images['meta'][scale] meta = {k: v.numpy()[0] for (k, v) in meta.items()} if 'pre_dets' in pre_processed_images['meta']: meta['pre_dets'] = pre_processed_images['meta']['pre_dets'] if 'cur_dets' in pre_processed_images['meta']: meta['cur_dets'] = pre_processed_images['meta']['cur_dets'] images = images.to(self.opt.device, non_blocking=self.opt.non_block_test) (pre_hms, pre_inds) = (None, None) if self.opt.tracking: if self.pre_images is None: print('Initialize tracking!') self.pre_images = images self.tracker.init_track(meta['pre_dets'] if 'pre_dets' in meta else []) if self.opt.pre_hm: (trans_input, trans_output) = (meta['trans_input'], meta['trans_output']) (inp_width, inp_height) = (meta['inp_width'], meta['inp_height']) (out_width, out_height) = (meta['out_width'], meta['out_height']) input_hm = np.zeros((1, inp_height, inp_width), dtype=np.float32) output_inds = [] for det in self.tracker.tracks: if det['score'] < self.opt.pre_thresh or det['active'] == 0: continue bbox = self._trans_bbox(det['bbox'], trans_input, inp_width, inp_height) bbox_out = self._trans_bbox(det['bbox'], trans_output, out_width, out_height) (h, w) = (bbox[3] - bbox[1], bbox[2] - bbox[0]) if h > 0 and w > 0: radius = gaussian_radius((math.ceil(h), math.ceil(w))) radius = max(0, int(radius)) ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32) ct_int = ct.astype(np.int32) if not self.opt.zero_pre_hm: draw_umich_gaussian(input_hm[0], ct_int, radius) ct_out = np.array([(bbox_out[0] + bbox_out[2]) / 2, (bbox_out[1] + bbox_out[3]) / 
2], dtype=np.int32) output_inds.append(ct_out[1] * out_width + ct_out[0]) if not self.opt.zero_pre_hm: input_hm = input_hm[np.newaxis] if self.opt.flip_test: input_hm = np.concatenate((input_hm, input_hm[:, :, :, ::-1]), axis=0) input_hm = torch.from_numpy(input_hm).to(self.opt.device) output_inds = np.array(output_inds, np.int64).reshape(1, -1) output_inds = torch.from_numpy(output_inds).to(self.opt.device) (pre_hms, pre_inds) = (input_hm, output_inds) pre_process_time = time.time() pre_time += pre_process_time - scale_start_time with torch.no_grad(): if self.opt.gpus[0] >= 0: torch.cuda.synchronize() output = self.model(images, self.pre_images, pre_hms)[-1] output = self._sigmoid_output(output) output.update({'pre_inds': pre_inds}) if self.opt.flip_test: output = self._flip_output(output) if self.opt.gpus[0] >= 0: torch.cuda.synchronize() forward_time = time.time() dets = generic_decode(output, K=self.opt.K, opt=self.opt) if self.opt.gpus[0] >= 0: torch.cuda.synchronize() for k in dets: dets[k] = dets[k].detach().cpu().numpy() if True: (output, dets, forward_time) = (output, dets, forward_time) else: (output, dets, forward_time) = (output, dets) net_time += forward_time - pre_process_time decode_time = time.time() dec_time += decode_time - forward_time dets = generic_post_process(self.opt, dets, [meta['c']], [meta['s']], meta['out_height'], meta['out_width'], self.opt.num_classes, [meta['calib']], meta['height'], meta['width']) self.this_calib = meta['calib'] if scale != 1: for i in range(len(dets[0])): for k in ['bbox', 'hps']: if k in dets[0][i]: dets[0][i][k] = (np.array(dets[0][i][k], np.float32) / scale).tolist() result = dets[0] post_process_time = time.time() post_time += post_process_time - decode_time detections.append(result) if self.opt.debug >= 2: img = images[0].detach().cpu().numpy().transpose(1, 2, 0) img = np.clip((img * self.std + self.mean) * 255.0, 0, 255).astype(np.uint8) pred = self.debugger.gen_colormap(output['hm'][0].detach().cpu().numpy()) self.debugger.add_blend_img(img, pred, 'pred_hm') if 'hm_hp' in output: pred = self.debugger.gen_colormap_hp(output['hm_hp'][0].detach().cpu().numpy()) self.debugger.add_blend_img(img, pred, 'pred_hmhp') if self.pre_images if not self.opt.no_pre_img else None is not None: pre_img = self.pre_images if not self.opt.no_pre_img else None[0].detach().cpu().numpy().transpose(1, 2, 0) pre_img = np.clip((pre_img * self.std + self.mean) * 255.0, 0, 255).astype(np.uint8) self.debugger.add_img(pre_img, 'pre_img') if pre_hms is not None: pre_hm = self.debugger.gen_colormap(pre_hms[0].detach().cpu().numpy()) self.debugger.add_blend_img(pre_img, pre_hm, 'pre_hm') assert len(self.opt.test_scales) == 1, 'multi_scale not supported!' 
results = [] for i in range(len(detections[0])): if detections[0][i]['score'] > self.opt.out_thresh: results.append(detections[0][i]) results = results if self.opt.gpus[0] >= 0: torch.cuda.synchronize() end_time = time.time() merge_time += end_time - post_process_time if self.opt.tracking: public_det = meta['cur_dets'] if self.opt.public_det else None results = self.tracker.step(results, public_det) self.pre_images = images tracking_time = time.time() track_time += tracking_time - end_time tot_time += tracking_time - start_time if self.opt.debug >= 1: self.debugger.add_img(image, img_id='generic') for j in range(len(results)): if results[j]['score'] > self.opt.vis_thresh: if 'active' in results[j] and results[j]['active'] == 0: continue item = results[j] if 'bbox' in item: sc = item['score'] if self.opt.demo == '' or not 'tracking_id' in item else item['tracking_id'] sc = item['tracking_id'] if self.opt.show_track_color else sc self.debugger.add_coco_bbox(item['bbox'], item['class'] - 1, sc, img_id='generic') if 'tracking' in item: self.debugger.add_arrow(item['ct'], item['tracking'], img_id='generic') tracking_id = item['tracking_id'] if 'tracking_id' in item else -1 if 'tracking_id' in item and self.opt.demo == '' and (not self.opt.show_track_color): self.debugger.add_tracking_id(item['ct'], item['tracking_id'], img_id='generic') if item['class'] in [1, 2] and 'hps' in item: self.debugger.add_coco_hp(item['hps'], tracking_id=tracking_id, img_id='generic') if len(results) > 0 and 'dep' in results[0] and ('alpha' in results[0]) and ('dim' in results[0]): self.debugger.add_3d_detection(image if not self.opt.qualitative else cv2.resize(self.debugger.imgs['pred_hm'], (image.shape[1], image.shape[0])), False, results, self.this_calib, vis_thresh=self.opt.vis_thresh, img_id='ddd_pred') self.debugger.add_bird_view(results, vis_thresh=self.opt.vis_thresh, img_id='bird_pred', cnt=self.cnt) if self.opt.show_track_color and self.opt.debug == 4: del self.debugger.imgs['generic'], self.debugger.imgs['bird_pred'] if 'ddd_pred' in self.debugger.imgs: self.debugger.imgs['generic'] = self.debugger.imgs['ddd_pred'] if self.opt.debug == 4: self.debugger.save_all_imgs(self.opt.debug_dir, prefix='{}'.format(self.cnt)) else: self.debugger.show_all_imgs(pause=self.pause) self.cnt += 1 show_results_time = time.time() display_time += show_results_time - end_time ret = {'results': results, 'tot': tot_time, 'load': load_time, 'pre': pre_time, 'net': net_time, 'dec': dec_time, 'post': post_time, 'merge': merge_time, 'track': track_time, 'display': display_time} if self.opt.save_video: try: ret.update({'generic': self.debugger.imgs['generic']}) except: pass return ret
CenterTrack-IOU
positive
def correct_coords(wanted_x, wanted_y):
    <DeepExtract>
    available_w = self._drawing_area.get_allocated_width()
    </DeepExtract>
    <DeepExtract>
    available_h = self._drawing_area.get_allocated_height()
    </DeepExtract>
    if available_w < 2:
        return
    <DeepExtract>
    if self.active_tool().menu_id == 1:
        if not self.active_tool().apply_to_selection:
            mpb_width = self.temp_pixbuf.get_width() + 12
    mpb_width = self.get_pixbuf_width()
    </DeepExtract>
    wanted_x = min(wanted_x, self.get_max_coord(mpb_width, available_w))
    wanted_x = max(wanted_x, 0)
    <DeepExtract>
    if False:
        scrollbar = self._v_scrollbar
    else:
        scrollbar = self._h_scrollbar
    scrollbar.set_visible(available_w / self.zoom_level < int(mpb_width))
    scrollbar.set_range(0, int(mpb_width))
    scrollbar.get_adjustment().set_page_size(available_w / self.zoom_level)
    scrollbar.set_value(int(wanted_x))
    if False:
        self.scroll_y = int(scrollbar.get_value())
    else:
        self.scroll_x = int(scrollbar.get_value())
    </DeepExtract>
    <DeepExtract>
    if self.active_tool().menu_id == 1:
        if not self.active_tool().apply_to_selection:
            mpb_height = self.temp_pixbuf.get_height() + 12
    mpb_height = self.get_pixbuf_height()
    </DeepExtract>
    wanted_y = min(wanted_y, self.get_max_coord(mpb_height, available_h))
    wanted_y = max(wanted_y, 0)
    <DeepExtract>
    if True:
        scrollbar = self._v_scrollbar
    else:
        scrollbar = self._h_scrollbar
    scrollbar.set_visible(available_h / self.zoom_level < int(mpb_height))
    scrollbar.set_range(0, int(mpb_height))
    scrollbar.get_adjustment().set_page_size(available_h / self.zoom_level)
    scrollbar.set_value(int(wanted_y))
    if True:
        self.scroll_y = int(scrollbar.get_value())
    else:
        self.scroll_x = int(scrollbar.get_value())
    </DeepExtract>
def correct_coords(wanted_x, wanted_y):
    available_w = self._drawing_area.get_allocated_width()
    available_h = self._drawing_area.get_allocated_height()
    if available_w < 2:
        return
    if self.active_tool().menu_id == 1:
        if not self.active_tool().apply_to_selection:
            mpb_width = self.temp_pixbuf.get_width() + 12
    mpb_width = self.get_pixbuf_width()
    wanted_x = min(wanted_x, self.get_max_coord(mpb_width, available_w))
    wanted_x = max(wanted_x, 0)
    if False:
        scrollbar = self._v_scrollbar
    else:
        scrollbar = self._h_scrollbar
    scrollbar.set_visible(available_w / self.zoom_level < int(mpb_width))
    scrollbar.set_range(0, int(mpb_width))
    scrollbar.get_adjustment().set_page_size(available_w / self.zoom_level)
    scrollbar.set_value(int(wanted_x))
    if False:
        self.scroll_y = int(scrollbar.get_value())
    else:
        self.scroll_x = int(scrollbar.get_value())
    if self.active_tool().menu_id == 1:
        if not self.active_tool().apply_to_selection:
            mpb_height = self.temp_pixbuf.get_height() + 12
    mpb_height = self.get_pixbuf_height()
    wanted_y = min(wanted_y, self.get_max_coord(mpb_height, available_h))
    wanted_y = max(wanted_y, 0)
    if True:
        scrollbar = self._v_scrollbar
    else:
        scrollbar = self._h_scrollbar
    scrollbar.set_visible(available_h / self.zoom_level < int(mpb_height))
    scrollbar.set_range(0, int(mpb_height))
    scrollbar.get_adjustment().set_page_size(available_h / self.zoom_level)
    scrollbar.set_value(int(wanted_y))
    if True:
        self.scroll_y = int(scrollbar.get_value())
    else:
        self.scroll_x = int(scrollbar.get_value())
drawing
positive
def InputEvent(self, msg):
    self.widgets.process_queue()
    result = self.root.input_event(msg)
    self.widgets.process_queue()
    <DeepExtract>
    self.widgets.update(self.root)
    </DeepExtract>
    return result
def InputEvent(self, msg):
    self.widgets.process_queue()
    result = self.root.input_event(msg)
    self.widgets.process_queue()
    self.widgets.update(self.root)
    return result
c4ddev
positive
def get_payload(self, shape, sync_state=None, additional_fields=None):
    sync_folder_hierarchy = create_element('m:%s' % self.SERVICE_NAME)
    foldershape = create_element('m:FolderShape')
    add_xml_child(foldershape, 't:BaseShape', shape)
    <DeepExtract>
    if not additional_fields:
        return
    from .fields import FieldPath
    additional_field_paths = []
    for field in additional_fields:
        if isinstance(field, FieldPath):
            field_path = field
        else:
            field_path = FieldPath(field=field)
        additional_field_paths.append(field_path)

    def consistent_key(field_path):
        known_attrs = ['field_uri', 'value_cls.FIELDS[0].default']
        key = [field_path.path]
        for attr in known_attrs:
            val = getattr(field_path.field, attr, None)
            if val is not None:
                key.append(val)
        return key
    additional_properties = create_element('t:AdditionalProperties')
    expanded_fields = chain(*(f.expand(version=self.account.version) for f in additional_field_paths))
    set_xml_value(additional_properties, sorted(expanded_fields, key=consistent_key), self.account.version)
    foldershape.append(additional_properties)
    </DeepExtract>
    sync_folder_hierarchy.append(foldershape)
    if sync_state is not None:
        syncstate = create_element('m:SyncState')
        syncstate.text = sync_state
        sync_folder_hierarchy.append(syncstate)
    return sync_folder_hierarchy
def get_payload(self, shape, sync_state=None, additional_fields=None):
    sync_folder_hierarchy = create_element('m:%s' % self.SERVICE_NAME)
    foldershape = create_element('m:FolderShape')
    add_xml_child(foldershape, 't:BaseShape', shape)
    if not additional_fields:
        return
    from .fields import FieldPath
    additional_field_paths = []
    for field in additional_fields:
        if isinstance(field, FieldPath):
            field_path = field
        else:
            field_path = FieldPath(field=field)
        additional_field_paths.append(field_path)

    def consistent_key(field_path):
        known_attrs = ['field_uri', 'value_cls.FIELDS[0].default']
        key = [field_path.path]
        for attr in known_attrs:
            val = getattr(field_path.field, attr, None)
            if val is not None:
                key.append(val)
        return key
    additional_properties = create_element('t:AdditionalProperties')
    expanded_fields = chain(*(f.expand(version=self.account.version) for f in additional_field_paths))
    set_xml_value(additional_properties, sorted(expanded_fields, key=consistent_key), self.account.version)
    foldershape.append(additional_properties)
    sync_folder_hierarchy.append(foldershape)
    if sync_state is not None:
        syncstate = create_element('m:SyncState')
        syncstate.text = sync_state
        sync_folder_hierarchy.append(syncstate)
    return sync_folder_hierarchy
exchangelib
positive
def write_docs_index(api_version: str) -> None:
    path = Path.cwd() / 'docs' / 'index.rst'
    content = path.read_text()
    <DeepExtract>
    result = API_VERSION_BADGE.sub(f'\\g<1>{api_version}\\g<2>', content)
    content = result
    </DeepExtract>
    print(f'Write {path}')
    path.write_text(content)
def write_docs_index(api_version: str) -> None:
    path = Path.cwd() / 'docs' / 'index.rst'
    content = path.read_text()
    result = API_VERSION_BADGE.sub(f'\\g<1>{api_version}\\g<2>', content)
    content = result
    print(f'Write {path}')
    path.write_text(content)
aiogram
positive
def get_centos_docker_mirrors(image):
    <DeepExtract>
    rmi = 'localhost:5000/mirror-packages'
    rep = 'centos-repo'
    ver = self.get_centos_latest_version(onlyversion(image))
    main = self.docker_mirror(rmi, rep, ver, 'mirrorlist.centos.org')
    </DeepExtract>
    return [main]
def get_centos_docker_mirrors(image):
    rmi = 'localhost:5000/mirror-packages'
    rep = 'centos-repo'
    ver = self.get_centos_latest_version(onlyversion(image))
    main = self.docker_mirror(rmi, rep, ver, 'mirrorlist.centos.org')
    return [main]
docker-systemctl-images
positive
def run(self): try: if self.s3_collection_enabled: <DeepExtract> self.log.debug('Updating S3Buckets for {}'.format(self.account.account_name)) s3 = self.session.resource('s3') s3c = self.session.client('s3') try: existing_buckets = S3Bucket.get_all(self.account) buckets = {bucket.name: bucket for bucket in s3.buckets.all()} for data in buckets.values(): try: bucket_region = s3c.get_bucket_location(Bucket=data.name)['LocationConstraint'] if not bucket_region: bucket_region = 'us-east-1' except ClientError as e: self.log.info('Could not get bucket location..bucket possibly removed / {}'.format(e)) bucket_region = 'unavailable' try: bucket_policy = data.Policy().policy except ClientError as e: if e.response['Error']['Code'] == 'NoSuchBucketPolicy': bucket_policy = None else: self.log.info('There was a problem collecting bucket policy for bucket {} on account {}, {}'.format(data.name, self.account, e.response)) bucket_policy = 'cinq cannot poll' try: website_enabled = 'Enabled' if data.Website().index_document else 'Disabled' except ClientError as e: if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration': website_enabled = 'Disabled' else: self.log.info('There was a problem collecting website config for bucket {} on account {}'.format(data.name, self.account)) website_enabled = 'cinq cannot poll' try: tags = {t['Key']: t['Value'] for t in data.Tagging().tag_set} except ClientError: tags = {} try: bucket_size = self._get_bucket_statistics(data.name, bucket_region, 'StandardStorage', 'BucketSizeBytes', 3) bucket_obj_count = self._get_bucket_statistics(data.name, bucket_region, 'AllStorageTypes', 'NumberOfObjects', 3) metrics = {'size': bucket_size, 'object_count': bucket_obj_count} except Exception as e: self.log.info('Could not retrieve bucket statistics / {}'.format(e)) metrics = {'found': False} properties = {'bucket_policy': bucket_policy, 'creation_date': data.creation_date, 'location': bucket_region, 'website_enabled': website_enabled, 'metrics': metrics, 'tags': tags} if data.name in existing_buckets: bucket = existing_buckets[data.name] if bucket.update(data, properties): self.log.debug('Change detected for S3Bucket {}/{}'.format(self.account.account_name, bucket.id)) bucket.save() else: S3Bucket.create(data.name, account_id=self.account.account_id, properties=properties, location=bucket_region, tags=tags) self.log.debug('Added new S3Bucket {}/{}'.format(self.account.account_name, data.name)) db.session.commit() bk = set(list(buckets.keys())) ebk = set(list(existing_buckets.keys())) try: for resource_id in ebk - bk: db.session.delete(existing_buckets[resource_id].resource) self.log.debug('Deleted S3Bucket {}/{}'.format(self.account.account_name, resource_id)) db.session.commit() except Exception as e: self.log.error('Could not update the current S3Bucket list for account {}/{}'.format(self.account.account_name, e)) db.session.rollback() finally: del s3, s3c </DeepExtract> if self.cloudfront_collection_enabled: <DeepExtract> self.log.debug('Updating CloudFront distributions for {}'.format(self.account.account_name)) cfr = self.session.client('cloudfront') try: existing_dists = CloudFrontDist.get_all(self.account, None) dists = [] done = False marker = None while not done: if marker: response = cfr.list_distributions(Marker=marker) else: response = cfr.list_distributions() dl = response['DistributionList'] if dl['IsTruncated']: marker = dl['NextMarker'] else: done = True if 'Items' in dl: for dist in dl['Items']: origins = [] for origin in dist['Origins']['Items']: if 
'S3OriginConfig' in origin: origins.append({'type': 's3', 'source': origin['DomainName']}) elif 'CustomOriginConfig' in origin: origins.append({'type': 'custom-http', 'source': origin['DomainName']}) data = {'arn': dist['ARN'], 'name': dist['DomainName'], 'origins': origins, 'enabled': dist['Enabled'], 'type': 'web', 'tags': self.__get_distribution_tags(cfr, dist['ARN'])} dists.append(data) done = False marker = None while not done: if marker: response = cfr.list_streaming_distributions(Marker=marker) else: response = cfr.list_streaming_distributions() dl = response['StreamingDistributionList'] if dl['IsTruncated']: marker = dl['NextMarker'] else: done = True if 'Items' in dl: dists += [{'arn': x['ARN'], 'name': x['DomainName'], 'origins': [{'type': 's3', 'source': x['S3Origin']['DomainName']}], 'enabled': x['Enabled'], 'type': 'rtmp', 'tags': self.__get_distribution_tags(cfr, x['ARN'])} for x in dl['Items']] for data in dists: if data['arn'] in existing_dists: dist = existing_dists[data['arn']] if dist.update(data): self.log.debug('Updated CloudFrontDist {}/{}'.format(self.account.account_name, data['name'])) dist.save() else: properties = {'domain_name': data['name'], 'origins': data['origins'], 'enabled': data['enabled'], 'type': data['type']} CloudFrontDist.create(data['arn'], account_id=self.account.account_id, properties=properties, tags=data['tags']) self.log.debug('Added new CloudFrontDist {}/{}'.format(self.account.account_name, data['name'])) db.session.commit() dk = set((x['arn'] for x in dists)) edk = set(existing_dists.keys()) try: for resource_id in edk - dk: db.session.delete(existing_dists[resource_id].resource) self.log.debug('Deleted CloudFrontDist {}/{}'.format(resource_id, self.account.account_name)) db.session.commit() except: db.session.rollback() finally: del cfr </DeepExtract> if self.route53_collection_enabled: <DeepExtract> self.log.debug('Updating Route53 information for {}'.format(self.account)) existing_zones = DNSZone.get_all(self.account) zones = self.__fetch_route53_zones() for (resource_id, data) in zones.items(): if resource_id in existing_zones: zone = DNSZone.get(resource_id) if zone.update(data): self.log.debug('Change detected for Route53 zone {}/{}'.format(self.account, zone.name)) zone.save() else: tags = data.pop('tags') DNSZone.create(resource_id, account_id=self.account.account_id, properties=data, tags=tags) self.log.debug('Added Route53 zone {}/{}'.format(self.account, data['name'])) db.session.commit() zk = set(zones.keys()) ezk = set(existing_zones.keys()) for resource_id in ezk - zk: zone = existing_zones[resource_id] db.session.delete(zone.resource) self.log.debug('Deleted Route53 zone {}/{}'.format(self.account.account_name, zone.name.value)) db.session.commit() try: for (zone_id, zone) in DNSZone.get_all(self.account).items(): existing_records = {rec.id: rec for rec in zone.records} records = self.__fetch_route53_zone_records(zone.get_property('zone_id').value) for data in records: if data['id'] in existing_records: record = existing_records[data['id']] if record.update(data): self.log.debug('Changed detected for DNSRecord {}/{}/{}'.format(self.account, zone.name, data['name'])) record.save() else: record = DNSRecord.create(data['id'], account_id=self.account.account_id, properties={k: v for (k, v) in data.items() if k != 'id'}, tags={}) self.log.debug('Added new DNSRecord {}/{}/{}'.format(self.account, zone.name, data['name'])) zone.add_record(record) db.session.commit() rk = set((x['id'] for x in records)) erk = 
set(existing_records.keys()) for resource_id in erk - rk: record = existing_records[resource_id] zone.delete_record(record) self.log.debug('Deleted Route53 record {}/{}/{}'.format(self.account.account_name, zone_id, record.name)) db.session.commit() except: raise </DeepExtract> except Exception as ex: self.log.exception(ex) raise finally: del self.session
def run(self): try: if self.s3_collection_enabled: self.log.debug('Updating S3Buckets for {}'.format(self.account.account_name)) s3 = self.session.resource('s3') s3c = self.session.client('s3') try: existing_buckets = S3Bucket.get_all(self.account) buckets = {bucket.name: bucket for bucket in s3.buckets.all()} for data in buckets.values(): try: bucket_region = s3c.get_bucket_location(Bucket=data.name)['LocationConstraint'] if not bucket_region: bucket_region = 'us-east-1' except ClientError as e: self.log.info('Could not get bucket location..bucket possibly removed / {}'.format(e)) bucket_region = 'unavailable' try: bucket_policy = data.Policy().policy except ClientError as e: if e.response['Error']['Code'] == 'NoSuchBucketPolicy': bucket_policy = None else: self.log.info('There was a problem collecting bucket policy for bucket {} on account {}, {}'.format(data.name, self.account, e.response)) bucket_policy = 'cinq cannot poll' try: website_enabled = 'Enabled' if data.Website().index_document else 'Disabled' except ClientError as e: if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration': website_enabled = 'Disabled' else: self.log.info('There was a problem collecting website config for bucket {} on account {}'.format(data.name, self.account)) website_enabled = 'cinq cannot poll' try: tags = {t['Key']: t['Value'] for t in data.Tagging().tag_set} except ClientError: tags = {} try: bucket_size = self._get_bucket_statistics(data.name, bucket_region, 'StandardStorage', 'BucketSizeBytes', 3) bucket_obj_count = self._get_bucket_statistics(data.name, bucket_region, 'AllStorageTypes', 'NumberOfObjects', 3) metrics = {'size': bucket_size, 'object_count': bucket_obj_count} except Exception as e: self.log.info('Could not retrieve bucket statistics / {}'.format(e)) metrics = {'found': False} properties = {'bucket_policy': bucket_policy, 'creation_date': data.creation_date, 'location': bucket_region, 'website_enabled': website_enabled, 'metrics': metrics, 'tags': tags} if data.name in existing_buckets: bucket = existing_buckets[data.name] if bucket.update(data, properties): self.log.debug('Change detected for S3Bucket {}/{}'.format(self.account.account_name, bucket.id)) bucket.save() else: S3Bucket.create(data.name, account_id=self.account.account_id, properties=properties, location=bucket_region, tags=tags) self.log.debug('Added new S3Bucket {}/{}'.format(self.account.account_name, data.name)) db.session.commit() bk = set(list(buckets.keys())) ebk = set(list(existing_buckets.keys())) try: for resource_id in ebk - bk: db.session.delete(existing_buckets[resource_id].resource) self.log.debug('Deleted S3Bucket {}/{}'.format(self.account.account_name, resource_id)) db.session.commit() except Exception as e: self.log.error('Could not update the current S3Bucket list for account {}/{}'.format(self.account.account_name, e)) db.session.rollback() finally: del s3, s3c if self.cloudfront_collection_enabled: self.log.debug('Updating CloudFront distributions for {}'.format(self.account.account_name)) cfr = self.session.client('cloudfront') try: existing_dists = CloudFrontDist.get_all(self.account, None) dists = [] done = False marker = None while not done: if marker: response = cfr.list_distributions(Marker=marker) else: response = cfr.list_distributions() dl = response['DistributionList'] if dl['IsTruncated']: marker = dl['NextMarker'] else: done = True if 'Items' in dl: for dist in dl['Items']: origins = [] for origin in dist['Origins']['Items']: if 'S3OriginConfig' in origin: origins.append({'type': 
's3', 'source': origin['DomainName']}) elif 'CustomOriginConfig' in origin: origins.append({'type': 'custom-http', 'source': origin['DomainName']}) data = {'arn': dist['ARN'], 'name': dist['DomainName'], 'origins': origins, 'enabled': dist['Enabled'], 'type': 'web', 'tags': self.__get_distribution_tags(cfr, dist['ARN'])} dists.append(data) done = False marker = None while not done: if marker: response = cfr.list_streaming_distributions(Marker=marker) else: response = cfr.list_streaming_distributions() dl = response['StreamingDistributionList'] if dl['IsTruncated']: marker = dl['NextMarker'] else: done = True if 'Items' in dl: dists += [{'arn': x['ARN'], 'name': x['DomainName'], 'origins': [{'type': 's3', 'source': x['S3Origin']['DomainName']}], 'enabled': x['Enabled'], 'type': 'rtmp', 'tags': self.__get_distribution_tags(cfr, x['ARN'])} for x in dl['Items']] for data in dists: if data['arn'] in existing_dists: dist = existing_dists[data['arn']] if dist.update(data): self.log.debug('Updated CloudFrontDist {}/{}'.format(self.account.account_name, data['name'])) dist.save() else: properties = {'domain_name': data['name'], 'origins': data['origins'], 'enabled': data['enabled'], 'type': data['type']} CloudFrontDist.create(data['arn'], account_id=self.account.account_id, properties=properties, tags=data['tags']) self.log.debug('Added new CloudFrontDist {}/{}'.format(self.account.account_name, data['name'])) db.session.commit() dk = set((x['arn'] for x in dists)) edk = set(existing_dists.keys()) try: for resource_id in edk - dk: db.session.delete(existing_dists[resource_id].resource) self.log.debug('Deleted CloudFrontDist {}/{}'.format(resource_id, self.account.account_name)) db.session.commit() except: db.session.rollback() finally: del cfr if self.route53_collection_enabled: self.log.debug('Updating Route53 information for {}'.format(self.account)) existing_zones = DNSZone.get_all(self.account) zones = self.__fetch_route53_zones() for (resource_id, data) in zones.items(): if resource_id in existing_zones: zone = DNSZone.get(resource_id) if zone.update(data): self.log.debug('Change detected for Route53 zone {}/{}'.format(self.account, zone.name)) zone.save() else: tags = data.pop('tags') DNSZone.create(resource_id, account_id=self.account.account_id, properties=data, tags=tags) self.log.debug('Added Route53 zone {}/{}'.format(self.account, data['name'])) db.session.commit() zk = set(zones.keys()) ezk = set(existing_zones.keys()) for resource_id in ezk - zk: zone = existing_zones[resource_id] db.session.delete(zone.resource) self.log.debug('Deleted Route53 zone {}/{}'.format(self.account.account_name, zone.name.value)) db.session.commit() try: for (zone_id, zone) in DNSZone.get_all(self.account).items(): existing_records = {rec.id: rec for rec in zone.records} records = self.__fetch_route53_zone_records(zone.get_property('zone_id').value) for data in records: if data['id'] in existing_records: record = existing_records[data['id']] if record.update(data): self.log.debug('Changed detected for DNSRecord {}/{}/{}'.format(self.account, zone.name, data['name'])) record.save() else: record = DNSRecord.create(data['id'], account_id=self.account.account_id, properties={k: v for (k, v) in data.items() if k != 'id'}, tags={}) self.log.debug('Added new DNSRecord {}/{}/{}'.format(self.account, zone.name, data['name'])) zone.add_record(record) db.session.commit() rk = set((x['id'] for x in records)) erk = set(existing_records.keys()) for resource_id in erk - rk: record = existing_records[resource_id] 
zone.delete_record(record) self.log.debug('Deleted Route53 record {}/{}/{}'.format(self.account.account_name, zone_id, record.name)) db.session.commit() except: raise except Exception as ex: self.log.exception(ex) raise finally: del self.session
cloud-inquisitor
positive
def test_must_execute_actions_in_sequence(self):
    <DeepExtract>
    self.validator_mock = Mock()
    self.validator_mock.validate = Mock()
    self.validator_mock.validate.return_value = '/usr/bin/binary'
    self.resolver_mock = Mock()
    self.resolver_mock.exec_paths = ['/usr/bin/binary']
    self.binaries_mock = Mock()
    self.binaries_mock.return_value = []
    self.work.get_validators = lambda : self.validator_mock
    self.work.get_resolvers = lambda : self.resolver_mock
    self.work.binaries = {'binary': BinaryPath(resolver=self.resolver_mock, validator=self.validator_mock, binary='binary')}
    </DeepExtract>
    action_mock = Mock()
    self.work.actions = [action_mock.action1, action_mock.action2, action_mock.action3]
    self.work.run()
    self.assertEqual(action_mock.method_calls, [call.action1.execute(), call.action2.execute(), call.action3.execute()])
    self.assertTrue(self.validator_mock.validate.call_count, 1)
def test_must_execute_actions_in_sequence(self):
    self.validator_mock = Mock()
    self.validator_mock.validate = Mock()
    self.validator_mock.validate.return_value = '/usr/bin/binary'
    self.resolver_mock = Mock()
    self.resolver_mock.exec_paths = ['/usr/bin/binary']
    self.binaries_mock = Mock()
    self.binaries_mock.return_value = []
    self.work.get_validators = lambda : self.validator_mock
    self.work.get_resolvers = lambda : self.resolver_mock
    self.work.binaries = {'binary': BinaryPath(resolver=self.resolver_mock, validator=self.validator_mock, binary='binary')}
    action_mock = Mock()
    self.work.actions = [action_mock.action1, action_mock.action2, action_mock.action3]
    self.work.run()
    self.assertEqual(action_mock.method_calls, [call.action1.execute(), call.action2.execute(), call.action3.execute()])
    self.assertTrue(self.validator_mock.validate.call_count, 1)
aws-lambda-builders
positive