| before (string, length 0–955k) | after (string, length 0–877k) | repo (string, length 1–74) | type (string, 1 class) |
|---|---|---|---|
def test_custom_logger_is_used_to_log_error(schema, mocker):
logging_mock = mocker.patch('ariadne.logger.logging')
app = GraphQL(schema, logger='custom')
<DeepExtract>
data = json.dumps({'query': '{ error }'})
app({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': DATA_TYPE_JSON, 'CONTENT_LENGTH': len(data), 'wsgi.input': BytesIO(data.encode('utf-8'))}, Mock())
</DeepExtract>
logging_mock.getLogger.assert_called_once_with('custom')
|
def test_custom_logger_is_used_to_log_error(schema, mocker):
logging_mock = mocker.patch('ariadne.logger.logging')
app = GraphQL(schema, logger='custom')
data = json.dumps({'query': '{ error }'})
app({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': DATA_TYPE_JSON, 'CONTENT_LENGTH': len(data), 'wsgi.input': BytesIO(data.encode('utf-8'))}, Mock())
logging_mock.getLogger.assert_called_once_with('custom')
|
ariadne
|
positive
|
def read_header(header: str, hiermod: str) -> Dict[str, List[var]]:
hier = []
varmap = defaultdict(list)
assert header[-len(ENDDEF):] == ENDDEF, 'expected {} but got {}'.format(ENDDEF, header[-len(ENDDEF):])
for line in header.split('\n'):
if '$var' in line:
(_, _type, size, code, *rest) = line.split()
size = int(size)
name = ''.join(rest[:-1])
name = '{}.{}'.format('.'.join(hier), name)
name = name.replace(hiermod + '.', '', 1)
<DeepExtract>
m = re.match(VAR_EXTRACT_PATTERN, name)
if not m:
name = name
else:
groupdict = m.groupdict()
name = groupdict['name']
idx_high = groupdict['idx_high']
idx_low = groupdict['idx_low']
try:
idx_high = int(idx_high)
idx_low = int(idx_low)
except:
raise RuntimeError('Expecting integers in extract but got {}, {}'.format(idx_high, idx_low))
if idx_low == 0 and idx_high == size - 1:
name = name
else:
name = name
</DeepExtract>
for m in find_extracts(name):
name = name.replace(m, swap_indices(m))
for m in find_mems(name):
assert len(m) == 2
(mem, idx) = m
memsel = mem + idx
name = name.replace(memsel, memacc(mem, idx))
varmap[code].append(var(_type, name, size, '.'.join(hier)))
elif '$scope' in line:
hier.append(line.split()[2])
elif '$upscope' in line:
hier.pop()
return varmap
|
def read_header(header: str, hiermod: str) -> Dict[str, List[var]]:
hier = []
varmap = defaultdict(list)
assert header[-len(ENDDEF):] == ENDDEF, 'expected {} but got {}'.format(ENDDEF, header[-len(ENDDEF):])
for line in header.split('\n'):
if '$var' in line:
(_, _type, size, code, *rest) = line.split()
size = int(size)
name = ''.join(rest[:-1])
name = '{}.{}'.format('.'.join(hier), name)
name = name.replace(hiermod + '.', '', 1)
m = re.match(VAR_EXTRACT_PATTERN, name)
if not m:
name = name
else:
groupdict = m.groupdict()
name = groupdict['name']
idx_high = groupdict['idx_high']
idx_low = groupdict['idx_low']
try:
idx_high = int(idx_high)
idx_low = int(idx_low)
except:
raise RuntimeError('Expecting integers in extract but got {}, {}'.format(idx_high, idx_low))
if idx_low == 0 and idx_high == size - 1:
name = name
else:
name = name
for m in find_extracts(name):
name = name.replace(m, swap_indices(m))
for m in find_mems(name):
assert len(m) == 2
(mem, idx) = m
memsel = mem + idx
name = name.replace(memsel, memacc(mem, idx))
varmap[code].append(var(_type, name, size, '.'.join(hier)))
elif '$scope' in line:
hier.append(line.split()[2])
elif '$upscope' in line:
hier.pop()
return varmap
|
CoSA
|
positive
|
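The CoSA pair above walks a VCD header, tracking `$scope`/`$upscope` nesting to build hierarchical signal names from `$var` lines. A minimal, self-contained sketch of that idea with a toy header and simplified name handling (not the repo's actual helpers such as `find_extracts` or `memacc`):

```python
from collections import defaultdict

# Toy VCD header; the real parser also rewrites bit ranges and memory accesses.
header = '\n'.join([
    '$scope module top $end',
    '$scope module dut $end',
    '$var wire 8 ! data [7:0] $end',
    '$var wire 1 " clk $end',
    '$upscope $end',
    '$upscope $end',
    '$enddefinitions $end',
])

hier = []
varmap = defaultdict(list)
for line in header.split('\n'):
    if '$var' in line:
        _, _type, size, code, *rest = line.split()
        name = '.'.join(hier + [''.join(rest[:-1])])   # drop the trailing '$end'
        varmap[code].append((_type, name, int(size)))
    elif '$scope' in line:
        hier.append(line.split()[2])
    elif '$upscope' in line:
        hier.pop()

print(dict(varmap))
# {'!': [('wire', 'top.dut.data[7:0]', 8)], '"': [('wire', 'top.dut.clk', 1)]}
```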
def __init__(self, scope: Construct, id: str, context: 'FoundationContext', ssl_cert_arn: str, **kwargs: Any) -> None:
self.env_name = context.name
self.context = context
self.ssl_cert_arn = ssl_cert_arn
super().__init__(scope, id, **kwargs)
Tags.of(scope=cast(core.IConstruct, self)).add(key='Env', value=f'orbit-{self.env_name}')
self.vpc: ec2.Vpc = self._create_vpc(context)
self.public_subnets = self.vpc.select_subnets(subnet_type=ec2.SubnetType.PUBLIC) if self.vpc.public_subnets else self.vpc.select_subnets(subnet_name='')
self.private_subnets = self.vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE) if self.vpc.private_subnets else self.vpc.select_subnets(subnet_name='')
if not context.networking.data.internet_accessible:
self.isolated_subnets = self.vpc.select_subnets(subnet_type=ec2.SubnetType.ISOLATED) if self.vpc.isolated_subnets else self.vpc.select_subnets(subnet_name='')
self.nodes_subnets = self.isolated_subnets
else:
self.nodes_subnets = self.private_subnets
self._vpc_security_group = ec2.SecurityGroup(self, 'vpc-sg', vpc=cast(ec2.IVpc, self.vpc), allow_all_outbound=False)
self._vpc_security_group.add_ingress_rule(peer=ec2.Peer.ipv4(self.vpc.vpc_cidr_block), connection=ec2.Port.all_tcp())
if not context.networking.data.internet_accessible:
<DeepExtract>
vpc_gateway_endpoints = {'s3': ec2.GatewayVpcEndpointAwsService.S3, 'dynamodb': ec2.GatewayVpcEndpointAwsService.DYNAMODB}
vpc_interface_endpoints = {'cloudwatch_endpoint': ec2.InterfaceVpcEndpointAwsService.CLOUDWATCH, 'cloudwatch_logs_endpoint': ec2.InterfaceVpcEndpointAwsService.CLOUDWATCH_LOGS, 'cloudwatch_events': ec2.InterfaceVpcEndpointAwsService.CLOUDWATCH_EVENTS, 'ecr_docker_endpoint': ec2.InterfaceVpcEndpointAwsService.ECR_DOCKER, 'ecr_endpoint': ec2.InterfaceVpcEndpointAwsService.ECR, 'ec2_endpoint': ec2.InterfaceVpcEndpointAwsService.EC2, 'ecs': ec2.InterfaceVpcEndpointAwsService.ECS, 'ecs_agent': ec2.InterfaceVpcEndpointAwsService.ECS_AGENT, 'ecs_telemetry': ec2.InterfaceVpcEndpointAwsService.ECS_TELEMETRY, 'git_endpoint': ec2.InterfaceVpcEndpointAwsService.CODECOMMIT_GIT, 'ssm_endpoint': ec2.InterfaceVpcEndpointAwsService.SSM, 'ssm_messages_endpoint': ec2.InterfaceVpcEndpointAwsService.SSM_MESSAGES, 'secrets_endpoint': ec2.InterfaceVpcEndpointAwsService.SECRETS_MANAGER, 'kms_endpoint': ec2.InterfaceVpcEndpointAwsService.KMS, 'sagemaker_endpoint': ec2.InterfaceVpcEndpointAwsService.SAGEMAKER_API, 'sagemaker_runtime': ec2.InterfaceVpcEndpointAwsService.SAGEMAKER_RUNTIME, 'notebook_endpoint': ec2.InterfaceVpcEndpointAwsService.SAGEMAKER_NOTEBOOK, 'athena_endpoint': ec2.InterfaceVpcEndpointAwsService('athena'), 'glue_endpoint': ec2.InterfaceVpcEndpointAwsService('glue'), 'sqs': ec2.InterfaceVpcEndpointAwsService.SQS, 'step_function_endpoint': ec2.InterfaceVpcEndpointAwsService('states'), 'sns_endpoint': ec2.InterfaceVpcEndpointAwsService.SNS, 'kinesis_firehose_endpoint': ec2.InterfaceVpcEndpointAwsService('kinesis-firehose'), 'api_gateway': ec2.InterfaceVpcEndpointAwsService.APIGATEWAY, 'sts_endpoint': ec2.InterfaceVpcEndpointAwsService.STS, 'efs': ec2.InterfaceVpcEndpointAwsService.ELASTIC_FILESYSTEM, 'elb': ec2.InterfaceVpcEndpointAwsService.ELASTIC_LOAD_BALANCING, 'autoscaling': ec2.InterfaceVpcEndpointAwsService('autoscaling'), 'cloudformation_endpoint': ec2.InterfaceVpcEndpointAwsService('cloudformation'), 'codebuild_endpoint': ec2.InterfaceVpcEndpointAwsService('codebuild'), 'emr-containers': ec2.InterfaceVpcEndpointAwsService('emr-containers'), 'databrew': ec2.InterfaceVpcEndpointAwsService('databrew')}
for (name, gateway_vpc_endpoint_service) in vpc_gateway_endpoints.items():
self.vpc.add_gateway_endpoint(id=name, service=gateway_vpc_endpoint_service, subnets=[ec2.SubnetSelection(subnets=self.nodes_subnets.subnets)])
for (name, interface_service) in vpc_interface_endpoints.items():
self.vpc.add_interface_endpoint(id=name, service=interface_service, subnets=ec2.SubnetSelection(subnets=self.nodes_subnets.subnets), private_dns_enabled=True, security_groups=[cast(ec2.ISecurityGroup, self._vpc_security_group)])
self.vpc.add_interface_endpoint(id='code_artifact_repo_endpoint', service=cast(ec2.IInterfaceVpcEndpointService, ec2.InterfaceVpcEndpointAwsService('codeartifact.repositories')), subnets=ec2.SubnetSelection(subnets=self.nodes_subnets.subnets), private_dns_enabled=False, security_groups=[cast(ec2.ISecurityGroup, self._vpc_security_group)])
self.vpc.add_interface_endpoint(id='code_artifact_api_endpoint', service=cast(ec2.IInterfaceVpcEndpointService, ec2.InterfaceVpcEndpointAwsService('codeartifact.api')), subnets=ec2.SubnetSelection(subnets=self.nodes_subnets.subnets), private_dns_enabled=False, security_groups=[cast(ec2.ISecurityGroup, self._vpc_security_group)])
endpoint_url_template = 'com.amazonaws.{}.{}'
ec2.CfnVPCEndpoint(self, 'redshift_endpoint', vpc_endpoint_type='Interface', service_name=endpoint_url_template.format(self.region, 'redshift'), vpc_id=self.vpc.vpc_id, security_group_ids=[self._vpc_security_group.security_group_id], subnet_ids=self.nodes_subnets.subnet_ids, private_dns_enabled=True)
ec2.CfnVPCEndpoint(self, 'lambda_endpoint', vpc_endpoint_type='Interface', service_name=endpoint_url_template.format(self.region, 'lambda'), vpc_id=self.vpc.vpc_id, security_group_ids=[self._vpc_security_group.security_group_id], subnet_ids=self.nodes_subnets.subnet_ids, private_dns_enabled=True)
</DeepExtract>
if context.toolkit.s3_bucket is None:
raise ValueError('context.toolkit_s3_bucket is not defined')
toolkit_s3_bucket_name: str = context.toolkit.s3_bucket
acct: str = core.Aws.ACCOUNT_ID
self.bucket_names: Dict[str, Any] = {'scratch-bucket': f'orbit-f-{self.env_name}-scratch-{acct}-{context.toolkit.deploy_id}', 'toolkit-bucket': toolkit_s3_bucket_name}
<DeepExtract>
administrator_arns: List[str] = []
admin_principals = iam.CompositePrincipal(*[iam.ArnPrincipal(arn) for arn in administrator_arns], iam.ArnPrincipal(f'arn:aws:iam::{self.context.account_id}:root'))
self.env_kms_key: kms.Key = kms.Key(self, id='kms-key', removal_policy=core.RemovalPolicy.RETAIN, enabled=True, enable_key_rotation=True, policy=iam.PolicyDocument(statements=[iam.PolicyStatement(effect=iam.Effect.ALLOW, actions=['kms:*'], resources=['*'], principals=[cast(iam.IPrincipal, admin_principals)])]))
</DeepExtract>
self.scratch_bucket: s3.Bucket = S3Builder.build_s3_bucket(scope=self, id='scratch_bucket', name=self.bucket_names['scratch-bucket'], scratch_retention_days=30, kms_key=self.env_kms_key)
self.efs_fs = EfsBuilder.build_file_system(scope=self, name=self.env_name, efs_life_cycle='AFTER_7_DAYS', vpc=cast(ec2.IVpc, self.vpc), efs_security_group=cast(ec2.ISecurityGroup, self._vpc_security_group), subnets=self.nodes_subnets.subnets, team_kms_key=self.env_kms_key)
self.user_pool: cognito.UserPool = self._create_user_pool()
self.domain_name = self.context.toolkit.codeartifact_domain
self.repository_name = self.context.toolkit.codeartifact_repo
self._ssm_parameter = ssm.StringParameter(self, id='/orbit/DemoParams', string_value=json.dumps({'VpcId': self.vpc.vpc_id, 'PublicSubnets': self.public_subnets.subnet_ids, 'PrivateSubnets': self.private_subnets.subnet_ids, 'IsolatedSubnets': self.isolated_subnets.subnet_ids if not context.networking.data.internet_accessible else [], 'NodesSubnets': self.nodes_subnets.subnet_ids, 'LoadBalancersSubnets': self.public_subnets.subnet_ids, 'KMSKey': self.env_kms_key.key_arn, 'SharedEfsFsId': self.efs_fs.file_system_id, 'ScratchBucketArn': self.scratch_bucket.bucket_arn, 'ScratchBucketName': self.scratch_bucket.bucket_name, 'UserPoolId': self.user_pool.user_pool_id, 'SharedEfsSgId': self._vpc_security_group.security_group_id, 'UserPoolProviderName': self.user_pool.user_pool_provider_name, 'SslCertArn': self.ssl_cert_arn, 'CodeartifactDomain': self.domain_name, 'CodeartifactRepository': self.repository_name}), type=ssm.ParameterType.STRING, description='Orbit Workbench Demo resources.', parameter_name=context.resources_ssm_parameter_name, simple_name=False, tier=ssm.ParameterTier.INTELLIGENT_TIERING)
CfnOutput(scope=self, id=f'{id}vpcid', export_name=f'orbit-f-{self.env_name}-vpc-id', value=self.vpc.vpc_id)
CfnOutput(scope=self, id=f'{id}publicsubnetsids', export_name=f'orbit-f-{self.env_name}-public-subnet-ids', value=','.join(self.public_subnets.subnet_ids))
CfnOutput(scope=self, id=f'{id}privatesubnetsids', export_name=f'orbit-f-{self.env_name}-private-subnet-ids', value=','.join(self.private_subnets.subnet_ids))
if not context.networking.data.internet_accessible:
CfnOutput(scope=self, id=f'{id}isolatedsubnetsids', export_name=f'orbit-f-{self.env_name}-isolated-subnet-ids', value=','.join(self.isolated_subnets.subnet_ids))
CfnOutput(scope=self, id=f'{id}nodesubnetsids', export_name=f'orbit-f-{self.env_name}-nodes-subnet-ids', value=','.join(self.nodes_subnets.subnet_ids))
|
def __init__(self, scope: Construct, id: str, context: 'FoundationContext', ssl_cert_arn: str, **kwargs: Any) -> None:
self.env_name = context.name
self.context = context
self.ssl_cert_arn = ssl_cert_arn
super().__init__(scope, id, **kwargs)
Tags.of(scope=cast(core.IConstruct, self)).add(key='Env', value=f'orbit-{self.env_name}')
self.vpc: ec2.Vpc = self._create_vpc(context)
self.public_subnets = self.vpc.select_subnets(subnet_type=ec2.SubnetType.PUBLIC) if self.vpc.public_subnets else self.vpc.select_subnets(subnet_name='')
self.private_subnets = self.vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE) if self.vpc.private_subnets else self.vpc.select_subnets(subnet_name='')
if not context.networking.data.internet_accessible:
self.isolated_subnets = self.vpc.select_subnets(subnet_type=ec2.SubnetType.ISOLATED) if self.vpc.isolated_subnets else self.vpc.select_subnets(subnet_name='')
self.nodes_subnets = self.isolated_subnets
else:
self.nodes_subnets = self.private_subnets
self._vpc_security_group = ec2.SecurityGroup(self, 'vpc-sg', vpc=cast(ec2.IVpc, self.vpc), allow_all_outbound=False)
self._vpc_security_group.add_ingress_rule(peer=ec2.Peer.ipv4(self.vpc.vpc_cidr_block), connection=ec2.Port.all_tcp())
if not context.networking.data.internet_accessible:
vpc_gateway_endpoints = {'s3': ec2.GatewayVpcEndpointAwsService.S3, 'dynamodb': ec2.GatewayVpcEndpointAwsService.DYNAMODB}
vpc_interface_endpoints = {'cloudwatch_endpoint': ec2.InterfaceVpcEndpointAwsService.CLOUDWATCH, 'cloudwatch_logs_endpoint': ec2.InterfaceVpcEndpointAwsService.CLOUDWATCH_LOGS, 'cloudwatch_events': ec2.InterfaceVpcEndpointAwsService.CLOUDWATCH_EVENTS, 'ecr_docker_endpoint': ec2.InterfaceVpcEndpointAwsService.ECR_DOCKER, 'ecr_endpoint': ec2.InterfaceVpcEndpointAwsService.ECR, 'ec2_endpoint': ec2.InterfaceVpcEndpointAwsService.EC2, 'ecs': ec2.InterfaceVpcEndpointAwsService.ECS, 'ecs_agent': ec2.InterfaceVpcEndpointAwsService.ECS_AGENT, 'ecs_telemetry': ec2.InterfaceVpcEndpointAwsService.ECS_TELEMETRY, 'git_endpoint': ec2.InterfaceVpcEndpointAwsService.CODECOMMIT_GIT, 'ssm_endpoint': ec2.InterfaceVpcEndpointAwsService.SSM, 'ssm_messages_endpoint': ec2.InterfaceVpcEndpointAwsService.SSM_MESSAGES, 'secrets_endpoint': ec2.InterfaceVpcEndpointAwsService.SECRETS_MANAGER, 'kms_endpoint': ec2.InterfaceVpcEndpointAwsService.KMS, 'sagemaker_endpoint': ec2.InterfaceVpcEndpointAwsService.SAGEMAKER_API, 'sagemaker_runtime': ec2.InterfaceVpcEndpointAwsService.SAGEMAKER_RUNTIME, 'notebook_endpoint': ec2.InterfaceVpcEndpointAwsService.SAGEMAKER_NOTEBOOK, 'athena_endpoint': ec2.InterfaceVpcEndpointAwsService('athena'), 'glue_endpoint': ec2.InterfaceVpcEndpointAwsService('glue'), 'sqs': ec2.InterfaceVpcEndpointAwsService.SQS, 'step_function_endpoint': ec2.InterfaceVpcEndpointAwsService('states'), 'sns_endpoint': ec2.InterfaceVpcEndpointAwsService.SNS, 'kinesis_firehose_endpoint': ec2.InterfaceVpcEndpointAwsService('kinesis-firehose'), 'api_gateway': ec2.InterfaceVpcEndpointAwsService.APIGATEWAY, 'sts_endpoint': ec2.InterfaceVpcEndpointAwsService.STS, 'efs': ec2.InterfaceVpcEndpointAwsService.ELASTIC_FILESYSTEM, 'elb': ec2.InterfaceVpcEndpointAwsService.ELASTIC_LOAD_BALANCING, 'autoscaling': ec2.InterfaceVpcEndpointAwsService('autoscaling'), 'cloudformation_endpoint': ec2.InterfaceVpcEndpointAwsService('cloudformation'), 'codebuild_endpoint': ec2.InterfaceVpcEndpointAwsService('codebuild'), 'emr-containers': ec2.InterfaceVpcEndpointAwsService('emr-containers'), 'databrew': ec2.InterfaceVpcEndpointAwsService('databrew')}
for (name, gateway_vpc_endpoint_service) in vpc_gateway_endpoints.items():
self.vpc.add_gateway_endpoint(id=name, service=gateway_vpc_endpoint_service, subnets=[ec2.SubnetSelection(subnets=self.nodes_subnets.subnets)])
for (name, interface_service) in vpc_interface_endpoints.items():
self.vpc.add_interface_endpoint(id=name, service=interface_service, subnets=ec2.SubnetSelection(subnets=self.nodes_subnets.subnets), private_dns_enabled=True, security_groups=[cast(ec2.ISecurityGroup, self._vpc_security_group)])
self.vpc.add_interface_endpoint(id='code_artifact_repo_endpoint', service=cast(ec2.IInterfaceVpcEndpointService, ec2.InterfaceVpcEndpointAwsService('codeartifact.repositories')), subnets=ec2.SubnetSelection(subnets=self.nodes_subnets.subnets), private_dns_enabled=False, security_groups=[cast(ec2.ISecurityGroup, self._vpc_security_group)])
self.vpc.add_interface_endpoint(id='code_artifact_api_endpoint', service=cast(ec2.IInterfaceVpcEndpointService, ec2.InterfaceVpcEndpointAwsService('codeartifact.api')), subnets=ec2.SubnetSelection(subnets=self.nodes_subnets.subnets), private_dns_enabled=False, security_groups=[cast(ec2.ISecurityGroup, self._vpc_security_group)])
endpoint_url_template = 'com.amazonaws.{}.{}'
ec2.CfnVPCEndpoint(self, 'redshift_endpoint', vpc_endpoint_type='Interface', service_name=endpoint_url_template.format(self.region, 'redshift'), vpc_id=self.vpc.vpc_id, security_group_ids=[self._vpc_security_group.security_group_id], subnet_ids=self.nodes_subnets.subnet_ids, private_dns_enabled=True)
ec2.CfnVPCEndpoint(self, 'lambda_endpoint', vpc_endpoint_type='Interface', service_name=endpoint_url_template.format(self.region, 'lambda'), vpc_id=self.vpc.vpc_id, security_group_ids=[self._vpc_security_group.security_group_id], subnet_ids=self.nodes_subnets.subnet_ids, private_dns_enabled=True)
if context.toolkit.s3_bucket is None:
raise ValueError('context.toolkit_s3_bucket is not defined')
toolkit_s3_bucket_name: str = context.toolkit.s3_bucket
acct: str = core.Aws.ACCOUNT_ID
self.bucket_names: Dict[str, Any] = {'scratch-bucket': f'orbit-f-{self.env_name}-scratch-{acct}-{context.toolkit.deploy_id}', 'toolkit-bucket': toolkit_s3_bucket_name}
administrator_arns: List[str] = []
admin_principals = iam.CompositePrincipal(*[iam.ArnPrincipal(arn) for arn in administrator_arns], iam.ArnPrincipal(f'arn:aws:iam::{self.context.account_id}:root'))
self.env_kms_key: kms.Key = kms.Key(self, id='kms-key', removal_policy=core.RemovalPolicy.RETAIN, enabled=True, enable_key_rotation=True, policy=iam.PolicyDocument(statements=[iam.PolicyStatement(effect=iam.Effect.ALLOW, actions=['kms:*'], resources=['*'], principals=[cast(iam.IPrincipal, admin_principals)])]))
self.scratch_bucket: s3.Bucket = S3Builder.build_s3_bucket(scope=self, id='scratch_bucket', name=self.bucket_names['scratch-bucket'], scratch_retention_days=30, kms_key=self.env_kms_key)
self.efs_fs = EfsBuilder.build_file_system(scope=self, name=self.env_name, efs_life_cycle='AFTER_7_DAYS', vpc=cast(ec2.IVpc, self.vpc), efs_security_group=cast(ec2.ISecurityGroup, self._vpc_security_group), subnets=self.nodes_subnets.subnets, team_kms_key=self.env_kms_key)
self.user_pool: cognito.UserPool = self._create_user_pool()
self.domain_name = self.context.toolkit.codeartifact_domain
self.repository_name = self.context.toolkit.codeartifact_repo
self._ssm_parameter = ssm.StringParameter(self, id='/orbit/DemoParams', string_value=json.dumps({'VpcId': self.vpc.vpc_id, 'PublicSubnets': self.public_subnets.subnet_ids, 'PrivateSubnets': self.private_subnets.subnet_ids, 'IsolatedSubnets': self.isolated_subnets.subnet_ids if not context.networking.data.internet_accessible else [], 'NodesSubnets': self.nodes_subnets.subnet_ids, 'LoadBalancersSubnets': self.public_subnets.subnet_ids, 'KMSKey': self.env_kms_key.key_arn, 'SharedEfsFsId': self.efs_fs.file_system_id, 'ScratchBucketArn': self.scratch_bucket.bucket_arn, 'ScratchBucketName': self.scratch_bucket.bucket_name, 'UserPoolId': self.user_pool.user_pool_id, 'SharedEfsSgId': self._vpc_security_group.security_group_id, 'UserPoolProviderName': self.user_pool.user_pool_provider_name, 'SslCertArn': self.ssl_cert_arn, 'CodeartifactDomain': self.domain_name, 'CodeartifactRepository': self.repository_name}), type=ssm.ParameterType.STRING, description='Orbit Workbench Demo resources.', parameter_name=context.resources_ssm_parameter_name, simple_name=False, tier=ssm.ParameterTier.INTELLIGENT_TIERING)
CfnOutput(scope=self, id=f'{id}vpcid', export_name=f'orbit-f-{self.env_name}-vpc-id', value=self.vpc.vpc_id)
CfnOutput(scope=self, id=f'{id}publicsubnetsids', export_name=f'orbit-f-{self.env_name}-public-subnet-ids', value=','.join(self.public_subnets.subnet_ids))
CfnOutput(scope=self, id=f'{id}privatesubnetsids', export_name=f'orbit-f-{self.env_name}-private-subnet-ids', value=','.join(self.private_subnets.subnet_ids))
if not context.networking.data.internet_accessible:
CfnOutput(scope=self, id=f'{id}isolatedsubnetsids', export_name=f'orbit-f-{self.env_name}-isolated-subnet-ids', value=','.join(self.isolated_subnets.subnet_ids))
CfnOutput(scope=self, id=f'{id}nodesubnetsids', export_name=f'orbit-f-{self.env_name}-nodes-subnet-ids', value=','.join(self.nodes_subnets.subnet_ids))
|
aws-orbit-workbench
|
positive
|
def fhook(self, ops, vm, line_num):
<DeepExtract>
check_num_args(self.name, ops, 3, line_num)
check_immediate_three(self.name, ops, line_num)
ops[0].set_val(check_overflow(opfunc.or_(ops[1].get_val(line_num), ops[2].get_val(line_num)), vm), line_num)
vm.changes.add(ops[0].get_nm())
</DeepExtract>
return ''
|
def fhook(self, ops, vm, line_num):
check_num_args(self.name, ops, 3, line_num)
check_immediate_three(self.name, ops, line_num)
ops[0].set_val(check_overflow(opfunc.or_(ops[1].get_val(line_num), ops[2].get_val(line_num)), vm), line_num)
vm.changes.add(ops[0].get_nm())
return ''
|
Emu86
|
positive
|
def store_sparse_pattern(models: OrderedDict):
"""
Collect and store the sparsity pattern of Jacobian matrices.
This is a runtime function specific to cases.
Notes
-----
For `gy` matrix, always make sure the diagonal is reserved.
It is a safeguard if the modeling user omitted the diagonal
term in the equations.
"""
<DeepExtract>
ret = OrderedDict()
for (name, mdl) in models.items():
ret[name] = getattr(mdl, 'store_sparse_pattern')(*args, **kwargs)
if self.config.save_stats:
if 'store_sparse_pattern' not in self.call_stats[name]:
self.call_stats[name]['store_sparse_pattern'] = 1
else:
self.call_stats[name]['store_sparse_pattern'] += 1
return ret
</DeepExtract>
for jname in jac_names:
(ii, jj, vv) = (list(), list(), list())
if jname == 'gy':
ii.extend(np.arange(self.dae.m))
jj.extend(np.arange(self.dae.m))
vv.extend(np.zeros(self.dae.m))
for mdl in models.values():
for (row, col, val) in mdl.triplets.zip_ijv(jname):
ii.extend(row)
jj.extend(col)
vv.extend(np.zeros_like(row))
for (row, col, val) in mdl.triplets.zip_ijv(jname + 'c'):
ii.extend(row)
jj.extend(col)
vv.extend(val * np.ones_like(row))
if len(ii) > 0:
ii = np.array(ii, dtype=int)
jj = np.array(jj, dtype=int)
vv = np.array(vv, dtype=float)
self.dae.store_sparse_ijv(jname, ii, jj, vv)
self.dae.build_pattern(jname)
|
def store_sparse_pattern(models: OrderedDict):
"""
Collect and store the sparsity pattern of Jacobian matrices.
This is a runtime function specific to cases.
Notes
-----
For `gy` matrix, always make sure the diagonal is reserved.
It is a safeguard if the modeling user omitted the diagonal
term in the equations.
"""
ret = OrderedDict()
for (name, mdl) in models.items():
ret[name] = getattr(mdl, 'store_sparse_pattern')(*args, **kwargs)
if self.config.save_stats:
if 'store_sparse_pattern' not in self.call_stats[name]:
self.call_stats[name]['store_sparse_pattern'] = 1
else:
self.call_stats[name]['store_sparse_pattern'] += 1
return ret
for jname in jac_names:
(ii, jj, vv) = (list(), list(), list())
if jname == 'gy':
ii.extend(np.arange(self.dae.m))
jj.extend(np.arange(self.dae.m))
vv.extend(np.zeros(self.dae.m))
for mdl in models.values():
for (row, col, val) in mdl.triplets.zip_ijv(jname):
ii.extend(row)
jj.extend(col)
vv.extend(np.zeros_like(row))
for (row, col, val) in mdl.triplets.zip_ijv(jname + 'c'):
ii.extend(row)
jj.extend(col)
vv.extend(val * np.ones_like(row))
if len(ii) > 0:
ii = np.array(ii, dtype=int)
jj = np.array(jj, dtype=int)
vv = np.array(vv, dtype=float)
self.dae.store_sparse_ijv(jname, ii, jj, vv)
self.dae.build_pattern(jname)
|
andes
|
positive
|
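The andes pair above accumulates (row, col, value) triplets per Jacobian and always reserves the `gy` diagonal with zeros before handing the pattern to the DAE. A small illustrative sketch of that triplet accumulation using scipy.sparse (an extra dependency used here only for illustration; this is not the ANDES DAE machinery):

```python
import numpy as np
from scipy.sparse import coo_matrix

m = 4                                    # pretend dae.m == 4
ii = list(np.arange(m))                  # reserve the full gy diagonal ...
jj = list(np.arange(m))
vv = list(np.zeros(m))                   # ... with zero values, as a safeguard

# triplets contributed by a hypothetical model
ii += [0, 2]
jj += [3, 1]
vv += [0.0, 0.0]

pattern = coo_matrix((vv, (ii, jj)), shape=(m, m))
print(pattern.toarray())                 # all zeros numerically ...
print(list(zip(pattern.row, pattern.col)))  # ... but the structural positions are kept
```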
def create_callbacks(model, training_model, prediction_model, validation_generator, args):
""" Creates the callbacks to use during training.
Args
model: The base model.
training_model: The model that is used for training.
prediction_model: The model that should be used for validation.
validation_generator: The generator for creating validation data.
args: parseargs args object.
Returns:
A list of callbacks used for training.
"""
callbacks = []
tensorboard_callback = None
if args.tensorboard_dir:
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=args.tensorboard_dir, histogram_freq=0, batch_size=args.batch_size, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
callbacks.append(tensorboard_callback)
if args.evaluation and validation_generator:
if args.dataset_type == 'coco':
from callbacks import CocoEval
evaluation = CocoEval(validation_generator, tensorboard=tensorboard_callback)
else:
evaluation = Evaluate(validation_generator, tensorboard=tensorboard_callback, weighted_average=args.weighted_average)
evaluation = RedirectModel(evaluation, prediction_model)
callbacks.append(evaluation)
if args.snapshots:
<DeepExtract>
try:
os.makedirs(args.snapshot_path)
except OSError:
if not os.path.isdir(args.snapshot_path):
raise
</DeepExtract>
checkpoint = keras.callbacks.ModelCheckpoint(os.path.join(args.snapshot_path, '{backbone}_{dataset_type}_{{epoch:02d}}.h5'.format(backbone=args.backbone, dataset_type=args.dataset_type)), verbose=1)
checkpoint = RedirectModel(checkpoint, model)
callbacks.append(checkpoint)
callbacks.append(keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.1, patience=2, verbose=1, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0))
return callbacks
|
def create_callbacks(model, training_model, prediction_model, validation_generator, args):
""" Creates the callbacks to use during training.
Args
model: The base model.
training_model: The model that is used for training.
prediction_model: The model that should be used for validation.
validation_generator: The generator for creating validation data.
args: parseargs args object.
Returns:
A list of callbacks used for training.
"""
callbacks = []
tensorboard_callback = None
if args.tensorboard_dir:
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=args.tensorboard_dir, histogram_freq=0, batch_size=args.batch_size, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
callbacks.append(tensorboard_callback)
if args.evaluation and validation_generator:
if args.dataset_type == 'coco':
from callbacks import CocoEval
evaluation = CocoEval(validation_generator, tensorboard=tensorboard_callback)
else:
evaluation = Evaluate(validation_generator, tensorboard=tensorboard_callback, weighted_average=args.weighted_average)
evaluation = RedirectModel(evaluation, prediction_model)
callbacks.append(evaluation)
if args.snapshots:
try:
os.makedirs(args.snapshot_path)
except OSError:
if not os.path.isdir(args.snapshot_path):
raise
checkpoint = keras.callbacks.ModelCheckpoint(os.path.join(args.snapshot_path, '{backbone}_{dataset_type}_{{epoch:02d}}.h5'.format(backbone=args.backbone, dataset_type=args.dataset_type)), verbose=1)
checkpoint = RedirectModel(checkpoint, model)
callbacks.append(checkpoint)
callbacks.append(keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.1, patience=2, verbose=1, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0))
return callbacks
|
ensembleObjectDetection
|
positive
|
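For context, the callback list returned by `create_callbacks` above is meant for Keras' training loop; a hypothetical usage sketch (the generator and argument names are illustrative, not taken from the repo):

```python
callbacks = create_callbacks(model, training_model, prediction_model, validation_generator, args)

# Keras 2.x accepts the list via the `callbacks` keyword of fit()/fit_generator().
training_model.fit_generator(
    generator=train_generator,
    steps_per_epoch=args.steps,
    epochs=args.epochs,
    callbacks=callbacks,
)
```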
@parameterized.named_parameters([(f'{shots}_shot_max_{max_e}', shots, max_e) for (shots, max_e) in itertools.product((0, 1, 2), (None, 4))])
@mock.patch.object(bb.BigBenchJsonTaskFetcher, 'get_bigbench_json_task', autospec=True)
def test_composite_generative_json_task_consistency(self, shots: int, max_examples: Optional[int], mock_get_json_task):
subtasks = []
for i in range(2):
<DeepExtract>
examples = [{'input': f"{f'subtask_{i}'} input {i}.", 'target': f"{f'subtask_{i}'} target {i}."} for i in range(8)]
</DeepExtract>
subtasks.append(_create_generative_task_data(name=f'subtask_{i}', examples=examples))
<DeepExtract>
task_data = {'canary': _CANARY, 'name': f'test_task_{str(uuid.uuid1())}', 'description': 'composite task', 'keywords': [], 'subtasks': subtasks}
</DeepExtract>
task = json_task.JsonTask(task_data=task_data, shot_list=[shots])
model = BigbenchTestModel()
task.evaluate_model(model=model, max_examples=max_examples)
subtask_rates = {x['name']: len(x['examples']) for x in subtasks}
num_task_examples = sum((len(x['examples']) for x in subtasks))
test_json_paths = {task_data['name']: '/foo'}
test_json_metadata = {task_data['name']: {'subtask_names': list(subtask_rates.keys()), 'num_generate_text': num_task_examples, 'num_multiple_choice': 0}}
test_json_metadata.update({subtask_name: {'subtask_names': [], 'num_generate_text': subtask_num_examples, 'num_multiple_choice': 0} for (subtask_name, subtask_num_examples) in subtask_rates.items()})
test_json_subtask_names = {task_data['name']: list(subtask_rates.keys())}
self.mock_bb_json_paths_utils(test_json_paths, test_json_metadata, test_json_subtask_names)
mock_get_json_task.return_value = json_task.JsonTask(task_data=task_data, shot_list=[shots])
<DeepExtract>
task_rates = {}
for task_name in [task_data['name']]:
(_, rates) = task_api.register_composite_task(task_name=task_name, shots=shots, vocab=vocabs.T5_DEFAULT_VOCAB, max_examples=max_examples)
task_rates.update(rates)
seqio_mixture_name = task_api.get_seqio_name(bigbench_task_name=f'test_mixture_{str(uuid.uuid1())}', bigbench_task_type=bb.BigBenchTaskType.MIX, vocab=vocabs.T5_DEFAULT_VOCAB, num_shots=shots, max_examples=max_examples)
mixture = seqio.MixtureRegistry.add(name=seqio_mixture_name, tasks=list(task_rates.items()), default_rate=None)
</DeepExtract>
vocab = vocabs.T5_DEFAULT_VOCAB.vocabulary
evaluator = seqio.Evaluator(mixture.name, seqio.EncDecFeatureConverter(pack=False), eval_split='all')
(all_metrics, _, _) = evaluator.evaluate(compute_metrics=True, step=None, predict_fn=lambda ds: [(0, vocab.encode('foo'))] * len(list(ds)), score_fn=lambda ds: [(0, -0.6931471805599453)] * len(list(ds)))
results = all_metrics.result()
del results
self.assertEqual(self.mock_measure_generative_metrics.call_count, 4)
bb_out1 = self.mock_measure_generative_metrics.mock_calls[0][1][0]
bb_out2 = self.mock_measure_generative_metrics.mock_calls[1][1][0]
self.assertListEqual([_strip_input_and_targets(x) for x in bb_out1], self.mock_measure_generative_metrics.mock_calls[2][1][0])
self.assertListEqual([_strip_input_and_targets(x) for x in bb_out2], self.mock_measure_generative_metrics.mock_calls[3][1][0])
|
@parameterized.named_parameters([(f'{shots}_shot_max_{max_e}', shots, max_e) for (shots, max_e) in itertools.product((0, 1, 2), (None, 4))])
@mock.patch.object(bb.BigBenchJsonTaskFetcher, 'get_bigbench_json_task', autospec=True)
def test_composite_generative_json_task_consistency(self, shots: int, max_examples: Optional[int], mock_get_json_task):
subtasks = []
for i in range(2):
examples = [{'input': f"{f'subtask_{i}'} input {i}.", 'target': f"{f'subtask_{i}'} target {i}."} for i in range(8)]
subtasks.append(_create_generative_task_data(name=f'subtask_{i}', examples=examples))
task_data = {'canary': _CANARY, 'name': f'test_task_{str(uuid.uuid1())}', 'description': 'composite task', 'keywords': [], 'subtasks': subtasks}
task = json_task.JsonTask(task_data=task_data, shot_list=[shots])
model = BigbenchTestModel()
task.evaluate_model(model=model, max_examples=max_examples)
subtask_rates = {x['name']: len(x['examples']) for x in subtasks}
num_task_examples = sum((len(x['examples']) for x in subtasks))
test_json_paths = {task_data['name']: '/foo'}
test_json_metadata = {task_data['name']: {'subtask_names': list(subtask_rates.keys()), 'num_generate_text': num_task_examples, 'num_multiple_choice': 0}}
test_json_metadata.update({subtask_name: {'subtask_names': [], 'num_generate_text': subtask_num_examples, 'num_multiple_choice': 0} for (subtask_name, subtask_num_examples) in subtask_rates.items()})
test_json_subtask_names = {task_data['name']: list(subtask_rates.keys())}
self.mock_bb_json_paths_utils(test_json_paths, test_json_metadata, test_json_subtask_names)
mock_get_json_task.return_value = json_task.JsonTask(task_data=task_data, shot_list=[shots])
task_rates = {}
for task_name in [task_data['name']]:
(_, rates) = task_api.register_composite_task(task_name=task_name, shots=shots, vocab=vocabs.T5_DEFAULT_VOCAB, max_examples=max_examples)
task_rates.update(rates)
seqio_mixture_name = task_api.get_seqio_name(bigbench_task_name=f'test_mixture_{str(uuid.uuid1())}', bigbench_task_type=bb.BigBenchTaskType.MIX, vocab=vocabs.T5_DEFAULT_VOCAB, num_shots=shots, max_examples=max_examples)
mixture = seqio.MixtureRegistry.add(name=seqio_mixture_name, tasks=list(task_rates.items()), default_rate=None)
vocab = vocabs.T5_DEFAULT_VOCAB.vocabulary
evaluator = seqio.Evaluator(mixture.name, seqio.EncDecFeatureConverter(pack=False), eval_split='all')
(all_metrics, _, _) = evaluator.evaluate(compute_metrics=True, step=None, predict_fn=lambda ds: [(0, vocab.encode('foo'))] * len(list(ds)), score_fn=lambda ds: [(0, -0.6931471805599453)] * len(list(ds)))
results = all_metrics.result()
del results
self.assertEqual(self.mock_measure_generative_metrics.call_count, 4)
bb_out1 = self.mock_measure_generative_metrics.mock_calls[0][1][0]
bb_out2 = self.mock_measure_generative_metrics.mock_calls[1][1][0]
self.assertListEqual([_strip_input_and_targets(x) for x in bb_out1], self.mock_measure_generative_metrics.mock_calls[2][1][0])
self.assertListEqual([_strip_input_and_targets(x) for x in bb_out2], self.mock_measure_generative_metrics.mock_calls[3][1][0])
|
BIG-bench
|
positive
|
def _grid_sfc_area(lon, lat, lon_bounds=None, lat_bounds=None):
"""Calculate surface area of each grid cell in a lon-lat grid."""
if lon_bounds is None:
<DeepExtract>
spacing = lon.diff(internal_names.LON_STR).values
lower = xr.DataArray(np.empty_like(lon), dims=lon.dims, coords=lon.coords)
lower.values[:-1] = lon.values[:-1] - 0.5 * spacing
lower.values[-1] = lon.values[-1] - 0.5 * spacing[-1]
upper = xr.DataArray(np.empty_like(lon), dims=lon.dims, coords=lon.coords)
upper.values[:-1] = lon.values[:-1] + 0.5 * spacing
upper.values[-1] = lon.values[-1] + 0.5 * spacing[-1]
bounds = xr.concat([lower, upper], dim='bounds')
lon_bounds = bounds.T
</DeepExtract>
if lat_bounds is None:
<DeepExtract>
spacing = lat.diff(internal_names.LAT_STR).values
lower = xr.DataArray(np.empty_like(lat), dims=lat.dims, coords=lat.coords)
lower.values[:-1] = lat.values[:-1] - 0.5 * spacing
lower.values[-1] = lat.values[-1] - 0.5 * spacing[-1]
upper = xr.DataArray(np.empty_like(lat), dims=lat.dims, coords=lat.coords)
upper.values[:-1] = lat.values[:-1] + 0.5 * spacing
upper.values[-1] = lat.values[-1] + 0.5 * spacing[-1]
bounds = xr.concat([lower, upper], dim='bounds')
lat_bounds = bounds.T
</DeepExtract>
<DeepExtract>
try:
dlon = utils.vertcoord.to_radians(lon_bounds, is_delta=True)[:, 1] - utils.vertcoord.to_radians(lon_bounds, is_delta=True)[:, 0]
except IndexError:
diff = np.diff(utils.vertcoord.to_radians(lon_bounds, is_delta=True), axis=0)
dlon = xr.DataArray(diff, dims=lon.dims, coords=lon.coords)
</DeepExtract>
sinlat_bounds = np.sin(utils.vertcoord.to_radians(lat_bounds, is_delta=True))
dsinlat = np.abs(_diff_bounds(sinlat_bounds, lat))
sfc_area = dlon * dsinlat * RADIUS_EARTH ** 2
try:
sfc_area = sfc_area.rename({internal_names.LAT_BOUNDS_STR: internal_names.LAT_STR, internal_names.LON_BOUNDS_STR: internal_names.LON_STR})
except ValueError:
pass
sfc_area = sfc_area.rename(internal_names.SFC_AREA_STR)
sfc_area[internal_names.LAT_STR] = lat
sfc_area[internal_names.LON_STR] = lon
return sfc_area.transpose()
|
def _grid_sfc_area(lon, lat, lon_bounds=None, lat_bounds=None):
"""Calculate surface area of each grid cell in a lon-lat grid."""
if lon_bounds is None:
spacing = lon.diff(internal_names.LON_STR).values
lower = xr.DataArray(np.empty_like(lon), dims=lon.dims, coords=lon.coords)
lower.values[:-1] = lon.values[:-1] - 0.5 * spacing
lower.values[-1] = lon.values[-1] - 0.5 * spacing[-1]
upper = xr.DataArray(np.empty_like(lon), dims=lon.dims, coords=lon.coords)
upper.values[:-1] = lon.values[:-1] + 0.5 * spacing
upper.values[-1] = lon.values[-1] + 0.5 * spacing[-1]
bounds = xr.concat([lower, upper], dim='bounds')
lon_bounds = bounds.T
if lat_bounds is None:
spacing = lat.diff(internal_names.LAT_STR).values
lower = xr.DataArray(np.empty_like(lat), dims=lat.dims, coords=lat.coords)
lower.values[:-1] = lat.values[:-1] - 0.5 * spacing
lower.values[-1] = lat.values[-1] - 0.5 * spacing[-1]
upper = xr.DataArray(np.empty_like(lat), dims=lat.dims, coords=lat.coords)
upper.values[:-1] = lat.values[:-1] + 0.5 * spacing
upper.values[-1] = lat.values[-1] + 0.5 * spacing[-1]
bounds = xr.concat([lower, upper], dim='bounds')
lat_bounds = bounds.T
try:
dlon = utils.vertcoord.to_radians(lon_bounds, is_delta=True)[:, 1] - utils.vertcoord.to_radians(lon_bounds, is_delta=True)[:, 0]
except IndexError:
diff = np.diff(utils.vertcoord.to_radians(lon_bounds, is_delta=True), axis=0)
dlon = xr.DataArray(diff, dims=lon.dims, coords=lon.coords)
sinlat_bounds = np.sin(utils.vertcoord.to_radians(lat_bounds, is_delta=True))
dsinlat = np.abs(_diff_bounds(sinlat_bounds, lat))
sfc_area = dlon * dsinlat * RADIUS_EARTH ** 2
try:
sfc_area = sfc_area.rename({internal_names.LAT_BOUNDS_STR: internal_names.LAT_STR, internal_names.LON_BOUNDS_STR: internal_names.LON_STR})
except ValueError:
pass
sfc_area = sfc_area.rename(internal_names.SFC_AREA_STR)
sfc_area[internal_names.LAT_STR] = lat
sfc_area[internal_names.LON_STR] = lon
return sfc_area.transpose()
|
aospy
|
positive
|
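The aospy pair above computes per-cell areas from longitude bounds and sine-of-latitude bounds, i.e. each cell's area is R² · Δλ · Δ(sin φ). A small self-contained check in plain NumPy (not the aospy/xarray machinery) that these areas sum to the area of the sphere, 4πR²:

```python
import numpy as np

R = 6.371e6                       # Earth radius in metres (approximate)
nlon, nlat = 144, 90              # toy 2.5 x 2 degree grid

lon_b = np.linspace(0.0, 2.0 * np.pi, nlon + 1)          # longitude bounds [rad]
lat_b = np.linspace(-np.pi / 2, np.pi / 2, nlat + 1)     # latitude bounds [rad]

dlon = np.diff(lon_b)                                    # delta(lambda) per column
dsinlat = np.diff(np.sin(lat_b))                         # delta(sin(phi)) per row

area = R**2 * np.outer(dsinlat, dlon)                    # (nlat, nlon) cell areas
print(np.allclose(area.sum(), 4.0 * np.pi * R**2))       # True
```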
def extract_all(self, file_path, output_directory, compression_type=None, mode='r'):
if compression_type is None:
compression_type = self.get_compression_type(file_path)
if compression_type == 'zip':
<DeepExtract>
assert self.validate_dir(output_directory, only_parent=True)
with Archive._open['zip'](file_path, 'r') as read_handler:
read_handler.extractall(output_directory)
</DeepExtract>
return
assert compression_type in Archive._archive_mode_read_stream, "Unknown compression type: '{}'".format(compression_type)
<DeepExtract>
assert self.validate_dir(output_directory, only_parent=True)
with tarfile.open(file_path) as read_handler:
def is_within_directory(directory, target):
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def safe_extract(tar, path='.', members=None, *, numeric_owner=False):
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise Exception('Attempted Path Traversal in Tar File')
tar.extractall(path, members, numeric_owner=numeric_owner)
safe_extract(read_handler, output_directory)
</DeepExtract>
|
def extract_all(self, file_path, output_directory, compression_type=None, mode='r'):
if compression_type is None:
compression_type = self.get_compression_type(file_path)
if compression_type == 'zip':
assert self.validate_dir(output_directory, only_parent=True)
with Archive._open['zip'](file_path, 'r') as read_handler:
read_handler.extractall(output_directory)
return
assert compression_type in Archive._archive_mode_read_stream, "Unknown compression type: '{}'".format(compression_type)
assert self.validate_dir(output_directory, only_parent=True)
with tarfile.open(file_path) as read_handler:
def is_within_directory(directory, target):
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def safe_extract(tar, path='.', members=None, *, numeric_owner=False):
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise Exception('Attempted Path Traversal in Tar File')
tar.extractall(path, members, numeric_owner=numeric_owner)
safe_extract(read_handler, output_directory)

|
CAMISIM
|
positive
|
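The CAMISIM pair above guards `tarfile.extractall` against path traversal (archive members like `../../etc/passwd`). A standalone sketch of the same containment check, usable outside the class (a sketch assuming Python 3 and member names resolved relative to the destination):

```python
import os
import tarfile

def safe_extract_tar(archive_path: str, dest_dir: str) -> None:
    """Extract a tar archive, refusing members that would escape dest_dir."""
    dest_dir = os.path.abspath(dest_dir)
    with tarfile.open(archive_path) as tar:
        for member in tar.getmembers():
            target = os.path.abspath(os.path.join(dest_dir, member.name))
            # commonprefix-style containment check, as in the snippet above
            if os.path.commonprefix([dest_dir, target]) != dest_dir:
                raise RuntimeError(f'blocked path traversal attempt: {member.name}')
        tar.extractall(dest_dir)

# usage (hypothetical paths):
# safe_extract_tar('bundle.tar.gz', 'output/')
```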
def __repr__(self):
if hasattr(self, 'in_channels_remain') and hasattr(self, 'out_channels_remain'):
s = '\n(1): Conv_dhp({}, {}, kernel_size=({}, {}), stride={}, bias={}, groups={}, scale={}, transpose={})'.format(self.in_channels_remain, self.out_channels_remain, self.kernel_size, self.kernel_size, self.stride, self.bias_flag, self.groups, self.scale, self.transpose)
else:
s = '\n(1): Conv_dhp({}, {}, kernel_size=({}, {}), stride={}, bias={}, groups={}, scale={}, transpose={})'.format(self.in_channels, self.out_channels, self.kernel_size, self.kernel_size, self.stride, self.bias_flag, self.groups, self.scale, self.transpose)
if self.batchnorm:
s = s + '\n(2): ' + repr(self.bn_main)
if self.act:
s = s + '\n(3): ' + repr(self.relu)
<DeepExtract>
s = s.split('\n')
if len(s) == 1:
s = s
first = s.pop(0)
s = [2 * ' ' + line for line in s]
s = '\n'.join(s)
s = first + '\n' + s
s = s
</DeepExtract>
return s
|
def __repr__(self):
if hasattr(self, 'in_channels_remain') and hasattr(self, 'out_channels_remain'):
s = '\n(1): Conv_dhp({}, {}, kernel_size=({}, {}), stride={}, bias={}, groups={}, scale={}, transpose={})'.format(self.in_channels_remain, self.out_channels_remain, self.kernel_size, self.kernel_size, self.stride, self.bias_flag, self.groups, self.scale, self.transpose)
else:
s = '\n(1): Conv_dhp({}, {}, kernel_size=({}, {}), stride={}, bias={}, groups={}, scale={}, transpose={})'.format(self.in_channels, self.out_channels, self.kernel_size, self.kernel_size, self.stride, self.bias_flag, self.groups, self.scale, self.transpose)
if self.batchnorm:
s = s + '\n(2): ' + repr(self.bn_main)
if self.act:
s = s + '\n(3): ' + repr(self.relu)
s = s.split('\n')
if len(s) == 1:
s = s
first = s.pop(0)
s = [2 * ' ' + line for line in s]
s = '\n'.join(s)
s = first + '\n' + s
s = s
return s
|
dhp
|
positive
|
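The dhp pair above ends by indenting every repr line after the first by two spaces, the usual PyTorch-style nested-module formatting. The same idea as a standalone helper (a sketch, not the repo's function):

```python
def indent_repr(s: str, prefix: str = '  ') -> str:
    """Indent every line except the first, as nn.Module.__repr__ does."""
    first, *rest = s.split('\n')
    if not rest:
        return s
    return first + '\n' + '\n'.join(prefix + line for line in rest)

print(indent_repr('Block(\n(1): Conv(3, 16)\n(2): ReLU()\n)'))
# Block(
#   (1): Conv(3, 16)
#   (2): ReLU()
#   )
```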
def update(self, updates: Dict[str, Any], predicate: WhereClause=lambda row: True):
for (column, new_value) in updates.items():
if column not in self.columns:
raise ValueError(f'invalid column: {column}')
<DeepExtract>
idx = self.columns.index(column)
typ3 = self.types[idx]
</DeepExtract>
if not isinstance(new_value, typ3) and new_value is not None:
raise TypeError(f'expected type {typ3}, but got {new_value}')
for row in self.rows:
if predicate(row):
for (column, new_value) in updates.items():
row[column] = new_value
|
def update(self, updates: Dict[str, Any], predicate: WhereClause=lambda row: True):
for (column, new_value) in updates.items():
if column not in self.columns:
raise ValueError(f'invalid column: {column}')
idx = self.columns.index(column)
typ3 = self.types[idx]
if not isinstance(new_value, typ3) and new_value is not None:
raise TypeError(f'expected type {typ3}, but got {new_value}')
for row in self.rows:
if predicate(row):
for (column, new_value) in updates.items():
row[column] = new_value
|
data-science-from-scratch
|
positive
|
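A short usage sketch for the `update` method above. The `Table` construction and `insert` call mirror the book's column-names-plus-types interface but are illustrative here, not copied from the repo:

```python
# Hypothetical setup mirroring a Table(columns, types) interface.
users = Table(['user_id', 'name', 'num_friends'], [int, str, int])
users.insert([0, 'Hero', 0])
users.insert([1, 'Dunn', 2])

# Set num_friends to 3 wherever user_id == 1; rows failing the predicate are untouched.
users.update({'num_friends': 3}, lambda row: row['user_id'] == 1)
```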
def subkey(self, path):
"""
path:
of the form "K" where K is an integer index, or "K/N" where N is usually
a 0 (deposit address) or 1 (change address)
"""
t = path.split('/')
if len(t) == 2:
(n, for_change) = t
else:
(n,) = t
for_change = 0
b = (str(n) + ':' + str(for_change) + ':').encode('utf8') + self.master_public_key()
offset = from_bytes_32(double_sha256(b))
if self._master_private_key:
return Key(secret_exponent=(self._master_private_key + offset) % ORDER, prefer_uncompressed=True)
p1 = offset * ecdsa.generator_secp256k1
<DeepExtract>
if self._public_pair is None:
mpk = self.master_public_key()
self._public_pair = tuple((from_bytes_32(mpk[idx:idx + 32]) for idx in (0, 32)))
(x, y) = self._public_pair
</DeepExtract>
p2 = ecdsa.Point(ecdsa.generator_secp256k1.curve(), x, y, ORDER)
p = p1 + p2
return Key(public_pair=p.pair(), prefer_uncompressed=True)
|
def subkey(self, path):
"""
path:
of the form "K" where K is an integer index, or "K/N" where N is usually
a 0 (deposit address) or 1 (change address)
"""
t = path.split('/')
if len(t) == 2:
(n, for_change) = t
else:
(n,) = t
for_change = 0
b = (str(n) + ':' + str(for_change) + ':').encode('utf8') + self.master_public_key()
offset = from_bytes_32(double_sha256(b))
if self._master_private_key:
return Key(secret_exponent=(self._master_private_key + offset) % ORDER, prefer_uncompressed=True)
p1 = offset * ecdsa.generator_secp256k1
if self._public_pair is None:
mpk = self.master_public_key()
self._public_pair = tuple((from_bytes_32(mpk[idx:idx + 32]) for idx in (0, 32)))
(x, y) = self._public_pair
p2 = ecdsa.Point(ecdsa.generator_secp256k1.curve(), x, y, ORDER)
p = p1 + p2
return Key(public_pair=p.pair(), prefer_uncompressed=True)
|
dashman
|
positive
|
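The dashman pair above implements Electrum-style (non-BIP32) child-key derivation: an offset is double-SHA256-hashed from the index, the change flag, and the master public key, then added to the private key modulo the curve order, or to the public point via the generator. In symbols, summarising the code rather than introducing a new scheme:

```latex
z = \mathrm{int}\!\left(\mathrm{SHA256}\!\left(\mathrm{SHA256}\!\left(\texttt{"n:c:"} \,\|\, \mathrm{mpk}\right)\right)\right), \qquad
k_{\text{child}} = (k_{\text{master}} + z) \bmod N, \qquad
K_{\text{child}} = K_{\text{master}} + z\,G
```

where N is the secp256k1 group order and G its generator; `subkey` returns the private form when `_master_private_key` is set and the point form otherwise.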
def convert(self, mode):
if mode not in ('xyxy', 'xywh'):
raise ValueError("mode should be 'xyxy' or 'xywh'")
if mode == self.mode:
return self
<DeepExtract>
if self.mode == 'xyxy':
(xmin, ymin, xmax, ymax) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmax, ymax)
elif self.mode == 'xywh':
TO_REMOVE = 1
(xmin, ymin, w, h) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmin + (w - TO_REMOVE).clamp(min=0), ymin + (h - TO_REMOVE).clamp(min=0))
else:
raise RuntimeError('Should not be here')
</DeepExtract>
if mode == 'xyxy':
bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode)
else:
TO_REMOVE = 1
bbox = torch.cat((xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode)
bbox._copy_extra_fields(self)
return bbox
|
def convert(self, mode):
if mode not in ('xyxy', 'xywh'):
raise ValueError("mode should be 'xyxy' or 'xywh'")
if mode == self.mode:
return self
if self.mode == 'xyxy':
(xmin, ymin, xmax, ymax) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmax, ymax)
elif self.mode == 'xywh':
TO_REMOVE = 1
(xmin, ymin, w, h) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmin + (w - TO_REMOVE).clamp(min=0), ymin + (h - TO_REMOVE).clamp(min=0))
else:
raise RuntimeError('Should not be here')
if mode == 'xyxy':
bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode)
else:
TO_REMOVE = 1
bbox = torch.cat((xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode)
bbox._copy_extra_fields(self)
return bbox
|
CenterMask
|
positive
|
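The CenterMask pair above converts boxes between corner (`xyxy`) and width/height (`xywh`) encodings, with the `TO_REMOVE = 1` inclusive-pixel convention inherited from maskrcnn-benchmark. A minimal tensor sketch of the round trip (assumes only PyTorch):

```python
import torch

boxes_xyxy = torch.tensor([[10., 20., 30., 60.]])        # xmin, ymin, xmax, ymax
TO_REMOVE = 1                                            # inclusive-pixel convention

xmin, ymin, xmax, ymax = boxes_xyxy.split(1, dim=-1)
boxes_xywh = torch.cat((xmin, ymin,
                        xmax - xmin + TO_REMOVE,
                        ymax - ymin + TO_REMOVE), dim=-1)
print(boxes_xywh)                                        # tensor([[10., 20., 21., 41.]])

# and back again
x, y, w, h = boxes_xywh.split(1, dim=-1)
back = torch.cat((x, y,
                  x + (w - TO_REMOVE).clamp(min=0),
                  y + (h - TO_REMOVE).clamp(min=0)), dim=-1)
print(torch.equal(back, boxes_xyxy))                     # True
```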
def _create_world(self, initialize_goal_image=False):
"""
This function loads the urdfs of the robot in all the pybullet clients
:param initialize_goal_image: (bool) used to indicate if pybullet client
responsible for the goal image needs
to be initialized.
:return:
"""
<DeepExtract>
if self._pybullet_client_full_id is not None:
pybullet.resetSimulation(physicsClientId=self._pybullet_client_full_id)
pybullet.setPhysicsEngineParameter(deterministicOverlappingPairs=1, physicsClientId=self._pybullet_client_full_id)
if self._pybullet_client_w_o_goal_id is not None:
pybullet.resetSimulation(physicsClientId=self._pybullet_client_w_o_goal_id)
pybullet.setPhysicsEngineParameter(deterministicOverlappingPairs=1, physicsClientId=self._pybullet_client_w_o_goal_id)
return
</DeepExtract>
finger_base_position = [0, 0, 0.0]
finger_base_orientation = [0, 0, 0, 1]
if initialize_goal_image:
client_list = [self._pybullet_client_w_o_goal_id, self._pybullet_client_w_goal_id, self._pybullet_client_full_id]
else:
client_list = [self._pybullet_client_w_o_goal_id, self._pybullet_client_full_id]
for client in client_list:
if client is not None:
pybullet.setAdditionalSearchPath(pybullet_data.getDataPath(), physicsClientId=client)
pybullet.setGravity(0, 0, -9.81, physicsClientId=client)
pybullet.setTimeStep(self._simulation_time, physicsClientId=client)
pybullet.loadURDF('plane_transparent.urdf', [0, 0, 0], physicsClientId=client)
pybullet.loadURDF(fileName=self._finger_urdf_path, basePosition=finger_base_position, baseOrientation=finger_base_orientation, useFixedBase=1, flags=pybullet.URDF_USE_INERTIA_FROM_FILE | pybullet.URDF_USE_SELF_COLLISION, physicsClientId=client)
if self.link_name_to_index is None:
self.link_name_to_index = {pybullet.getBodyInfo(WorldConstants.ROBOT_ID, physicsClientId=client)[0].decode('UTF-8'): -1}
for joint_idx in range(pybullet.getNumJoints(WorldConstants.ROBOT_ID, physicsClientId=client)):
link_name = pybullet.getJointInfo(WorldConstants.ROBOT_ID, joint_idx, physicsClientId=client)[12].decode('UTF-8')
self.link_name_to_index[link_name] = joint_idx
self._revolute_joint_ids = [self.link_name_to_index[name] for name in WorldConstants.JOINT_NAMES]
self.finger_tip_ids = [self.link_name_to_index[name] for name in WorldConstants.TIP_LINK_NAMES]
self.finger_link_ids = self._revolute_joint_ids
self.last_joint_position = [0] * len(self._revolute_joint_ids)
for link_id in self.finger_link_ids:
pybullet.changeDynamics(bodyUniqueId=WorldConstants.ROBOT_ID, linkIndex=link_id, maxJointVelocity=1000.0, restitution=0.8, jointDamping=0.0, lateralFriction=0.1, spinningFriction=0.1, rollingFriction=0.1, linearDamping=0.5, angularDamping=0.5, contactStiffness=0.1, contactDamping=0.05, physicsClientId=client)
<DeepExtract>
def mesh_path(filename):
return os.path.join(self._robot_properties_path, 'meshes', 'stl', filename)
table_colour = (0.31, 0.27, 0.25, 1.0)
high_border_colour = (0.95, 0.95, 0.95, 1.0)
floor_id = pybullet.createCollisionShape(shapeType=pybullet.GEOM_MESH, fileName=mesh_path('trifinger_table_without_border.stl'), flags=0, physicsClientId=client)
obj = pybullet.createMultiBody(baseCollisionShapeIndex=floor_id, baseVisualShapeIndex=-1, basePosition=[0, 0, 0.01], baseOrientation=[0, 0, 0, 1], physicsClientId=client)
pybullet.changeVisualShape(obj, -1, rgbaColor=table_colour, physicsClientId=client)
stage_id = pybullet.createCollisionShape(shapeType=pybullet.GEOM_MESH, fileName=mesh_path('edu/frame_wall.stl'), flags=pybullet.GEOM_FORCE_CONCAVE_TRIMESH, physicsClientId=client)
obj = pybullet.createMultiBody(baseCollisionShapeIndex=stage_id, baseVisualShapeIndex=-1, basePosition=[0, 0, 0.01], baseOrientation=[0, 0, 0, 1], physicsClientId=client)
pybullet.changeVisualShape(obj, -1, rgbaColor=high_border_colour, physicsClientId=client)
return
</DeepExtract>
return
|
def _create_world(self, initialize_goal_image=False):
"""
This function loads the urdfs of the robot in all the pybullet clients
:param initialize_goal_image: (bool) used to indicate if pybullet client
responsible for the goal image needs
to be initialized.
:return:
"""
if self._pybullet_client_full_id is not None:
pybullet.resetSimulation(physicsClientId=self._pybullet_client_full_id)
pybullet.setPhysicsEngineParameter(deterministicOverlappingPairs=1, physicsClientId=self._pybullet_client_full_id)
if self._pybullet_client_w_o_goal_id is not None:
pybullet.resetSimulation(physicsClientId=self._pybullet_client_w_o_goal_id)
pybullet.setPhysicsEngineParameter(deterministicOverlappingPairs=1, physicsClientId=self._pybullet_client_w_o_goal_id)
return
finger_base_position = [0, 0, 0.0]
finger_base_orientation = [0, 0, 0, 1]
if initialize_goal_image:
client_list = [self._pybullet_client_w_o_goal_id, self._pybullet_client_w_goal_id, self._pybullet_client_full_id]
else:
client_list = [self._pybullet_client_w_o_goal_id, self._pybullet_client_full_id]
for client in client_list:
if client is not None:
pybullet.setAdditionalSearchPath(pybullet_data.getDataPath(), physicsClientId=client)
pybullet.setGravity(0, 0, -9.81, physicsClientId=client)
pybullet.setTimeStep(self._simulation_time, physicsClientId=client)
pybullet.loadURDF('plane_transparent.urdf', [0, 0, 0], physicsClientId=client)
pybullet.loadURDF(fileName=self._finger_urdf_path, basePosition=finger_base_position, baseOrientation=finger_base_orientation, useFixedBase=1, flags=pybullet.URDF_USE_INERTIA_FROM_FILE | pybullet.URDF_USE_SELF_COLLISION, physicsClientId=client)
if self.link_name_to_index is None:
self.link_name_to_index = {pybullet.getBodyInfo(WorldConstants.ROBOT_ID, physicsClientId=client)[0].decode('UTF-8'): -1}
for joint_idx in range(pybullet.getNumJoints(WorldConstants.ROBOT_ID, physicsClientId=client)):
link_name = pybullet.getJointInfo(WorldConstants.ROBOT_ID, joint_idx, physicsClientId=client)[12].decode('UTF-8')
self.link_name_to_index[link_name] = joint_idx
self._revolute_joint_ids = [self.link_name_to_index[name] for name in WorldConstants.JOINT_NAMES]
self.finger_tip_ids = [self.link_name_to_index[name] for name in WorldConstants.TIP_LINK_NAMES]
self.finger_link_ids = self._revolute_joint_ids
self.last_joint_position = [0] * len(self._revolute_joint_ids)
for link_id in self.finger_link_ids:
pybullet.changeDynamics(bodyUniqueId=WorldConstants.ROBOT_ID, linkIndex=link_id, maxJointVelocity=1000.0, restitution=0.8, jointDamping=0.0, lateralFriction=0.1, spinningFriction=0.1, rollingFriction=0.1, linearDamping=0.5, angularDamping=0.5, contactStiffness=0.1, contactDamping=0.05, physicsClientId=client)
def mesh_path(filename):
return os.path.join(self._robot_properties_path, 'meshes', 'stl', filename)
table_colour = (0.31, 0.27, 0.25, 1.0)
high_border_colour = (0.95, 0.95, 0.95, 1.0)
floor_id = pybullet.createCollisionShape(shapeType=pybullet.GEOM_MESH, fileName=mesh_path('trifinger_table_without_border.stl'), flags=0, physicsClientId=client)
obj = pybullet.createMultiBody(baseCollisionShapeIndex=floor_id, baseVisualShapeIndex=-1, basePosition=[0, 0, 0.01], baseOrientation=[0, 0, 0, 1], physicsClientId=client)
pybullet.changeVisualShape(obj, -1, rgbaColor=table_colour, physicsClientId=client)
stage_id = pybullet.createCollisionShape(shapeType=pybullet.GEOM_MESH, fileName=mesh_path('edu/frame_wall.stl'), flags=pybullet.GEOM_FORCE_CONCAVE_TRIMESH, physicsClientId=client)
obj = pybullet.createMultiBody(baseCollisionShapeIndex=stage_id, baseVisualShapeIndex=-1, basePosition=[0, 0, 0.01], baseOrientation=[0, 0, 0, 1], physicsClientId=client)
pybullet.changeVisualShape(obj, -1, rgbaColor=high_border_colour, physicsClientId=client)
return
return
|
CausalWorld
|
positive
|
def prepare_data_seq(task, batch_size=100):
file_train = 'data/dialog-bAbI-tasks/dialog-babi-task{}trn.txt'.format(task)
file_dev = 'data/dialog-bAbI-tasks/dialog-babi-task{}dev.txt'.format(task)
file_test = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst.txt'.format(task)
if int(task) != 6:
file_test_OOV = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst-OOV.txt'.format(task)
candid_file_path = 'data/dialog-bAbI-tasks/dialog-babi-candidates.txt'
kb_path = 'data/dialog-bAbI-tasks/dialog-babi-kb-all.txt'
else:
candid_file_path = 'data/dialog-bAbI-tasks/dialog-babi-task6-dstc2-candidates.txt'
kb_path = 'data/dialog-bAbI-tasks/dialog-babi-task6-dstc2-kb.txt'
query2idx = {'UNK': 0, 'R_restaurant': 7, 'R_cuisine': 1, 'R_location': 2, 'R_price': 3, 'R_number': 4, 'R_phone': 5, 'R_address': 6}
<DeepExtract>
type_dict = get_type_dict(kb_path, dstc2=int(task) == 6)
entity_list = []
for key in type_dict.keys():
for value in type_dict[key]:
entity_list.append(value)
ent = entity_list
</DeepExtract>
<DeepExtract>
type_dict = get_type_dict(kb_path, dstc2=int(task) == 6)
ent_list = entityList(kb_path, int(int(task)))
(candidates, _, _) = load_candidates(task_id=int(task), candidates_f=candid_file_path)
candid_all = []
candid2candDL = {}
for (index, cand) in enumerate(candidates):
cand_DL = [x for x in cand]
for (index, word) in enumerate(cand_DL):
if word in ent_list:
for type_name in type_dict:
if word in type_dict[type_name] and type_name != 'R_rating':
cand_DL[index] = type_name
break
cand_DL = ' '.join(cand_DL)
candid_all.append(cand_DL)
candid2candDL[' '.join(cand)] = cand_DL
cand_list = list(set(candid_all))
candDL2idx = dict(((c, i) for (i, c) in enumerate(cand_list)))
idx2candDL = dict(((i, c) for (c, i) in candDL2idx.items()))
cand2DLidx = {}
for key in candid2candDL.keys():
cand2DLidx[key] = candDL2idx[candid2candDL[key]]
(cand2DLidx, idx2candDL) = (cand2DLidx, idx2candDL)
</DeepExtract>
<DeepExtract>
logging.info('Reading lines from {}'.format(file_train))
data = []
content_arr = []
u = None
r = None
user_counter = 0
system_counter = 0
system_res_counter = 0
KB_counter = 0
dialog_counter = 0
with open(file_train) as fin:
max_r_len = 0
cnt_lin = 1
time_counter = 1
for line in fin:
line = line.strip()
if line:
(nid, line) = line.split(' ', 1)
if '\t' in line:
(u, r) = line.split('\t')
if u != '<SILENCE>':
user_counter += 1
system_counter += 1
bot_action_idx = cand2DLidx[r]
bot_action = idx2candDL[bot_action_idx]
gen_u = generate_memory(u, '$u', str(time_counter))
content_arr += gen_u
ent_query = {}
ent_query_idx = {}
for (idx, key) in enumerate(r.split(' ')):
if key in ent:
index = [loc for (loc, val) in enumerate(content_arr) if val[0] == key]
if index:
index = max(index)
ent_query_idx[bot_action.split(' ')[idx]] = index
ent_query[bot_action.split(' ')[idx]] = key
else:
print('[Wrong] Cannot find the entity')
exit(1)
system_res_counter += 1
if ent_query == {}:
ent_query = {'UNK': '$$$$'}
ent_query_idx = {'UNK': len(content_arr)}
content_arr_temp = content_arr + [['$$$$'] * MEM_TOKEN_SIZE]
else:
content_arr_temp = content_arr
for ent in ent_query.keys():
data_item = {'dialID': dialog_counter, 'turnID': system_counter, 'content_arr': content_arr_temp, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': [ent, ent_query[ent]], 'ent_query_idx': [ent, ent_query_idx[ent]], 'gold_response': r}
data.append(data_item)
gen_r = generate_memory(r, '$s', str(time_counter))
content_arr += gen_r
time_counter += 1
else:
KB_counter += 1
r = line
content_arr += generate_memory(r, '', '')
else:
cnt_lin += 1
if None and cnt_lin >= None:
break
content_arr = []
content_arr_temp = []
time_counter = 1
dialog_counter += 1
max_len = max([len(d['content_arr']) for d in data])
logging.info('Nb of dialogs = {} '.format(dialog_counter))
logging.info('Max responce Len: {}'.format(max_r_len))
logging.info('Max Input Len: {}'.format(max_len))
logging.info('Avg. User Utterances: {}'.format(user_counter * 1.0 / dialog_counter))
logging.info('Avg. Bot Utterances: {}'.format(system_counter * 1.0 / dialog_counter))
logging.info('Avg. KB results: {}'.format(KB_counter * 1.0 / dialog_counter))
logging.info('Avg. responce Len: {}'.format(system_res_counter * 1.0 / system_counter))
print('Sample: ', data[5])
(pair_train, max_len_train) = (data, max_len)
</DeepExtract>
<DeepExtract>
logging.info('Reading lines from {}'.format(file_dev))
data = []
content_arr = []
u = None
r = None
user_counter = 0
system_counter = 0
system_res_counter = 0
KB_counter = 0
dialog_counter = 0
with open(file_dev) as fin:
max_r_len = 0
cnt_lin = 1
time_counter = 1
for line in fin:
line = line.strip()
if line:
(nid, line) = line.split(' ', 1)
if '\t' in line:
(u, r) = line.split('\t')
if u != '<SILENCE>':
user_counter += 1
system_counter += 1
bot_action_idx = cand2DLidx[r]
bot_action = idx2candDL[bot_action_idx]
gen_u = generate_memory(u, '$u', str(time_counter))
content_arr += gen_u
ent_query = {}
ent_query_idx = {}
for (idx, key) in enumerate(r.split(' ')):
if key in ent:
index = [loc for (loc, val) in enumerate(content_arr) if val[0] == key]
if index:
index = max(index)
ent_query_idx[bot_action.split(' ')[idx]] = index
ent_query[bot_action.split(' ')[idx]] = key
else:
print('[Wrong] Cannot find the entity')
exit(1)
system_res_counter += 1
if ent_query == {}:
ent_query = {'UNK': '$$$$'}
ent_query_idx = {'UNK': len(content_arr)}
content_arr_temp = content_arr + [['$$$$'] * MEM_TOKEN_SIZE]
else:
content_arr_temp = content_arr
for ent in ent_query.keys():
data_item = {'dialID': dialog_counter, 'turnID': system_counter, 'content_arr': content_arr_temp, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': [ent, ent_query[ent]], 'ent_query_idx': [ent, ent_query_idx[ent]], 'gold_response': r}
data.append(data_item)
gen_r = generate_memory(r, '$s', str(time_counter))
content_arr += gen_r
time_counter += 1
else:
KB_counter += 1
r = line
content_arr += generate_memory(r, '', '')
else:
cnt_lin += 1
if None and cnt_lin >= None:
break
content_arr = []
content_arr_temp = []
time_counter = 1
dialog_counter += 1
max_len = max([len(d['content_arr']) for d in data])
logging.info('Nb of dialogs = {} '.format(dialog_counter))
logging.info('Max responce Len: {}'.format(max_r_len))
logging.info('Max Input Len: {}'.format(max_len))
logging.info('Avg. User Utterances: {}'.format(user_counter * 1.0 / dialog_counter))
logging.info('Avg. Bot Utterances: {}'.format(system_counter * 1.0 / dialog_counter))
logging.info('Avg. KB results: {}'.format(KB_counter * 1.0 / dialog_counter))
logging.info('Avg. responce Len: {}'.format(system_res_counter * 1.0 / system_counter))
print('Sample: ', data[5])
(pair_dev, max_len_dev) = (data, max_len)
</DeepExtract>
<DeepExtract>
logging.info('Reading lines from {}'.format(file_test))
data = []
content_arr = []
u = None
r = None
user_counter = 0
system_counter = 0
system_res_counter = 0
KB_counter = 0
dialog_counter = 0
with open(file_test) as fin:
max_r_len = 0
cnt_lin = 1
time_counter = 1
for line in fin:
line = line.strip()
if line:
(nid, line) = line.split(' ', 1)
if '\t' in line:
(u, r) = line.split('\t')
if u != '<SILENCE>':
user_counter += 1
system_counter += 1
bot_action_idx = cand2DLidx[r]
bot_action = idx2candDL[bot_action_idx]
gen_u = generate_memory(u, '$u', str(time_counter))
content_arr += gen_u
ent_query = {}
ent_query_idx = {}
for (idx, key) in enumerate(r.split(' ')):
if key in ent:
index = [loc for (loc, val) in enumerate(content_arr) if val[0] == key]
if index:
index = max(index)
ent_query_idx[bot_action.split(' ')[idx]] = index
ent_query[bot_action.split(' ')[idx]] = key
else:
print('[Wrong] Cannot find the entity')
exit(1)
system_res_counter += 1
if ent_query == {}:
ent_query = {'UNK': '$$$$'}
ent_query_idx = {'UNK': len(content_arr)}
content_arr_temp = content_arr + [['$$$$'] * MEM_TOKEN_SIZE]
else:
content_arr_temp = content_arr
for ent in ent_query.keys():
data_item = {'dialID': dialog_counter, 'turnID': system_counter, 'content_arr': content_arr_temp, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': [ent, ent_query[ent]], 'ent_query_idx': [ent, ent_query_idx[ent]], 'gold_response': r}
data.append(data_item)
gen_r = generate_memory(r, '$s', str(time_counter))
content_arr += gen_r
time_counter += 1
else:
KB_counter += 1
r = line
content_arr += generate_memory(r, '', '')
else:
cnt_lin += 1
if None and cnt_lin >= None:
break
content_arr = []
content_arr_temp = []
time_counter = 1
dialog_counter += 1
max_len = max([len(d['content_arr']) for d in data])
logging.info('Nb of dialogs = {} '.format(dialog_counter))
logging.info('Max responce Len: {}'.format(max_r_len))
logging.info('Max Input Len: {}'.format(max_len))
logging.info('Avg. User Utterances: {}'.format(user_counter * 1.0 / dialog_counter))
logging.info('Avg. Bot Utterances: {}'.format(system_counter * 1.0 / dialog_counter))
logging.info('Avg. KB results: {}'.format(KB_counter * 1.0 / dialog_counter))
logging.info('Avg. responce Len: {}'.format(system_res_counter * 1.0 / system_counter))
print('Sample: ', data[5])
(pair_test, max_len_test) = (data, max_len)
</DeepExtract>
max_r_test_OOV = 0
if int(task) != 6:
<DeepExtract>
logging.info('Reading lines from {}'.format(file_test_OOV))
data = []
content_arr = []
u = None
r = None
user_counter = 0
system_counter = 0
system_res_counter = 0
KB_counter = 0
dialog_counter = 0
with open(file_test_OOV) as fin:
max_r_len = 0
cnt_lin = 1
time_counter = 1
for line in fin:
line = line.strip()
if line:
(nid, line) = line.split(' ', 1)
if '\t' in line:
(u, r) = line.split('\t')
if u != '<SILENCE>':
user_counter += 1
system_counter += 1
bot_action_idx = cand2DLidx[r]
bot_action = idx2candDL[bot_action_idx]
gen_u = generate_memory(u, '$u', str(time_counter))
content_arr += gen_u
ent_query = {}
ent_query_idx = {}
for (idx, key) in enumerate(r.split(' ')):
if key in ent:
index = [loc for (loc, val) in enumerate(content_arr) if val[0] == key]
if index:
index = max(index)
ent_query_idx[bot_action.split(' ')[idx]] = index
ent_query[bot_action.split(' ')[idx]] = key
else:
print('[Wrong] Cannot find the entity')
exit(1)
system_res_counter += 1
if ent_query == {}:
ent_query = {'UNK': '$$$$'}
ent_query_idx = {'UNK': len(content_arr)}
content_arr_temp = content_arr + [['$$$$'] * MEM_TOKEN_SIZE]
else:
content_arr_temp = content_arr
for ent in ent_query.keys():
data_item = {'dialID': dialog_counter, 'turnID': system_counter, 'content_arr': content_arr_temp, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': [ent, ent_query[ent]], 'ent_query_idx': [ent, ent_query_idx[ent]], 'gold_response': r}
data.append(data_item)
gen_r = generate_memory(r, '$s', str(time_counter))
content_arr += gen_r
time_counter += 1
else:
KB_counter += 1
r = line
content_arr += generate_memory(r, '', '')
else:
cnt_lin += 1
if None and cnt_lin >= None:
break
content_arr = []
content_arr_temp = []
time_counter = 1
dialog_counter += 1
max_len = max([len(d['content_arr']) for d in data])
logging.info('Nb of dialogs = {} '.format(dialog_counter))
logging.info('Max responce Len: {}'.format(max_r_len))
logging.info('Max Input Len: {}'.format(max_len))
logging.info('Avg. User Utterances: {}'.format(user_counter * 1.0 / dialog_counter))
logging.info('Avg. Bot Utterances: {}'.format(system_counter * 1.0 / dialog_counter))
logging.info('Avg. KB results: {}'.format(KB_counter * 1.0 / dialog_counter))
logging.info('Avg. responce Len: {}'.format(system_res_counter * 1.0 / system_counter))
print('Sample: ', data[5])
(pair_test_OOV, max_len_test_OOV) = (data, max_len)
</DeepExtract>
max_len = max(max_len_train, max_len_dev, max_len_test, max_len_test_OOV) + 1
max_r = -1
lang = Lang()
<DeepExtract>
dialID_arr = []
turnID_arr = []
content_arr = []
bot_action = []
bot_action_idx = []
ent_query = []
ent_query_idx = []
gold_response = []
for pair in pair_train:
dialID_arr.append(pair['dialID'])
turnID_arr.append(pair['turnID'])
content_arr.append(pair['content_arr'])
bot_action.append(pair['bot_action'])
bot_action_idx.append(pair['bot_action_idx'])
ent_query.append(pair['ent_query'])
ent_query_idx.append(pair['ent_query_idx'])
gold_response.append(pair['gold_response'])
if True:
lang.index_words(pair['content_arr'])
lang.index_words(pair['bot_action'], trg=True)
data_item = {'dialID': dialID_arr, 'turnID': turnID_arr, 'content_arr': content_arr, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': ent_query, 'ent_query_idx': ent_query_idx, 'gold_response': gold_response}
dataset = Dataset(data_item, lang.word2index, lang.word2index, max_len, query2idx)
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
train = data_loader
</DeepExtract>
<DeepExtract>
dialID_arr = []
turnID_arr = []
content_arr = []
bot_action = []
bot_action_idx = []
ent_query = []
ent_query_idx = []
gold_response = []
for pair in pair_dev:
dialID_arr.append(pair['dialID'])
turnID_arr.append(pair['turnID'])
content_arr.append(pair['content_arr'])
bot_action.append(pair['bot_action'])
bot_action_idx.append(pair['bot_action_idx'])
ent_query.append(pair['ent_query'])
ent_query_idx.append(pair['ent_query_idx'])
gold_response.append(pair['gold_response'])
if False:
lang.index_words(pair['content_arr'])
lang.index_words(pair['bot_action'], trg=True)
data_item = {'dialID': dialID_arr, 'turnID': turnID_arr, 'content_arr': content_arr, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': ent_query, 'ent_query_idx': ent_query_idx, 'gold_response': gold_response}
dataset = Dataset(data_item, lang.word2index, lang.word2index, max_len, query2idx)
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)
dev = data_loader
</DeepExtract>
<DeepExtract>
dialID_arr = []
turnID_arr = []
content_arr = []
bot_action = []
bot_action_idx = []
ent_query = []
ent_query_idx = []
gold_response = []
for pair in pair_test:
dialID_arr.append(pair['dialID'])
turnID_arr.append(pair['turnID'])
content_arr.append(pair['content_arr'])
bot_action.append(pair['bot_action'])
bot_action_idx.append(pair['bot_action_idx'])
ent_query.append(pair['ent_query'])
ent_query_idx.append(pair['ent_query_idx'])
gold_response.append(pair['gold_response'])
if False:
lang.index_words(pair['content_arr'])
lang.index_words(pair['bot_action'], trg=True)
data_item = {'dialID': dialID_arr, 'turnID': turnID_arr, 'content_arr': content_arr, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': ent_query, 'ent_query_idx': ent_query_idx, 'gold_response': gold_response}
dataset = Dataset(data_item, lang.word2index, lang.word2index, max_len, query2idx)
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)
test = data_loader
</DeepExtract>
if int(task) != 6:
<DeepExtract>
dialID_arr = []
turnID_arr = []
content_arr = []
bot_action = []
bot_action_idx = []
ent_query = []
ent_query_idx = []
gold_response = []
for pair in pair_test_OOV:
dialID_arr.append(pair['dialID'])
turnID_arr.append(pair['turnID'])
content_arr.append(pair['content_arr'])
bot_action.append(pair['bot_action'])
bot_action_idx.append(pair['bot_action_idx'])
ent_query.append(pair['ent_query'])
ent_query_idx.append(pair['ent_query_idx'])
gold_response.append(pair['gold_response'])
if False:
lang.index_words(pair['content_arr'])
lang.index_words(pair['bot_action'], trg=True)
data_item = {'dialID': dialID_arr, 'turnID': turnID_arr, 'content_arr': content_arr, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': ent_query, 'ent_query_idx': ent_query_idx, 'gold_response': gold_response}
dataset = Dataset(data_item, lang.word2index, lang.word2index, max_len, query2idx)
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)
testOOV = data_loader
</DeepExtract>
else:
testOOV = []
logging.info('Read %s sentence pairs train' % len(pair_train))
logging.info('Read %s sentence pairs dev' % len(pair_dev))
logging.info('Read %s sentence pairs test' % len(pair_test))
if int(task) != 6:
logging.info('Read %s sentence pairs testoov' % len(pair_test_OOV))
logging.info('Max len Input %s ' % max_len)
logging.info('Vocab_size %s ' % lang.n_words)
logging.info('USE_CUDA={}'.format(USE_CUDA))
return (train, dev, test, testOOV, lang, max_len, max_r, idx2candDL, query2idx)
|
def prepare_data_seq(task, batch_size=100):
file_train = 'data/dialog-bAbI-tasks/dialog-babi-task{}trn.txt'.format(task)
file_dev = 'data/dialog-bAbI-tasks/dialog-babi-task{}dev.txt'.format(task)
file_test = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst.txt'.format(task)
if int(task) != 6:
file_test_OOV = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst-OOV.txt'.format(task)
candid_file_path = 'data/dialog-bAbI-tasks/dialog-babi-candidates.txt'
kb_path = 'data/dialog-bAbI-tasks/dialog-babi-kb-all.txt'
else:
candid_file_path = 'data/dialog-bAbI-tasks/dialog-babi-task6-dstc2-candidates.txt'
kb_path = 'data/dialog-bAbI-tasks/dialog-babi-task6-dstc2-kb.txt'
query2idx = {'UNK': 0, 'R_restaurant': 7, 'R_cuisine': 1, 'R_location': 2, 'R_price': 3, 'R_number': 4, 'R_phone': 5, 'R_address': 6}
type_dict = get_type_dict(kb_path, dstc2=int(task) == 6)
entity_list = []
for key in type_dict.keys():
for value in type_dict[key]:
entity_list.append(value)
ent = entity_list
type_dict = get_type_dict(kb_path, dstc2=int(task) == 6)
ent_list = entityList(kb_path, int(int(task)))
(candidates, _, _) = load_candidates(task_id=int(task), candidates_f=candid_file_path)
candid_all = []
candid2candDL = {}
for (index, cand) in enumerate(candidates):
cand_DL = [x for x in cand]
for (index, word) in enumerate(cand_DL):
if word in ent_list:
for type_name in type_dict:
if word in type_dict[type_name] and type_name != 'R_rating':
cand_DL[index] = type_name
break
cand_DL = ' '.join(cand_DL)
candid_all.append(cand_DL)
candid2candDL[' '.join(cand)] = cand_DL
cand_list = list(set(candid_all))
candDL2idx = dict(((c, i) for (i, c) in enumerate(cand_list)))
idx2candDL = dict(((i, c) for (c, i) in candDL2idx.items()))
cand2DLidx = {}
for key in candid2candDL.keys():
cand2DLidx[key] = candDL2idx[candid2candDL[key]]
(cand2DLidx, idx2candDL) = (cand2DLidx, idx2candDL)
logging.info('Reading lines from {}'.format(file_train))
data = []
content_arr = []
u = None
r = None
user_counter = 0
system_counter = 0
system_res_counter = 0
KB_counter = 0
dialog_counter = 0
with open(file_train) as fin:
max_r_len = 0
cnt_lin = 1
time_counter = 1
for line in fin:
line = line.strip()
if line:
(nid, line) = line.split(' ', 1)
if '\t' in line:
(u, r) = line.split('\t')
if u != '<SILENCE>':
user_counter += 1
system_counter += 1
bot_action_idx = cand2DLidx[r]
bot_action = idx2candDL[bot_action_idx]
gen_u = generate_memory(u, '$u', str(time_counter))
content_arr += gen_u
ent_query = {}
ent_query_idx = {}
for (idx, key) in enumerate(r.split(' ')):
if key in ent:
index = [loc for (loc, val) in enumerate(content_arr) if val[0] == key]
if index:
index = max(index)
ent_query_idx[bot_action.split(' ')[idx]] = index
ent_query[bot_action.split(' ')[idx]] = key
else:
print('[Wrong] Cannot find the entity')
exit(1)
system_res_counter += 1
if ent_query == {}:
ent_query = {'UNK': '$$$$'}
ent_query_idx = {'UNK': len(content_arr)}
content_arr_temp = content_arr + [['$$$$'] * MEM_TOKEN_SIZE]
else:
content_arr_temp = content_arr
for ent in ent_query.keys():
data_item = {'dialID': dialog_counter, 'turnID': system_counter, 'content_arr': content_arr_temp, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': [ent, ent_query[ent]], 'ent_query_idx': [ent, ent_query_idx[ent]], 'gold_response': r}
data.append(data_item)
gen_r = generate_memory(r, '$s', str(time_counter))
content_arr += gen_r
time_counter += 1
else:
KB_counter += 1
r = line
content_arr += generate_memory(r, '', '')
else:
cnt_lin += 1
if None and cnt_lin >= None:
break
content_arr = []
content_arr_temp = []
time_counter = 1
dialog_counter += 1
max_len = max([len(d['content_arr']) for d in data])
logging.info('Nb of dialogs = {} '.format(dialog_counter))
logging.info('Max responce Len: {}'.format(max_r_len))
logging.info('Max Input Len: {}'.format(max_len))
logging.info('Avg. User Utterances: {}'.format(user_counter * 1.0 / dialog_counter))
logging.info('Avg. Bot Utterances: {}'.format(system_counter * 1.0 / dialog_counter))
logging.info('Avg. KB results: {}'.format(KB_counter * 1.0 / dialog_counter))
logging.info('Avg. responce Len: {}'.format(system_res_counter * 1.0 / system_counter))
print('Sample: ', data[5])
(pair_train, max_len_train) = (data, max_len)
logging.info('Reading lines from {}'.format(file_dev))
data = []
content_arr = []
u = None
r = None
user_counter = 0
system_counter = 0
system_res_counter = 0
KB_counter = 0
dialog_counter = 0
with open(file_dev) as fin:
max_r_len = 0
cnt_lin = 1
time_counter = 1
for line in fin:
line = line.strip()
if line:
(nid, line) = line.split(' ', 1)
if '\t' in line:
(u, r) = line.split('\t')
if u != '<SILENCE>':
user_counter += 1
system_counter += 1
bot_action_idx = cand2DLidx[r]
bot_action = idx2candDL[bot_action_idx]
gen_u = generate_memory(u, '$u', str(time_counter))
content_arr += gen_u
ent_query = {}
ent_query_idx = {}
for (idx, key) in enumerate(r.split(' ')):
if key in ent:
index = [loc for (loc, val) in enumerate(content_arr) if val[0] == key]
if index:
index = max(index)
ent_query_idx[bot_action.split(' ')[idx]] = index
ent_query[bot_action.split(' ')[idx]] = key
else:
print('[Wrong] Cannot find the entity')
exit(1)
system_res_counter += 1
if ent_query == {}:
ent_query = {'UNK': '$$$$'}
ent_query_idx = {'UNK': len(content_arr)}
content_arr_temp = content_arr + [['$$$$'] * MEM_TOKEN_SIZE]
else:
content_arr_temp = content_arr
for ent in ent_query.keys():
data_item = {'dialID': dialog_counter, 'turnID': system_counter, 'content_arr': content_arr_temp, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': [ent, ent_query[ent]], 'ent_query_idx': [ent, ent_query_idx[ent]], 'gold_response': r}
data.append(data_item)
gen_r = generate_memory(r, '$s', str(time_counter))
content_arr += gen_r
time_counter += 1
else:
KB_counter += 1
r = line
content_arr += generate_memory(r, '', '')
else:
cnt_lin += 1
if None and cnt_lin >= None:
break
content_arr = []
content_arr_temp = []
time_counter = 1
dialog_counter += 1
max_len = max([len(d['content_arr']) for d in data])
logging.info('Nb of dialogs = {} '.format(dialog_counter))
logging.info('Max responce Len: {}'.format(max_r_len))
logging.info('Max Input Len: {}'.format(max_len))
logging.info('Avg. User Utterances: {}'.format(user_counter * 1.0 / dialog_counter))
logging.info('Avg. Bot Utterances: {}'.format(system_counter * 1.0 / dialog_counter))
logging.info('Avg. KB results: {}'.format(KB_counter * 1.0 / dialog_counter))
logging.info('Avg. responce Len: {}'.format(system_res_counter * 1.0 / system_counter))
print('Sample: ', data[5])
(pair_dev, max_len_dev) = (data, max_len)
logging.info('Reading lines from {}'.format(file_test))
data = []
content_arr = []
u = None
r = None
user_counter = 0
system_counter = 0
system_res_counter = 0
KB_counter = 0
dialog_counter = 0
with open(file_test) as fin:
max_r_len = 0
cnt_lin = 1
time_counter = 1
for line in fin:
line = line.strip()
if line:
(nid, line) = line.split(' ', 1)
if '\t' in line:
(u, r) = line.split('\t')
if u != '<SILENCE>':
user_counter += 1
system_counter += 1
bot_action_idx = cand2DLidx[r]
bot_action = idx2candDL[bot_action_idx]
gen_u = generate_memory(u, '$u', str(time_counter))
content_arr += gen_u
ent_query = {}
ent_query_idx = {}
for (idx, key) in enumerate(r.split(' ')):
if key in ent:
index = [loc for (loc, val) in enumerate(content_arr) if val[0] == key]
if index:
index = max(index)
ent_query_idx[bot_action.split(' ')[idx]] = index
ent_query[bot_action.split(' ')[idx]] = key
else:
print('[Wrong] Cannot find the entity')
exit(1)
system_res_counter += 1
if ent_query == {}:
ent_query = {'UNK': '$$$$'}
ent_query_idx = {'UNK': len(content_arr)}
content_arr_temp = content_arr + [['$$$$'] * MEM_TOKEN_SIZE]
else:
content_arr_temp = content_arr
for ent in ent_query.keys():
data_item = {'dialID': dialog_counter, 'turnID': system_counter, 'content_arr': content_arr_temp, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': [ent, ent_query[ent]], 'ent_query_idx': [ent, ent_query_idx[ent]], 'gold_response': r}
data.append(data_item)
gen_r = generate_memory(r, '$s', str(time_counter))
content_arr += gen_r
time_counter += 1
else:
KB_counter += 1
r = line
content_arr += generate_memory(r, '', '')
else:
cnt_lin += 1
if None and cnt_lin >= None:
break
content_arr = []
content_arr_temp = []
time_counter = 1
dialog_counter += 1
max_len = max([len(d['content_arr']) for d in data])
logging.info('Nb of dialogs = {} '.format(dialog_counter))
logging.info('Max responce Len: {}'.format(max_r_len))
logging.info('Max Input Len: {}'.format(max_len))
logging.info('Avg. User Utterances: {}'.format(user_counter * 1.0 / dialog_counter))
logging.info('Avg. Bot Utterances: {}'.format(system_counter * 1.0 / dialog_counter))
logging.info('Avg. KB results: {}'.format(KB_counter * 1.0 / dialog_counter))
logging.info('Avg. responce Len: {}'.format(system_res_counter * 1.0 / system_counter))
print('Sample: ', data[5])
(pair_test, max_len_test) = (data, max_len)
max_r_test_OOV = 0
if int(task) != 6:
logging.info('Reading lines from {}'.format(file_test_OOV))
data = []
content_arr = []
u = None
r = None
user_counter = 0
system_counter = 0
system_res_counter = 0
KB_counter = 0
dialog_counter = 0
with open(file_test_OOV) as fin:
max_r_len = 0
cnt_lin = 1
time_counter = 1
for line in fin:
line = line.strip()
if line:
(nid, line) = line.split(' ', 1)
if '\t' in line:
(u, r) = line.split('\t')
if u != '<SILENCE>':
user_counter += 1
system_counter += 1
bot_action_idx = cand2DLidx[r]
bot_action = idx2candDL[bot_action_idx]
gen_u = generate_memory(u, '$u', str(time_counter))
content_arr += gen_u
ent_query = {}
ent_query_idx = {}
for (idx, key) in enumerate(r.split(' ')):
if key in ent:
index = [loc for (loc, val) in enumerate(content_arr) if val[0] == key]
if index:
index = max(index)
ent_query_idx[bot_action.split(' ')[idx]] = index
ent_query[bot_action.split(' ')[idx]] = key
else:
print('[Wrong] Cannot find the entity')
exit(1)
system_res_counter += 1
if ent_query == {}:
ent_query = {'UNK': '$$$$'}
ent_query_idx = {'UNK': len(content_arr)}
content_arr_temp = content_arr + [['$$$$'] * MEM_TOKEN_SIZE]
else:
content_arr_temp = content_arr
for ent in ent_query.keys():
data_item = {'dialID': dialog_counter, 'turnID': system_counter, 'content_arr': content_arr_temp, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': [ent, ent_query[ent]], 'ent_query_idx': [ent, ent_query_idx[ent]], 'gold_response': r}
data.append(data_item)
gen_r = generate_memory(r, '$s', str(time_counter))
content_arr += gen_r
time_counter += 1
else:
KB_counter += 1
r = line
content_arr += generate_memory(r, '', '')
else:
cnt_lin += 1
if None and cnt_lin >= None:
break
content_arr = []
content_arr_temp = []
time_counter = 1
dialog_counter += 1
max_len = max([len(d['content_arr']) for d in data])
logging.info('Nb of dialogs = {} '.format(dialog_counter))
logging.info('Max responce Len: {}'.format(max_r_len))
logging.info('Max Input Len: {}'.format(max_len))
logging.info('Avg. User Utterances: {}'.format(user_counter * 1.0 / dialog_counter))
logging.info('Avg. Bot Utterances: {}'.format(system_counter * 1.0 / dialog_counter))
logging.info('Avg. KB results: {}'.format(KB_counter * 1.0 / dialog_counter))
logging.info('Avg. responce Len: {}'.format(system_res_counter * 1.0 / system_counter))
print('Sample: ', data[5])
(pair_test_OOV, max_len_test_OOV) = (data, max_len)
max_len = max(max_len_train, max_len_dev, max_len_test, max_len_test_OOV) + 1
max_r = -1
lang = Lang()
dialID_arr = []
turnID_arr = []
content_arr = []
bot_action = []
bot_action_idx = []
ent_query = []
ent_query_idx = []
gold_response = []
for pair in pair_train:
dialID_arr.append(pair['dialID'])
turnID_arr.append(pair['turnID'])
content_arr.append(pair['content_arr'])
bot_action.append(pair['bot_action'])
bot_action_idx.append(pair['bot_action_idx'])
ent_query.append(pair['ent_query'])
ent_query_idx.append(pair['ent_query_idx'])
gold_response.append(pair['gold_response'])
if True:
lang.index_words(pair['content_arr'])
lang.index_words(pair['bot_action'], trg=True)
data_item = {'dialID': dialID_arr, 'turnID': turnID_arr, 'content_arr': content_arr, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': ent_query, 'ent_query_idx': ent_query_idx, 'gold_response': gold_response}
dataset = Dataset(data_item, lang.word2index, lang.word2index, max_len, query2idx)
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
train = data_loader
dialID_arr = []
turnID_arr = []
content_arr = []
bot_action = []
bot_action_idx = []
ent_query = []
ent_query_idx = []
gold_response = []
for pair in pair_dev:
dialID_arr.append(pair['dialID'])
turnID_arr.append(pair['turnID'])
content_arr.append(pair['content_arr'])
bot_action.append(pair['bot_action'])
bot_action_idx.append(pair['bot_action_idx'])
ent_query.append(pair['ent_query'])
ent_query_idx.append(pair['ent_query_idx'])
gold_response.append(pair['gold_response'])
if False:
lang.index_words(pair['content_arr'])
lang.index_words(pair['bot_action'], trg=True)
data_item = {'dialID': dialID_arr, 'turnID': turnID_arr, 'content_arr': content_arr, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': ent_query, 'ent_query_idx': ent_query_idx, 'gold_response': gold_response}
dataset = Dataset(data_item, lang.word2index, lang.word2index, max_len, query2idx)
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)
dev = data_loader
dialID_arr = []
turnID_arr = []
content_arr = []
bot_action = []
bot_action_idx = []
ent_query = []
ent_query_idx = []
gold_response = []
for pair in pair_test:
dialID_arr.append(pair['dialID'])
turnID_arr.append(pair['turnID'])
content_arr.append(pair['content_arr'])
bot_action.append(pair['bot_action'])
bot_action_idx.append(pair['bot_action_idx'])
ent_query.append(pair['ent_query'])
ent_query_idx.append(pair['ent_query_idx'])
gold_response.append(pair['gold_response'])
if False:
lang.index_words(pair['content_arr'])
lang.index_words(pair['bot_action'], trg=True)
data_item = {'dialID': dialID_arr, 'turnID': turnID_arr, 'content_arr': content_arr, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': ent_query, 'ent_query_idx': ent_query_idx, 'gold_response': gold_response}
dataset = Dataset(data_item, lang.word2index, lang.word2index, max_len, query2idx)
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)
test = data_loader
if int(task) != 6:
dialID_arr = []
turnID_arr = []
content_arr = []
bot_action = []
bot_action_idx = []
ent_query = []
ent_query_idx = []
gold_response = []
for pair in pair_test_OOV:
dialID_arr.append(pair['dialID'])
turnID_arr.append(pair['turnID'])
content_arr.append(pair['content_arr'])
bot_action.append(pair['bot_action'])
bot_action_idx.append(pair['bot_action_idx'])
ent_query.append(pair['ent_query'])
ent_query_idx.append(pair['ent_query_idx'])
gold_response.append(pair['gold_response'])
if False:
lang.index_words(pair['content_arr'])
lang.index_words(pair['bot_action'], trg=True)
data_item = {'dialID': dialID_arr, 'turnID': turnID_arr, 'content_arr': content_arr, 'bot_action': bot_action, 'bot_action_idx': bot_action_idx, 'ent_query': ent_query, 'ent_query_idx': ent_query_idx, 'gold_response': gold_response}
dataset = Dataset(data_item, lang.word2index, lang.word2index, max_len, query2idx)
data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)
testOOV = data_loader
else:
testOOV = []
logging.info('Read %s sentence pairs train' % len(pair_train))
logging.info('Read %s sentence pairs dev' % len(pair_dev))
logging.info('Read %s sentence pairs test' % len(pair_test))
if int(task) != 6:
logging.info('Read %s sentence pairs testoov' % len(pair_test_OOV))
logging.info('Max len Input %s ' % max_len)
logging.info('Vocab_size %s ' % lang.n_words)
logging.info('USE_CUDA={}'.format(USE_CUDA))
return (train, dev, test, testOOV, lang, max_len, max_r, idx2candDL, query2idx)
|
CrossWOZ
|
positive
|
def save(self):
if self._configure and (not self._clean):
if self._save or self._save_global:
if self._save:
<DeepExtract>
logger.info('{}'.format(as_notice('Save current settings...')))
self._save_settings(self._conf_path)
logger.info('{}'.format(as_notice('Save complete')))
</DeepExtract>
if self._save_global:
<DeepExtract>
logger.info('{}'.format(as_notice('Save current settings...')))
self._save_settings(self._global_conf_path)
logger.info('{}'.format(as_notice('Save complete')))
</DeepExtract>
else:
if self._update:
<DeepExtract>
logger.info('{}'.format(as_notice('Updating current settings...')))
self._save_settings(self._conf_path)
logger.info('{}'.format(as_notice('Update complete')))
</DeepExtract>
if self._update_global:
<DeepExtract>
logger.info('{}'.format(as_notice('Updating current settings...')))
self._save_settings(self._global_conf_path)
logger.info('{}'.format(as_notice('Update complete')))
</DeepExtract>
if self._remove:
<DeepExtract>
initial_option_count = len(self._loaded_options)
logger.info('Remove settings requested for the following options {}'.format(self._remove))
for setting in self._remove:
if setting in self._loaded_options:
del self._loaded_options[setting]
logger.info('Removing option [{}] as requested'.format(as_notice('--' + setting)))
if initial_option_count != len(self._loaded_options):
self._update_conf(self._conf_path)
</DeepExtract>
if self._remove_global:
<DeepExtract>
initial_option_count = len(self._loaded_options)
logger.info('Remove settings requested for the following options {}'.format(self._remove_global))
for setting in self._remove_global:
if setting in self._loaded_options:
del self._loaded_options[setting]
logger.info('Removing option [{}] as requested'.format(as_notice('--' + setting)))
if initial_option_count != len(self._loaded_options):
self._update_conf(self._global_conf_path)
</DeepExtract>
|
def save(self):
if self._configure and (not self._clean):
if self._save or self._save_global:
if self._save:
logger.info('{}'.format(as_notice('Save current settings...')))
self._save_settings(self._conf_path)
logger.info('{}'.format(as_notice('Save complete')))
if self._save_global:
logger.info('{}'.format(as_notice('Save current settings...')))
self._save_settings(self._global_conf_path)
logger.info('{}'.format(as_notice('Save complete')))
else:
if self._update:
logger.info('{}'.format(as_notice('Updating current settings...')))
self._save_settings(self._conf_path)
logger.info('{}'.format(as_notice('Update complete')))
if self._update_global:
logger.info('{}'.format(as_notice('Updating current settings...')))
self._save_settings(self._global_conf_path)
logger.info('{}'.format(as_notice('Update complete')))
if self._remove:
initial_option_count = len(self._loaded_options)
logger.info('Remove settings requested for the following options {}'.format(self._remove))
for setting in self._remove:
if setting in self._loaded_options:
del self._loaded_options[setting]
logger.info('Removing option [{}] as requested'.format(as_notice('--' + setting)))
if initial_option_count != len(self._loaded_options):
self._update_conf(self._conf_path)
if self._remove_global:
initial_option_count = len(self._loaded_options)
logger.info('Remove settings requested for the following options {}'.format(self._remove_global))
for setting in self._remove_global:
if setting in self._loaded_options:
del self._loaded_options[setting]
logger.info('Removing option [{}] as requested'.format(as_notice('--' + setting)))
if initial_option_count != len(self._loaded_options):
self._update_conf(self._global_conf_path)
|
cuppa
|
positive
|
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
<DeepExtract>
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookies is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookies:
if False or name not in names_from_jar:
cookiejar.set_cookie(create_cookie(name, cookies[name]))
cookiejar = cookiejar
</DeepExtract>
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
|
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookies is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookies:
if False or name not in names_from_jar:
cookiejar.set_cookie(create_cookie(name, cookies[name]))
cookiejar = cookiejar
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
|
DarkSploit
|
positive
|
def _retry(self, *args, **kwargs):
retry_times = 1
i = 0
while True:
try:
return f(self, *args, **kwargs)
except sqlalchemy.exc.DBAPIError as e:
if i < retry_times:
logger.warn('DB is disconnected. Reconnect to it.')
<DeepExtract>
self._engine = sqlalchemy.create_engine(self._database)
self._session = sqlalchemy.orm.sessionmaker(bind=self._engine)
</DeepExtract>
i += 1
else:
raise e
|
def _retry(self, *args, **kwargs):
retry_times = 1
i = 0
while True:
try:
return f(self, *args, **kwargs)
except sqlalchemy.exc.DBAPIError as e:
if i < retry_times:
logger.warn('DB is disconnected. Reconnect to it.')
self._engine = sqlalchemy.create_engine(self._database)
self._session = sqlalchemy.orm.sessionmaker(bind=self._engine)
i += 1
else:
raise e
|
docker-registry
|
positive
|
def draw_pattern(widget, pattern=None):
POS = []
def checkered(canvas, line_distance):
for x in range(line_distance, self.CANVAS_SIZE, line_distance):
canvas.create_line(x, 0, x, self.CANVAS_SIZE, fill='#999999')
for y in range(line_distance, self.CANVAS_SIZE, line_distance):
canvas.create_line(0, y, self.CANVAS_SIZE, y, fill='#999999')
def numbered(canvas):
n = 0
step = self.CANVAS_SIZE // self.FACTOR
start = step // 2
stepx = start
for _ in range(self.FACTOR):
stepy = start
while stepy < self.CANVAS_SIZE:
canvas.create_oval(stepy + self.CANVAS_SIZE // 15, stepx + self.CANVAS_SIZE // 15, stepy - self.CANVAS_SIZE // 15, stepx - self.CANVAS_SIZE // 15, fill='#444444', outline='#444444')
canvas.create_text(stepy, stepx, font=self.CANVAS_SIZE // 10, text=str(n), fill='#FFFFFF')
POS.append((stepy, stepx))
n += 1
stepy += step
stepx += step
def clean_pat(pattern):
try:
return json.loads(pattern)
except Exception as e:
self.logger.warning(f'{e}')
return []
def draw(canvas, pattern=[]):
canvas.delete(tk.ALL)
<DeepExtract>
POS = []
def checkered(canvas, line_distance):
for x in range(line_distance, self.CANVAS_SIZE, line_distance):
canvas.create_line(x, 0, x, self.CANVAS_SIZE, fill='#999999')
for y in range(line_distance, self.CANVAS_SIZE, line_distance):
canvas.create_line(0, y, self.CANVAS_SIZE, y, fill='#999999')
def numbered(canvas):
n = 0
step = self.CANVAS_SIZE // self.FACTOR
start = step // 2
stepx = start
for _ in range(self.FACTOR):
stepy = start
while stepy < self.CANVAS_SIZE:
canvas.create_oval(stepy + self.CANVAS_SIZE // 15, stepx + self.CANVAS_SIZE // 15, stepy - self.CANVAS_SIZE // 15, stepx - self.CANVAS_SIZE // 15, fill='#444444', outline='#444444')
canvas.create_text(stepy, stepx, font=self.CANVAS_SIZE // 10, text=str(n), fill='#FFFFFF')
POS.append((stepy, stepx))
n += 1
stepy += step
stepx += step
def clean_pat(pattern):
try:
return json.loads(None)
except Exception as e:
self.logger.warning(f'{e}')
return []
def draw(canvas, pattern=[]):
canvas.delete(tk.ALL)
self.draw_pattern(self.VISUAL, None)
if None:
combo = list(itertools.chain(*[POS[_] for _ in clean_pat(None)]))
if combo:
canvas.create_line(combo, arrow='last', arrowshape=[self.CANVAS_SIZE // 25, self.CANVAS_SIZE // 20, self.CANVAS_SIZE // 40], width=self.CANVAS_SIZE // 70, fill='#00CC00')
checkered(self.VISUAL, self.CANVAS_SIZE // self.FACTOR)
numbered(self.VISUAL)
if None:
draw(self.VISUAL, None)
</DeepExtract>
if pattern:
combo = list(itertools.chain(*[POS[_] for _ in clean_pat(pattern)]))
if combo:
canvas.create_line(combo, arrow='last', arrowshape=[self.CANVAS_SIZE // 25, self.CANVAS_SIZE // 20, self.CANVAS_SIZE // 40], width=self.CANVAS_SIZE // 70, fill='#00CC00')
<DeepExtract>
for x in range(self.CANVAS_SIZE // self.FACTOR, self.CANVAS_SIZE, self.CANVAS_SIZE // self.FACTOR):
widget.create_line(x, 0, x, self.CANVAS_SIZE, fill='#999999')
for y in range(self.CANVAS_SIZE // self.FACTOR, self.CANVAS_SIZE, self.CANVAS_SIZE // self.FACTOR):
widget.create_line(0, y, self.CANVAS_SIZE, y, fill='#999999')
</DeepExtract>
<DeepExtract>
n = 0
step = self.CANVAS_SIZE // self.FACTOR
start = step // 2
stepx = start
for _ in range(self.FACTOR):
stepy = start
while stepy < self.CANVAS_SIZE:
widget.create_oval(stepy + self.CANVAS_SIZE // 15, stepx + self.CANVAS_SIZE // 15, stepy - self.CANVAS_SIZE // 15, stepx - self.CANVAS_SIZE // 15, fill='#444444', outline='#444444')
widget.create_text(stepy, stepx, font=self.CANVAS_SIZE // 10, text=str(n), fill='#FFFFFF')
POS.append((stepy, stepx))
n += 1
stepy += step
stepx += step
</DeepExtract>
if pattern:
<DeepExtract>
widget.delete(tk.ALL)
self.draw_pattern(self.VISUAL, None)
if pattern:
combo = list(itertools.chain(*[POS[_] for _ in clean_pat(pattern)]))
if combo:
widget.create_line(combo, arrow='last', arrowshape=[self.CANVAS_SIZE // 25, self.CANVAS_SIZE // 20, self.CANVAS_SIZE // 40], width=self.CANVAS_SIZE // 70, fill='#00CC00')
</DeepExtract>
|
def draw_pattern(widget, pattern=None):
POS = []
def checkered(canvas, line_distance):
for x in range(line_distance, self.CANVAS_SIZE, line_distance):
canvas.create_line(x, 0, x, self.CANVAS_SIZE, fill='#999999')
for y in range(line_distance, self.CANVAS_SIZE, line_distance):
canvas.create_line(0, y, self.CANVAS_SIZE, y, fill='#999999')
def numbered(canvas):
n = 0
step = self.CANVAS_SIZE // self.FACTOR
start = step // 2
stepx = start
for _ in range(self.FACTOR):
stepy = start
while stepy < self.CANVAS_SIZE:
canvas.create_oval(stepy + self.CANVAS_SIZE // 15, stepx + self.CANVAS_SIZE // 15, stepy - self.CANVAS_SIZE // 15, stepx - self.CANVAS_SIZE // 15, fill='#444444', outline='#444444')
canvas.create_text(stepy, stepx, font=self.CANVAS_SIZE // 10, text=str(n), fill='#FFFFFF')
POS.append((stepy, stepx))
n += 1
stepy += step
stepx += step
def clean_pat(pattern):
try:
return json.loads(pattern)
except Exception as e:
self.logger.warning(f'{e}')
return []
def draw(canvas, pattern=[]):
canvas.delete(tk.ALL)
POS = []
def checkered(canvas, line_distance):
for x in range(line_distance, self.CANVAS_SIZE, line_distance):
canvas.create_line(x, 0, x, self.CANVAS_SIZE, fill='#999999')
for y in range(line_distance, self.CANVAS_SIZE, line_distance):
canvas.create_line(0, y, self.CANVAS_SIZE, y, fill='#999999')
def numbered(canvas):
n = 0
step = self.CANVAS_SIZE // self.FACTOR
start = step // 2
stepx = start
for _ in range(self.FACTOR):
stepy = start
while stepy < self.CANVAS_SIZE:
canvas.create_oval(stepy + self.CANVAS_SIZE // 15, stepx + self.CANVAS_SIZE // 15, stepy - self.CANVAS_SIZE // 15, stepx - self.CANVAS_SIZE // 15, fill='#444444', outline='#444444')
canvas.create_text(stepy, stepx, font=self.CANVAS_SIZE // 10, text=str(n), fill='#FFFFFF')
POS.append((stepy, stepx))
n += 1
stepy += step
stepx += step
def clean_pat(pattern):
try:
return json.loads(None)
except Exception as e:
self.logger.warning(f'{e}')
return []
def draw(canvas, pattern=[]):
canvas.delete(tk.ALL)
self.draw_pattern(self.VISUAL, None)
if None:
combo = list(itertools.chain(*[POS[_] for _ in clean_pat(None)]))
if combo:
canvas.create_line(combo, arrow='last', arrowshape=[self.CANVAS_SIZE // 25, self.CANVAS_SIZE // 20, self.CANVAS_SIZE // 40], width=self.CANVAS_SIZE // 70, fill='#00CC00')
checkered(self.VISUAL, self.CANVAS_SIZE // self.FACTOR)
numbered(self.VISUAL)
if None:
draw(self.VISUAL, None)
if pattern:
combo = list(itertools.chain(*[POS[_] for _ in clean_pat(pattern)]))
if combo:
canvas.create_line(combo, arrow='last', arrowshape=[self.CANVAS_SIZE // 25, self.CANVAS_SIZE // 20, self.CANVAS_SIZE // 40], width=self.CANVAS_SIZE // 70, fill='#00CC00')
for x in range(self.CANVAS_SIZE // self.FACTOR, self.CANVAS_SIZE, self.CANVAS_SIZE // self.FACTOR):
widget.create_line(x, 0, x, self.CANVAS_SIZE, fill='#999999')
for y in range(self.CANVAS_SIZE // self.FACTOR, self.CANVAS_SIZE, self.CANVAS_SIZE // self.FACTOR):
widget.create_line(0, y, self.CANVAS_SIZE, y, fill='#999999')
n = 0
step = self.CANVAS_SIZE // self.FACTOR
start = step // 2
stepx = start
for _ in range(self.FACTOR):
stepy = start
while stepy < self.CANVAS_SIZE:
widget.create_oval(stepy + self.CANVAS_SIZE // 15, stepx + self.CANVAS_SIZE // 15, stepy - self.CANVAS_SIZE // 15, stepx - self.CANVAS_SIZE // 15, fill='#444444', outline='#444444')
widget.create_text(stepy, stepx, font=self.CANVAS_SIZE // 10, text=str(n), fill='#FFFFFF')
POS.append((stepy, stepx))
n += 1
stepy += step
stepx += step
if pattern:
widget.delete(tk.ALL)
self.draw_pattern(self.VISUAL, None)
if pattern:
combo = list(itertools.chain(*[POS[_] for _ in clean_pat(pattern)]))
if combo:
widget.create_line(combo, arrow='last', arrowshape=[self.CANVAS_SIZE // 25, self.CANVAS_SIZE // 20, self.CANVAS_SIZE // 40], width=self.CANVAS_SIZE // 70, fill='#00CC00')
|
andriller
|
positive
|
def distance_from_point_to_point(self, position: Dict[str, float], target: Dict[str, float], allowed_error: float) -> float:
<DeepExtract>
try:
path = self.controller.step(action='GetShortestPathToPoint', position=position, x=target['x'], y=target['y'], z=target['z'], allowedError=allowed_error).metadata['actionReturn']['corners']
except Exception:
get_logger().debug('Failed to find path for {} in {}. Start point {}, agent state {}.'.format(target, self.controller.last_event.metadata['sceneName'], position, self.agent_state()))
path = None
</DeepExtract>
if path:
s_dist = math.sqrt((position['x'] - path[0]['x']) ** 2 + (position['z'] - path[0]['z']) ** 2)
t_dist = math.sqrt((target['x'] - path[-1]['x']) ** 2 + (target['z'] - path[-1]['z']) ** 2)
return metrics.path_distance(path) + s_dist + t_dist
return -1.0
|
def distance_from_point_to_point(self, position: Dict[str, float], target: Dict[str, float], allowed_error: float) -> float:
try:
path = self.controller.step(action='GetShortestPathToPoint', position=position, x=target['x'], y=target['y'], z=target['z'], allowedError=allowed_error).metadata['actionReturn']['corners']
except Exception:
get_logger().debug('Failed to find path for {} in {}. Start point {}, agent state {}.'.format(target, self.controller.last_event.metadata['sceneName'], position, self.agent_state()))
path = None
if path:
s_dist = math.sqrt((position['x'] - path[0]['x']) ** 2 + (position['z'] - path[0]['z']) ** 2)
t_dist = math.sqrt((target['x'] - path[-1]['x']) ** 2 + (target['z'] - path[-1]['z']) ** 2)
return metrics.path_distance(path) + s_dist + t_dist
return -1.0
|
allenact
|
positive
|
def combine_fulltext(term: Term) -> Tuple[Term, Term]:
if not term.contains_term_type(FulltextTerm):
return (AllTerm(), term)
elif isinstance(term, FulltextTerm):
return (term, AllTerm())
elif isinstance(term, CombinedTerm):
if (term.left.contains_term_type(FulltextTerm) or term.right.contains_term_type(FulltextTerm)) and term.op == 'or' and term.find_term(lambda x: not isinstance(x, FulltextTerm) and (not isinstance(x, CombinedTerm))):
return (AllTerm(), term)
left = isinstance(term.left, FulltextTerm)
right = isinstance(term.right, FulltextTerm)
if left and right:
return (term, AllTerm())
elif left:
<DeepExtract>
if not term.right.contains_term_type(FulltextTerm):
(ft, remaining) = (AllTerm(), term.right)
elif isinstance(term.right, FulltextTerm):
(ft, remaining) = (term.right, AllTerm())
elif isinstance(term.right, CombinedTerm):
if (term.right.left.contains_term_type(FulltextTerm) or term.right.right.contains_term_type(FulltextTerm)) and term.right.op == 'or' and term.right.find_term(lambda x: not isinstance(x, FulltextTerm) and (not isinstance(x, CombinedTerm))):
(ft, remaining) = (AllTerm(), term.right)
left = isinstance(term.right.left, FulltextTerm)
right = isinstance(term.right.right, FulltextTerm)
if left and right:
(ft, remaining) = (term.right, AllTerm())
elif left:
(ft, remaining) = combine_fulltext(term.right.right)
(ft, remaining) = (ft.combine(term.right.op, term.right.left), remaining)
elif right:
(ft, remaining) = combine_fulltext(term.right.left)
(ft, remaining) = (ft.combine(term.right.op, term.right.right), remaining)
else:
(lf, remaining_left) = combine_fulltext(term.right.right)
(rf, remaining_right) = combine_fulltext(term.right.left)
(ft, remaining) = (lf.combine(term.right.op, rf), remaining_left.combine(term.right.op, remaining_right))
elif isinstance(term.right, NotTerm):
(ft, remaining) = combine_fulltext(term.right.term)
(ft, remaining) = (NotTerm(ft), remaining if isinstance(remaining, AllTerm) else NotTerm(remaining))
elif isinstance(term.right, MergeTerm):
(ft, remaining) = combine_fulltext(term.right.pre_filter)
(ft, remaining) = (ft, evolve(term.right, pre_filter=remaining))
else:
raise AttributeError(f'Can not handle term of type: {type(term.right)} ({term.right})')
</DeepExtract>
return (ft.combine(term.op, term.left), remaining)
elif right:
<DeepExtract>
if not term.left.contains_term_type(FulltextTerm):
(ft, remaining) = (AllTerm(), term.left)
elif isinstance(term.left, FulltextTerm):
(ft, remaining) = (term.left, AllTerm())
elif isinstance(term.left, CombinedTerm):
if (term.left.left.contains_term_type(FulltextTerm) or term.left.right.contains_term_type(FulltextTerm)) and term.left.op == 'or' and term.left.find_term(lambda x: not isinstance(x, FulltextTerm) and (not isinstance(x, CombinedTerm))):
(ft, remaining) = (AllTerm(), term.left)
left = isinstance(term.left.left, FulltextTerm)
right = isinstance(term.left.right, FulltextTerm)
if left and right:
(ft, remaining) = (term.left, AllTerm())
elif left:
(ft, remaining) = combine_fulltext(term.left.right)
(ft, remaining) = (ft.combine(term.left.op, term.left.left), remaining)
elif right:
(ft, remaining) = combine_fulltext(term.left.left)
(ft, remaining) = (ft.combine(term.left.op, term.left.right), remaining)
else:
(lf, remaining_left) = combine_fulltext(term.left.right)
(rf, remaining_right) = combine_fulltext(term.left.left)
(ft, remaining) = (lf.combine(term.left.op, rf), remaining_left.combine(term.left.op, remaining_right))
elif isinstance(term.left, NotTerm):
(ft, remaining) = combine_fulltext(term.left.term)
(ft, remaining) = (NotTerm(ft), remaining if isinstance(remaining, AllTerm) else NotTerm(remaining))
elif isinstance(term.left, MergeTerm):
(ft, remaining) = combine_fulltext(term.left.pre_filter)
(ft, remaining) = (ft, evolve(term.left, pre_filter=remaining))
else:
raise AttributeError(f'Can not handle term of type: {type(term.left)} ({term.left})')
</DeepExtract>
return (ft.combine(term.op, term.right), remaining)
else:
<DeepExtract>
if not term.right.contains_term_type(FulltextTerm):
(lf, remaining_left) = (AllTerm(), term.right)
elif isinstance(term.right, FulltextTerm):
(lf, remaining_left) = (term.right, AllTerm())
elif isinstance(term.right, CombinedTerm):
if (term.right.left.contains_term_type(FulltextTerm) or term.right.right.contains_term_type(FulltextTerm)) and term.right.op == 'or' and term.right.find_term(lambda x: not isinstance(x, FulltextTerm) and (not isinstance(x, CombinedTerm))):
(lf, remaining_left) = (AllTerm(), term.right)
left = isinstance(term.right.left, FulltextTerm)
right = isinstance(term.right.right, FulltextTerm)
if left and right:
(lf, remaining_left) = (term.right, AllTerm())
elif left:
(ft, remaining) = combine_fulltext(term.right.right)
(lf, remaining_left) = (ft.combine(term.right.op, term.right.left), remaining)
elif right:
(ft, remaining) = combine_fulltext(term.right.left)
(lf, remaining_left) = (ft.combine(term.right.op, term.right.right), remaining)
else:
(lf, remaining_left) = combine_fulltext(term.right.right)
(rf, remaining_right) = combine_fulltext(term.right.left)
(lf, remaining_left) = (lf.combine(term.right.op, rf), remaining_left.combine(term.right.op, remaining_right))
elif isinstance(term.right, NotTerm):
(ft, remaining) = combine_fulltext(term.right.term)
(lf, remaining_left) = (NotTerm(ft), remaining if isinstance(remaining, AllTerm) else NotTerm(remaining))
elif isinstance(term.right, MergeTerm):
(ft, remaining) = combine_fulltext(term.right.pre_filter)
(lf, remaining_left) = (ft, evolve(term.right, pre_filter=remaining))
else:
raise AttributeError(f'Can not handle term of type: {type(term.right)} ({term.right})')
</DeepExtract>
<DeepExtract>
if not term.left.contains_term_type(FulltextTerm):
(rf, remaining_right) = (AllTerm(), term.left)
elif isinstance(term.left, FulltextTerm):
(rf, remaining_right) = (term.left, AllTerm())
elif isinstance(term.left, CombinedTerm):
if (term.left.left.contains_term_type(FulltextTerm) or term.left.right.contains_term_type(FulltextTerm)) and term.left.op == 'or' and term.left.find_term(lambda x: not isinstance(x, FulltextTerm) and (not isinstance(x, CombinedTerm))):
(rf, remaining_right) = (AllTerm(), term.left)
left = isinstance(term.left.left, FulltextTerm)
right = isinstance(term.left.right, FulltextTerm)
if left and right:
(rf, remaining_right) = (term.left, AllTerm())
elif left:
(ft, remaining) = combine_fulltext(term.left.right)
(rf, remaining_right) = (ft.combine(term.left.op, term.left.left), remaining)
elif right:
(ft, remaining) = combine_fulltext(term.left.left)
(rf, remaining_right) = (ft.combine(term.left.op, term.left.right), remaining)
else:
(lf, remaining_left) = combine_fulltext(term.left.right)
(rf, remaining_right) = combine_fulltext(term.left.left)
(rf, remaining_right) = (lf.combine(term.left.op, rf), remaining_left.combine(term.left.op, remaining_right))
elif isinstance(term.left, NotTerm):
(ft, remaining) = combine_fulltext(term.left.term)
(rf, remaining_right) = (NotTerm(ft), remaining if isinstance(remaining, AllTerm) else NotTerm(remaining))
elif isinstance(term.left, MergeTerm):
(ft, remaining) = combine_fulltext(term.left.pre_filter)
(rf, remaining_right) = (ft, evolve(term.left, pre_filter=remaining))
else:
raise AttributeError(f'Can not handle term of type: {type(term.left)} ({term.left})')
</DeepExtract>
return (lf.combine(term.op, rf), remaining_left.combine(term.op, remaining_right))
elif isinstance(term, NotTerm):
<DeepExtract>
if not term.term.contains_term_type(FulltextTerm):
(ft, remaining) = (AllTerm(), term.term)
elif isinstance(term.term, FulltextTerm):
(ft, remaining) = (term.term, AllTerm())
elif isinstance(term.term, CombinedTerm):
if (term.term.left.contains_term_type(FulltextTerm) or term.term.right.contains_term_type(FulltextTerm)) and term.term.op == 'or' and term.term.find_term(lambda x: not isinstance(x, FulltextTerm) and (not isinstance(x, CombinedTerm))):
(ft, remaining) = (AllTerm(), term.term)
left = isinstance(term.term.left, FulltextTerm)
right = isinstance(term.term.right, FulltextTerm)
if left and right:
(ft, remaining) = (term.term, AllTerm())
elif left:
(ft, remaining) = combine_fulltext(term.term.right)
(ft, remaining) = (ft.combine(term.term.op, term.term.left), remaining)
elif right:
(ft, remaining) = combine_fulltext(term.term.left)
(ft, remaining) = (ft.combine(term.term.op, term.term.right), remaining)
else:
(lf, remaining_left) = combine_fulltext(term.term.right)
(rf, remaining_right) = combine_fulltext(term.term.left)
(ft, remaining) = (lf.combine(term.term.op, rf), remaining_left.combine(term.term.op, remaining_right))
elif isinstance(term.term, NotTerm):
(ft, remaining) = combine_fulltext(term.term.term)
(ft, remaining) = (NotTerm(ft), remaining if isinstance(remaining, AllTerm) else NotTerm(remaining))
elif isinstance(term.term, MergeTerm):
(ft, remaining) = combine_fulltext(term.term.pre_filter)
(ft, remaining) = (ft, evolve(term.term, pre_filter=remaining))
else:
raise AttributeError(f'Can not handle term of type: {type(term.term)} ({term.term})')
</DeepExtract>
return (NotTerm(ft), remaining if isinstance(remaining, AllTerm) else NotTerm(remaining))
elif isinstance(term, MergeTerm):
<DeepExtract>
if not term.pre_filter.contains_term_type(FulltextTerm):
(ft, remaining) = (AllTerm(), term.pre_filter)
elif isinstance(term.pre_filter, FulltextTerm):
(ft, remaining) = (term.pre_filter, AllTerm())
elif isinstance(term.pre_filter, CombinedTerm):
if (term.pre_filter.left.contains_term_type(FulltextTerm) or term.pre_filter.right.contains_term_type(FulltextTerm)) and term.pre_filter.op == 'or' and term.pre_filter.find_term(lambda x: not isinstance(x, FulltextTerm) and (not isinstance(x, CombinedTerm))):
(ft, remaining) = (AllTerm(), term.pre_filter)
left = isinstance(term.pre_filter.left, FulltextTerm)
right = isinstance(term.pre_filter.right, FulltextTerm)
if left and right:
(ft, remaining) = (term.pre_filter, AllTerm())
elif left:
(ft, remaining) = combine_fulltext(term.pre_filter.right)
(ft, remaining) = (ft.combine(term.pre_filter.op, term.pre_filter.left), remaining)
elif right:
(ft, remaining) = combine_fulltext(term.pre_filter.left)
(ft, remaining) = (ft.combine(term.pre_filter.op, term.pre_filter.right), remaining)
else:
(lf, remaining_left) = combine_fulltext(term.pre_filter.right)
(rf, remaining_right) = combine_fulltext(term.pre_filter.left)
(ft, remaining) = (lf.combine(term.pre_filter.op, rf), remaining_left.combine(term.pre_filter.op, remaining_right))
elif isinstance(term.pre_filter, NotTerm):
(ft, remaining) = combine_fulltext(term.pre_filter.term)
(ft, remaining) = (NotTerm(ft), remaining if isinstance(remaining, AllTerm) else NotTerm(remaining))
elif isinstance(term.pre_filter, MergeTerm):
(ft, remaining) = combine_fulltext(term.pre_filter.pre_filter)
(ft, remaining) = (ft, evolve(term.pre_filter, pre_filter=remaining))
else:
raise AttributeError(f'Can not handle term of type: {type(term.pre_filter)} ({term.pre_filter})')
</DeepExtract>
return (ft, evolve(term, pre_filter=remaining))
else:
raise AttributeError(f'Can not handle term of type: {type(term)} ({term})')
|
def combine_fulltext(term: Term) -> Tuple[Term, Term]:
if not term.contains_term_type(FulltextTerm):
return (AllTerm(), term)
elif isinstance(term, FulltextTerm):
return (term, AllTerm())
elif isinstance(term, CombinedTerm):
if (term.left.contains_term_type(FulltextTerm) or term.right.contains_term_type(FulltextTerm)) and term.op == 'or' and term.find_term(lambda x: not isinstance(x, FulltextTerm) and (not isinstance(x, CombinedTerm))):
return (AllTerm(), term)
left = isinstance(term.left, FulltextTerm)
right = isinstance(term.right, FulltextTerm)
if left and right:
return (term, AllTerm())
elif left:
if not term.right.contains_term_type(FulltextTerm):
(ft, remaining) = (AllTerm(), term.right)
elif isinstance(term.right, FulltextTerm):
(ft, remaining) = (term.right, AllTerm())
elif isinstance(term.right, CombinedTerm):
if (term.right.left.contains_term_type(FulltextTerm) or term.right.right.contains_term_type(FulltextTerm)) and term.right.op == 'or' and term.right.find_term(lambda x: not isinstance(x, FulltextTerm) and (not isinstance(x, CombinedTerm))):
(ft, remaining) = (AllTerm(), term.right)
left = isinstance(term.right.left, FulltextTerm)
right = isinstance(term.right.right, FulltextTerm)
if left and right:
(ft, remaining) = (term.right, AllTerm())
elif left:
(ft, remaining) = combine_fulltext(term.right.right)
(ft, remaining) = (ft.combine(term.right.op, term.right.left), remaining)
elif right:
(ft, remaining) = combine_fulltext(term.right.left)
(ft, remaining) = (ft.combine(term.right.op, term.right.right), remaining)
else:
(lf, remaining_left) = combine_fulltext(term.right.right)
(rf, remaining_right) = combine_fulltext(term.right.left)
(ft, remaining) = (lf.combine(term.right.op, rf), remaining_left.combine(term.right.op, remaining_right))
elif isinstance(term.right, NotTerm):
(ft, remaining) = combine_fulltext(term.right.term)
(ft, remaining) = (NotTerm(ft), remaining if isinstance(remaining, AllTerm) else NotTerm(remaining))
elif isinstance(term.right, MergeTerm):
(ft, remaining) = combine_fulltext(term.right.pre_filter)
(ft, remaining) = (ft, evolve(term.right, pre_filter=remaining))
else:
raise AttributeError(f'Can not handle term of type: {type(term.right)} ({term.right})')
return (ft.combine(term.op, term.left), remaining)
elif right:
if not term.left.contains_term_type(FulltextTerm):
(ft, remaining) = (AllTerm(), term.left)
elif isinstance(term.left, FulltextTerm):
(ft, remaining) = (term.left, AllTerm())
elif isinstance(term.left, CombinedTerm):
if (term.left.left.contains_term_type(FulltextTerm) or term.left.right.contains_term_type(FulltextTerm)) and term.left.op == 'or' and term.left.find_term(lambda x: not isinstance(x, FulltextTerm) and (not isinstance(x, CombinedTerm))):
(ft, remaining) = (AllTerm(), term.left)
left = isinstance(term.left.left, FulltextTerm)
right = isinstance(term.left.right, FulltextTerm)
if left and right:
(ft, remaining) = (term.left, AllTerm())
elif left:
(ft, remaining) = combine_fulltext(term.left.right)
(ft, remaining) = (ft.combine(term.left.op, term.left.left), remaining)
elif right:
(ft, remaining) = combine_fulltext(term.left.left)
(ft, remaining) = (ft.combine(term.left.op, term.left.right), remaining)
else:
(lf, remaining_left) = combine_fulltext(term.left.right)
(rf, remaining_right) = combine_fulltext(term.left.left)
(ft, remaining) = (lf.combine(term.left.op, rf), remaining_left.combine(term.left.op, remaining_right))
elif isinstance(term.left, NotTerm):
(ft, remaining) = combine_fulltext(term.left.term)
(ft, remaining) = (NotTerm(ft), remaining if isinstance(remaining, AllTerm) else NotTerm(remaining))
elif isinstance(term.left, MergeTerm):
(ft, remaining) = combine_fulltext(term.left.pre_filter)
(ft, remaining) = (ft, evolve(term.left, pre_filter=remaining))
else:
raise AttributeError(f'Can not handle term of type: {type(term.left)} ({term.left})')
return (ft.combine(term.op, term.right), remaining)
else:
if not term.right.contains_term_type(FulltextTerm):
(lf, remaining_left) = (AllTerm(), term.right)
elif isinstance(term.right, FulltextTerm):
(lf, remaining_left) = (term.right, AllTerm())
elif isinstance(term.right, CombinedTerm):
if (term.right.left.contains_term_type(FulltextTerm) or term.right.right.contains_term_type(FulltextTerm)) and term.right.op == 'or' and term.right.find_term(lambda x: not isinstance(x, FulltextTerm) and (not isinstance(x, CombinedTerm))):
(lf, remaining_left) = (AllTerm(), term.right)
left = isinstance(term.right.left, FulltextTerm)
right = isinstance(term.right.right, FulltextTerm)
if left and right:
(lf, remaining_left) = (term.right, AllTerm())
elif left:
(ft, remaining) = combine_fulltext(term.right.right)
(lf, remaining_left) = (ft.combine(term.right.op, term.right.left), remaining)
elif right:
(ft, remaining) = combine_fulltext(term.right.left)
(lf, remaining_left) = (ft.combine(term.right.op, term.right.right), remaining)
else:
(lf, remaining_left) = combine_fulltext(term.right.right)
(rf, remaining_right) = combine_fulltext(term.right.left)
(lf, remaining_left) = (lf.combine(term.right.op, rf), remaining_left.combine(term.right.op, remaining_right))
elif isinstance(term.right, NotTerm):
(ft, remaining) = combine_fulltext(term.right.term)
(lf, remaining_left) = (NotTerm(ft), remaining if isinstance(remaining, AllTerm) else NotTerm(remaining))
elif isinstance(term.right, MergeTerm):
(ft, remaining) = combine_fulltext(term.right.pre_filter)
(lf, remaining_left) = (ft, evolve(term.right, pre_filter=remaining))
else:
raise AttributeError(f'Can not handle term of type: {type(term.right)} ({term.right})')
if not term.left.contains_term_type(FulltextTerm):
(rf, remaining_right) = (AllTerm(), term.left)
elif isinstance(term.left, FulltextTerm):
(rf, remaining_right) = (term.left, AllTerm())
elif isinstance(term.left, CombinedTerm):
if (term.left.left.contains_term_type(FulltextTerm) or term.left.right.contains_term_type(FulltextTerm)) and term.left.op == 'or' and term.left.find_term(lambda x: not isinstance(x, FulltextTerm) and (not isinstance(x, CombinedTerm))):
(rf, remaining_right) = (AllTerm(), term.left)
left = isinstance(term.left.left, FulltextTerm)
right = isinstance(term.left.right, FulltextTerm)
if left and right:
(rf, remaining_right) = (term.left, AllTerm())
elif left:
(ft, remaining) = combine_fulltext(term.left.right)
(rf, remaining_right) = (ft.combine(term.left.op, term.left.left), remaining)
elif right:
(ft, remaining) = combine_fulltext(term.left.left)
(rf, remaining_right) = (ft.combine(term.left.op, term.left.right), remaining)
else:
(lf, remaining_left) = combine_fulltext(term.left.right)
(rf, remaining_right) = combine_fulltext(term.left.left)
(rf, remaining_right) = (lf.combine(term.left.op, rf), remaining_left.combine(term.left.op, remaining_right))
elif isinstance(term.left, NotTerm):
(ft, remaining) = combine_fulltext(term.left.term)
(rf, remaining_right) = (NotTerm(ft), remaining if isinstance(remaining, AllTerm) else NotTerm(remaining))
elif isinstance(term.left, MergeTerm):
(ft, remaining) = combine_fulltext(term.left.pre_filter)
(rf, remaining_right) = (ft, evolve(term.left, pre_filter=remaining))
else:
raise AttributeError(f'Can not handle term of type: {type(term.left)} ({term.left})')
return (lf.combine(term.op, rf), remaining_left.combine(term.op, remaining_right))
elif isinstance(term, NotTerm):
if not term.term.contains_term_type(FulltextTerm):
(ft, remaining) = (AllTerm(), term.term)
elif isinstance(term.term, FulltextTerm):
(ft, remaining) = (term.term, AllTerm())
elif isinstance(term.term, CombinedTerm):
if (term.term.left.contains_term_type(FulltextTerm) or term.term.right.contains_term_type(FulltextTerm)) and term.term.op == 'or' and term.term.find_term(lambda x: not isinstance(x, FulltextTerm) and (not isinstance(x, CombinedTerm))):
(ft, remaining) = (AllTerm(), term.term)
left = isinstance(term.term.left, FulltextTerm)
right = isinstance(term.term.right, FulltextTerm)
if left and right:
(ft, remaining) = (term.term, AllTerm())
elif left:
(ft, remaining) = combine_fulltext(term.term.right)
(ft, remaining) = (ft.combine(term.term.op, term.term.left), remaining)
elif right:
(ft, remaining) = combine_fulltext(term.term.left)
(ft, remaining) = (ft.combine(term.term.op, term.term.right), remaining)
else:
(lf, remaining_left) = combine_fulltext(term.term.right)
(rf, remaining_right) = combine_fulltext(term.term.left)
(ft, remaining) = (lf.combine(term.term.op, rf), remaining_left.combine(term.term.op, remaining_right))
elif isinstance(term.term, NotTerm):
(ft, remaining) = combine_fulltext(term.term.term)
(ft, remaining) = (NotTerm(ft), remaining if isinstance(remaining, AllTerm) else NotTerm(remaining))
elif isinstance(term.term, MergeTerm):
(ft, remaining) = combine_fulltext(term.term.pre_filter)
(ft, remaining) = (ft, evolve(term.term, pre_filter=remaining))
else:
raise AttributeError(f'Can not handle term of type: {type(term.term)} ({term.term})')
return (NotTerm(ft), remaining if isinstance(remaining, AllTerm) else NotTerm(remaining))
elif isinstance(term, MergeTerm):
if not term.pre_filter.contains_term_type(FulltextTerm):
(ft, remaining) = (AllTerm(), term.pre_filter)
elif isinstance(term.pre_filter, FulltextTerm):
(ft, remaining) = (term.pre_filter, AllTerm())
elif isinstance(term.pre_filter, CombinedTerm):
if (term.pre_filter.left.contains_term_type(FulltextTerm) or term.pre_filter.right.contains_term_type(FulltextTerm)) and term.pre_filter.op == 'or' and term.pre_filter.find_term(lambda x: not isinstance(x, FulltextTerm) and (not isinstance(x, CombinedTerm))):
(ft, remaining) = (AllTerm(), term.pre_filter)
left = isinstance(term.pre_filter.left, FulltextTerm)
right = isinstance(term.pre_filter.right, FulltextTerm)
if left and right:
(ft, remaining) = (term.pre_filter, AllTerm())
elif left:
(ft, remaining) = combine_fulltext(term.pre_filter.right)
(ft, remaining) = (ft.combine(term.pre_filter.op, term.pre_filter.left), remaining)
elif right:
(ft, remaining) = combine_fulltext(term.pre_filter.left)
(ft, remaining) = (ft.combine(term.pre_filter.op, term.pre_filter.right), remaining)
else:
(lf, remaining_left) = combine_fulltext(term.pre_filter.right)
(rf, remaining_right) = combine_fulltext(term.pre_filter.left)
(ft, remaining) = (lf.combine(term.pre_filter.op, rf), remaining_left.combine(term.pre_filter.op, remaining_right))
elif isinstance(term.pre_filter, NotTerm):
(ft, remaining) = combine_fulltext(term.pre_filter.term)
(ft, remaining) = (NotTerm(ft), remaining if isinstance(remaining, AllTerm) else NotTerm(remaining))
elif isinstance(term.pre_filter, MergeTerm):
(ft, remaining) = combine_fulltext(term.pre_filter.pre_filter)
(ft, remaining) = (ft, evolve(term.pre_filter, pre_filter=remaining))
else:
raise AttributeError(f'Can not handle term of type: {type(term.pre_filter)} ({term.pre_filter})')
return (ft, evolve(term, pre_filter=remaining))
else:
raise AttributeError(f'Can not handle term of type: {type(term)} ({term})')
|
cloudkeeper
|
positive
|
def test_records_serializers_dc(app, test_records_data):
with app.app_context():
<DeepExtract>
creator = create_user('creator')
(_, pid, record) = create_record(test_records_data[0], creator)
record['_files'] = [{'bucket': '15163455-650b-45e5-9b9f-6cf2ef70a08f', 'checksum': 'md5:4653e51dc9b73e020167299ac607e0e1', 'key': 'file1.pptx', 'size': 26289470, 'version_id': '389fff57-e6d7-4434-9a44-ca17297be22f', 'ePIC_PID': 'http://hdl.handle.net/1234/15163455-650b-45e5-9b9f-6cf2ef70a08f'}, {'bucket': '51163455-650b-45e5-9b9f-6cf2ef70a08f', 'checksum': 'md5:4adfe51dc9b73e020167299ac607e0e1', 'key': 'file2.pptx', 'size': 1, 'version_id': '698fff57-e6d7-4434-9a44-ca17297be22f', 'ePIC_PID': 'http://hdl.handle.net/1234/51163455-650b-45e5-9b9f-6cf2ef70a08f'}]
(pid, record) = (pid, record)
</DeepExtract>
rec = {'_source': RecordIndexer._prepare_record(record, 'records', 'record').copy(), '_version': record.revision_id}
dcxml = oaipmh_oai_dc(pid=pid, record=rec)
namespaces = {'dc': 'http://purl.org/dc/elements/1.1/'}
identifiers = dcxml.xpath('//dc:identifier', namespaces=namespaces)
titles = dcxml.xpath('//dc:title', namespaces=namespaces)
creators = dcxml.xpath('//dc:creator', namespaces=namespaces)
descriptions = dcxml.xpath('//dc:description', namespaces=namespaces)
subjects = dcxml.xpath('//dc:subject', namespaces=namespaces)
contributors = dcxml.xpath('//dc:contributor', namespaces=namespaces)
rights = dcxml.xpath('//dc:rights', namespaces=namespaces)
publishers = dcxml.xpath('//dc:publisher', namespaces=namespaces)
languages = dcxml.xpath('//dc:language', namespaces=namespaces)
types = dcxml.xpath('//dc:type', namespaces=namespaces)
assert identifiers
for x in identifiers:
assert x.text.endswith(pid.pid_value)
assert [x.text for x in titles] == [r['title'] for r in record['titles']]
assert [x.text for x in creators] == [r['creator_name'] for r in record['creators']]
assert [x.text for x in descriptions] == [r['description'] for r in record['descriptions']]
assert [x.text for x in types] == [r['resource_type_general'] for r in record['resource_types']]
assert [x.text for x in contributors] == [r['contributor_name'] for r in record['contributors']]
assert [x.text for x in publishers] == [record['publisher']]
assert [x.text for x in languages] == [record['language']]
assert [x.text for x in subjects] == record.get('keywords')
rights = [x.text for x in rights]
access = 'info:eu-repo/semantics/closedAccess'
if record['open_access']:
access = 'info:eu-repo/semantics/openAccess'
assert access in rights
license = record.get('license', {}).get('license')
if license:
assert license in rights
|
def test_records_serializers_dc(app, test_records_data):
with app.app_context():
creator = create_user('creator')
(_, pid, record) = create_record(test_records_data[0], creator)
record['_files'] = [{'bucket': '15163455-650b-45e5-9b9f-6cf2ef70a08f', 'checksum': 'md5:4653e51dc9b73e020167299ac607e0e1', 'key': 'file1.pptx', 'size': 26289470, 'version_id': '389fff57-e6d7-4434-9a44-ca17297be22f', 'ePIC_PID': 'http://hdl.handle.net/1234/15163455-650b-45e5-9b9f-6cf2ef70a08f'}, {'bucket': '51163455-650b-45e5-9b9f-6cf2ef70a08f', 'checksum': 'md5:4adfe51dc9b73e020167299ac607e0e1', 'key': 'file2.pptx', 'size': 1, 'version_id': '698fff57-e6d7-4434-9a44-ca17297be22f', 'ePIC_PID': 'http://hdl.handle.net/1234/51163455-650b-45e5-9b9f-6cf2ef70a08f'}]
(pid, record) = (pid, record)
rec = {'_source': RecordIndexer._prepare_record(record, 'records', 'record').copy(), '_version': record.revision_id}
dcxml = oaipmh_oai_dc(pid=pid, record=rec)
namespaces = {'dc': 'http://purl.org/dc/elements/1.1/'}
identifiers = dcxml.xpath('//dc:identifier', namespaces=namespaces)
titles = dcxml.xpath('//dc:title', namespaces=namespaces)
creators = dcxml.xpath('//dc:creator', namespaces=namespaces)
descriptions = dcxml.xpath('//dc:description', namespaces=namespaces)
subjects = dcxml.xpath('//dc:subject', namespaces=namespaces)
contributors = dcxml.xpath('//dc:contributor', namespaces=namespaces)
rights = dcxml.xpath('//dc:rights', namespaces=namespaces)
publishers = dcxml.xpath('//dc:publisher', namespaces=namespaces)
languages = dcxml.xpath('//dc:language', namespaces=namespaces)
types = dcxml.xpath('//dc:type', namespaces=namespaces)
assert identifiers
for x in identifiers:
assert x.text.endswith(pid.pid_value)
assert [x.text for x in titles] == [r['title'] for r in record['titles']]
assert [x.text for x in creators] == [r['creator_name'] for r in record['creators']]
assert [x.text for x in descriptions] == [r['description'] for r in record['descriptions']]
assert [x.text for x in types] == [r['resource_type_general'] for r in record['resource_types']]
assert [x.text for x in contributors] == [r['contributor_name'] for r in record['contributors']]
assert [x.text for x in publishers] == [record['publisher']]
assert [x.text for x in languages] == [record['language']]
assert [x.text for x in subjects] == record.get('keywords')
rights = [x.text for x in rights]
access = 'info:eu-repo/semantics/closedAccess'
if record['open_access']:
access = 'info:eu-repo/semantics/openAccess'
assert access in rights
license = record.get('license', {}).get('license')
if license:
assert license in rights
|
b2share
|
positive
|
def main(wf):
if not adb_path:
wf.warn_empty(title='adb not found', subtitle="Please config 'adb_path' in workflow settings")
else:
<DeepExtract>
arg = wf.args[0] if wf.args else ''
devices = run_script(adb_path + " devices -l | sed -n '1!p' | tr -s ' '")
devices = devices.rstrip().split('\n')
log.debug('{} adb device(s) found'.format(len(devices)))
(items, wifiDevices) = get_device_items(arg, devices)
if wifiDevices:
run_in_background('update_wifi_history', ['/usr/bin/python3', wf.workflowfile('update_wifi_history.py'), 'add', pickle.dumps(wifiDevices)])
log.error('Save history wifi devices : count : {0}'.format(len(wifiDevices)))
for item in items:
name = item.get('serial')
log.debug(arg + ' ' + name)
if arg == '' or arg.lower() in name.lower():
it = wf.add_item(title=item.title, uid=item.title, autocomplete=('', item.autocomplete)[item.valid], valid=item.valid, arg=item.arg, subtitle=item.subtitle)
it.setvar('status', item.get('status'))
it.setvar('full_info', item.subtitle)
if item.valid:
it.setvar('device_api', item.get('device_api'))
it.setvar('ro.product.manufacturer', item.get('ro.product.manufacturer'))
it.setvar('serial', name)
it.setvar('name', item.get('name'))
if item.subtitle and (not re.match(regexIp + ':5555', name)) and (not name.startswith('emulator-')):
cmd_ip = adb_path + ' -s ' + name + " shell ip -f inet addr show wlan0 | grep inet | tr -s ' ' | awk '{print $2}'"
ip = run_script(cmd_ip)
if '/' in ip and re.match(regexIp, ip.split('/')[0]):
it.setvar('ip', ip.strip('\n'))
it.add_modifier('cmd', subtitle=ip)
if item.get('build_number'):
it.add_modifier('alt', subtitle=item.get('build_number'))
if name.startswith('emulator-'):
log.debug(item.subtitle)
name = hashlib.md5(item.subtitle.encode('utf-8')).hexdigest()
it.setvar('his_tag', name)
lastFuncs = wf.cached_data('last_func:' + name, max_age=0)
if lastFuncs and len(lastFuncs) > 0:
log.debug(lastFuncs)
last_func = lastFuncs[len(lastFuncs) - 1]
mod = it.add_modifier('ctrl', subtitle='run last command {}'.format(last_func))
mod.setvar('last_func', last_func)
mod = it.add_modifier('fn', subtitle='show command history', arg='cmd_history')
mod.setvar('function', 'cmd_history')
if len(lastFuncs) > 1:
second_last_func = lastFuncs[len(lastFuncs) - 2]
mod = it.add_modifier('shift', subtitle='run 2nd last command {}'.format(second_last_func))
mod.setvar('last_func', second_last_func)
if arg and ('connect '.startswith(arg.lower()) or re.match(regexConnect, arg)):
localIpWithMask = run_script('ifconfig | grep -A 1 "en" | grep broadcast | cut -d " " -f 2,4 | tr "\\n" " "')
localIp = localIpWithMask.split(' ')[0]
rawMask = localIpWithMask.split(' ')[1].count('f') * 4
targetIp = arg[8:]
if localIp:
log.debug('history ' + localIp)
history = wf.stored_data('wifi_history_py3')
log.debug(history)
counter = 0
valid = True if re.match('^' + regexIp + '(:|:5|:55|:555|:5555)?$', targetIp) else False
if valid:
subtitle = 'adb connect ' + targetIp if targetIp else ''
it = wf.add_item(title='Connect over WiFi', valid=valid, arg='adb_connect', subtitle=subtitle)
m = it.add_modifier('cmd', subtitle='Remove all connection histories', arg='adb_connect_remove')
m.setvar('extra', 'all')
it.setvar('ip', targetIp.strip('\n'))
if history:
historyWifiDevices = pickle.loads(history)
currentDevices = []
for item in items:
currentDevices.append(item.title.strip())
for historyWifiDevice in historyWifiDevices:
if not historyWifiDevice.title in currentDevices:
deviceIp = historyWifiDevice.title.split(':')[0]
same_network = False
if hasattr(historyWifiDevice, 'mask') and historyWifiDevice.mask:
same_network = ipaddress.ip_network(u'%s/%d' % (localIp, rawMask), False) == ipaddress.ip_network(u'%s/%s' % (deviceIp, historyWifiDevice.mask), False)
else:
same_network = ipaddress.ip_network(u'%s/%d' % (localIp, rawMask), False) == ipaddress.ip_network(u'%s/%d' % (deviceIp, rawMask), False)
if not same_network:
continue
if arg and historyWifiDevice.title.find(targetIp) == -1:
continue
log.debug('history item title ' + historyWifiDevice.title)
title = 'Connect over WiFi'
if historyWifiDevice.subtitle:
title = 'Connect ' + historyWifiDevice.subtitle.split('- ', 1)[1].split(', ', 1)[0] + ' over WiFi'
it = wf.add_item(title=title, valid=True, arg='adb_connect', autocomplete='connect ' + historyWifiDevice.title, subtitle=historyWifiDevice.title, uid=(historyWifiDevice.title, '')[valid])
it.setvar('ip', historyWifiDevice.title)
it.add_modifier('cmd', 'Remove connection history with {0}'.format(historyWifiDevice.title), arg='adb_connect_remove')
it.add_modifier('alt', historyWifiDevice.subtitle)
counter += 1
if not valid and counter == 0:
if not targetIp or re.match(regexIpInput, targetIp):
subtitle = 'adb connect ' + targetIp if targetIp else ''
if not targetIp:
it = wf.add_item(title='Connect over WiFi', valid=False, arg='adb_connect', autocomplete='connect ', subtitle=subtitle)
else:
it = wf.add_item(title='Connect over WiFi', valid=False, arg='adb_connect', subtitle=subtitle)
if wifiDevices:
log.debug(wifiDevices[0].title)
if arg and ('disconnect '.startswith(arg.lower()) or re.match('^disconnect .*', arg)):
targetIp = arg[11:]
if wifiDevices:
for wifiDevice in wifiDevices:
it = wf.add_item(title='Disconnect from WiFi', uid=wifiDevice.title, valid=True, arg='adb_disconnect', autocomplete='disconnect ', subtitle=wifiDevice.title)
ip = wifiDevice.title
if '[OFFLINE]' in ip:
ip = ip.split(' ')[0]
it.setvar('ip', ip)
elif targetIp:
it = wf.add_item(title='Disconnect from WiFi', uid='adb_disconnect', valid=True, arg='adb_disconnect', autocomplete='disconnect ', subtitle='adb disconnect ' + targetIp)
it.setvar('ip', targetIp)
if arg and ('restart'.startswith(arg.lower()) or 'kill-server'.startswith(arg.lower()) or 'start-server'.startswith(arg.lower())) or (len(items) == 0 and (len(arg) == 0 or (not arg.lower().startswith('connect') and (not arg.lower().startswith('disconnect'))))):
wf.add_item(title='Restart adb', valid=True, arg='restart_adb', uid='restart_adb')
</DeepExtract>
wf.send_feedback()
|
def main(wf):
if not adb_path:
wf.warn_empty(title='adb not found', subtitle="Please config 'adb_path' in workflow settings")
else:
arg = wf.args[0] if wf.args else ''
devices = run_script(adb_path + " devices -l | sed -n '1!p' | tr -s ' '")
devices = devices.rstrip().split('\n')
log.debug('{} adb device(s) found'.format(len(devices)))
(items, wifiDevices) = get_device_items(arg, devices)
if wifiDevices:
run_in_background('update_wifi_history', ['/usr/bin/python3', wf.workflowfile('update_wifi_history.py'), 'add', pickle.dumps(wifiDevices)])
log.error('Save history wifi devices : count : {0}'.format(len(wifiDevices)))
for item in items:
name = item.get('serial')
log.debug(arg + ' ' + name)
if arg == '' or arg.lower() in name.lower():
it = wf.add_item(title=item.title, uid=item.title, autocomplete=('', item.autocomplete)[item.valid], valid=item.valid, arg=item.arg, subtitle=item.subtitle)
it.setvar('status', item.get('status'))
it.setvar('full_info', item.subtitle)
if item.valid:
it.setvar('device_api', item.get('device_api'))
it.setvar('ro.product.manufacturer', item.get('ro.product.manufacturer'))
it.setvar('serial', name)
it.setvar('name', item.get('name'))
if item.subtitle and (not re.match(regexIp + ':5555', name)) and (not name.startswith('emulator-')):
cmd_ip = adb_path + ' -s ' + name + " shell ip -f inet addr show wlan0 | grep inet | tr -s ' ' | awk '{print $2}'"
ip = run_script(cmd_ip)
if '/' in ip and re.match(regexIp, ip.split('/')[0]):
it.setvar('ip', ip.strip('\n'))
it.add_modifier('cmd', subtitle=ip)
if item.get('build_number'):
it.add_modifier('alt', subtitle=item.get('build_number'))
if name.startswith('emulator-'):
log.debug(item.subtitle)
name = hashlib.md5(item.subtitle.encode('utf-8')).hexdigest()
it.setvar('his_tag', name)
lastFuncs = wf.cached_data('last_func:' + name, max_age=0)
if lastFuncs and len(lastFuncs) > 0:
log.debug(lastFuncs)
last_func = lastFuncs[len(lastFuncs) - 1]
mod = it.add_modifier('ctrl', subtitle='run last command {}'.format(last_func))
mod.setvar('last_func', last_func)
mod = it.add_modifier('fn', subtitle='show command history', arg='cmd_history')
mod.setvar('function', 'cmd_history')
if len(lastFuncs) > 1:
second_last_func = lastFuncs[len(lastFuncs) - 2]
mod = it.add_modifier('shift', subtitle='run 2nd last command {}'.format(second_last_func))
mod.setvar('last_func', second_last_func)
if arg and ('connect '.startswith(arg.lower()) or re.match(regexConnect, arg)):
localIpWithMask = run_script('ifconfig | grep -A 1 "en" | grep broadcast | cut -d " " -f 2,4 | tr "\\n" " "')
localIp = localIpWithMask.split(' ')[0]
rawMask = localIpWithMask.split(' ')[1].count('f') * 4
targetIp = arg[8:]
if localIp:
log.debug('history ' + localIp)
history = wf.stored_data('wifi_history_py3')
log.debug(history)
counter = 0
valid = True if re.match('^' + regexIp + '(:|:5|:55|:555|:5555)?$', targetIp) else False
if valid:
subtitle = 'adb connect ' + targetIp if targetIp else ''
it = wf.add_item(title='Connect over WiFi', valid=valid, arg='adb_connect', subtitle=subtitle)
m = it.add_modifier('cmd', subtitle='Remove all connection histories', arg='adb_connect_remove')
m.setvar('extra', 'all')
it.setvar('ip', targetIp.strip('\n'))
if history:
historyWifiDevices = pickle.loads(history)
currentDevices = []
for item in items:
currentDevices.append(item.title.strip())
for historyWifiDevice in historyWifiDevices:
if not historyWifiDevice.title in currentDevices:
deviceIp = historyWifiDevice.title.split(':')[0]
same_network = False
if hasattr(historyWifiDevice, 'mask') and historyWifiDevice.mask:
same_network = ipaddress.ip_network(u'%s/%d' % (localIp, rawMask), False) == ipaddress.ip_network(u'%s/%s' % (deviceIp, historyWifiDevice.mask), False)
else:
same_network = ipaddress.ip_network(u'%s/%d' % (localIp, rawMask), False) == ipaddress.ip_network(u'%s/%d' % (deviceIp, rawMask), False)
if not same_network:
continue
if arg and historyWifiDevice.title.find(targetIp) == -1:
continue
log.debug('history item title ' + historyWifiDevice.title)
title = 'Connect over WiFi'
if historyWifiDevice.subtitle:
title = 'Connect ' + historyWifiDevice.subtitle.split('- ', 1)[1].split(', ', 1)[0] + ' over WiFi'
it = wf.add_item(title=title, valid=True, arg='adb_connect', autocomplete='connect ' + historyWifiDevice.title, subtitle=historyWifiDevice.title, uid=(historyWifiDevice.title, '')[valid])
it.setvar('ip', historyWifiDevice.title)
it.add_modifier('cmd', 'Remove connection history with {0}'.format(historyWifiDevice.title), arg='adb_connect_remove')
it.add_modifier('alt', historyWifiDevice.subtitle)
counter += 1
if not valid and counter == 0:
if not targetIp or re.match(regexIpInput, targetIp):
subtitle = 'adb connect ' + targetIp if targetIp else ''
if not targetIp:
it = wf.add_item(title='Connect over WiFi', valid=False, arg='adb_connect', autocomplete='connect ', subtitle=subtitle)
else:
it = wf.add_item(title='Connect over WiFi', valid=False, arg='adb_connect', subtitle=subtitle)
if wifiDevices:
log.debug(wifiDevices[0].title)
if arg and ('disconnect '.startswith(arg.lower()) or re.match('^disconnect .*', arg)):
targetIp = arg[11:]
if wifiDevices:
for wifiDevice in wifiDevices:
it = wf.add_item(title='Disconnect from WiFi', uid=wifiDevice.title, valid=True, arg='adb_disconnect', autocomplete='disconnect ', subtitle=wifiDevice.title)
ip = wifiDevice.title
if '[OFFLINE]' in ip:
ip = ip.split(' ')[0]
it.setvar('ip', ip)
elif targetIp:
it = wf.add_item(title='Disconnect from WiFi', uid='adb_disconnect', valid=True, arg='adb_disconnect', autocomplete='disconnect ', subtitle='adb disconnect ' + targetIp)
it.setvar('ip', targetIp)
if arg and ('restart'.startswith(arg.lower()) or 'kill-server'.startswith(arg.lower()) or 'start-server'.startswith(arg.lower())) or (len(items) == 0 and (len(arg) == 0 or (not arg.lower().startswith('connect') and (not arg.lower().startswith('disconnect'))))):
wf.add_item(title='Restart adb', valid=True, arg='restart_adb', uid='restart_adb')
wf.send_feedback()
|
adb-alfred
|
positive
|
def requires(self):
<DeepExtract>
if not os.path.exists(pipeline_args.hal):
raise InputMissingException('HAL file not found at {}.'.format(pipeline_args.hal))
for d in [pipeline_args.out_dir, pipeline_args.work_dir]:
if not os.path.exists(d):
if not tools.fileOps.dir_is_writeable(os.path.dirname(d)):
raise UserException('Cannot create directory {}.'.format(d))
elif not tools.fileOps.dir_is_writeable(d):
raise UserException('Directory {} is not writeable.'.format(d))
if not os.path.exists(pipeline_args.annotation):
raise InputMissingException('Annotation file {} not found.'.format(pipeline_args.annotation))
if pipeline_args.ref_genome not in pipeline_args.hal_genomes:
raise InvalidInputException('Reference genome {} not present in HAL.'.format(pipeline_args.ref_genome))
missing_genomes = {g for g in pipeline_args.target_genomes if g not in pipeline_args.hal_genomes}
if len(missing_genomes) > 0:
missing_genomes = ','.join(missing_genomes)
raise InvalidInputException('Target genomes {} not present in HAL.'.format(missing_genomes))
if pipeline_args.ref_genome in pipeline_args.target_genomes:
raise InvalidInputException('A target genome cannot be the reference genome.')
</DeepExtract>
<DeepExtract>
args = tools.misc.PipelineNamespace()
args.set('binary_mode', self.binary_mode, False)
args.set('hal', os.path.abspath(self.hal), True)
args.set('ref_genome', self.ref_genome, True)
args.set('out_dir', os.path.abspath(self.out_dir), True)
args.set('work_dir', os.path.abspath(self.work_dir), True)
args.set('augustus', self.augustus, True)
args.set('augustus_cgp', self.augustus_cgp, True)
args.set('augustus_pb', self.augustus_pb, True)
args.set('augustus_species', self.augustus_species, True)
args.set('tm_cfg', os.path.abspath(self.tm_cfg), True)
args.set('tmr_cfg', os.path.abspath(self.tmr_cfg), True)
args.set('augustus_cgp', self.augustus_cgp, True)
args.set('maf_chunksize', self.maf_chunksize, True)
args.set('maf_overlap', self.maf_overlap, True)
args.set('pb_genome_chunksize', self.pb_genome_chunksize, True)
args.set('pb_genome_overlap', self.pb_genome_overlap, True)
args.set('pb_cfg', os.path.abspath(self.pb_cfg), True)
args.set('augustus_cgp_cfg_template', os.path.abspath(self.augustus_cgp_cfg_template), True)
args.set('augustus_utr_off', self.augustus_utr_off, True)
if self.cgp_param is not None:
args.set('cgp_param', os.path.abspath(self.cgp_param), True)
else:
args.set('cgp_param', None, True)
args.set('cgp_train_num_exons', self.cgp_train_num_exons, True)
args.set('hgm_cpu', self.hgm_cpu, False)
args.set('global_near_best', self.global_near_best, True)
args.set('filter_overlapping_genes', self.filter_overlapping_genes, True)
args.set('overlapping_ignore_bases', self.overlapping_ignore_bases, True)
args.set('intron_rnaseq_support', self.intron_rnaseq_support, False)
args.set('exon_rnaseq_support', self.exon_rnaseq_support, False)
args.set('intron_annot_support', self.intron_annot_support, False)
args.set('exon_annot_support', self.exon_annot_support, False)
args.set('original_intron_support', self.original_intron_support, False)
args.set('denovo_num_introns', self.denovo_num_introns, False)
args.set('denovo_splice_support', self.denovo_splice_support, False)
args.set('denovo_exon_support', self.denovo_exon_support, False)
args.set('denovo_ignore_novel_genes', self.denovo_ignore_novel_genes, False)
args.set('denovo_only_novel_genes', self.denovo_only_novel_genes, False)
args.set('denovo_allow_novel_ends', self.denovo_allow_novel_ends, False)
args.set('denovo_novel_end_distance', self.denovo_novel_end_distance, False)
args.set('denovo_allow_unsupported', self.denovo_allow_unsupported, False)
args.set('denovo_allow_bad_annot_or_tm', self.denovo_allow_bad_annot_or_tm, False)
args.set('require_pacbio_support', self.require_pacbio_support, False)
args.set('in_species_rna_support_only', self.in_species_rna_support_only, False)
args.set('rebuild_consensus', self.rebuild_consensus, False)
args.set('stats_db', os.path.join(args.out_dir, 'databases', 'timing_stats.db'), False)
args.set('assembly_hub', self.assembly_hub, False)
args.set('hub_email', self.hub_email, False)
args.set('annotate_ancestors', self.annotate_ancestors, True)
if not tools.misc.is_exec('halStats'):
raise ToolMissingException('halStats from the HAL tools package not in global path')
args.set('hal_genomes', tools.hal.extract_genomes(args.hal, self.annotate_ancestors), True)
target_genomes = tools.hal.extract_genomes(args.hal, self.annotate_ancestors, self.target_genomes)
target_genomes = tuple((x for x in target_genomes if x != self.ref_genome))
args.set('target_genomes', target_genomes, True)
args.set('cfg', self.parse_cfg(), True)
args.set('dbs', PipelineTask.get_databases(args), True)
args.set('annotation', args.cfg['ANNOTATION'][args.ref_genome], True)
args.set('hints_db', os.path.join(args.work_dir, 'hints_database', 'hints.db'), True)
args.set('rnaseq_genomes', frozenset(set(args.cfg['INTRONBAM'].keys()) | set(args.cfg['BAM'].keys())), True)
args.set('intron_only_genomes', frozenset(set(args.cfg['INTRONBAM'].keys()) - set(args.cfg['BAM'].keys())), True)
args.set('isoseq_genomes', frozenset(list(args.cfg['ISO_SEQ_BAM'].keys())), True)
args.set('annotation_genomes', frozenset(list(args.cfg['ANNOTATION'].keys())), True)
args.set('external_ref_genomes', args.annotation_genomes - {args.ref_genome}, True)
args.set('modes', self.get_modes(args), True)
args.set('augustus_tmr', True if 'augTMR' in args.modes else False, True)
if self.__class__.__name__ in ['RunCat', 'Augustus', 'AugustusCgp', 'AugustusPb']:
self.validate_cfg(args)
pipeline_args = args
</DeepExtract>
for genome in list(pipeline_args.target_genomes) + [pipeline_args.ref_genome]:
<DeepExtract>
base_dir = os.path.join(pipeline_args.work_dir, 'genome_files')
args = tools.misc.HashableNamespace()
args.genome = genome
args.fasta = os.path.join(base_dir, genome + '.fa')
args.two_bit = os.path.join(base_dir, genome + '.2bit')
args.sizes = os.path.join(base_dir, genome + '.chrom.sizes')
args.flat_fasta = os.path.join(base_dir, genome + '.fa.flat')
args = args
</DeepExtract>
yield self.clone(GenomeFasta, **vars(args))
yield self.clone(GenomeTwoBit, **vars(args))
yield self.clone(GenomeSizes, **vars(args))
yield self.clone(GenomeFlatFasta, **vars(args))
|
def requires(self):
if not os.path.exists(pipeline_args.hal):
raise InputMissingException('HAL file not found at {}.'.format(pipeline_args.hal))
for d in [pipeline_args.out_dir, pipeline_args.work_dir]:
if not os.path.exists(d):
if not tools.fileOps.dir_is_writeable(os.path.dirname(d)):
raise UserException('Cannot create directory {}.'.format(d))
elif not tools.fileOps.dir_is_writeable(d):
raise UserException('Directory {} is not writeable.'.format(d))
if not os.path.exists(pipeline_args.annotation):
raise InputMissingException('Annotation file {} not found.'.format(pipeline_args.annotation))
if pipeline_args.ref_genome not in pipeline_args.hal_genomes:
raise InvalidInputException('Reference genome {} not present in HAL.'.format(pipeline_args.ref_genome))
missing_genomes = {g for g in pipeline_args.target_genomes if g not in pipeline_args.hal_genomes}
if len(missing_genomes) > 0:
missing_genomes = ','.join(missing_genomes)
raise InvalidInputException('Target genomes {} not present in HAL.'.format(missing_genomes))
if pipeline_args.ref_genome in pipeline_args.target_genomes:
raise InvalidInputException('A target genome cannot be the reference genome.')
args = tools.misc.PipelineNamespace()
args.set('binary_mode', self.binary_mode, False)
args.set('hal', os.path.abspath(self.hal), True)
args.set('ref_genome', self.ref_genome, True)
args.set('out_dir', os.path.abspath(self.out_dir), True)
args.set('work_dir', os.path.abspath(self.work_dir), True)
args.set('augustus', self.augustus, True)
args.set('augustus_cgp', self.augustus_cgp, True)
args.set('augustus_pb', self.augustus_pb, True)
args.set('augustus_species', self.augustus_species, True)
args.set('tm_cfg', os.path.abspath(self.tm_cfg), True)
args.set('tmr_cfg', os.path.abspath(self.tmr_cfg), True)
args.set('augustus_cgp', self.augustus_cgp, True)
args.set('maf_chunksize', self.maf_chunksize, True)
args.set('maf_overlap', self.maf_overlap, True)
args.set('pb_genome_chunksize', self.pb_genome_chunksize, True)
args.set('pb_genome_overlap', self.pb_genome_overlap, True)
args.set('pb_cfg', os.path.abspath(self.pb_cfg), True)
args.set('augustus_cgp_cfg_template', os.path.abspath(self.augustus_cgp_cfg_template), True)
args.set('augustus_utr_off', self.augustus_utr_off, True)
if self.cgp_param is not None:
args.set('cgp_param', os.path.abspath(self.cgp_param), True)
else:
args.set('cgp_param', None, True)
args.set('cgp_train_num_exons', self.cgp_train_num_exons, True)
args.set('hgm_cpu', self.hgm_cpu, False)
args.set('global_near_best', self.global_near_best, True)
args.set('filter_overlapping_genes', self.filter_overlapping_genes, True)
args.set('overlapping_ignore_bases', self.overlapping_ignore_bases, True)
args.set('intron_rnaseq_support', self.intron_rnaseq_support, False)
args.set('exon_rnaseq_support', self.exon_rnaseq_support, False)
args.set('intron_annot_support', self.intron_annot_support, False)
args.set('exon_annot_support', self.exon_annot_support, False)
args.set('original_intron_support', self.original_intron_support, False)
args.set('denovo_num_introns', self.denovo_num_introns, False)
args.set('denovo_splice_support', self.denovo_splice_support, False)
args.set('denovo_exon_support', self.denovo_exon_support, False)
args.set('denovo_ignore_novel_genes', self.denovo_ignore_novel_genes, False)
args.set('denovo_only_novel_genes', self.denovo_only_novel_genes, False)
args.set('denovo_allow_novel_ends', self.denovo_allow_novel_ends, False)
args.set('denovo_novel_end_distance', self.denovo_novel_end_distance, False)
args.set('denovo_allow_unsupported', self.denovo_allow_unsupported, False)
args.set('denovo_allow_bad_annot_or_tm', self.denovo_allow_bad_annot_or_tm, False)
args.set('require_pacbio_support', self.require_pacbio_support, False)
args.set('in_species_rna_support_only', self.in_species_rna_support_only, False)
args.set('rebuild_consensus', self.rebuild_consensus, False)
args.set('stats_db', os.path.join(args.out_dir, 'databases', 'timing_stats.db'), False)
args.set('assembly_hub', self.assembly_hub, False)
args.set('hub_email', self.hub_email, False)
args.set('annotate_ancestors', self.annotate_ancestors, True)
if not tools.misc.is_exec('halStats'):
raise ToolMissingException('halStats from the HAL tools package not in global path')
args.set('hal_genomes', tools.hal.extract_genomes(args.hal, self.annotate_ancestors), True)
target_genomes = tools.hal.extract_genomes(args.hal, self.annotate_ancestors, self.target_genomes)
target_genomes = tuple((x for x in target_genomes if x != self.ref_genome))
args.set('target_genomes', target_genomes, True)
args.set('cfg', self.parse_cfg(), True)
args.set('dbs', PipelineTask.get_databases(args), True)
args.set('annotation', args.cfg['ANNOTATION'][args.ref_genome], True)
args.set('hints_db', os.path.join(args.work_dir, 'hints_database', 'hints.db'), True)
args.set('rnaseq_genomes', frozenset(set(args.cfg['INTRONBAM'].keys()) | set(args.cfg['BAM'].keys())), True)
args.set('intron_only_genomes', frozenset(set(args.cfg['INTRONBAM'].keys()) - set(args.cfg['BAM'].keys())), True)
args.set('isoseq_genomes', frozenset(list(args.cfg['ISO_SEQ_BAM'].keys())), True)
args.set('annotation_genomes', frozenset(list(args.cfg['ANNOTATION'].keys())), True)
args.set('external_ref_genomes', args.annotation_genomes - {args.ref_genome}, True)
args.set('modes', self.get_modes(args), True)
args.set('augustus_tmr', True if 'augTMR' in args.modes else False, True)
if self.__class__.__name__ in ['RunCat', 'Augustus', 'AugustusCgp', 'AugustusPb']:
self.validate_cfg(args)
pipeline_args = args
for genome in list(pipeline_args.target_genomes) + [pipeline_args.ref_genome]:
base_dir = os.path.join(pipeline_args.work_dir, 'genome_files')
args = tools.misc.HashableNamespace()
args.genome = genome
args.fasta = os.path.join(base_dir, genome + '.fa')
args.two_bit = os.path.join(base_dir, genome + '.2bit')
args.sizes = os.path.join(base_dir, genome + '.chrom.sizes')
args.flat_fasta = os.path.join(base_dir, genome + '.fa.flat')
args = args
yield self.clone(GenomeFasta, **vars(args))
yield self.clone(GenomeTwoBit, **vars(args))
yield self.clone(GenomeSizes, **vars(args))
yield self.clone(GenomeFlatFasta, **vars(args))
|
Comparative-Annotation-Toolkit
|
positive
|
@pytest.mark.xfail(condition=werkzeug.__version__ in ('2.1.0', '2.1.1'), reason='Bug with 204 and Transfer-Encoding', strict=False)
@pytest.mark.parametrize('is_eager', [True, False])
def test_tagr002_tabs_render_without_selected(dash_dcc, is_eager):
app = Dash(__name__, eager_loading=is_eager)
menu = html.Div([html.Div('one', id='one'), html.Div('two', id='two')])
tabs_one = html.Div([dcc.Tabs([dcc.Tab(dcc.Graph(id='graph-one'), label='tab-one-one')])], id='tabs-one', style={'display': 'none'})
tabs_two = html.Div([dcc.Tabs([dcc.Tab(dcc.Graph(id='graph-two'), label='tab-two-one')])], id='tabs-two', style={'display': 'none'})
app.layout = html.Div([menu, tabs_one, tabs_two])
for i in ('one', 'two'):
@app.callback(Output(f'tabs-{i}', 'style'), [Input(i, 'n_clicks')])
def on_click_update_tabs(n_clicks):
if n_clicks is None:
raise PreventUpdate
if n_clicks % 2 == 1:
return {'display': 'block'}
return {'display': 'none'}
@app.callback(Output(f'graph-{i}', 'figure'), [Input(i, 'n_clicks')])
def on_click_update_graph(n_clicks):
if n_clicks is None:
raise PreventUpdate
return {'data': [{'x': [1, 2, 3, 4], 'y': [4, 3, 2, 1]}], 'layout': {'width': 700, 'height': 450}}
dash_dcc.start_server(app)
button_one = dash_dcc.wait_for_element('#one')
button_two = dash_dcc.wait_for_element('#two')
button_one.click()
WebDriverWait(dash_dcc.driver, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#graph-one .main-svg')))
is_eager = 'eager' if is_eager else 'lazy'
time.sleep(1)
dash_dcc.percy_snapshot(f'Tabs-1 rendered ({is_eager})')
button_two.click()
WebDriverWait(dash_dcc.driver, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#graph-two .main-svg')))
time.sleep(1)
dash_dcc.percy_snapshot(f'Tabs-2 rendered ({is_eager})')
<DeepExtract>
config_schema = dash_dcc.driver.execute_script('return Plotly.PlotSchema.get().config;')
with open(os.path.join(dcc.__path__[0], 'metadata.json')) as meta:
graph_meta = json.load(meta)['src/components/Graph.react.js']
config_prop_shape = graph_meta['props']['config']['type']['value']
ignored_config = ['setBackground', 'showSources', 'logging', 'globalTransforms', 'notifyOnLogging', 'role', 'typesetMath']
def crawl(schema, props):
for prop_name in props:
assert prop_name in schema
for (item_name, item) in schema.items():
if item_name in ignored_config:
continue
assert item_name in props
if 'valType' not in item:
crawl(item, props[item_name]['value'])
crawl(config_schema, config_prop_shape)
assert dash_dcc.get_logs() == []
</DeepExtract>
assert dash_dcc.get_logs() == []
|
@pytest.mark.xfail(condition=werkzeug.__version__ in ('2.1.0', '2.1.1'), reason='Bug with 204 and Transfer-Encoding', strict=False)
@pytest.mark.parametrize('is_eager', [True, False])
def test_tagr002_tabs_render_without_selected(dash_dcc, is_eager):
app = Dash(__name__, eager_loading=is_eager)
menu = html.Div([html.Div('one', id='one'), html.Div('two', id='two')])
tabs_one = html.Div([dcc.Tabs([dcc.Tab(dcc.Graph(id='graph-one'), label='tab-one-one')])], id='tabs-one', style={'display': 'none'})
tabs_two = html.Div([dcc.Tabs([dcc.Tab(dcc.Graph(id='graph-two'), label='tab-two-one')])], id='tabs-two', style={'display': 'none'})
app.layout = html.Div([menu, tabs_one, tabs_two])
for i in ('one', 'two'):
@app.callback(Output(f'tabs-{i}', 'style'), [Input(i, 'n_clicks')])
def on_click_update_tabs(n_clicks):
if n_clicks is None:
raise PreventUpdate
if n_clicks % 2 == 1:
return {'display': 'block'}
return {'display': 'none'}
@app.callback(Output(f'graph-{i}', 'figure'), [Input(i, 'n_clicks')])
def on_click_update_graph(n_clicks):
if n_clicks is None:
raise PreventUpdate
return {'data': [{'x': [1, 2, 3, 4], 'y': [4, 3, 2, 1]}], 'layout': {'width': 700, 'height': 450}}
dash_dcc.start_server(app)
button_one = dash_dcc.wait_for_element('#one')
button_two = dash_dcc.wait_for_element('#two')
button_one.click()
WebDriverWait(dash_dcc.driver, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#graph-one .main-svg')))
is_eager = 'eager' if is_eager else 'lazy'
time.sleep(1)
dash_dcc.percy_snapshot(f'Tabs-1 rendered ({is_eager})')
button_two.click()
WebDriverWait(dash_dcc.driver, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#graph-two .main-svg')))
time.sleep(1)
dash_dcc.percy_snapshot(f'Tabs-2 rendered ({is_eager})')
config_schema = dash_dcc.driver.execute_script('return Plotly.PlotSchema.get().config;')
with open(os.path.join(dcc.__path__[0], 'metadata.json')) as meta:
graph_meta = json.load(meta)['src/components/Graph.react.js']
config_prop_shape = graph_meta['props']['config']['type']['value']
ignored_config = ['setBackground', 'showSources', 'logging', 'globalTransforms', 'notifyOnLogging', 'role', 'typesetMath']
def crawl(schema, props):
for prop_name in props:
assert prop_name in schema
for (item_name, item) in schema.items():
if item_name in ignored_config:
continue
assert item_name in props
if 'valType' not in item:
crawl(item, props[item_name]['value'])
crawl(config_schema, config_prop_shape)
assert dash_dcc.get_logs() == []
assert dash_dcc.get_logs() == []
|
dash
|
positive
|
def get_lexer_for_filename(_fn, code=None, **options):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Raises ClassNotFound if not found.
"""
<DeepExtract>
matches = []
fn = basename(_fn)
for (modname, name, _, filenames, _) in LEXERS.values():
for filename in filenames:
if fnmatch(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if fnmatch(fn, filename):
matches.append((cls, filename))
if isinstance(code, bytes):
code = guess_decode(code)
def get_rating(info):
(cls, filename) = info
bonus = '*' not in filename and 0.5 or 0
if code:
res = (cls.analyse_text(code) + bonus, cls.__name__)
res = (cls.priority + bonus, cls.__name__)
if matches:
matches.sort(key=get_rating)
res = matches[-1][0]
</DeepExtract>
if not res:
raise ClassNotFound('no lexer for filename %r found' % _fn)
return res(**options)
|
def get_lexer_for_filename(_fn, code=None, **options):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Raises ClassNotFound if not found.
"""
matches = []
fn = basename(_fn)
for (modname, name, _, filenames, _) in LEXERS.values():
for filename in filenames:
if fnmatch(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if fnmatch(fn, filename):
matches.append((cls, filename))
if isinstance(code, bytes):
code = guess_decode(code)
def get_rating(info):
(cls, filename) = info
bonus = '*' not in filename and 0.5 or 0
if code:
res = (cls.analyse_text(code) + bonus, cls.__name__)
res = (cls.priority + bonus, cls.__name__)
if matches:
matches.sort(key=get_rating)
res = matches[-1][0]
if not res:
raise ClassNotFound('no lexer for filename %r found' % _fn)
return res(**options)
|
diaphora
|
positive
|
def decorator(f: t.Callable[..., t.Any]) -> 'Group':
cmd: Group = group(*args, **kwargs)(f)
<DeepExtract>
name = name or cmd.name
if name is None:
raise TypeError('Command has no name.')
_check_multicommand(self, name, cmd, register=True)
self.commands[name] = cmd
</DeepExtract>
return cmd
|
def decorator(f: t.Callable[..., t.Any]) -> 'Group':
cmd: Group = group(*args, **kwargs)(f)
name = name or cmd.name
if name is None:
raise TypeError('Command has no name.')
_check_multicommand(self, name, cmd, register=True)
self.commands[name] = cmd
return cmd
|
click
|
positive
|
def get_name_lower_rule(self):
if self._cache_name_lower_rule is None:
<DeepExtract>
self._cache_name_lower_rule = self.get_identify_name(self.get_name(), PbConvertRule.CONVERT_NAME_LOWERCASE, '_')
</DeepExtract>
return self._cache_name_lower_rule
|
def get_name_lower_rule(self):
if self._cache_name_lower_rule is None:
self._cache_name_lower_rule = self.get_identify_name(self.get_name(), PbConvertRule.CONVERT_NAME_LOWERCASE, '_')
return self._cache_name_lower_rule
|
atsf4g-co
|
positive
|
def main():
<DeepExtract>
parser = ArgumentParser(description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('input_net_proto_file', help='Input network prototxt file')
parser.add_argument('output_image_file', help='Output image file')
parser.add_argument('--rankdir', help='One of TB (top-bottom, i.e., vertical), RL (right-left, i.e., horizontal), or another valid dot option; see http://www.graphviz.org/doc/info/attrs.html#k:rankdir', default='LR')
parser.add_argument('--phase', help='Which network phase to draw: can be TRAIN, TEST, or ALL. If ALL, then all layers are drawn regardless of phase.', default='ALL')
args = parser.parse_args()
args = args
</DeepExtract>
net = caffe_pb2.NetParameter()
text_format.Merge(open(args.input_net_proto_file).read(), net)
print('Drawing net to %s' % args.output_image_file)
phase = None
if args.phase == 'TRAIN':
phase = caffe.TRAIN
elif args.phase == 'TEST':
phase = caffe.TEST
elif args.phase != 'ALL':
raise ValueError('Unknown phase: ' + args.phase)
caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir, phase)
|
def main():
parser = ArgumentParser(description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('input_net_proto_file', help='Input network prototxt file')
parser.add_argument('output_image_file', help='Output image file')
parser.add_argument('--rankdir', help='One of TB (top-bottom, i.e., vertical), RL (right-left, i.e., horizontal), or another valid dot option; see http://www.graphviz.org/doc/info/attrs.html#k:rankdir', default='LR')
parser.add_argument('--phase', help='Which network phase to draw: can be TRAIN, TEST, or ALL. If ALL, then all layers are drawn regardless of phase.', default='ALL')
args = parser.parse_args()
args = args
net = caffe_pb2.NetParameter()
text_format.Merge(open(args.input_net_proto_file).read(), net)
print('Drawing net to %s' % args.output_image_file)
phase = None
if args.phase == 'TRAIN':
phase = caffe.TRAIN
elif args.phase == 'TEST':
phase = caffe.TEST
elif args.phase != 'ALL':
raise ValueError('Unknown phase: ' + args.phase)
caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir, phase)
|
DeRPN
|
positive
|
def InterpretNumber(token):
<DeepExtract>
if token[0] != STATE_IDENTIFIER:
number = None
try:
number = int(token[1])
except:
number = None
</DeepExtract>
if number == None:
return InterpretHexInteger(token)
else:
return number
|
def InterpretNumber(token):
if token[0] != STATE_IDENTIFIER:
number = None
try:
number = int(token[1])
except:
number = None
if number == None:
return InterpretHexInteger(token)
else:
return number
|
Beta
|
positive
|
def forward(self, image, text, targets, alpha=0, train=True):
image_embeds = self.visual_encoder(image)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
if train:
output = self.text_encoder(text.input_ids, attention_mask=text.attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=True)
prediction = self.cls_head(output.last_hidden_state[:, 0, :])
if self.distill:
with torch.no_grad():
<DeepExtract>
for model_pair in self.model_pairs:
for (param, param_m) in zip(model_pair[0].parameters(), model_pair[1].parameters()):
param_m.data = param_m.data * self.momentum + param.data * (1.0 - self.momentum)
</DeepExtract>
image_embeds_m = self.visual_encoder_m(image)
output_m = self.text_encoder_m(text.input_ids, attention_mask=text.attention_mask, encoder_hidden_states=image_embeds_m, encoder_attention_mask=image_atts, return_dict=True)
prediction_m = self.cls_head_m(output_m.last_hidden_state[:, 0, :])
loss = (1 - alpha) * F.cross_entropy(prediction, targets) - alpha * torch.sum(F.log_softmax(prediction, dim=1) * F.softmax(prediction_m, dim=1), dim=1).mean()
else:
loss = F.cross_entropy(prediction, targets)
return loss
else:
output = self.text_encoder(text.input_ids, attention_mask=text.attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=True)
prediction = self.cls_head(output.last_hidden_state[:, 0, :])
return prediction
|
def forward(self, image, text, targets, alpha=0, train=True):
image_embeds = self.visual_encoder(image)
image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image.device)
if train:
output = self.text_encoder(text.input_ids, attention_mask=text.attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=True)
prediction = self.cls_head(output.last_hidden_state[:, 0, :])
if self.distill:
with torch.no_grad():
for model_pair in self.model_pairs:
for (param, param_m) in zip(model_pair[0].parameters(), model_pair[1].parameters()):
param_m.data = param_m.data * self.momentum + param.data * (1.0 - self.momentum)
image_embeds_m = self.visual_encoder_m(image)
output_m = self.text_encoder_m(text.input_ids, attention_mask=text.attention_mask, encoder_hidden_states=image_embeds_m, encoder_attention_mask=image_atts, return_dict=True)
prediction_m = self.cls_head_m(output_m.last_hidden_state[:, 0, :])
loss = (1 - alpha) * F.cross_entropy(prediction, targets) - alpha * torch.sum(F.log_softmax(prediction, dim=1) * F.softmax(prediction_m, dim=1), dim=1).mean()
else:
loss = F.cross_entropy(prediction, targets)
return loss
else:
output = self.text_encoder(text.input_ids, attention_mask=text.attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=True)
prediction = self.cls_head(output.last_hidden_state[:, 0, :])
return prediction
|
ALBEF
|
positive
|
def validate_common_opts(self):
hosts = self.get_opt('host')
self.port = self.get_opt('port')
if hosts:
self.host_list = [host.strip() for host in hosts.split(',') if host]
self.host_list += self.args
self.host_list = uniq_list_ordered(self.host_list)
if not self.host_list:
self.usage('no hosts specified')
validate_hostport_list(self.host_list, port_optional=True)
validate_port(self.port)
self.port = int(self.port)
<DeepExtract>
if self.is_option_defined('https') and self.get_opt('https'):
self.protocol = 'https'
if str(self.port) == '80':
log.info('overriding port 80 => 443 for https')
self.port = 443
elif self.is_option_defined('http') and self.get_opt('http'):
self.protocol = 'http'
if not self.port:
self.port = 80
if self.is_option_defined('url') and self.get_opt('url'):
self.url_path = self.get_opt('url')
if self.url_path:
if self.protocol is None:
self.protocol = 'http'
elif self.protocol == 'ping':
self.usage('cannot specify --url-path with --ping, mutually exclusive options!')
if self.is_option_defined('ping') and self.get_opt('ping'):
if self.protocol:
self.usage('cannot specify --ping with --http / --https, mutually exclusive tests!')
elif self.port != self.default_port:
self.usage('cannot specify --port with --ping, mutually exclusive options!')
self.protocol = 'ping'
if self.protocol and self.protocol not in ('http', 'https', 'ping'):
code_error('invalid protocol, must be one of http / https / ping')
</DeepExtract>
<DeepExtract>
if self.is_option_defined('regex') and self.get_opt('regex'):
self.regex = self.get_opt('regex')
if self.regex:
if not self.protocol:
self.usage('--regex cannot be used without --http / --https')
validate_regex(self.regex)
self.regex = re.compile(self.regex)
self.num_threads = self.get_opt('num_threads')
validate_int(self.num_threads, 'num threads', 1, 100)
self.num_threads = int(self.num_threads)
self.request_timeout = self.get_opt('request_timeout')
validate_int(self.request_timeout, 'request timeout', 1, 60)
self.request_timeout = int(self.request_timeout)
if self.get_opt('random'):
log_option('random', True)
shuffle(self.host_list)
</DeepExtract>
|
def validate_common_opts(self):
hosts = self.get_opt('host')
self.port = self.get_opt('port')
if hosts:
self.host_list = [host.strip() for host in hosts.split(',') if host]
self.host_list += self.args
self.host_list = uniq_list_ordered(self.host_list)
if not self.host_list:
self.usage('no hosts specified')
validate_hostport_list(self.host_list, port_optional=True)
validate_port(self.port)
self.port = int(self.port)
if self.is_option_defined('https') and self.get_opt('https'):
self.protocol = 'https'
if str(self.port) == '80':
log.info('overriding port 80 => 443 for https')
self.port = 443
elif self.is_option_defined('http') and self.get_opt('http'):
self.protocol = 'http'
if not self.port:
self.port = 80
if self.is_option_defined('url') and self.get_opt('url'):
self.url_path = self.get_opt('url')
if self.url_path:
if self.protocol is None:
self.protocol = 'http'
elif self.protocol == 'ping':
self.usage('cannot specify --url-path with --ping, mutually exclusive options!')
if self.is_option_defined('ping') and self.get_opt('ping'):
if self.protocol:
self.usage('cannot specify --ping with --http / --https, mutually exclusive tests!')
elif self.port != self.default_port:
self.usage('cannot specify --port with --ping, mutually exclusive options!')
self.protocol = 'ping'
if self.protocol and self.protocol not in ('http', 'https', 'ping'):
code_error('invalid protocol, must be one of http / https / ping')
if self.is_option_defined('regex') and self.get_opt('regex'):
self.regex = self.get_opt('regex')
if self.regex:
if not self.protocol:
self.usage('--regex cannot be used without --http / --https')
validate_regex(self.regex)
self.regex = re.compile(self.regex)
self.num_threads = self.get_opt('num_threads')
validate_int(self.num_threads, 'num threads', 1, 100)
self.num_threads = int(self.num_threads)
self.request_timeout = self.get_opt('request_timeout')
validate_int(self.request_timeout, 'request timeout', 1, 60)
self.request_timeout = int(self.request_timeout)
if self.get_opt('random'):
log_option('random', True)
shuffle(self.host_list)
|
DevOps-Python-tools
|
positive
|
def __init__(self):
self.headers = default_headers()
self.auth = None
self.proxies = {}
self.hooks = default_hooks()
self.params = {}
self.stream = False
self.verify = True
self.cert = None
self.max_redirects = DEFAULT_REDIRECT_LIMIT
self.trust_env = True
self.cookies = cookiejar_from_dict({})
self.adapters = OrderedDict()
<DeepExtract>
self.adapters['https://'] = HTTPAdapter()
keys_to_move = [k for k in self.adapters if len(k) < len('https://')]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
</DeepExtract>
<DeepExtract>
self.adapters['http://'] = HTTPAdapter()
keys_to_move = [k for k in self.adapters if len(k) < len('http://')]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
</DeepExtract>
|
def __init__(self):
self.headers = default_headers()
self.auth = None
self.proxies = {}
self.hooks = default_hooks()
self.params = {}
self.stream = False
self.verify = True
self.cert = None
self.max_redirects = DEFAULT_REDIRECT_LIMIT
self.trust_env = True
self.cookies = cookiejar_from_dict({})
self.adapters = OrderedDict()
self.adapters['https://'] = HTTPAdapter()
keys_to_move = [k for k in self.adapters if len(k) < len('https://')]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
self.adapters['http://'] = HTTPAdapter()
keys_to_move = [k for k in self.adapters if len(k) < len('http://')]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
|
alexa-sky-hd
|
positive
|
def test_custom_exc_and_message():
@deal.pre(lambda x: x > 0, exception=ZeroDivisionError, message='oh hi mark')
def f(x):
pass
with pytest.raises(ZeroDivisionError) as exc_info:
<DeepExtract>
pass
</DeepExtract>
assert exc_info.value.args == ('oh hi mark',)
|
def test_custom_exc_and_message():
@deal.pre(lambda x: x > 0, exception=ZeroDivisionError, message='oh hi mark')
def f(x):
pass
with pytest.raises(ZeroDivisionError) as exc_info:
pass
assert exc_info.value.args == ('oh hi mark',)
|
deal
|
positive
|
def point_target(proposals_list, valid_flag_list, gt_bboxes_list, img_metas, cfg, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, sampling=True, unmap_outputs=True):
"""Compute corresponding GT box and classification targets for proposals.
Args:
points_list (list[list]): Multi level points of each image.
valid_flag_list (list[list]): Multi level valid flags of each image.
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
cfg (dict): train sample configs.
Returns:
tuple
"""
num_imgs = len(img_metas)
assert len(proposals_list) == len(valid_flag_list) == num_imgs
num_level_proposals = [points.size(0) for points in proposals_list[0]]
for i in range(num_imgs):
assert len(proposals_list[i]) == len(valid_flag_list[i])
proposals_list[i] = torch.cat(proposals_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_labels, all_label_weights, all_bbox_gt, all_proposals, all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(point_target_single, proposals_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, cfg=cfg, label_channels=label_channels, sampling=sampling, unmap_outputs=unmap_outputs)
if any([labels is None for labels in all_labels]):
return None
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
<DeepExtract>
all_labels = torch.stack(all_labels, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
level_targets.append(all_labels[:, start:end].squeeze(0))
start = end
labels_list = level_targets
</DeepExtract>
<DeepExtract>
all_label_weights = torch.stack(all_label_weights, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
level_targets.append(all_label_weights[:, start:end].squeeze(0))
start = end
label_weights_list = level_targets
</DeepExtract>
<DeepExtract>
all_bbox_gt = torch.stack(all_bbox_gt, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
level_targets.append(all_bbox_gt[:, start:end].squeeze(0))
start = end
bbox_gt_list = level_targets
</DeepExtract>
<DeepExtract>
all_proposals = torch.stack(all_proposals, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
level_targets.append(all_proposals[:, start:end].squeeze(0))
start = end
proposals_list = level_targets
</DeepExtract>
<DeepExtract>
all_proposal_weights = torch.stack(all_proposal_weights, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
level_targets.append(all_proposal_weights[:, start:end].squeeze(0))
start = end
proposal_weights_list = level_targets
</DeepExtract>
return (labels_list, label_weights_list, bbox_gt_list, proposals_list, proposal_weights_list, num_total_pos, num_total_neg)
|
def point_target(proposals_list, valid_flag_list, gt_bboxes_list, img_metas, cfg, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, sampling=True, unmap_outputs=True):
"""Compute corresponding GT box and classification targets for proposals.
Args:
points_list (list[list]): Multi level points of each image.
valid_flag_list (list[list]): Multi level valid flags of each image.
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
cfg (dict): train sample configs.
Returns:
tuple
"""
num_imgs = len(img_metas)
assert len(proposals_list) == len(valid_flag_list) == num_imgs
num_level_proposals = [points.size(0) for points in proposals_list[0]]
for i in range(num_imgs):
assert len(proposals_list[i]) == len(valid_flag_list[i])
proposals_list[i] = torch.cat(proposals_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_labels, all_label_weights, all_bbox_gt, all_proposals, all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(point_target_single, proposals_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, cfg=cfg, label_channels=label_channels, sampling=sampling, unmap_outputs=unmap_outputs)
if any([labels is None for labels in all_labels]):
return None
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
all_labels = torch.stack(all_labels, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
level_targets.append(all_labels[:, start:end].squeeze(0))
start = end
labels_list = level_targets
all_label_weights = torch.stack(all_label_weights, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
level_targets.append(all_label_weights[:, start:end].squeeze(0))
start = end
label_weights_list = level_targets
all_bbox_gt = torch.stack(all_bbox_gt, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
level_targets.append(all_bbox_gt[:, start:end].squeeze(0))
start = end
bbox_gt_list = level_targets
all_proposals = torch.stack(all_proposals, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
level_targets.append(all_proposals[:, start:end].squeeze(0))
start = end
proposals_list = level_targets
all_proposal_weights = torch.stack(all_proposal_weights, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
level_targets.append(all_proposal_weights[:, start:end].squeeze(0))
start = end
proposal_weights_list = level_targets
return (labels_list, label_weights_list, bbox_gt_list, proposals_list, proposal_weights_list, num_total_pos, num_total_neg)
|
BalancedGroupSoftmax
|
positive
|
def _getreportpdf(self, url, retry=False):
headers = {'accept': 'application/pdf', 'x-app-name': 'opencti-connector-5.6.2', 'authorization': 'Bearer ' + self.auth_token}
r = requests.get(url, headers=headers)
if r.status_code == 200:
self.helper.log_info('Report PDF fetched successfully')
return r.content
elif (r.status_code == 401 or r.status_code == 403) and (not retry):
<DeepExtract>
headers = {'accept': 'application/json', 'x-app-name': 'opencti-connector-5.6.2'}
r = requests.post(self.mandiant_api_url + '/token', auth=HTTPBasicAuth(self.mandiant_api_v4_key_id, self.mandiant_api_v4_key_secret), data={'grant_type': 'client_credentials'}, headers=headers)
if r.status_code != 200:
raise ValueError('Mandiant Authentication failed')
data = r.json()
self.auth_token = data.get('access_token')
</DeepExtract>
return self._getreportpdf(url, True)
elif r.status_code == 401 or r.status_code == 403:
raise ValueError('Query failed, permission denied')
else:
self.helper.log_info('An error has ocurred getting PDF report')
|
def _getreportpdf(self, url, retry=False):
headers = {'accept': 'application/pdf', 'x-app-name': 'opencti-connector-5.6.2', 'authorization': 'Bearer ' + self.auth_token}
r = requests.get(url, headers=headers)
if r.status_code == 200:
self.helper.log_info('Report PDF fetched successfully')
return r.content
elif (r.status_code == 401 or r.status_code == 403) and (not retry):
headers = {'accept': 'application/json', 'x-app-name': 'opencti-connector-5.6.2'}
r = requests.post(self.mandiant_api_url + '/token', auth=HTTPBasicAuth(self.mandiant_api_v4_key_id, self.mandiant_api_v4_key_secret), data={'grant_type': 'client_credentials'}, headers=headers)
if r.status_code != 200:
raise ValueError('Mandiant Authentication failed')
data = r.json()
self.auth_token = data.get('access_token')
return self._getreportpdf(url, True)
elif r.status_code == 401 or r.status_code == 403:
raise ValueError('Query failed, permission denied')
else:
self.helper.log_info('An error has ocurred getting PDF report')
|
connectors
|
positive
|
def convert_formulae(formulae: List[Union[str, FNode]], parser: Union[StringParser, LTLParser], relative_path: Path=Path('.')) -> List[List[FNode]]:
"""
Converts string orepresentation of formulae to the internal representation
of PySMT FNodes
The string can also point to a file which contains string formulae, this
method looks in the provided relative path for the formula file
If passed None, it replace it with an empty list
"""
converted_formulae = [None] * len(formulae)
for i in range(len(formulae)):
if formulae[i] is not None:
<DeepExtract>
if isinstance(formulae[i], FNode):
converted_formulae[i] = [formulae[i]]
try:
pdef_file = relative_path / formulae[i]
with pdef_file.open() as f:
converted_tuples = parser.parse_formulae([p.strip() for p in f.read().strip().split('\n')])
except OSError:
converted_tuples = parser.parse_formulae([p.strip() for p in formulae[i].split(MODEL_SP)])
converted_formulae[i] = [c[1] for c in converted_tuples]
</DeepExtract>
else:
converted_formulae[i] = []
return converted_formulae
|
def convert_formulae(formulae: List[Union[str, FNode]], parser: Union[StringParser, LTLParser], relative_path: Path=Path('.')) -> List[List[FNode]]:
"""
Converts string orepresentation of formulae to the internal representation
of PySMT FNodes
The string can also point to a file which contains string formulae, this
method looks in the provided relative path for the formula file
If passed None, it replace it with an empty list
"""
converted_formulae = [None] * len(formulae)
for i in range(len(formulae)):
if formulae[i] is not None:
if isinstance(formulae[i], FNode):
converted_formulae[i] = [formulae[i]]
try:
pdef_file = relative_path / formulae[i]
with pdef_file.open() as f:
converted_tuples = parser.parse_formulae([p.strip() for p in f.read().strip().split('\n')])
except OSError:
converted_tuples = parser.parse_formulae([p.strip() for p in formulae[i].split(MODEL_SP)])
converted_formulae[i] = [c[1] for c in converted_tuples]
else:
converted_formulae[i] = []
return converted_formulae
|
CoSA
|
positive
|
def prepare_test_img(self, idx):
img_info = self.img_infos[idx]
results = dict(img_info=img_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
<DeepExtract>
results['img_prefix'] = self.img_prefix
results['seg_prefix'] = self.seg_prefix
results['proposal_file'] = self.proposal_file
results['bbox_fields'] = []
results['mask_fields'] = []
</DeepExtract>
return self.pipeline(results)
|
def prepare_test_img(self, idx):
img_info = self.img_infos[idx]
results = dict(img_info=img_info)
if self.proposals is not None:
results['proposals'] = self.proposals[idx]
results['img_prefix'] = self.img_prefix
results['seg_prefix'] = self.seg_prefix
results['proposal_file'] = self.proposal_file
results['bbox_fields'] = []
results['mask_fields'] = []
return self.pipeline(results)
|
BalancedGroupSoftmax
|
positive
|
def load_urdf(urdf_file: str, weight_distribution: str='rigid', fk_offset: Optional[Union[List[float], Vector, np.array]]=None, ik_offset: Optional[Union[List[float], Vector, np.array]]=None) -> URDFObject:
""" Loads an urdf object from an URDF file.
:param urdf_file: Path to the URDF file.
:param weight_distribution: One of ['envelope', 'automatic', 'rigid']. For more information please see
https://docs.blender.org/manual/en/latest/animation/armatures/skinning/parenting.html.
:param fk_offset: Offset between fk (forward kinematic) bone chain and link bone chain. This does not have any
effect on the transformations, but can be useful for visualization in blender.
:param ik_offset: Offset between ik (inverse kinematic) bone chain and link bone chain. Effects on the
transformation (e.g. `urdf_object.set_location_ik()`) are being handled internally. Useful for
visualization in blender.
:return: URDF object instance.
"""
SetupUtility.setup_pip(user_required_packages=['git+https://github.com/wboerdijk/urdfpy.git'])
from urdfpy import URDF
if fk_offset is None:
fk_offset = [0.0, -1.0, 0.0]
if ik_offset is None:
ik_offset = [0.0, 1.0, 0.0]
urdf_tree = URDF.load(urdf_file)
bpy.ops.object.select_all(action='DESELECT')
bpy.ops.object.armature_add()
armature = bpy.context.active_object
armature.name = urdf_tree.name
bpy.ops.object.mode_set(mode='EDIT')
armature.data.edit_bones.remove(armature.data.edit_bones.values()[0])
bpy.ops.object.mode_set(mode='OBJECT')
<DeepExtract>
base_joints = [joint_tree for (i, joint_tree) in enumerate(urdf_tree.joints) if joint_tree.parent == urdf_tree.base_link.name]
</DeepExtract>
for base_joint in base_joints:
<DeepExtract>
if fk_offset is None:
fk_offset = [0.0, -1.0, 0.0]
if ik_offset is None:
ik_offset = [0.0, 1.0, 0.0]
bpy.ops.object.select_all(action='DESELECT')
bpy.context.view_layer.objects.active = armature
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
edit_bones = armature.data.edit_bones
editbone = edit_bones.new(base_joint.name)
origin = base_joint.origin
if parent_origin is not None:
origin = parent_origin @ origin
axis = Matrix(origin[:3, :3]) @ Vector(base_joint.axis)
editbone.head = Vector(origin[:3, -1])
editbone.tail = editbone.head + axis.normalized() * 0.2
if None is not None:
parent_bone = edit_bones.get(None)
editbone.parent = parent_bone
fk_editbone = edit_bones.new(base_joint.name + '.fk')
axis = Matrix(origin[:3, :3]) @ Vector(base_joint.axis)
fk_editbone.head = Vector(origin[:3, -1]) + Vector(fk_offset)
fk_editbone.tail = fk_editbone.head + axis.normalized() * 0.2
if None is not None:
parent_bone = edit_bones.get(None + '.fk')
fk_editbone.parent = parent_bone
ik_editbone = edit_bones.new(base_joint.name + '.ik')
axis = Matrix(origin[:3, :3]) @ Vector(base_joint.axis)
ik_editbone.head = Vector(origin[:3, -1]) + Vector(ik_offset)
ik_editbone.tail = ik_editbone.head + axis.normalized() * 0.2
if None is not None:
parent_bone = edit_bones.get(None + '.ik')
ik_editbone.parent = parent_bone
bone_name = editbone.name
fk_bone_name = fk_editbone.name
ik_bone_name = ik_editbone.name
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
bone = armature.pose.bones[bone_name]
fk_bone = armature.pose.bones[fk_bone_name]
ik_bone = armature.pose.bones[ik_bone_name]
bone.rotation_mode = 'XYZ'
fk_bone.rotation_mode = 'XYZ'
ik_bone.rotation_mode = 'XYZ'
if base_joint.joint_type == 'fixed':
set_location_constraint(bone=bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_location_constraint(bone=fk_bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_location_constraint(bone=ik_bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_rotation_constraint(bone=bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_rotation_constraint(bone=fk_bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_rotation_constraint(bone=ik_bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
elif base_joint.joint_type == 'revolute':
limits = None
if base_joint.limit is not None:
limits = np.array([base_joint.limit.lower, base_joint.limit.upper])
set_location_constraint(bone=bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_location_constraint(bone=fk_bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_location_constraint(bone=ik_bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_rotation_constraint(bone=bone, x_limits=[0, 0], y_limits=limits, z_limits=[0, 0])
set_rotation_constraint(bone=fk_bone, x_limits=[0, 0], y_limits=limits, z_limits=[0, 0])
set_rotation_constraint(bone=ik_bone, x_limits=[0, 0], y_limits=limits, z_limits=[0, 0])
set_copy_rotation_constraint(bone=bone, target=armature, target_bone=fk_bone.name, custom_constraint_name='copy_rotation.fk')
set_copy_rotation_constraint(bone=bone, target=armature, target_bone=ik_bone.name, custom_constraint_name='copy_rotation.ik', influence=0.0)
else:
warnings.warn(f"WARNING: No constraint implemented for joint type '{base_joint.joint_type}'!")
if True:
child_joints = get_joints_which_have_link_as_parent(link_name=base_joint.child, joint_trees=urdf_tree.joints)
for child_joint in child_joints:
create_bone(armature, child_joint, urdf_tree.joints, parent_bone_name=bone.name, create_recursive=True, parent_origin=origin, fk_offset=fk_offset, ik_offset=ik_offset)
</DeepExtract>
<DeepExtract>
links = []
for link_tree in urdf_tree.links:
(visuals, collisions, inertial) = ([], [], None)
if link_tree.visuals:
visuals = [load_visual_collision_obj(visual_tree, name=f'{link_tree.name}_visual', urdf_path=urdf_file) for visual_tree in link_tree.visuals]
if link_tree.collisions:
collisions = [load_visual_collision_obj(collision_tree, name=f'{link_tree.name}_collision', urdf_path=urdf_file) for collision_tree in link_tree.collisions]
if link_tree.inertial:
inertial = load_inertial(link_tree.inertial, name=f'{link_tree.name}_inertial')
corresponding_joint = get_joints_which_have_link_as_child(link_tree.name, urdf_tree.joints)
link = Link(bpy_object=create_with_empty_mesh(link_tree.name).blender_obj)
link.set_armature(armature)
link.set_visuals(visuals)
link.set_visual_local2link_mats([Matrix(obj.get_local2world_mat()) for obj in visuals])
link.set_collisions(collisions)
link.set_collision_local2link_mats([Matrix(obj.get_local2world_mat()) for obj in collisions])
link.set_inertial(inertial)
link.set_inertial_local2link_mat(Matrix(inertial.get_local2world_mat()) if inertial is not None else None)
link.set_name(name=link_tree.name)
if corresponding_joint is not None:
link.set_bone(armature.pose.bones.get(corresponding_joint.name))
link.set_fk_bone(armature.pose.bones.get(corresponding_joint.name + '.fk'))
link.set_ik_bone(armature.pose.bones.get(corresponding_joint.name + '.ik'))
link.set_joint_type(corresponding_joint.joint_type)
links.append(link)
links = links
</DeepExtract>
for base_joint in base_joints:
<DeepExtract>
child_link = one_by_attr(elements=links, attr_name='name', value=base_joint.child)
parent_link = one_by_attr(elements=links, attr_name='name', value=base_joint.parent)
mat = Matrix(parent_link.get_local2world_mat()) @ Matrix(base_joint.origin)
child_link.set_local2world_mat(mat)
child_link.set_link_parent(parent=parent_link)
parent_link.set_link_child(child=child_link)
for obj in child_link.get_all_objs():
obj.set_local2world_mat(Matrix(child_link.get_local2world_mat()) @ Matrix(obj.get_local2world_mat()))
if child_link.bone is not None:
child_link.set_link2bone_mat(mat.inverted() @ child_link.bone.matrix)
if recursive:
child_joint_trees = get_joints_which_have_link_as_parent(child_link.get_name(), urdf_tree.joints)
for child_joint_tree in child_joint_trees:
propagate_pose(links, child_joint_tree, urdf_tree.joints, armature, recursive=True)
</DeepExtract>
for link in links:
link.parent_with_bone(weight_distribution=weight_distribution)
for link in links:
link.switch_fk_ik_mode(mode='fk')
urdf_object = URDFObject(armature, links=links, xml_tree=urdf_tree)
urdf_object.hide_links_and_collision_inertial_objs()
return urdf_object
|
def load_urdf(urdf_file: str, weight_distribution: str='rigid', fk_offset: Optional[Union[List[float], Vector, np.array]]=None, ik_offset: Optional[Union[List[float], Vector, np.array]]=None) -> URDFObject:
""" Loads an urdf object from an URDF file.
:param urdf_file: Path to the URDF file.
:param weight_distribution: One of ['envelope', 'automatic', 'rigid']. For more information please see
https://docs.blender.org/manual/en/latest/animation/armatures/skinning/parenting.html.
:param fk_offset: Offset between fk (forward kinematic) bone chain and link bone chain. This does not have any
effect on the transformations, but can be useful for visualization in blender.
:param ik_offset: Offset between ik (inverse kinematic) bone chain and link bone chain. Effects on the
transformation (e.g. `urdf_object.set_location_ik()`) are being handled internally. Useful for
visualization in blender.
:return: URDF object instance.
"""
SetupUtility.setup_pip(user_required_packages=['git+https://github.com/wboerdijk/urdfpy.git'])
from urdfpy import URDF
if fk_offset is None:
fk_offset = [0.0, -1.0, 0.0]
if ik_offset is None:
ik_offset = [0.0, 1.0, 0.0]
urdf_tree = URDF.load(urdf_file)
bpy.ops.object.select_all(action='DESELECT')
bpy.ops.object.armature_add()
armature = bpy.context.active_object
armature.name = urdf_tree.name
bpy.ops.object.mode_set(mode='EDIT')
armature.data.edit_bones.remove(armature.data.edit_bones.values()[0])
bpy.ops.object.mode_set(mode='OBJECT')
base_joints = [joint_tree for (i, joint_tree) in enumerate(urdf_tree.joints) if joint_tree.parent == urdf_tree.base_link.name]
for base_joint in base_joints:
if fk_offset is None:
fk_offset = [0.0, -1.0, 0.0]
if ik_offset is None:
ik_offset = [0.0, 1.0, 0.0]
bpy.ops.object.select_all(action='DESELECT')
bpy.context.view_layer.objects.active = armature
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
edit_bones = armature.data.edit_bones
editbone = edit_bones.new(base_joint.name)
origin = base_joint.origin
if parent_origin is not None:
origin = parent_origin @ origin
axis = Matrix(origin[:3, :3]) @ Vector(base_joint.axis)
editbone.head = Vector(origin[:3, -1])
editbone.tail = editbone.head + axis.normalized() * 0.2
if None is not None:
parent_bone = edit_bones.get(None)
editbone.parent = parent_bone
fk_editbone = edit_bones.new(base_joint.name + '.fk')
axis = Matrix(origin[:3, :3]) @ Vector(base_joint.axis)
fk_editbone.head = Vector(origin[:3, -1]) + Vector(fk_offset)
fk_editbone.tail = fk_editbone.head + axis.normalized() * 0.2
if None is not None:
parent_bone = edit_bones.get(None + '.fk')
fk_editbone.parent = parent_bone
ik_editbone = edit_bones.new(base_joint.name + '.ik')
axis = Matrix(origin[:3, :3]) @ Vector(base_joint.axis)
ik_editbone.head = Vector(origin[:3, -1]) + Vector(ik_offset)
ik_editbone.tail = ik_editbone.head + axis.normalized() * 0.2
if None is not None:
parent_bone = edit_bones.get(None + '.ik')
ik_editbone.parent = parent_bone
bone_name = editbone.name
fk_bone_name = fk_editbone.name
ik_bone_name = ik_editbone.name
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
bone = armature.pose.bones[bone_name]
fk_bone = armature.pose.bones[fk_bone_name]
ik_bone = armature.pose.bones[ik_bone_name]
bone.rotation_mode = 'XYZ'
fk_bone.rotation_mode = 'XYZ'
ik_bone.rotation_mode = 'XYZ'
if base_joint.joint_type == 'fixed':
set_location_constraint(bone=bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_location_constraint(bone=fk_bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_location_constraint(bone=ik_bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_rotation_constraint(bone=bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_rotation_constraint(bone=fk_bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_rotation_constraint(bone=ik_bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
elif base_joint.joint_type == 'revolute':
limits = None
if base_joint.limit is not None:
limits = np.array([base_joint.limit.lower, base_joint.limit.upper])
set_location_constraint(bone=bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_location_constraint(bone=fk_bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_location_constraint(bone=ik_bone, x_limits=[0.0, 0.0], y_limits=[0.0, 0.0], z_limits=[0.0, 0.0])
set_rotation_constraint(bone=bone, x_limits=[0, 0], y_limits=limits, z_limits=[0, 0])
set_rotation_constraint(bone=fk_bone, x_limits=[0, 0], y_limits=limits, z_limits=[0, 0])
set_rotation_constraint(bone=ik_bone, x_limits=[0, 0], y_limits=limits, z_limits=[0, 0])
set_copy_rotation_constraint(bone=bone, target=armature, target_bone=fk_bone.name, custom_constraint_name='copy_rotation.fk')
set_copy_rotation_constraint(bone=bone, target=armature, target_bone=ik_bone.name, custom_constraint_name='copy_rotation.ik', influence=0.0)
else:
warnings.warn(f"WARNING: No constraint implemented for joint type '{base_joint.joint_type}'!")
if True:
child_joints = get_joints_which_have_link_as_parent(link_name=base_joint.child, joint_trees=urdf_tree.joints)
for child_joint in child_joints:
create_bone(armature, child_joint, urdf_tree.joints, parent_bone_name=bone.name, create_recursive=True, parent_origin=origin, fk_offset=fk_offset, ik_offset=ik_offset)
links = []
for link_tree in urdf_tree.links:
(visuals, collisions, inertial) = ([], [], None)
if link_tree.visuals:
visuals = [load_visual_collision_obj(visual_tree, name=f'{link_tree.name}_visual', urdf_path=urdf_file) for visual_tree in link_tree.visuals]
if link_tree.collisions:
collisions = [load_visual_collision_obj(collision_tree, name=f'{link_tree.name}_collision', urdf_path=urdf_file) for collision_tree in link_tree.collisions]
if link_tree.inertial:
inertial = load_inertial(link_tree.inertial, name=f'{link_tree.name}_inertial')
corresponding_joint = get_joints_which_have_link_as_child(link_tree.name, urdf_tree.joints)
link = Link(bpy_object=create_with_empty_mesh(link_tree.name).blender_obj)
link.set_armature(armature)
link.set_visuals(visuals)
link.set_visual_local2link_mats([Matrix(obj.get_local2world_mat()) for obj in visuals])
link.set_collisions(collisions)
link.set_collision_local2link_mats([Matrix(obj.get_local2world_mat()) for obj in collisions])
link.set_inertial(inertial)
link.set_inertial_local2link_mat(Matrix(inertial.get_local2world_mat()) if inertial is not None else None)
link.set_name(name=link_tree.name)
if corresponding_joint is not None:
link.set_bone(armature.pose.bones.get(corresponding_joint.name))
link.set_fk_bone(armature.pose.bones.get(corresponding_joint.name + '.fk'))
link.set_ik_bone(armature.pose.bones.get(corresponding_joint.name + '.ik'))
link.set_joint_type(corresponding_joint.joint_type)
links.append(link)
links = links
for base_joint in base_joints:
child_link = one_by_attr(elements=links, attr_name='name', value=base_joint.child)
parent_link = one_by_attr(elements=links, attr_name='name', value=base_joint.parent)
mat = Matrix(parent_link.get_local2world_mat()) @ Matrix(base_joint.origin)
child_link.set_local2world_mat(mat)
child_link.set_link_parent(parent=parent_link)
parent_link.set_link_child(child=child_link)
for obj in child_link.get_all_objs():
obj.set_local2world_mat(Matrix(child_link.get_local2world_mat()) @ Matrix(obj.get_local2world_mat()))
if child_link.bone is not None:
child_link.set_link2bone_mat(mat.inverted() @ child_link.bone.matrix)
if recursive:
child_joint_trees = get_joints_which_have_link_as_parent(child_link.get_name(), urdf_tree.joints)
for child_joint_tree in child_joint_trees:
propagate_pose(links, child_joint_tree, urdf_tree.joints, armature, recursive=True)
for link in links:
link.parent_with_bone(weight_distribution=weight_distribution)
for link in links:
link.switch_fk_ik_mode(mode='fk')
urdf_object = URDFObject(armature, links=links, xml_tree=urdf_tree)
urdf_object.hide_links_and_collision_inertial_objs()
return urdf_object
|
BlenderProc
|
positive
|
def shear_from_matrix(matrix):
"""Return shear angle, direction and plane from shear matrix.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S0 = shear_matrix(angle, direct, point, normal)
>>> angle, direct, point, normal = shear_from_matrix(S0)
>>> S1 = shear_matrix(angle, direct, point, normal)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
(w, V) = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - 1.0) < 0.0001)[0]
if len(i) < 2:
raise ValueError('no two linear independent eigenvectors found %s' % w)
V = numpy.real(V[:, i]).squeeze().T
lenorm = -1.0
for (i0, i1) in ((0, 1), (0, 2), (1, 2)):
n = numpy.cross(V[i0], V[i1])
<DeepExtract>
n = numpy.array(n, dtype=numpy.float64, copy=True)
if out is None:
if n.ndim == 1:
w = math.sqrt(numpy.dot(n, n))
n *= n
out = numpy.atleast_1d(numpy.sum(n, axis=axis))
numpy.sqrt(out, out)
w = out
else:
n *= n
numpy.sum(n, axis=axis, out=out)
numpy.sqrt(out, out)
</DeepExtract>
if w > lenorm:
lenorm = w
normal = n
normal /= lenorm
direction = numpy.dot(M33 - numpy.identity(3), normal)
<DeepExtract>
direction = numpy.array(direction, dtype=numpy.float64, copy=True)
if out is None:
if direction.ndim == 1:
angle = math.sqrt(numpy.dot(direction, direction))
direction *= direction
out = numpy.atleast_1d(numpy.sum(direction, axis=axis))
numpy.sqrt(out, out)
angle = out
else:
direction *= direction
numpy.sum(direction, axis=axis, out=out)
numpy.sqrt(out, out)
</DeepExtract>
direction /= angle
angle = math.atan(angle)
(w, V) = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-08)[0]
if not len(i):
raise ValueError('no eigenvector corresponding to eigenvalue 1')
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return (angle, direction, point, normal)
|
def shear_from_matrix(matrix):
"""Return shear angle, direction and plane from shear matrix.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S0 = shear_matrix(angle, direct, point, normal)
>>> angle, direct, point, normal = shear_from_matrix(S0)
>>> S1 = shear_matrix(angle, direct, point, normal)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
(w, V) = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - 1.0) < 0.0001)[0]
if len(i) < 2:
raise ValueError('no two linear independent eigenvectors found %s' % w)
V = numpy.real(V[:, i]).squeeze().T
lenorm = -1.0
for (i0, i1) in ((0, 1), (0, 2), (1, 2)):
n = numpy.cross(V[i0], V[i1])
n = numpy.array(n, dtype=numpy.float64, copy=True)
if out is None:
if n.ndim == 1:
w = math.sqrt(numpy.dot(n, n))
n *= n
out = numpy.atleast_1d(numpy.sum(n, axis=axis))
numpy.sqrt(out, out)
w = out
else:
n *= n
numpy.sum(n, axis=axis, out=out)
numpy.sqrt(out, out)
if w > lenorm:
lenorm = w
normal = n
normal /= lenorm
direction = numpy.dot(M33 - numpy.identity(3), normal)
direction = numpy.array(direction, dtype=numpy.float64, copy=True)
if out is None:
if direction.ndim == 1:
angle = math.sqrt(numpy.dot(direction, direction))
direction *= direction
out = numpy.atleast_1d(numpy.sum(direction, axis=axis))
numpy.sqrt(out, out)
angle = out
else:
direction *= direction
numpy.sum(direction, axis=axis, out=out)
numpy.sqrt(out, out)
direction /= angle
angle = math.atan(angle)
(w, V) = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-08)[0]
if not len(i):
raise ValueError('no eigenvector corresponding to eigenvalue 1')
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return (angle, direction, point, normal)
|
alfred
|
positive
|
def runTest(self, client=wan, server=lan):
<DeepExtract>
board.sendline('mpstat -V')
if board.expect(['sysstat version', 'BusyBox', 'not found'], timeout=5) == 0:
mpstat_present = True
else:
mpstat_present = False
board.expect(prompt)
mpstat_present = mpstat_present
</DeepExtract>
<DeepExtract>
if self.server_opts_reverse(node=server) is None:
self.server_opts_reverse(node=server) = self.server_opts_forward()
self.kill_iperf(server)
server.sendline('iperf -s %s > /dev/null &' % self.server_opts_reverse(node=server))
server.expect(prompt)
</DeepExtract>
if mpstat_present:
board.sendline('mpstat -P ALL 10000 1')
board.expect('Linux')
<DeepExtract>
if self.reverse_ip() is None:
self.reverse_ip() = self.forward_ip()
client.sendline('iperf %s -c %s %s | grep -v SUM' % (self.client_opts(), self.reverse_ip(), opts))
client.expect('Client connecting to')
</DeepExtract>
<DeepExtract>
rate = 0.0
for i in range(0, connections):
m = client.expect(['Bytes([^M]*)Mbits', 'Bytes([^K]*)Kbits'], timeout=t + 30)
if m == 0:
rate += float(client.match.group(1))
elif m == 1:
rate += float(client.match.group(1)) / 1000
else:
lib.common.test_msg('Unknown units for iPerf results!\n')
assert False
client.expect(prompt)
rate = rate
</DeepExtract>
if mpstat_present:
board.sendcontrol('c')
board.expect('Average.*idle\r\nAverage:\\s+all(\\s+[0-9]+.[0-9]+){10}\r\n')
idle_cpu = float(board.match.group(1))
avg_cpu = 100 - float(idle_cpu)
self.logged['avg_cpu'] = float(avg_cpu)
else:
avg_cpu = 'N/A'
<DeepExtract>
server.sendline('killall -9 iperf')
server.expect(prompt)
</DeepExtract>
msg = 'iPerf from WAN to LAN (%s Mbps, CPU=%s)' % (rate, avg_cpu)
lib.common.test_msg('\n%s' % msg)
self.logged['rate'] = float(rate)
self.result_message = msg
|
def runTest(self, client=wan, server=lan):
board.sendline('mpstat -V')
if board.expect(['sysstat version', 'BusyBox', 'not found'], timeout=5) == 0:
mpstat_present = True
else:
mpstat_present = False
board.expect(prompt)
mpstat_present = mpstat_present
if self.server_opts_reverse(node=server) is None:
self.server_opts_reverse(node=server) = self.server_opts_forward()
self.kill_iperf(server)
server.sendline('iperf -s %s > /dev/null &' % self.server_opts_reverse(node=server))
server.expect(prompt)
if mpstat_present:
board.sendline('mpstat -P ALL 10000 1')
board.expect('Linux')
if self.reverse_ip() is None:
self.reverse_ip() = self.forward_ip()
client.sendline('iperf %s -c %s %s | grep -v SUM' % (self.client_opts(), self.reverse_ip(), opts))
client.expect('Client connecting to')
rate = 0.0
for i in range(0, connections):
m = client.expect(['Bytes([^M]*)Mbits', 'Bytes([^K]*)Kbits'], timeout=t + 30)
if m == 0:
rate += float(client.match.group(1))
elif m == 1:
rate += float(client.match.group(1)) / 1000
else:
lib.common.test_msg('Unknown units for iPerf results!\n')
assert False
client.expect(prompt)
rate = rate
if mpstat_present:
board.sendcontrol('c')
board.expect('Average.*idle\r\nAverage:\\s+all(\\s+[0-9]+.[0-9]+){10}\r\n')
idle_cpu = float(board.match.group(1))
avg_cpu = 100 - float(idle_cpu)
self.logged['avg_cpu'] = float(avg_cpu)
else:
avg_cpu = 'N/A'
server.sendline('killall -9 iperf')
server.expect(prompt)
msg = 'iPerf from WAN to LAN (%s Mbps, CPU=%s)' % (rate, avg_cpu)
lib.common.test_msg('\n%s' % msg)
self.logged['rate'] = float(rate)
self.result_message = msg
|
boardfarm
|
positive
|
def gen_new_df_with_used_cols(df: Union[pd.DataFrame, dd.DataFrame], x: Optional[Union[str, LatLong]]=None, y: Optional[Union[str, LatLong]]=None, z: Optional[str]=None) -> Tuple[Dict[Optional[Union[str, LatLong]], Optional[str]], Union[pd.DataFrame, dd.DataFrame]]:
"""
Keep only used columns in x, y, z, and gen new Latlong columns if any x, y, z is LatLong.
"""
new_names: Dict[Optional[Union[str, LatLong]], Optional[str]] = {}
if x is None and y is None and (z is None):
return ({}, df)
used_org_cols = set()
for col in (x, y, z):
if col is not None:
if isinstance(col, LatLong):
used_org_cols.add(col.lat)
used_org_cols.add(col.long)
else:
used_org_cols.add(col)
if isinstance(df, dd.DataFrame):
pd_df = df[list(used_org_cols)].compute()
else:
pd_df = df[list(used_org_cols)]
new_srss: Dict[Optional[Union[str, LatLong]], pd.Series] = {}
for col in (x, y, z):
if isinstance(col, LatLong):
<DeepExtract>
columns = pd_df.columns
name = col.lat + '_' + col.long
i = 0
while name in columns:
name = f'{name}_{i}'
i += 1
lat_long = pd.Series(zip(pd_df[col.lat], pd_df[col.long]), name=name)
lat_long = lat_long
</DeepExtract>
new_names[col] = lat_long.name
new_srss[lat_long.name] = lat_long
new_srss[col.lat] = pd_df[col.lat]
new_srss[col.long] = pd_df[col.long]
else:
new_names[col] = col
if col is not None:
new_srss[col] = pd_df[col]
new_df = pd.concat(new_srss.values(), axis='columns')
return (new_names, new_df)
|
def gen_new_df_with_used_cols(df: Union[pd.DataFrame, dd.DataFrame], x: Optional[Union[str, LatLong]]=None, y: Optional[Union[str, LatLong]]=None, z: Optional[str]=None) -> Tuple[Dict[Optional[Union[str, LatLong]], Optional[str]], Union[pd.DataFrame, dd.DataFrame]]:
"""
Keep only used columns in x, y, z, and gen new Latlong columns if any x, y, z is LatLong.
"""
new_names: Dict[Optional[Union[str, LatLong]], Optional[str]] = {}
if x is None and y is None and (z is None):
return ({}, df)
used_org_cols = set()
for col in (x, y, z):
if col is not None:
if isinstance(col, LatLong):
used_org_cols.add(col.lat)
used_org_cols.add(col.long)
else:
used_org_cols.add(col)
if isinstance(df, dd.DataFrame):
pd_df = df[list(used_org_cols)].compute()
else:
pd_df = df[list(used_org_cols)]
new_srss: Dict[Optional[Union[str, LatLong]], pd.Series] = {}
for col in (x, y, z):
if isinstance(col, LatLong):
columns = pd_df.columns
name = col.lat + '_' + col.long
i = 0
while name in columns:
name = f'{name}_{i}'
i += 1
lat_long = pd.Series(zip(pd_df[col.lat], pd_df[col.long]), name=name)
lat_long = lat_long
new_names[col] = lat_long.name
new_srss[lat_long.name] = lat_long
new_srss[col.lat] = pd_df[col.lat]
new_srss[col.long] = pd_df[col.long]
else:
new_names[col] = col
if col is not None:
new_srss[col] = pd_df[col]
new_df = pd.concat(new_srss.values(), axis='columns')
return (new_names, new_df)
|
dataprep
|
positive
|
@mock.patch('search.services.index.Search')
@mock.patch('search.services.index.Elasticsearch')
def test_classic_query_complex(self, mock_Elasticsearch, mock_Search):
""":class:`.index.search` supports :class:`SimpleQuery`."""
mock_results = mock.MagicMock()
mock_results.__getitem__.return_value = {'total': 53}
<DeepExtract>
rdata = {'authors': [{'full_name': 'N. Ame'}], 'owners': [{'full_name': 'N. Ame'}], 'submitter': {'full_name': 'N. Ame'}, 'paper_id': '1234.56789', 'title': 'some title', 'abstract': 'An abstract with math $/alpha * /alpha$ for you.'}
</DeepExtract>
mock_result = mock.MagicMock(_d_=rdata, **rdata)
mock_result.meta.score = 1
mock_results.__iter__.return_value = [mock_result]
mock_Search.execute.return_value = mock_results
mock_Search.return_value = mock_Search
mock_Search.filter.return_value = mock_Search
mock_Search.highlight.return_value = mock_Search
mock_Search.highlight_options.return_value = mock_Search
mock_Search.query.return_value = mock_Search
mock_Search.sort.return_value = mock_Search
mock_Search.__getitem__.return_value = mock_Search
query = ClassicAPIQuery(phrase=(Operator.OR, Term(Field.Author, 'copernicus'), (Operator.ANDNOT, Term(Field.Title, 'dark matter'))), order=SortOrder(by=SortBy.relevance), size=10)
document_set = index.SearchSession.search(query, highlight=True)
self.assertEqual(document_set['metadata']['start'], 0)
self.assertEqual(document_set['metadata']['total_results'], 53)
self.assertEqual(document_set['metadata']['current_page'], 1)
self.assertEqual(document_set['metadata']['total_pages'], 6)
self.assertEqual(document_set['metadata']['size'], 10)
self.assertEqual(len(document_set['results']), 1)
|
@mock.patch('search.services.index.Search')
@mock.patch('search.services.index.Elasticsearch')
def test_classic_query_complex(self, mock_Elasticsearch, mock_Search):
""":class:`.index.search` supports :class:`SimpleQuery`."""
mock_results = mock.MagicMock()
mock_results.__getitem__.return_value = {'total': 53}
rdata = {'authors': [{'full_name': 'N. Ame'}], 'owners': [{'full_name': 'N. Ame'}], 'submitter': {'full_name': 'N. Ame'}, 'paper_id': '1234.56789', 'title': 'some title', 'abstract': 'An abstract with math $/alpha * /alpha$ for you.'}
mock_result = mock.MagicMock(_d_=rdata, **rdata)
mock_result.meta.score = 1
mock_results.__iter__.return_value = [mock_result]
mock_Search.execute.return_value = mock_results
mock_Search.return_value = mock_Search
mock_Search.filter.return_value = mock_Search
mock_Search.highlight.return_value = mock_Search
mock_Search.highlight_options.return_value = mock_Search
mock_Search.query.return_value = mock_Search
mock_Search.sort.return_value = mock_Search
mock_Search.__getitem__.return_value = mock_Search
query = ClassicAPIQuery(phrase=(Operator.OR, Term(Field.Author, 'copernicus'), (Operator.ANDNOT, Term(Field.Title, 'dark matter'))), order=SortOrder(by=SortBy.relevance), size=10)
document_set = index.SearchSession.search(query, highlight=True)
self.assertEqual(document_set['metadata']['start'], 0)
self.assertEqual(document_set['metadata']['total_results'], 53)
self.assertEqual(document_set['metadata']['current_page'], 1)
self.assertEqual(document_set['metadata']['total_pages'], 6)
self.assertEqual(document_set['metadata']['size'], 10)
self.assertEqual(len(document_set['results']), 1)
|
arxiv-search
|
positive
|
def filter_yamldata(data: Union[List, dict], groups: List[str], hostname: str, recdepth=100) -> Union[List, dict]:
"""Filter data and remove dictionary items if they have a key that specifies
a list of groups, but none of those groups are included in the groups argument.
Should only be called with yaml.safe_load:ed data.
Args:
data: yaml safe_load:ed data
groups: a list of groups to filter on
hostname: a hostname to filter on
recdepth: recursion depth limit, default 100
Returns:
filtered data
"""
if recdepth < 1:
return data
elif isinstance(data, list):
ret_l = []
for item in data:
<DeepExtract>
if recdepth - 1 < 1:
f_item = item
elif isinstance(item, list):
ret_l = []
for item in item:
f_item = filter_yamldata(item, groups, hostname, recdepth - 1 - 1)
if f_item:
ret_l.append(f_item)
f_item = ret_l
elif isinstance(item, dict):
ret_d = {}
group_match = False
hostname_match = False
do_filter_group = False
do_filter_hostname = False
for (k, v) in item.items():
if not v:
ret_d[k] = v
continue
if k == 'groups':
if not isinstance(v, list):
raise SettingsSyntaxError('Groups field must be a list or empty (currently {}) in: {}'.format(type(v).__name__, item))
do_filter_group = True
ret_d[k] = v
for group in v:
if group in groups:
group_match = True
elif k == 'devices':
if not isinstance(v, list):
raise SettingsSyntaxError('Devices field must be a list or empty (currently {}) in: {}'.format(type(v).__name__, item))
do_filter_hostname = True
ret_d[k] = v
if hostname in v:
hostname_match = True
else:
ret_v = filter_yamldata(v, groups, hostname, recdepth - 1 - 1)
if ret_v:
ret_d[k] = ret_v
if (do_filter_group or do_filter_hostname) and (not group_match) and (not hostname_match):
f_item = None
else:
f_item = ret_d
else:
f_item = item
</DeepExtract>
if f_item:
ret_l.append(f_item)
return ret_l
elif isinstance(data, dict):
ret_d = {}
group_match = False
hostname_match = False
do_filter_group = False
do_filter_hostname = False
for (k, v) in data.items():
if not v:
ret_d[k] = v
continue
if k == 'groups':
if not isinstance(v, list):
raise SettingsSyntaxError('Groups field must be a list or empty (currently {}) in: {}'.format(type(v).__name__, data))
do_filter_group = True
ret_d[k] = v
for group in v:
if group in groups:
group_match = True
elif k == 'devices':
if not isinstance(v, list):
raise SettingsSyntaxError('Devices field must be a list or empty (currently {}) in: {}'.format(type(v).__name__, data))
do_filter_hostname = True
ret_d[k] = v
if hostname in v:
hostname_match = True
else:
<DeepExtract>
if recdepth - 1 < 1:
ret_v = v
elif isinstance(v, list):
ret_l = []
for item in v:
f_item = filter_yamldata(item, groups, hostname, recdepth - 1 - 1)
if f_item:
ret_l.append(f_item)
ret_v = ret_l
elif isinstance(v, dict):
ret_d = {}
group_match = False
hostname_match = False
do_filter_group = False
do_filter_hostname = False
for (k, v) in v.items():
if not v:
ret_d[k] = v
continue
if k == 'groups':
if not isinstance(v, list):
raise SettingsSyntaxError('Groups field must be a list or empty (currently {}) in: {}'.format(type(v).__name__, v))
do_filter_group = True
ret_d[k] = v
for group in v:
if group in groups:
group_match = True
elif k == 'devices':
if not isinstance(v, list):
raise SettingsSyntaxError('Devices field must be a list or empty (currently {}) in: {}'.format(type(v).__name__, v))
do_filter_hostname = True
ret_d[k] = v
if hostname in v:
hostname_match = True
else:
ret_v = filter_yamldata(v, groups, hostname, recdepth - 1 - 1)
if ret_v:
ret_d[k] = ret_v
if (do_filter_group or do_filter_hostname) and (not group_match) and (not hostname_match):
ret_v = None
else:
ret_v = ret_d
else:
ret_v = v
</DeepExtract>
if ret_v:
ret_d[k] = ret_v
if (do_filter_group or do_filter_hostname) and (not group_match) and (not hostname_match):
return None
else:
return ret_d
else:
return data
|
def filter_yamldata(data: Union[List, dict], groups: List[str], hostname: str, recdepth=100) -> Union[List, dict]:
"""Filter data and remove dictionary items if they have a key that specifies
a list of groups, but none of those groups are included in the groups argument.
Should only be called with yaml.safe_load:ed data.
Args:
data: yaml safe_load:ed data
groups: a list of groups to filter on
hostname: a hostname to filter on
recdepth: recursion depth limit, default 100
Returns:
filtered data
"""
if recdepth < 1:
return data
elif isinstance(data, list):
ret_l = []
for item in data:
if recdepth - 1 < 1:
f_item = item
elif isinstance(item, list):
ret_l = []
for item in item:
f_item = filter_yamldata(item, groups, hostname, recdepth - 1 - 1)
if f_item:
ret_l.append(f_item)
f_item = ret_l
elif isinstance(item, dict):
ret_d = {}
group_match = False
hostname_match = False
do_filter_group = False
do_filter_hostname = False
for (k, v) in item.items():
if not v:
ret_d[k] = v
continue
if k == 'groups':
if not isinstance(v, list):
raise SettingsSyntaxError('Groups field must be a list or empty (currently {}) in: {}'.format(type(v).__name__, item))
do_filter_group = True
ret_d[k] = v
for group in v:
if group in groups:
group_match = True
elif k == 'devices':
if not isinstance(v, list):
raise SettingsSyntaxError('Devices field must be a list or empty (currently {}) in: {}'.format(type(v).__name__, item))
do_filter_hostname = True
ret_d[k] = v
if hostname in v:
hostname_match = True
else:
ret_v = filter_yamldata(v, groups, hostname, recdepth - 1 - 1)
if ret_v:
ret_d[k] = ret_v
if (do_filter_group or do_filter_hostname) and (not group_match) and (not hostname_match):
f_item = None
else:
f_item = ret_d
else:
f_item = item
if f_item:
ret_l.append(f_item)
return ret_l
elif isinstance(data, dict):
ret_d = {}
group_match = False
hostname_match = False
do_filter_group = False
do_filter_hostname = False
for (k, v) in data.items():
if not v:
ret_d[k] = v
continue
if k == 'groups':
if not isinstance(v, list):
raise SettingsSyntaxError('Groups field must be a list or empty (currently {}) in: {}'.format(type(v).__name__, data))
do_filter_group = True
ret_d[k] = v
for group in v:
if group in groups:
group_match = True
elif k == 'devices':
if not isinstance(v, list):
raise SettingsSyntaxError('Devices field must be a list or empty (currently {}) in: {}'.format(type(v).__name__, data))
do_filter_hostname = True
ret_d[k] = v
if hostname in v:
hostname_match = True
else:
if recdepth - 1 < 1:
ret_v = v
elif isinstance(v, list):
ret_l = []
for item in v:
f_item = filter_yamldata(item, groups, hostname, recdepth - 1 - 1)
if f_item:
ret_l.append(f_item)
ret_v = ret_l
elif isinstance(v, dict):
ret_d = {}
group_match = False
hostname_match = False
do_filter_group = False
do_filter_hostname = False
for (k, v) in v.items():
if not v:
ret_d[k] = v
continue
if k == 'groups':
if not isinstance(v, list):
raise SettingsSyntaxError('Groups field must be a list or empty (currently {}) in: {}'.format(type(v).__name__, v))
do_filter_group = True
ret_d[k] = v
for group in v:
if group in groups:
group_match = True
elif k == 'devices':
if not isinstance(v, list):
raise SettingsSyntaxError('Devices field must be a list or empty (currently {}) in: {}'.format(type(v).__name__, v))
do_filter_hostname = True
ret_d[k] = v
if hostname in v:
hostname_match = True
else:
ret_v = filter_yamldata(v, groups, hostname, recdepth - 1 - 1)
if ret_v:
ret_d[k] = ret_v
if (do_filter_group or do_filter_hostname) and (not group_match) and (not hostname_match):
ret_v = None
else:
ret_v = ret_d
else:
ret_v = v
if ret_v:
ret_d[k] = ret_v
if (do_filter_group or do_filter_hostname) and (not group_match) and (not hostname_match):
return None
else:
return ret_d
else:
return data
|
cnaas-nms
|
positive
|
def status(self) -> Any:
"""Return the status of the Neptune cluster.
Returns
-------
str
The result of the call to the status API for the Neptune cluster
"""
url = f'{HTTP_PROTOCOL}://{self.host}:{self.port}/status'
<DeepExtract>
request = requests.Request(method='GET', url=url, data='', params=params, headers=headers)
if self.boto3_session is not None:
aws_request = self._get_aws_request(method='GET', url=url, data='', params=params, headers=headers, service=service)
request.headers = dict(aws_request.headers)
req = request.prepare()
</DeepExtract>
res = self._http_session.send(req)
return res.json()
|
def status(self) -> Any:
"""Return the status of the Neptune cluster.
Returns
-------
str
The result of the call to the status API for the Neptune cluster
"""
url = f'{HTTP_PROTOCOL}://{self.host}:{self.port}/status'
request = requests.Request(method='GET', url=url, data='', params=params, headers=headers)
if self.boto3_session is not None:
aws_request = self._get_aws_request(method='GET', url=url, data='', params=params, headers=headers, service=service)
request.headers = dict(aws_request.headers)
req = request.prepare()
res = self._http_session.send(req)
return res.json()
|
aws-data-wrangler
|
positive
|
def eval_det_cls_wrapper(arguments):
(pred, gt, classname, ovthresh, use_07_metric, get_iou_func) = arguments
<DeepExtract>
class_recs = {}
npos = 0
for img_id in gt.keys():
bbox = np.array(gt[img_id])
det = [False] * len(bbox)
npos += len(bbox)
class_recs[img_id] = {'bbox': bbox, 'det': det}
for img_id in pred.keys():
if img_id not in gt:
class_recs[img_id] = {'bbox': np.array([]), 'det': []}
image_ids = []
confidence = []
BB = []
for img_id in pred.keys():
for (box, score) in pred[img_id]:
image_ids.append(img_id)
confidence.append(score)
BB.append(box)
confidence = np.array(confidence)
BB = np.array(BB)
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, ...]
image_ids = [image_ids[x] for x in sorted_ind]
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, ...].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
for j in range(BBGT.shape[0]):
iou = get_iou_main(get_iou_func, (bb, BBGT[j, ...]))
if iou > ovmax:
ovmax = iou
jmax = j
if ovmax > ovthresh:
if not R['det'][jmax]:
tp[d] = 1.0
R['det'][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, classname, ovthresh, use_07_metric)
(rec, prec, ap) = (rec, prec, ap)
</DeepExtract>
return (rec, prec, ap)
|
def eval_det_cls_wrapper(arguments):
(pred, gt, classname, ovthresh, use_07_metric, get_iou_func) = arguments
class_recs = {}
npos = 0
for img_id in gt.keys():
bbox = np.array(gt[img_id])
det = [False] * len(bbox)
npos += len(bbox)
class_recs[img_id] = {'bbox': bbox, 'det': det}
for img_id in pred.keys():
if img_id not in gt:
class_recs[img_id] = {'bbox': np.array([]), 'det': []}
image_ids = []
confidence = []
BB = []
for img_id in pred.keys():
for (box, score) in pred[img_id]:
image_ids.append(img_id)
confidence.append(score)
BB.append(box)
confidence = np.array(confidence)
BB = np.array(BB)
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, ...]
image_ids = [image_ids[x] for x in sorted_ind]
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, ...].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
for j in range(BBGT.shape[0]):
iou = get_iou_main(get_iou_func, (bb, BBGT[j, ...]))
if iou > ovmax:
ovmax = iou
jmax = j
if ovmax > ovthresh:
if not R['det'][jmax]:
tp[d] = 1.0
R['det'][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, classname, ovthresh, use_07_metric)
(rec, prec, ap) = (rec, prec, ap)
return (rec, prec, ap)
|
3DIoUMatch
|
positive
|
def read_file(file, path, start_idx, signal, done, target):
dataset = tf.data.TFRecordDataset(file, compression_type='')
try:
signal.value = sum((1 for _ in dataset))
except:
signal.value = 0
with open(os.path.join('/tmp', 'waymo_missing.txt'), 'a') as f:
f.write(f'{file}\n')
return
target.value += signal.value
idx = start_idx
for data in dataset:
dname = '%06d' % idx
idx += 1
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data.numpy()))
<DeepExtract>
front_images = list(filter(lambda x: x.name == open_dataset.CameraName.Name.FRONT, frame.images))
assert len(front_images) == 1
front_image = Image.fromarray(tf.image.decode_jpeg(front_images[0].image).numpy())
front_image.save(os.path.join(path['left'], f'{dname}.png'))
</DeepExtract>
<DeepExtract>
(range_images, camera_projections, range_image_top_pose) = frame_utils.parse_range_image_and_camera_projection(frame)
(points, cp_points) = frame_utils.convert_range_image_to_point_cloud(frame, range_images, camera_projections, range_image_top_pose)
(points_ri2, cp_points_ri2) = frame_utils.convert_range_image_to_point_cloud(frame, range_images, camera_projections, range_image_top_pose, ri_index=1)
points = np.concatenate(points + points_ri2, axis=0)
points = np.concatenate([points, np.ones((points.shape[0], 1), dtype=np.float32)], axis=1)
points = points.reshape(-1).astype(np.float32)
points.tofile(os.path.join(path['lidar'], f'{dname}.bin'))
</DeepExtract>
front_calibs = list(filter(lambda x: x.name == open_dataset.CameraName.Name.FRONT, frame.context.camera_calibrations))
assert len(front_calibs) == 1
front_calib = front_calibs[0]
with open(os.path.join(path['calib'], f'{dname}.txt'), 'w') as f:
f.write(convert_calib(front_calib))
<DeepExtract>
extrinsic = np.reshape(np.array(front_calib.extrinsic.transform), [4, 4])
extrinsic = tf.linalg.inv(extrinsic).numpy()
norm = np.array([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])
extrinsic[:3, 3] = extrinsic[:3, 3].reshape(1, 3).dot(norm)
_norm = np.eye(4)
_norm[:3, :3] = norm.T
extrinsic = extrinsic.dot(_norm)
extrinsic = extrinsic
</DeepExtract>
<DeepExtract>
intrinsic_matrix = np.zeros((3, 4))
intrinsic_matrix[0, 0] = front_calib.intrinsic[0]
intrinsic_matrix[0, 1] = 0.0
intrinsic_matrix[0, 2] = front_calib.intrinsic[2]
intrinsic_matrix[1, 1] = front_calib.intrinsic[1]
intrinsic_matrix[1, 2] = front_calib.intrinsic[3]
intrinsic_matrix[2, 2] = 1.0
front_calib.intrinsic = intrinsic_matrix
</DeepExtract>
objs = map(lambda x: form_kitty_label(x, extrinsic, intrinsic, front_calib.height, front_calib.width), frame.laser_labels)
objs = list(filter(lambda x: x is not None, objs))
<DeepExtract>
_map = np.ones((front_calib.height, front_calib.width), dtype=np.uint8) * -1
objs = sorted(objs, key=lambda x: x['depth'], reverse=True)
for (i, obj) in enumerate(objs):
_map[round(obj['bbox'][1]):round(obj['bbox'][3]), round(obj['bbox'][0]):round(obj['bbox'][2])] = i
(unique, counts) = np.unique(_map, return_counts=True)
counts = dict(zip(unique, counts))
for (i, obj) in enumerate(objs):
if i not in counts.keys():
counts[i] = 0
occlusion = 1.0 - counts[i] / (obj['bbox'][3] - obj['bbox'][1]) / (obj['bbox'][2] - obj['bbox'][0])
obj['occluded'] = int(np.clip(occlusion * 4, 0, 3))
objs = objs
</DeepExtract>
<DeepExtract>
labels = []
for obj in objs:
string_to_write = f"{obj['type']} {'%.2f' % obj['truncated']} {obj['occluded']} {'%.2f' % obj['alpha']} "
string_to_write += ' '.join(map(lambda x: '%.2f' % x, obj['bbox'])) + ' '
string_to_write += ' '.join(map(lambda x: '%.2f' % x, obj['dimensions'])) + ' '
string_to_write += ' '.join(map(lambda x: '%.2f' % x, obj['location'])) + ' '
string_to_write += '%0.2f' % obj['rotation_y']
labels.append(string_to_write)
with open(os.path.join(path['label'], f'{dname}.txt'), 'w') as f:
f.write('\n'.join(labels))
</DeepExtract>
done.value += 1
|
def read_file(file, path, start_idx, signal, done, target):
dataset = tf.data.TFRecordDataset(file, compression_type='')
try:
signal.value = sum((1 for _ in dataset))
except:
signal.value = 0
with open(os.path.join('/tmp', 'waymo_missing.txt'), 'a') as f:
f.write(f'{file}\n')
return
target.value += signal.value
idx = start_idx
for data in dataset:
dname = '%06d' % idx
idx += 1
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data.numpy()))
front_images = list(filter(lambda x: x.name == open_dataset.CameraName.Name.FRONT, frame.images))
assert len(front_images) == 1
front_image = Image.fromarray(tf.image.decode_jpeg(front_images[0].image).numpy())
front_image.save(os.path.join(path['left'], f'{dname}.png'))
(range_images, camera_projections, range_image_top_pose) = frame_utils.parse_range_image_and_camera_projection(frame)
(points, cp_points) = frame_utils.convert_range_image_to_point_cloud(frame, range_images, camera_projections, range_image_top_pose)
(points_ri2, cp_points_ri2) = frame_utils.convert_range_image_to_point_cloud(frame, range_images, camera_projections, range_image_top_pose, ri_index=1)
points = np.concatenate(points + points_ri2, axis=0)
points = np.concatenate([points, np.ones((points.shape[0], 1), dtype=np.float32)], axis=1)
points = points.reshape(-1).astype(np.float32)
points.tofile(os.path.join(path['lidar'], f'{dname}.bin'))
front_calibs = list(filter(lambda x: x.name == open_dataset.CameraName.Name.FRONT, frame.context.camera_calibrations))
assert len(front_calibs) == 1
front_calib = front_calibs[0]
with open(os.path.join(path['calib'], f'{dname}.txt'), 'w') as f:
f.write(convert_calib(front_calib))
extrinsic = np.reshape(np.array(front_calib.extrinsic.transform), [4, 4])
extrinsic = tf.linalg.inv(extrinsic).numpy()
norm = np.array([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])
extrinsic[:3, 3] = extrinsic[:3, 3].reshape(1, 3).dot(norm)
_norm = np.eye(4)
_norm[:3, :3] = norm.T
extrinsic = extrinsic.dot(_norm)
extrinsic = extrinsic
intrinsic_matrix = np.zeros((3, 4))
intrinsic_matrix[0, 0] = front_calib.intrinsic[0]
intrinsic_matrix[0, 1] = 0.0
intrinsic_matrix[0, 2] = front_calib.intrinsic[2]
intrinsic_matrix[1, 1] = front_calib.intrinsic[1]
intrinsic_matrix[1, 2] = front_calib.intrinsic[3]
intrinsic_matrix[2, 2] = 1.0
front_calib.intrinsic = intrinsic_matrix
objs = map(lambda x: form_kitty_label(x, extrinsic, intrinsic, front_calib.height, front_calib.width), frame.laser_labels)
objs = list(filter(lambda x: x is not None, objs))
_map = np.ones((front_calib.height, front_calib.width), dtype=np.uint8) * -1
objs = sorted(objs, key=lambda x: x['depth'], reverse=True)
for (i, obj) in enumerate(objs):
_map[round(obj['bbox'][1]):round(obj['bbox'][3]), round(obj['bbox'][0]):round(obj['bbox'][2])] = i
(unique, counts) = np.unique(_map, return_counts=True)
counts = dict(zip(unique, counts))
for (i, obj) in enumerate(objs):
if i not in counts.keys():
counts[i] = 0
occlusion = 1.0 - counts[i] / (obj['bbox'][3] - obj['bbox'][1]) / (obj['bbox'][2] - obj['bbox'][0])
obj['occluded'] = int(np.clip(occlusion * 4, 0, 3))
objs = objs
labels = []
for obj in objs:
string_to_write = f"{obj['type']} {'%.2f' % obj['truncated']} {obj['occluded']} {'%.2f' % obj['alpha']} "
string_to_write += ' '.join(map(lambda x: '%.2f' % x, obj['bbox'])) + ' '
string_to_write += ' '.join(map(lambda x: '%.2f' % x, obj['dimensions'])) + ' '
string_to_write += ' '.join(map(lambda x: '%.2f' % x, obj['location'])) + ' '
string_to_write += '%0.2f' % obj['rotation_y']
labels.append(string_to_write)
with open(os.path.join(path['label'], f'{dname}.txt'), 'w') as f:
f.write('\n'.join(labels))
done.value += 1
|
3D_adapt_auto_driving
|
positive
|
def step(self, action):
"""
Perform an action and update the state
"""
<DeepExtract>
if self.kinematics == 'holonomic':
assert isinstance(action, ActionXY)
else:
assert isinstance(action, ActionRot)
</DeepExtract>
<DeepExtract>
self.check_validity(action)
if self.kinematics == 'holonomic':
px = self.px + action.vx * self.time_step
py = self.py + action.vy * self.time_step
else:
theta = self.theta + action.r
px = self.px + np.cos(theta) * action.v * self.time_step
py = self.py + np.sin(theta) * action.v * self.time_step
pos = (px, py)
</DeepExtract>
(self.px, self.py) = pos
if self.kinematics == 'holonomic':
self.vx = action.vx
self.vy = action.vy
else:
self.theta = (self.theta + action.r) % (2 * np.pi)
self.vx = action.v * np.cos(self.theta)
self.vy = action.v * np.sin(self.theta)
|
def step(self, action):
"""
Perform an action and update the state
"""
if self.kinematics == 'holonomic':
assert isinstance(action, ActionXY)
else:
assert isinstance(action, ActionRot)
self.check_validity(action)
if self.kinematics == 'holonomic':
px = self.px + action.vx * self.time_step
py = self.py + action.vy * self.time_step
else:
theta = self.theta + action.r
px = self.px + np.cos(theta) * action.v * self.time_step
py = self.py + np.sin(theta) * action.v * self.time_step
pos = (px, py)
(self.px, self.py) = pos
if self.kinematics == 'holonomic':
self.vx = action.vx
self.vy = action.vy
else:
self.theta = (self.theta + action.r) % (2 * np.pi)
self.vx = action.v * np.cos(self.theta)
self.vy = action.v * np.sin(self.theta)
|
arena-rosnav
|
positive
|
@app.route('/findvs', methods=['POST'])
def findvs():
if request.method == 'POST':
json_request = request.get_json()
payload_str = json_request['payload']
payload = json.loads(payload_str)
list_attributes = ['' for (k, v) in payload.items() if k[0] == '0']
list_samples = ['' for el in list_attributes]
for (k, v) in payload.items():
row_idx = int(k[0])
col_idx = int(k[2])
if row_idx == 0:
list_attributes[col_idx] = v
else:
list_samples[col_idx] = v
global dod
global view_generator
view_generator = iter(dod.virtual_schema_iterative_search(list_attributes, list_samples))
(mvs, attrs_to_project, view_metadata) = next(view_generator)
proj_view = dpu.project(mvs, attrs_to_project)
<DeepExtract>
htmls = []
for c in proj_view.columns:
html_repr = proj_view[c].describe().to_frame().to_html()
htmls.append(html_repr)
analysis = htmls
</DeepExtract>
sample_view = proj_view.head(10)
html_dataframe = sample_view.to_html()
global matview
matview = proj_view
return jsonify({'view': html_dataframe, 'analysis': analysis, 'joingraph': view_metadata})
|
@app.route('/findvs', methods=['POST'])
def findvs():
if request.method == 'POST':
json_request = request.get_json()
payload_str = json_request['payload']
payload = json.loads(payload_str)
list_attributes = ['' for (k, v) in payload.items() if k[0] == '0']
list_samples = ['' for el in list_attributes]
for (k, v) in payload.items():
row_idx = int(k[0])
col_idx = int(k[2])
if row_idx == 0:
list_attributes[col_idx] = v
else:
list_samples[col_idx] = v
global dod
global view_generator
view_generator = iter(dod.virtual_schema_iterative_search(list_attributes, list_samples))
(mvs, attrs_to_project, view_metadata) = next(view_generator)
proj_view = dpu.project(mvs, attrs_to_project)
htmls = []
for c in proj_view.columns:
html_repr = proj_view[c].describe().to_frame().to_html()
htmls.append(html_repr)
analysis = htmls
sample_view = proj_view.head(10)
html_dataframe = sample_view.to_html()
global matview
matview = proj_view
return jsonify({'view': html_dataframe, 'analysis': analysis, 'joingraph': view_metadata})
|
aurum-datadiscovery
|
positive
|
def test_afrq(self):
"""AFRQ Counting Disease Carriers"""
<DeepExtract>
def p_recessive(p):
aa = 2 * sqrt(p) - p
aa = [p_recessive(p) for p in [0.1, 0.25, 0.5]]
</DeepExtract>
self.assertAlmostEqual(0.532, aa[0], 3)
self.assertAlmostEqual(0.75, aa[1], 3)
self.assertAlmostEqual(0.914, aa[2], 3)
|
def test_afrq(self):
"""AFRQ Counting Disease Carriers"""
def p_recessive(p):
aa = 2 * sqrt(p) - p
aa = [p_recessive(p) for p in [0.1, 0.25, 0.5]]
self.assertAlmostEqual(0.532, aa[0], 3)
self.assertAlmostEqual(0.75, aa[1], 3)
self.assertAlmostEqual(0.914, aa[2], 3)
|
bioinformatics
|
positive
|
def load_metadata_from_all_jobs(self, search_id: Hashable, key: Hashable) -> List[Any]:
"""Loads a given metadata value from all jobs.
Args:
search_id (Hashable): The identifier of the search.
key (Hashable): The identifier of the value.
Returns:
List[Any]: A list of all the retrieved metadata values.
"""
search_id
<DeepExtract>
job_ids = self._redis.lrange(f'search:{search_id}.job_id_list', 0, -1)
jobs_ids = job_ids
</DeepExtract>
values = []
for job_id in jobs_ids:
try:
value = self._redis.json().get(f'job:{job_id}', f'.metadata.{key}')
except redis.exceptions.ResponseError:
value = None
if value is not None:
values.append(value)
return values
|
def load_metadata_from_all_jobs(self, search_id: Hashable, key: Hashable) -> List[Any]:
"""Loads a given metadata value from all jobs.
Args:
search_id (Hashable): The identifier of the search.
key (Hashable): The identifier of the value.
Returns:
List[Any]: A list of all the retrieved metadata values.
"""
search_id
job_ids = self._redis.lrange(f'search:{search_id}.job_id_list', 0, -1)
jobs_ids = job_ids
values = []
for job_id in jobs_ids:
try:
value = self._redis.json().get(f'job:{job_id}', f'.metadata.{key}')
except redis.exceptions.ResponseError:
value = None
if value is not None:
values.append(value)
return values
|
deephyper
|
positive
|
def resize_affine(affine, shape, target_shape, copy=True):
if copy:
affine = np.copy(affine)
scale = np.divide(shape, target_shape)
<DeepExtract>
RZS = affine[:3, :3]
spacing = np.sqrt(np.sum(RZS * RZS, axis=0))
</DeepExtract>
target_spacing = np.multiply(spacing, scale)
<DeepExtract>
if spacing is None:
spacing = get_spacing_from_affine(affine)
offset = calculate_origin_offset(target_spacing, spacing)
new_affine = np.copy(affine)
translation_affine = np.diag(np.ones(4))
translation_affine[:3, 3] = offset
new_affine = np.matmul(new_affine, translation_affine)
new_affine = set_affine_spacing(new_affine, target_spacing)
affine = new_affine
</DeepExtract>
return affine
|
def resize_affine(affine, shape, target_shape, copy=True):
if copy:
affine = np.copy(affine)
scale = np.divide(shape, target_shape)
RZS = affine[:3, :3]
spacing = np.sqrt(np.sum(RZS * RZS, axis=0))
target_spacing = np.multiply(spacing, scale)
if spacing is None:
spacing = get_spacing_from_affine(affine)
offset = calculate_origin_offset(target_spacing, spacing)
new_affine = np.copy(affine)
translation_affine = np.diag(np.ones(4))
translation_affine[:3, 3] = offset
new_affine = np.matmul(new_affine, translation_affine)
new_affine = set_affine_spacing(new_affine, target_spacing)
affine = new_affine
return affine
|
3DUnetCNN
|
positive
|
def createRequestV2(self, data, userID):
<DeepExtract>
comms = []
for line in data:
numbers = [line[3], line[4], line[5], line[7]]
buy = line[4] if line[4] != '' else 0
stock = line[7] if line[7] != '' else 0
sell = line[3] if line[3] != '' else 0
demand = line[5] if line[5] != '' else 0
if not buy is None:
if buy != '':
try:
int(buy)
except:
commodities = None
if not stock is None:
if stock != '':
try:
int(stock)
except:
commodities = None
if not sell is None:
if sell != '':
try:
int(sell)
except:
commodities = None
if not demand is None:
if demand != '':
try:
int(demand)
except:
commodities = None
('name', 'buyPrice', 'supply', 'sellPrice', 'demand')
new_dict = {'name': line[2], 'buyPrice': int(buy) if not buy is None else 0, 'supply': int(stock) if not stock is None else 0, 'sellPrice': int(sell) if not sell is None else 0, 'demand': int(demand) if not demand is None else 0}
if line[8] != '':
new_dict['supplyLevel'] = line[8]
if line[6] != '':
new_dict['demandLevel'] = line[6]
comms.append(new_dict)
commodities = comms
</DeepExtract>
if commodities is None:
return {}
if len(commodities) < 1:
return {}
request = {'$schemaRef': 'http://schemas.elite-markets.net/eddn/commodity/2', 'header': {'uploaderID': userID, 'softwareName': 'EliteOCR', 'softwareVersion': self.parent.appversion}, 'message': {'systemName': data[0][0], 'stationName': data[0][1], 'timestamp': data[0][9], 'commodities': commodities}}
return request
|
def createRequestV2(self, data, userID):
comms = []
for line in data:
numbers = [line[3], line[4], line[5], line[7]]
buy = line[4] if line[4] != '' else 0
stock = line[7] if line[7] != '' else 0
sell = line[3] if line[3] != '' else 0
demand = line[5] if line[5] != '' else 0
if not buy is None:
if buy != '':
try:
int(buy)
except:
commodities = None
if not stock is None:
if stock != '':
try:
int(stock)
except:
commodities = None
if not sell is None:
if sell != '':
try:
int(sell)
except:
commodities = None
if not demand is None:
if demand != '':
try:
int(demand)
except:
commodities = None
('name', 'buyPrice', 'supply', 'sellPrice', 'demand')
new_dict = {'name': line[2], 'buyPrice': int(buy) if not buy is None else 0, 'supply': int(stock) if not stock is None else 0, 'sellPrice': int(sell) if not sell is None else 0, 'demand': int(demand) if not demand is None else 0}
if line[8] != '':
new_dict['supplyLevel'] = line[8]
if line[6] != '':
new_dict['demandLevel'] = line[6]
comms.append(new_dict)
commodities = comms
if commodities is None:
return {}
if len(commodities) < 1:
return {}
request = {'$schemaRef': 'http://schemas.elite-markets.net/eddn/commodity/2', 'header': {'uploaderID': userID, 'softwareName': 'EliteOCR', 'softwareVersion': self.parent.appversion}, 'message': {'systemName': data[0][0], 'stationName': data[0][1], 'timestamp': data[0][9], 'commodities': commodities}}
return request
|
EliteOCR
|
positive
|
def evaluate(generator, retinanet, iou_threshold=0.5, score_threshold=0.05, max_detections=100, save_path=None):
""" Evaluate a given dataset using a given object_detection.
# Arguments
generator : The generator that represents the dataset to evaluate.
object_detection : The object_detection to evaluate.
iou_threshold : The threshold used to consider when a detection is positive or negative.
score_threshold : The score confidence threshold to use for detections.
max_detections : The maximum number of detections to use per image.
save_path : The path to save images with visualized detections to.
# Returns
A dict mapping class names to mAP scores.
"""
<DeepExtract>
all_detections = [[None for i in range(generator.num_classes)] for j in range(len(generator))]
device = [p.device for p in retinanet.parameters()][0]
retinanet.eval()
st = time.time()
with torch.no_grad():
for index in range(len(generator)):
try:
data = generator[index]
except Exception as e:
for label in range(generator.num_classes):
all_detections[index][label] = np.zeros((0, 5))
continue
scale = data.scale
(scores, labels, boxes) = retinanet(data.image.permute(2, 0, 1).to(device=device).float().unsqueeze(dim=0))
scores = scores.cpu().numpy()
labels = labels.cpu().numpy()
boxes = boxes.cpu().numpy()
boxes /= scale
indices = np.where(scores > score_threshold)[0]
if indices.shape[0] > 0:
scores = scores[indices]
scores_sort = np.argsort(-scores)[:max_detections]
image_boxes = boxes[indices[scores_sort], :]
image_scores = scores[scores_sort]
image_labels = labels[indices[scores_sort]]
image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)
for label in range(generator.num_classes):
all_detections[index][label] = image_detections[image_detections[:, -1] == label, :-1]
else:
for label in range(generator.num_classes):
all_detections[index][label] = np.zeros((0, 5))
print('{}/{}'.format(index + 1, len(generator)), end='\r')
print('time to get detections during eval: ', time.time() - st)
all_detections = all_detections
</DeepExtract>
<DeepExtract>
all_annotations = [[None for i in range(generator.num_classes)] for j in range(len(generator))]
for i in range(len(generator)):
annotations = generator.load_annotations(i)
for label in range(generator.num_classes):
all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
print('{}/{}'.format(i + 1, len(generator)), end='\r')
all_annotations = all_annotations
</DeepExtract>
average_precisions = {}
for label in range(generator.num_classes):
false_positives = np.zeros((0,))
true_positives = np.zeros((0,))
scores = np.zeros((0,))
num_annotations = 0.0
for i in range(len(generator)):
detections = all_detections[i][label]
annotations = all_annotations[i][label]
num_annotations += annotations.shape[0]
detected_annotations = []
for d in detections:
scores = np.append(scores, d[4])
if annotations.shape[0] == 0:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
continue
<DeepExtract>
area = (annotations[:, 2] - annotations[:, 0]) * (annotations[:, 3] - annotations[:, 1])
iw = np.minimum(np.expand_dims(np.expand_dims(d, axis=0)[:, 2], axis=1), annotations[:, 2]) - np.maximum(np.expand_dims(np.expand_dims(d, axis=0)[:, 0], 1), annotations[:, 0])
ih = np.minimum(np.expand_dims(np.expand_dims(d, axis=0)[:, 3], axis=1), annotations[:, 3]) - np.maximum(np.expand_dims(np.expand_dims(d, axis=0)[:, 1], 1), annotations[:, 1])
iw = np.maximum(iw, 0)
ih = np.maximum(ih, 0)
ua = np.expand_dims((np.expand_dims(d, axis=0)[:, 2] - np.expand_dims(d, axis=0)[:, 0]) * (np.expand_dims(d, axis=0)[:, 3] - np.expand_dims(d, axis=0)[:, 1]), axis=1) + area - iw * ih
ua = np.maximum(ua, np.finfo(float).eps)
intersection = iw * ih
overlaps = intersection / ua
</DeepExtract>
assigned_annotation = np.argmax(overlaps, axis=1)
max_overlap = overlaps[0, assigned_annotation]
if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
detected_annotations.append(assigned_annotation)
else:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
if num_annotations == 0:
average_precisions[label] = (0, 0)
continue
indices = np.argsort(-scores)
false_positives = false_positives[indices]
true_positives = true_positives[indices]
false_positives = np.cumsum(false_positives)
true_positives = np.cumsum(true_positives)
recall = true_positives / num_annotations
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
<DeepExtract>
mrec = np.concatenate(([0.0], recall, [1.0]))
mpre = np.concatenate(([0.0], precision, [0.0]))
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
i = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
average_precision = ap
</DeepExtract>
average_precisions[label] = (average_precision, num_annotations)
total_num_annotations = 0
sum_AP = 0
for label in range(generator.num_classes):
sum_AP = sum_AP + average_precisions[label][0] * average_precisions[label][1]
total_num_annotations = total_num_annotations + average_precisions[label][1]
if total_num_annotations > 0:
mAP = sum_AP / total_num_annotations
else:
logger.info('total num annotations in val is zero')
mAP = np.int64(0)
print('\nmAP: ', mAP)
return mAP
|
def evaluate(generator, retinanet, iou_threshold=0.5, score_threshold=0.05, max_detections=100, save_path=None):
""" Evaluate a given dataset using a given object_detection.
# Arguments
generator : The generator that represents the dataset to evaluate.
object_detection : The object_detection to evaluate.
iou_threshold : The threshold used to consider when a detection is positive or negative.
score_threshold : The score confidence threshold to use for detections.
max_detections : The maximum number of detections to use per image.
save_path : The path to save images with visualized detections to.
# Returns
A dict mapping class names to mAP scores.
"""
all_detections = [[None for i in range(generator.num_classes)] for j in range(len(generator))]
device = [p.device for p in retinanet.parameters()][0]
retinanet.eval()
st = time.time()
with torch.no_grad():
for index in range(len(generator)):
try:
data = generator[index]
except Exception as e:
for label in range(generator.num_classes):
all_detections[index][label] = np.zeros((0, 5))
continue
scale = data.scale
(scores, labels, boxes) = retinanet(data.image.permute(2, 0, 1).to(device=device).float().unsqueeze(dim=0))
scores = scores.cpu().numpy()
labels = labels.cpu().numpy()
boxes = boxes.cpu().numpy()
boxes /= scale
indices = np.where(scores > score_threshold)[0]
if indices.shape[0] > 0:
scores = scores[indices]
scores_sort = np.argsort(-scores)[:max_detections]
image_boxes = boxes[indices[scores_sort], :]
image_scores = scores[scores_sort]
image_labels = labels[indices[scores_sort]]
image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)
for label in range(generator.num_classes):
all_detections[index][label] = image_detections[image_detections[:, -1] == label, :-1]
else:
for label in range(generator.num_classes):
all_detections[index][label] = np.zeros((0, 5))
print('{}/{}'.format(index + 1, len(generator)), end='\r')
print('time to get detections during eval: ', time.time() - st)
all_detections = all_detections
all_annotations = [[None for i in range(generator.num_classes)] for j in range(len(generator))]
for i in range(len(generator)):
annotations = generator.load_annotations(i)
for label in range(generator.num_classes):
all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
print('{}/{}'.format(i + 1, len(generator)), end='\r')
all_annotations = all_annotations
average_precisions = {}
for label in range(generator.num_classes):
false_positives = np.zeros((0,))
true_positives = np.zeros((0,))
scores = np.zeros((0,))
num_annotations = 0.0
for i in range(len(generator)):
detections = all_detections[i][label]
annotations = all_annotations[i][label]
num_annotations += annotations.shape[0]
detected_annotations = []
for d in detections:
scores = np.append(scores, d[4])
if annotations.shape[0] == 0:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
continue
area = (annotations[:, 2] - annotations[:, 0]) * (annotations[:, 3] - annotations[:, 1])
iw = np.minimum(np.expand_dims(np.expand_dims(d, axis=0)[:, 2], axis=1), annotations[:, 2]) - np.maximum(np.expand_dims(np.expand_dims(d, axis=0)[:, 0], 1), annotations[:, 0])
ih = np.minimum(np.expand_dims(np.expand_dims(d, axis=0)[:, 3], axis=1), annotations[:, 3]) - np.maximum(np.expand_dims(np.expand_dims(d, axis=0)[:, 1], 1), annotations[:, 1])
iw = np.maximum(iw, 0)
ih = np.maximum(ih, 0)
ua = np.expand_dims((np.expand_dims(d, axis=0)[:, 2] - np.expand_dims(d, axis=0)[:, 0]) * (np.expand_dims(d, axis=0)[:, 3] - np.expand_dims(d, axis=0)[:, 1]), axis=1) + area - iw * ih
ua = np.maximum(ua, np.finfo(float).eps)
intersection = iw * ih
overlaps = intersection / ua
assigned_annotation = np.argmax(overlaps, axis=1)
max_overlap = overlaps[0, assigned_annotation]
if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
detected_annotations.append(assigned_annotation)
else:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
if num_annotations == 0:
average_precisions[label] = (0, 0)
continue
indices = np.argsort(-scores)
false_positives = false_positives[indices]
true_positives = true_positives[indices]
false_positives = np.cumsum(false_positives)
true_positives = np.cumsum(true_positives)
recall = true_positives / num_annotations
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
mrec = np.concatenate(([0.0], recall, [1.0]))
mpre = np.concatenate(([0.0], precision, [0.0]))
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
i = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
average_precision = ap
average_precisions[label] = (average_precision, num_annotations)
total_num_annotations = 0
sum_AP = 0
for label in range(generator.num_classes):
sum_AP = sum_AP + average_precisions[label][0] * average_precisions[label][1]
total_num_annotations = total_num_annotations + average_precisions[label][1]
if total_num_annotations > 0:
mAP = sum_AP / total_num_annotations
else:
logger.info('total num annotations in val is zero')
mAP = np.int64(0)
print('\nmAP: ', mAP)
return mAP
|
AutoML
|
positive
|
def main():
""""""
files = sys.argv[1:]
n_files = len(files)
assert n_files % 2 == 0
(gold_files, sys_files) = (files[:n_files // 2], files[n_files // 2:])
<DeepExtract>
correct = 0
predicted = 0
actual = 0
n_tokens = 0
n_sequences = 0
current_seq_correct = False
n_correct_sequences = 0
current_fp = 0
current_sent = 0
for (gold_file, sys_file) in zip(gold_files, sys_files):
with codecs.open(gold_file, encoding='utf-8') as gf, codecs.open(sys_file, encoding='utf-8') as sf:
gold_line = gf.readline()
gold_i = 1
sys_i = 0
while gold_line:
while gold_line.startswith('#'):
current_sent += 1
gold_i += 1
n_sequences += 1
n_correct_sequences += current_seq_correct
current_seq_correct = True
gold_line = gf.readline()
if gold_line.rstrip() != '':
sys_line = sf.readline()
sys_i += 1
while sys_line.startswith('#') or sys_line.rstrip() == '' or sys_line.split('\t')[0] == '0':
sys_line = sf.readline()
sys_i += 1
gold_line = gold_line.rstrip().split('\t')
sys_line = sys_line.rstrip().split('\t')
assert sys_line[1] == gold_line[1], 'Files are misaligned at lines {}, {}'.format(gold_i, sys_i)
gold_node = gold_line[8]
if gold_node != '_':
gold_node = gold_node.split('|')
if False:
gold_edges = set((tuple(gold_edge.split(':', 1)) for gold_edge in gold_node))
else:
gold_edges = set((gold_edge.split(':', 1)[0] for gold_edge in gold_node))
else:
gold_edges = set()
sys_node = sys_line[8]
if sys_node != '_':
sys_node = sys_node.split('|')
if False:
sys_edges = set((tuple(sys_edge.split(':', 1)) for sys_edge in sys_node))
else:
sys_edges = set((sys_edge.split(':', 1)[0] for sys_edge in sys_node))
else:
sys_edges = set()
correct_edges = gold_edges & sys_edges
if len(correct_edges) != len(gold_edges):
current_seq_correct = False
correct += len(correct_edges)
predicted += len(sys_edges)
actual += len(gold_edges)
n_tokens += 1
gold_line = gf.readline()
gold_i += 1
Accuracy = namedtuple('Accuracy', ['precision', 'recall', 'F1', 'seq_acc'])
precision = correct / (predicted + 1e-12)
recall = correct / (actual + 1e-12)
F1 = 2 * precision * recall / (precision + recall + 1e-12)
seq_acc = n_correct_sequences / n_sequences
UAS = Accuracy(precision, recall, F1, seq_acc)
</DeepExtract>
<DeepExtract>
correct = 0
predicted = 0
actual = 0
n_tokens = 0
n_sequences = 0
current_seq_correct = False
n_correct_sequences = 0
current_fp = 0
current_sent = 0
for (gold_file, sys_file) in zip(gold_files, sys_files):
with codecs.open(gold_file, encoding='utf-8') as gf, codecs.open(sys_file, encoding='utf-8') as sf:
gold_line = gf.readline()
gold_i = 1
sys_i = 0
while gold_line:
while gold_line.startswith('#'):
current_sent += 1
gold_i += 1
n_sequences += 1
n_correct_sequences += current_seq_correct
current_seq_correct = True
gold_line = gf.readline()
if gold_line.rstrip() != '':
sys_line = sf.readline()
sys_i += 1
while sys_line.startswith('#') or sys_line.rstrip() == '' or sys_line.split('\t')[0] == '0':
sys_line = sf.readline()
sys_i += 1
gold_line = gold_line.rstrip().split('\t')
sys_line = sys_line.rstrip().split('\t')
assert sys_line[1] == gold_line[1], 'Files are misaligned at lines {}, {}'.format(gold_i, sys_i)
gold_node = gold_line[8]
if gold_node != '_':
gold_node = gold_node.split('|')
if True:
gold_edges = set((tuple(gold_edge.split(':', 1)) for gold_edge in gold_node))
else:
gold_edges = set((gold_edge.split(':', 1)[0] for gold_edge in gold_node))
else:
gold_edges = set()
sys_node = sys_line[8]
if sys_node != '_':
sys_node = sys_node.split('|')
if True:
sys_edges = set((tuple(sys_edge.split(':', 1)) for sys_edge in sys_node))
else:
sys_edges = set((sys_edge.split(':', 1)[0] for sys_edge in sys_node))
else:
sys_edges = set()
correct_edges = gold_edges & sys_edges
if len(correct_edges) != len(gold_edges):
current_seq_correct = False
correct += len(correct_edges)
predicted += len(sys_edges)
actual += len(gold_edges)
n_tokens += 1
gold_line = gf.readline()
gold_i += 1
Accuracy = namedtuple('Accuracy', ['precision', 'recall', 'F1', 'seq_acc'])
precision = correct / (predicted + 1e-12)
recall = correct / (actual + 1e-12)
F1 = 2 * precision * recall / (precision + recall + 1e-12)
seq_acc = n_correct_sequences / n_sequences
LAS = Accuracy(precision, recall, F1, seq_acc)
</DeepExtract>
print('{:0.6f}'.format(LAS.F1 * 100))
|
def main():
""""""
files = sys.argv[1:]
n_files = len(files)
assert n_files % 2 == 0
(gold_files, sys_files) = (files[:n_files // 2], files[n_files // 2:])
correct = 0
predicted = 0
actual = 0
n_tokens = 0
n_sequences = 0
current_seq_correct = False
n_correct_sequences = 0
current_fp = 0
current_sent = 0
for (gold_file, sys_file) in zip(gold_files, sys_files):
with codecs.open(gold_file, encoding='utf-8') as gf, codecs.open(sys_file, encoding='utf-8') as sf:
gold_line = gf.readline()
gold_i = 1
sys_i = 0
while gold_line:
while gold_line.startswith('#'):
current_sent += 1
gold_i += 1
n_sequences += 1
n_correct_sequences += current_seq_correct
current_seq_correct = True
gold_line = gf.readline()
if gold_line.rstrip() != '':
sys_line = sf.readline()
sys_i += 1
while sys_line.startswith('#') or sys_line.rstrip() == '' or sys_line.split('\t')[0] == '0':
sys_line = sf.readline()
sys_i += 1
gold_line = gold_line.rstrip().split('\t')
sys_line = sys_line.rstrip().split('\t')
assert sys_line[1] == gold_line[1], 'Files are misaligned at lines {}, {}'.format(gold_i, sys_i)
gold_node = gold_line[8]
if gold_node != '_':
gold_node = gold_node.split('|')
if False:
gold_edges = set((tuple(gold_edge.split(':', 1)) for gold_edge in gold_node))
else:
gold_edges = set((gold_edge.split(':', 1)[0] for gold_edge in gold_node))
else:
gold_edges = set()
sys_node = sys_line[8]
if sys_node != '_':
sys_node = sys_node.split('|')
if False:
sys_edges = set((tuple(sys_edge.split(':', 1)) for sys_edge in sys_node))
else:
sys_edges = set((sys_edge.split(':', 1)[0] for sys_edge in sys_node))
else:
sys_edges = set()
correct_edges = gold_edges & sys_edges
if len(correct_edges) != len(gold_edges):
current_seq_correct = False
correct += len(correct_edges)
predicted += len(sys_edges)
actual += len(gold_edges)
n_tokens += 1
gold_line = gf.readline()
gold_i += 1
Accuracy = namedtuple('Accuracy', ['precision', 'recall', 'F1', 'seq_acc'])
precision = correct / (predicted + 1e-12)
recall = correct / (actual + 1e-12)
F1 = 2 * precision * recall / (precision + recall + 1e-12)
seq_acc = n_correct_sequences / n_sequences
UAS = Accuracy(precision, recall, F1, seq_acc)
correct = 0
predicted = 0
actual = 0
n_tokens = 0
n_sequences = 0
current_seq_correct = False
n_correct_sequences = 0
current_fp = 0
current_sent = 0
for (gold_file, sys_file) in zip(gold_files, sys_files):
with codecs.open(gold_file, encoding='utf-8') as gf, codecs.open(sys_file, encoding='utf-8') as sf:
gold_line = gf.readline()
gold_i = 1
sys_i = 0
while gold_line:
while gold_line.startswith('#'):
current_sent += 1
gold_i += 1
n_sequences += 1
n_correct_sequences += current_seq_correct
current_seq_correct = True
gold_line = gf.readline()
if gold_line.rstrip() != '':
sys_line = sf.readline()
sys_i += 1
while sys_line.startswith('#') or sys_line.rstrip() == '' or sys_line.split('\t')[0] == '0':
sys_line = sf.readline()
sys_i += 1
gold_line = gold_line.rstrip().split('\t')
sys_line = sys_line.rstrip().split('\t')
assert sys_line[1] == gold_line[1], 'Files are misaligned at lines {}, {}'.format(gold_i, sys_i)
gold_node = gold_line[8]
if gold_node != '_':
gold_node = gold_node.split('|')
if True:
gold_edges = set((tuple(gold_edge.split(':', 1)) for gold_edge in gold_node))
else:
gold_edges = set((gold_edge.split(':', 1)[0] for gold_edge in gold_node))
else:
gold_edges = set()
sys_node = sys_line[8]
if sys_node != '_':
sys_node = sys_node.split('|')
if True:
sys_edges = set((tuple(sys_edge.split(':', 1)) for sys_edge in sys_node))
else:
sys_edges = set((sys_edge.split(':', 1)[0] for sys_edge in sys_node))
else:
sys_edges = set()
correct_edges = gold_edges & sys_edges
if len(correct_edges) != len(gold_edges):
current_seq_correct = False
correct += len(correct_edges)
predicted += len(sys_edges)
actual += len(gold_edges)
n_tokens += 1
gold_line = gf.readline()
gold_i += 1
Accuracy = namedtuple('Accuracy', ['precision', 'recall', 'F1', 'seq_acc'])
precision = correct / (predicted + 1e-12)
recall = correct / (actual + 1e-12)
F1 = 2 * precision * recall / (precision + recall + 1e-12)
seq_acc = n_correct_sequences / n_sequences
LAS = Accuracy(precision, recall, F1, seq_acc)
print('{:0.6f}'.format(LAS.F1 * 100))
|
ACE
|
positive
|
def caller(func):
path = os.path.join(os.getcwd(), 'temp', 'query_img.png')
<DeepExtract>
(HOST, PORT) = ('localhost', 50007)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
start_t = time.time()
s.send(path.encode())
resStr = str(s.recv(1000).decode())
splitRes = resStr.split(',')
end_t = time.time()
exlist = []
new_splitRes = []
for i in splitRes:
path = self.sse_flist[int(i)]
fidx = int(path[:-5])
if fidx in exlist:
continue
else:
new_splitRes.append(fidx)
print('global query time : {}'.format(end_t - start_t))
print('querylist=', new_splitRes)
qstr = new_splitRes
</DeepExtract>
func(qstr)
|
def caller(func):
path = os.path.join(os.getcwd(), 'temp', 'query_img.png')
(HOST, PORT) = ('localhost', 50007)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
start_t = time.time()
s.send(path.encode())
resStr = str(s.recv(1000).decode())
splitRes = resStr.split(',')
end_t = time.time()
exlist = []
new_splitRes = []
for i in splitRes:
path = self.sse_flist[int(i)]
fidx = int(path[:-5])
if fidx in exlist:
continue
else:
new_splitRes.append(fidx)
print('global query time : {}'.format(end_t - start_t))
print('querylist=', new_splitRes)
qstr = new_splitRes
func(qstr)
|
dualFace
|
positive
|
def __init__(self, table_description, data=None, custom_properties=None):
"""Initialize the data table from a table schema and (optionally) data.
See the class documentation for more information on table schema and data
values.
Args:
table_description: A table schema, following one of the formats described
in TableDescriptionParser(). Schemas describe the
column names, data types, and labels. See
TableDescriptionParser() for acceptable formats.
data: Optional. If given, fills the table with the given data. The data
structure must be consistent with schema in table_description. See
the class documentation for more information on acceptable data. You
can add data later by calling AppendData().
custom_properties: Optional. A dictionary from string to string that
goes into the table's custom properties. This can be
later changed by changing self.custom_properties.
Raises:
DataTableException: Raised if the data and the description did not match,
or did not use the supported formats.
"""
<DeepExtract>
if isinstance(table_description, (types.StringTypes, tuple)):
parsed_col = DataTable.ColumnTypeParser(table_description)
parsed_col['depth'] = depth
parsed_col['container'] = 'scalar'
self.__columns = [parsed_col]
if not hasattr(table_description, '__iter__'):
raise DataTableException('Expected an iterable object, got %s' % type(table_description))
if not isinstance(table_description, dict):
columns = []
for desc in table_description:
parsed_col = DataTable.ColumnTypeParser(desc)
parsed_col['depth'] = depth
parsed_col['container'] = 'iter'
columns.append(parsed_col)
if not columns:
raise DataTableException('Description iterable objects should not be empty.')
self.__columns = columns
if not table_description:
raise DataTableException('Empty dictionaries are not allowed inside description')
if len(table_description) != 1 or (isinstance(table_description.keys()[0], types.StringTypes) and isinstance(table_description.values()[0], tuple) and (len(table_description.values()[0]) < 4)):
columns = []
for (key, value) in sorted(table_description.items()):
if isinstance(value, tuple):
parsed_col = DataTable.ColumnTypeParser((key,) + value)
else:
parsed_col = DataTable.ColumnTypeParser((key, value))
parsed_col['depth'] = depth
parsed_col['container'] = 'dict'
columns.append(parsed_col)
self.__columns = columns
parsed_col = DataTable.ColumnTypeParser(table_description.keys()[0])
parsed_col['depth'] = depth
parsed_col['container'] = 'dict'
self.__columns = [parsed_col] + DataTable.TableDescriptionParser(table_description.values()[0], depth=depth + 1)
</DeepExtract>
self.__data = []
self.custom_properties = {}
if custom_properties is not None:
self.custom_properties = custom_properties
if data:
<DeepExtract>
self.__data = []
self.AppendData(data, custom_properties)
</DeepExtract>
|
def __init__(self, table_description, data=None, custom_properties=None):
"""Initialize the data table from a table schema and (optionally) data.
See the class documentation for more information on table schema and data
values.
Args:
table_description: A table schema, following one of the formats described
in TableDescriptionParser(). Schemas describe the
column names, data types, and labels. See
TableDescriptionParser() for acceptable formats.
data: Optional. If given, fills the table with the given data. The data
structure must be consistent with schema in table_description. See
the class documentation for more information on acceptable data. You
can add data later by calling AppendData().
custom_properties: Optional. A dictionary from string to string that
goes into the table's custom properties. This can be
later changed by changing self.custom_properties.
Raises:
DataTableException: Raised if the data and the description did not match,
or did not use the supported formats.
"""
if isinstance(table_description, (types.StringTypes, tuple)):
parsed_col = DataTable.ColumnTypeParser(table_description)
parsed_col['depth'] = depth
parsed_col['container'] = 'scalar'
self.__columns = [parsed_col]
if not hasattr(table_description, '__iter__'):
raise DataTableException('Expected an iterable object, got %s' % type(table_description))
if not isinstance(table_description, dict):
columns = []
for desc in table_description:
parsed_col = DataTable.ColumnTypeParser(desc)
parsed_col['depth'] = depth
parsed_col['container'] = 'iter'
columns.append(parsed_col)
if not columns:
raise DataTableException('Description iterable objects should not be empty.')
self.__columns = columns
if not table_description:
raise DataTableException('Empty dictionaries are not allowed inside description')
if len(table_description) != 1 or (isinstance(table_description.keys()[0], types.StringTypes) and isinstance(table_description.values()[0], tuple) and (len(table_description.values()[0]) < 4)):
columns = []
for (key, value) in sorted(table_description.items()):
if isinstance(value, tuple):
parsed_col = DataTable.ColumnTypeParser((key,) + value)
else:
parsed_col = DataTable.ColumnTypeParser((key, value))
parsed_col['depth'] = depth
parsed_col['container'] = 'dict'
columns.append(parsed_col)
self.__columns = columns
parsed_col = DataTable.ColumnTypeParser(table_description.keys()[0])
parsed_col['depth'] = depth
parsed_col['container'] = 'dict'
self.__columns = [parsed_col] + DataTable.TableDescriptionParser(table_description.values()[0], depth=depth + 1)
self.__data = []
self.custom_properties = {}
if custom_properties is not None:
self.custom_properties = custom_properties
if data:
self.__data = []
self.AppendData(data, custom_properties)
|
DCMetroMetrics
|
positive
|
def send_command(self, command_name, parameters=None, lifetime=None, timestamp=None, status=None, result=None):
<DeepExtract>
if self._id:
return
raise DeviceError('Device does not exist.')
</DeepExtract>
return self._api.send_command(device_id=self._id, command_name=command_name, parameters=parameters, lifetime=lifetime, timestamp=timestamp, status=status, result=result)
|
def send_command(self, command_name, parameters=None, lifetime=None, timestamp=None, status=None, result=None):
if self._id:
return
raise DeviceError('Device does not exist.')
return self._api.send_command(device_id=self._id, command_name=command_name, parameters=parameters, lifetime=lifetime, timestamp=timestamp, status=status, result=result)
|
devicehive-python
|
positive
|
def parse_typedef_struct(self):
self.tokens.expect('typedef')
self.tokens.expect('struct')
<DeepExtract>
self.tokens.expect('{')
member_names = []
member_types = []
while self.tokens.peek() != '}':
type_specifier = self.parse_type_specifier()
name = self.tokens.get()
(name, type_specifier) = self.parse_declaration(name, type_specifier)
member_names.append(name)
member_types.append(type_specifier.type_)
self.tokens.expect(';')
self.tokens.expect('}')
declaration = StructOf(member_names, member_types)
</DeepExtract>
name = self.tokens.get()
self.tokens.expect(';')
self.scope[name] = declaration
self.structs.append(name)
|
def parse_typedef_struct(self):
self.tokens.expect('typedef')
self.tokens.expect('struct')
self.tokens.expect('{')
member_names = []
member_types = []
while self.tokens.peek() != '}':
type_specifier = self.parse_type_specifier()
name = self.tokens.get()
(name, type_specifier) = self.parse_declaration(name, type_specifier)
member_names.append(name)
member_types.append(type_specifier.type_)
self.tokens.expect(';')
self.tokens.expect('}')
declaration = StructOf(member_names, member_types)
name = self.tokens.get()
self.tokens.expect(';')
self.scope[name] = declaration
self.structs.append(name)
|
Chips-2.0
|
positive
|
def test_link_type_on_nodes_table_update_links_link_type(self):
<DeepExtract>
for query in self.queries:
if 'link_type_on_nodes_table_update_links_link_type' in query:
cmd = query
raise FileNotFoundError('QUERY DOES NOT EXIST')
</DeepExtract>
self.curr.execute(cmd)
self.curr.execute('update links set link_type="test" where link_id=15')
self.curr.execute('select link_types from nodes where node_id=6')
lts = self.curr.fetchone()[0]
self.assertEqual(lts, 'rtw', 'link_types on nodes table not updated with new link type in the links')
self.curr.execute('select link_types from nodes where node_id=5')
lts = self.curr.fetchone()[0]
self.assertEqual(lts, 'ertw', 'link_types was allowed to be corrupted in the nodes table')
|
def test_link_type_on_nodes_table_update_links_link_type(self):
for query in self.queries:
if 'link_type_on_nodes_table_update_links_link_type' in query:
cmd = query
raise FileNotFoundError('QUERY DOES NOT EXIST')
self.curr.execute(cmd)
self.curr.execute('update links set link_type="test" where link_id=15')
self.curr.execute('select link_types from nodes where node_id=6')
lts = self.curr.fetchone()[0]
self.assertEqual(lts, 'rtw', 'link_types on nodes table not updated with new link type in the links')
self.curr.execute('select link_types from nodes where node_id=5')
lts = self.curr.fetchone()[0]
self.assertEqual(lts, 'ertw', 'link_types was allowed to be corrupted in the nodes table')
|
aequilibrae
|
positive
|
def knight(self, A, B, C, D, E, F):
def isValid(x, y):
if x >= 1 and x <= A and (y >= 1) and (y <= B):
return True
return False
def minimumSteps(curr, dest):
movex = [2, 2, -2, -2, 1, -1, 1, -1]
movey = [1, -1, 1, -1, 2, 2, -2, -2]
queue = []
queue.append(Point(curr[0], curr[1]))
visited = [[False for i in range(B + 1)] for j in range(A + 1)]
visited[curr[0]][curr[1]] = True
while len(queue):
target = queue.pop(0)
if target.x == dest[0] and target.y == dest[1]:
return target.distance
for i in range(8):
x = target.x + movex[i]
y = target.y + movey[i]
if isValid(x, y) and (not visited[x][y]):
visited[x][y] = True
queue.append(Point(x, y, target.distance + 1))
curr = [C, D]
dest = [E, F]
<DeepExtract>
movex = [2, 2, -2, -2, 1, -1, 1, -1]
movey = [1, -1, 1, -1, 2, 2, -2, -2]
queue = []
queue.append(Point(curr[0], curr[1]))
visited = [[False for i in range(B + 1)] for j in range(A + 1)]
visited[curr[0]][curr[1]] = True
while len(queue):
target = queue.pop(0)
if target.x == dest[0] and target.y == dest[1]:
x = target.distance
for i in range(8):
x = target.x + movex[i]
y = target.y + movey[i]
if isValid(x, y) and (not visited[x][y]):
visited[x][y] = True
queue.append(Point(x, y, target.distance + 1))
</DeepExtract>
if x == None:
return -1
else:
return x
|
def knight(self, A, B, C, D, E, F):
def isValid(x, y):
if x >= 1 and x <= A and (y >= 1) and (y <= B):
return True
return False
def minimumSteps(curr, dest):
movex = [2, 2, -2, -2, 1, -1, 1, -1]
movey = [1, -1, 1, -1, 2, 2, -2, -2]
queue = []
queue.append(Point(curr[0], curr[1]))
visited = [[False for i in range(B + 1)] for j in range(A + 1)]
visited[curr[0]][curr[1]] = True
while len(queue):
target = queue.pop(0)
if target.x == dest[0] and target.y == dest[1]:
return target.distance
for i in range(8):
x = target.x + movex[i]
y = target.y + movey[i]
if isValid(x, y) and (not visited[x][y]):
visited[x][y] = True
queue.append(Point(x, y, target.distance + 1))
curr = [C, D]
dest = [E, F]
movex = [2, 2, -2, -2, 1, -1, 1, -1]
movey = [1, -1, 1, -1, 2, 2, -2, -2]
queue = []
queue.append(Point(curr[0], curr[1]))
visited = [[False for i in range(B + 1)] for j in range(A + 1)]
visited[curr[0]][curr[1]] = True
while len(queue):
target = queue.pop(0)
if target.x == dest[0] and target.y == dest[1]:
x = target.distance
for i in range(8):
x = target.x + movex[i]
y = target.y + movey[i]
if isValid(x, y) and (not visited[x][y]):
visited[x][y] = True
queue.append(Point(x, y, target.distance + 1))
if x == None:
return -1
else:
return x
|
Competitive_Programming
|
positive
|
@csrf_exempt
def time(request):
"""This functionality use TimeDetector to detect time. It is called through api call
Args:
request (django.http.request.HttpRequest): HttpRequest object
request params:
message (str): natural text on which detection logic is to be run. Note if structured value is present
detection is run on structured value instead of message
entity_name (str): name of the entity. Also acts as elastic-search dictionary name
if entity uses elastic-search lookup
structured_value (str): Value obtained from any structured elements. Note if structured value is present
detection is run on structured value instead of message
(For example, UI elements like form, payload, etc)
fallback_value (str): If the detection logic fails to detect any value either from structured_value
or message then we return a fallback_value as an output.
bot_message (str): previous message from a bot/agent.
timezone (str): timezone of the user
source_language (str): source language code (ISO 639-1)
language_script (str): language code of script (ISO 639-1)
Returns:
response (django.http.response.HttpResponse): HttpResponse object
Example:
message = "kal subah 5 baje mujhe jaga dena"
entity_name = 'time'
structured_value = None
fallback_value = None
bot_message = None
timezone = 'UTC'
source_language = 'hi'
language_script = 'en'
output = time(request)
print output
>> [{'detection': 'message', 'original_text': '12:30 pm',
'entity_value': {'mm': 30, 'hh': 12, 'nn': 'pm'}}]
"""
entity_output = None
try:
parameters_dict = {}
if request.method == 'POST':
<DeepExtract>
request_data = json.loads(request.body)
parameters_dict = {PARAMETER_MESSAGE: request_data.get('message'), PARAMETER_ENTITY_NAME: request_data.get('entity_name'), PARAMETER_STRUCTURED_VALUE: request_data.get('structured_value'), PARAMETER_FALLBACK_VALUE: request_data.get('fallback_value'), PARAMETER_BOT_MESSAGE: request_data.get('bot_message'), PARAMETER_TIMEZONE: request_data.get('timezone'), PARAMETER_LANGUAGE_SCRIPT: request_data.get('language_script', ENGLISH_LANG), PARAMETER_SOURCE_LANGUAGE: request_data.get('source_language', ENGLISH_LANG), PARAMETER_PAST_DATE_REFERENCED: request_data.get('date_past_reference', 'False'), PARAMETER_MIN_DIGITS: request_data.get('min_number_digits'), PARAMETER_MAX_DIGITS: request_data.get('max_number_digits'), PARAMETER_NUMBER_UNIT_TYPE: request_data.get('unit_type'), PARAMETER_LOCALE: request_data.get('locale'), PARAMETER_RANGE_ENABLED: request_data.get('range_enabled')}
parameters_dict = parameters_dict
</DeepExtract>
ner_logger.debug('Start Bulk Detection: %s ' % parameters_dict[PARAMETER_ENTITY_NAME])
elif request.method == 'GET':
<DeepExtract>
parameters_dict = {PARAMETER_MESSAGE: request.GET.get('message'), PARAMETER_ENTITY_NAME: request.GET.get('entity_name'), PARAMETER_STRUCTURED_VALUE: request.GET.get('structured_value'), PARAMETER_FALLBACK_VALUE: request.GET.get('fallback_value'), PARAMETER_BOT_MESSAGE: request.GET.get('bot_message'), PARAMETER_TIMEZONE: request.GET.get('timezone'), PARAMETER_LANGUAGE_SCRIPT: request.GET.get('language_script', ENGLISH_LANG), PARAMETER_SOURCE_LANGUAGE: request.GET.get('source_language', ENGLISH_LANG), PARAMETER_PAST_DATE_REFERENCED: request.GET.get('date_past_reference', 'False'), PARAMETER_MIN_DIGITS: request.GET.get('min_number_digits'), PARAMETER_MAX_DIGITS: request.GET.get('max_number_digits'), PARAMETER_NUMBER_UNIT_TYPE: request.GET.get('unit_type'), PARAMETER_LOCALE: request.GET.get('locale'), PARAMETER_RANGE_ENABLED: request.GET.get('range_enabled')}
parameters_dict = parameters_dict
</DeepExtract>
ner_logger.debug('Start: %s ' % parameters_dict[PARAMETER_ENTITY_NAME])
timezone = parameters_dict[PARAMETER_TIMEZONE] or None
form_check = True if parameters_dict[PARAMETER_STRUCTURED_VALUE] else False
range_enabled = True if parameters_dict[PARAMETER_RANGE_ENABLED] else False
time_detection = TimeDetector(entity_name=parameters_dict[PARAMETER_ENTITY_NAME], language=parameters_dict[PARAMETER_SOURCE_LANGUAGE], timezone=timezone)
time_detection.set_bot_message(bot_message=parameters_dict[PARAMETER_BOT_MESSAGE])
message = parameters_dict[PARAMETER_MESSAGE]
if isinstance(message, six.string_types):
entity_output = time_detection.detect(message=message, structured_value=parameters_dict[PARAMETER_STRUCTURED_VALUE], fallback_value=parameters_dict[PARAMETER_FALLBACK_VALUE], form_check=form_check, range_enabled=range_enabled)
elif isinstance(message, (list, tuple)):
entity_output = time_detection.detect_bulk(messages=message)
ner_logger.debug('Finished %s : %s ' % (parameters_dict[PARAMETER_ENTITY_NAME], entity_output))
except InvalidTextRequest as err:
response = {'success': False, 'error': str(err)}
ner_logger.exception(f'Error in validating request body for {request.path}, error: {err}')
return JsonResponse(response, status=400)
except DataStoreRequestException as err:
response = {'success': False, 'error': str(err)}
ner_logger.exception(f'Error in requesting ES {request.path}, error: {err}, query: {err.request}, response: {err.response}')
return JsonResponse(response, status=400)
except es_exceptions.ConnectionTimeout as err:
response = {'success': False, 'error': str(err)}
ner_logger.exception(f'Connection timed out for ES {request.path}, error: {err}')
return JsonResponse(response, status=500)
except es_exceptions.ConnectionError as err:
response = {'success': False, 'error': str(err)}
ner_logger.exception(f'Error in connection to ES {request.path}, error: {err}')
return JsonResponse(response, status=500)
except (TypeError, KeyError) as err:
response = {'success': False, 'error': str(err)}
ner_logger.exception(f'Error in validating type {request.path}, error: {err}')
return JsonResponse(response, status=500)
except Exception as err:
response = {'success': False, 'error': str(err)}
ner_logger.exception(f'General exception for {request.path}, error: {err}')
return JsonResponse(response, status=500)
response = {'success': True, 'error': None, 'data': entity_output}
return JsonResponse(response, status=200)
|
@csrf_exempt
def time(request):
"""This functionality use TimeDetector to detect time. It is called through api call
Args:
request (django.http.request.HttpRequest): HttpRequest object
request params:
message (str): natural text on which detection logic is to be run. Note if structured value is present
detection is run on structured value instead of message
entity_name (str): name of the entity. Also acts as elastic-search dictionary name
if entity uses elastic-search lookup
structured_value (str): Value obtained from any structured elements. Note if structured value is present
detection is run on structured value instead of message
(For example, UI elements like form, payload, etc)
fallback_value (str): If the detection logic fails to detect any value either from structured_value
or message then we return a fallback_value as an output.
bot_message (str): previous message from a bot/agent.
timezone (str): timezone of the user
source_language (str): source language code (ISO 639-1)
language_script (str): language code of script (ISO 639-1)
Returns:
response (django.http.response.HttpResponse): HttpResponse object
Example:
message = "kal subah 5 baje mujhe jaga dena"
entity_name = 'time'
structured_value = None
fallback_value = None
bot_message = None
timezone = 'UTC'
source_language = 'hi'
language_script = 'en'
output = time(request)
print output
>> [{'detection': 'message', 'original_text': '12:30 pm',
'entity_value': {'mm': 30, 'hh': 12, 'nn': 'pm'}}]
"""
entity_output = None
try:
parameters_dict = {}
if request.method == 'POST':
request_data = json.loads(request.body)
parameters_dict = {PARAMETER_MESSAGE: request_data.get('message'), PARAMETER_ENTITY_NAME: request_data.get('entity_name'), PARAMETER_STRUCTURED_VALUE: request_data.get('structured_value'), PARAMETER_FALLBACK_VALUE: request_data.get('fallback_value'), PARAMETER_BOT_MESSAGE: request_data.get('bot_message'), PARAMETER_TIMEZONE: request_data.get('timezone'), PARAMETER_LANGUAGE_SCRIPT: request_data.get('language_script', ENGLISH_LANG), PARAMETER_SOURCE_LANGUAGE: request_data.get('source_language', ENGLISH_LANG), PARAMETER_PAST_DATE_REFERENCED: request_data.get('date_past_reference', 'False'), PARAMETER_MIN_DIGITS: request_data.get('min_number_digits'), PARAMETER_MAX_DIGITS: request_data.get('max_number_digits'), PARAMETER_NUMBER_UNIT_TYPE: request_data.get('unit_type'), PARAMETER_LOCALE: request_data.get('locale'), PARAMETER_RANGE_ENABLED: request_data.get('range_enabled')}
parameters_dict = parameters_dict
ner_logger.debug('Start Bulk Detection: %s ' % parameters_dict[PARAMETER_ENTITY_NAME])
elif request.method == 'GET':
parameters_dict = {PARAMETER_MESSAGE: request.GET.get('message'), PARAMETER_ENTITY_NAME: request.GET.get('entity_name'), PARAMETER_STRUCTURED_VALUE: request.GET.get('structured_value'), PARAMETER_FALLBACK_VALUE: request.GET.get('fallback_value'), PARAMETER_BOT_MESSAGE: request.GET.get('bot_message'), PARAMETER_TIMEZONE: request.GET.get('timezone'), PARAMETER_LANGUAGE_SCRIPT: request.GET.get('language_script', ENGLISH_LANG), PARAMETER_SOURCE_LANGUAGE: request.GET.get('source_language', ENGLISH_LANG), PARAMETER_PAST_DATE_REFERENCED: request.GET.get('date_past_reference', 'False'), PARAMETER_MIN_DIGITS: request.GET.get('min_number_digits'), PARAMETER_MAX_DIGITS: request.GET.get('max_number_digits'), PARAMETER_NUMBER_UNIT_TYPE: request.GET.get('unit_type'), PARAMETER_LOCALE: request.GET.get('locale'), PARAMETER_RANGE_ENABLED: request.GET.get('range_enabled')}
parameters_dict = parameters_dict
ner_logger.debug('Start: %s ' % parameters_dict[PARAMETER_ENTITY_NAME])
timezone = parameters_dict[PARAMETER_TIMEZONE] or None
form_check = True if parameters_dict[PARAMETER_STRUCTURED_VALUE] else False
range_enabled = True if parameters_dict[PARAMETER_RANGE_ENABLED] else False
time_detection = TimeDetector(entity_name=parameters_dict[PARAMETER_ENTITY_NAME], language=parameters_dict[PARAMETER_SOURCE_LANGUAGE], timezone=timezone)
time_detection.set_bot_message(bot_message=parameters_dict[PARAMETER_BOT_MESSAGE])
message = parameters_dict[PARAMETER_MESSAGE]
if isinstance(message, six.string_types):
entity_output = time_detection.detect(message=message, structured_value=parameters_dict[PARAMETER_STRUCTURED_VALUE], fallback_value=parameters_dict[PARAMETER_FALLBACK_VALUE], form_check=form_check, range_enabled=range_enabled)
elif isinstance(message, (list, tuple)):
entity_output = time_detection.detect_bulk(messages=message)
ner_logger.debug('Finished %s : %s ' % (parameters_dict[PARAMETER_ENTITY_NAME], entity_output))
except InvalidTextRequest as err:
response = {'success': False, 'error': str(err)}
ner_logger.exception(f'Error in validating request body for {request.path}, error: {err}')
return JsonResponse(response, status=400)
except DataStoreRequestException as err:
response = {'success': False, 'error': str(err)}
ner_logger.exception(f'Error in requesting ES {request.path}, error: {err}, query: {err.request}, response: {err.response}')
return JsonResponse(response, status=400)
except es_exceptions.ConnectionTimeout as err:
response = {'success': False, 'error': str(err)}
ner_logger.exception(f'Connection timed out for ES {request.path}, error: {err}')
return JsonResponse(response, status=500)
except es_exceptions.ConnectionError as err:
response = {'success': False, 'error': str(err)}
ner_logger.exception(f'Error in connection to ES {request.path}, error: {err}')
return JsonResponse(response, status=500)
except (TypeError, KeyError) as err:
response = {'success': False, 'error': str(err)}
ner_logger.exception(f'Error in validating type {request.path}, error: {err}')
return JsonResponse(response, status=500)
except Exception as err:
response = {'success': False, 'error': str(err)}
ner_logger.exception(f'General exception for {request.path}, error: {err}')
return JsonResponse(response, status=500)
response = {'success': True, 'error': None, 'data': entity_output}
return JsonResponse(response, status=200)
|
chatbot_ner
|
positive
|
def __call__(self, rigid: geometry.Rigid3Array, representations_list: Iterable[jnp.ndarray], aatype: jnp.ndarray) -> Dict[str, Any]:
"""Predict sidechains using multi-rigid representations.
Args:
rigid: The Rigid's for each residue (translations in angstroms)
representations_list: A list of activations to predict sidechains from.
aatype: amino acid types.
Returns:
dict containing atom positions and frames (in angstrom)
"""
act = [common_modules.Linear(self.config.num_channel, name='input_projection')(jax.nn.relu(x)) for x in representations_list]
act = sum(act)
final_init = 'zeros' if self.global_config.zero_init else 'linear'
for _ in range(self.config.num_residual_block):
old_act = act
act = common_modules.Linear(self.config.num_channel, initializer='relu', name='resblock1')(jax.nn.relu(act))
act = common_modules.Linear(self.config.num_channel, initializer=final_init, name='resblock2')(jax.nn.relu(act))
act += old_act
num_res = act.shape[0]
unnormalized_angles = common_modules.Linear(14, name='unnormalized_angles')(jax.nn.relu(act))
unnormalized_angles = jnp.reshape(unnormalized_angles, [num_res, 7, 2])
<DeepExtract>
angles = unnormalized_angles / jnp.sqrt(jnp.maximum(jnp.sum(unnormalized_angles ** 2, axis=-1, keepdims=True), epsilon))
</DeepExtract>
outputs = {'angles_sin_cos': angles, 'unnormalized_angles_sin_cos': unnormalized_angles}
all_frames_to_global = all_atom_multimer.torsion_angles_to_frames(aatype, rigid, angles)
pred_positions = all_atom_multimer.frames_and_literature_positions_to_atom14_pos(aatype, all_frames_to_global)
outputs.update({'atom_pos': pred_positions, 'frames': all_frames_to_global})
return outputs
|
def __call__(self, rigid: geometry.Rigid3Array, representations_list: Iterable[jnp.ndarray], aatype: jnp.ndarray) -> Dict[str, Any]:
"""Predict sidechains using multi-rigid representations.
Args:
rigid: The Rigid's for each residue (translations in angstroms)
representations_list: A list of activations to predict sidechains from.
aatype: amino acid types.
Returns:
dict containing atom positions and frames (in angstrom)
"""
act = [common_modules.Linear(self.config.num_channel, name='input_projection')(jax.nn.relu(x)) for x in representations_list]
act = sum(act)
final_init = 'zeros' if self.global_config.zero_init else 'linear'
for _ in range(self.config.num_residual_block):
old_act = act
act = common_modules.Linear(self.config.num_channel, initializer='relu', name='resblock1')(jax.nn.relu(act))
act = common_modules.Linear(self.config.num_channel, initializer=final_init, name='resblock2')(jax.nn.relu(act))
act += old_act
num_res = act.shape[0]
unnormalized_angles = common_modules.Linear(14, name='unnormalized_angles')(jax.nn.relu(act))
unnormalized_angles = jnp.reshape(unnormalized_angles, [num_res, 7, 2])
angles = unnormalized_angles / jnp.sqrt(jnp.maximum(jnp.sum(unnormalized_angles ** 2, axis=-1, keepdims=True), epsilon))
outputs = {'angles_sin_cos': angles, 'unnormalized_angles_sin_cos': unnormalized_angles}
all_frames_to_global = all_atom_multimer.torsion_angles_to_frames(aatype, rigid, angles)
pred_positions = all_atom_multimer.frames_and_literature_positions_to_atom14_pos(aatype, all_frames_to_global)
outputs.update({'atom_pos': pred_positions, 'frames': all_frames_to_global})
return outputs
|
alphafold
|
positive
|
def testUpWithClusterAppScalefile(self):
appscale = AppScale()
contents = {'ips_layout': {'master': 'ip1', 'appengine': 'ip1', 'database': 'ip2', 'zookeeper': 'ip2'}, 'keyname': 'boobazblarg', 'group': 'boobazblarg'}
yaml_dumped_contents = yaml.dump(contents)
<DeepExtract>
flexmock(os)
os.should_receive('getcwd').and_return('/boo')
mock = flexmock(sys.modules['__builtin__'])
mock.should_call('open')
mock.should_receive('open').with_args('/boo/' + appscale.APPSCALEFILE).and_return(flexmock(read=lambda : yaml_dumped_contents))
return mock
</DeepExtract>
flexmock(os.path)
os.path.should_call('exists')
os.path.should_receive('exists').with_args('/boo/' + appscale.APPSCALEFILE).and_return(True)
key_path = os.path.expanduser('~/.appscale/boobazblarg.key')
os.path.should_receive('exists').with_args(key_path).and_return(False)
flexmock(AppScaleTools)
AppScaleTools.should_receive('add_keypair')
AppScaleTools.should_receive('run_instances')
appscale.up()
|
def testUpWithClusterAppScalefile(self):
appscale = AppScale()
contents = {'ips_layout': {'master': 'ip1', 'appengine': 'ip1', 'database': 'ip2', 'zookeeper': 'ip2'}, 'keyname': 'boobazblarg', 'group': 'boobazblarg'}
yaml_dumped_contents = yaml.dump(contents)
flexmock(os)
os.should_receive('getcwd').and_return('/boo')
mock = flexmock(sys.modules['__builtin__'])
mock.should_call('open')
mock.should_receive('open').with_args('/boo/' + appscale.APPSCALEFILE).and_return(flexmock(read=lambda : yaml_dumped_contents))
return mock
flexmock(os.path)
os.path.should_call('exists')
os.path.should_receive('exists').with_args('/boo/' + appscale.APPSCALEFILE).and_return(True)
key_path = os.path.expanduser('~/.appscale/boobazblarg.key')
os.path.should_receive('exists').with_args(key_path).and_return(False)
flexmock(AppScaleTools)
AppScaleTools.should_receive('add_keypair')
AppScaleTools.should_receive('run_instances')
appscale.up()
|
appscale-tools
|
positive
|
def to_julian(self, year, month, day, hours=0, minutes=0, seconds=0):
"""Convert from Python date to Excel JD."""
if year < 1900 or year > 10000:
msg = 'Year not supported by Excel: %s' % year
raise ValueError(msg)
if self.excel_base_date == CALENDAR_WINDOWS_1900:
if year == 1900 and month <= 2:
excel_1900_leap_year = False
else:
excel_1900_leap_year = True
excel_base_date = 2415020
elif self.excel_base_date == CALENDAR_MAC_1904:
excel_base_date = 2416481
excel_1900_leap_year = False
else:
raise NotImplementedError('base date not supported.')
if month > 2:
month = month - 3
else:
month = month + 9
year -= 1
(century, decade) = (int(str(year)[:2]), int(str(year)[2:]))
excel_date = floor(146097 * century / 4) + floor(1461 * decade / 4) + floor((153 * month + 2) / 5) + day + 1721119 - excel_base_date
if excel_1900_leap_year:
excel_date += 1
if self.excel_base_date == CALENDAR_WINDOWS_1900 and excel_date == 60:
msg = 'Error: Excel believes 1900 was a leap year'
raise ValueError(msg)
<DeepExtract>
excel_time = (hours * 3600 + minutes * 60 + seconds) / 86400
</DeepExtract>
return excel_date + excel_time
|
def to_julian(self, year, month, day, hours=0, minutes=0, seconds=0):
"""Convert from Python date to Excel JD."""
if year < 1900 or year > 10000:
msg = 'Year not supported by Excel: %s' % year
raise ValueError(msg)
if self.excel_base_date == CALENDAR_WINDOWS_1900:
if year == 1900 and month <= 2:
excel_1900_leap_year = False
else:
excel_1900_leap_year = True
excel_base_date = 2415020
elif self.excel_base_date == CALENDAR_MAC_1904:
excel_base_date = 2416481
excel_1900_leap_year = False
else:
raise NotImplementedError('base date not supported.')
if month > 2:
month = month - 3
else:
month = month + 9
year -= 1
(century, decade) = (int(str(year)[:2]), int(str(year)[2:]))
excel_date = floor(146097 * century / 4) + floor(1461 * decade / 4) + floor((153 * month + 2) / 5) + day + 1721119 - excel_base_date
if excel_1900_leap_year:
excel_date += 1
if self.excel_base_date == CALENDAR_WINDOWS_1900 and excel_date == 60:
msg = 'Error: Excel believes 1900 was a leap year'
raise ValueError(msg)
excel_time = (hours * 3600 + minutes * 60 + seconds) / 86400
return excel_date + excel_time
|
dataproxy
|
positive
|
def __init__(self, view_radius: int, world_dim: int, uuid: str='corner_fixed_radius', **kwargs: Any):
self.view_radius = view_radius
self.world_dim = world_dim
self.view_corner_offsets: Optional[np.ndarray] = None
<DeepExtract>
observation_space = gym.spaces.Box(low=min(LightHouseEnvironment.SPACE_LEVELS), high=max(LightHouseEnvironment.SPACE_LEVELS), shape=(2 ** self.world_dim + 2,), dtype=int)
</DeepExtract>
super().__init__(**prepare_locals_for_super(locals()))
|
def __init__(self, view_radius: int, world_dim: int, uuid: str='corner_fixed_radius', **kwargs: Any):
self.view_radius = view_radius
self.world_dim = world_dim
self.view_corner_offsets: Optional[np.ndarray] = None
observation_space = gym.spaces.Box(low=min(LightHouseEnvironment.SPACE_LEVELS), high=max(LightHouseEnvironment.SPACE_LEVELS), shape=(2 ** self.world_dim + 2,), dtype=int)
super().__init__(**prepare_locals_for_super(locals()))
|
allenact
|
positive
|
def _get_peak_times(self, responses, raise_warnings=False):
<DeepExtract>
trace = {}
if self.somatic_recording_name not in responses:
logger.debug('Recording named %s not found in responses %s', self.somatic_recording_name, str(responses))
efel_trace = None
if responses[self.somatic_recording_name] is None:
efel_trace = None
response = responses[self.somatic_recording_name]
trace['T'] = response['time']
trace['V'] = response['voltage']
trace['stim_start'] = [self.stim_start]
trace['stim_end'] = [self.stim_end]
efel_trace = trace
</DeepExtract>
if efel_trace is None:
peak_times = None
else:
<DeepExtract>
import efel
efel.reset()
if self.threshold is not None:
efel.setThreshold(self.threshold)
if self.stimulus_current is not None:
efel.setDoubleSetting('stimulus_current', self.stimulus_current)
if self.interp_step is not None:
efel.setDoubleSetting('interp_step', self.interp_step)
if self.double_settings is not None:
for (setting_name, setting_value) in self.double_settings.items():
efel.setDoubleSetting(setting_name, setting_value)
if self.int_settings is not None:
for (setting_name, setting_value) in self.int_settings.items():
efel.setIntSetting(setting_name, setting_value)
if self.string_settings is not None:
for (setting_name, setting_value) in self.string_settings.items():
efel.setStrSetting(setting_name, setting_value)
</DeepExtract>
import efel
peaks = efel.getFeatureValues([efel_trace], ['peak_time'], raise_warnings=raise_warnings)
peak_times = peaks[0]['peak_time']
efel.reset()
return peak_times
|
def _get_peak_times(self, responses, raise_warnings=False):
trace = {}
if self.somatic_recording_name not in responses:
logger.debug('Recording named %s not found in responses %s', self.somatic_recording_name, str(responses))
efel_trace = None
if responses[self.somatic_recording_name] is None:
efel_trace = None
response = responses[self.somatic_recording_name]
trace['T'] = response['time']
trace['V'] = response['voltage']
trace['stim_start'] = [self.stim_start]
trace['stim_end'] = [self.stim_end]
efel_trace = trace
if efel_trace is None:
peak_times = None
else:
import efel
efel.reset()
if self.threshold is not None:
efel.setThreshold(self.threshold)
if self.stimulus_current is not None:
efel.setDoubleSetting('stimulus_current', self.stimulus_current)
if self.interp_step is not None:
efel.setDoubleSetting('interp_step', self.interp_step)
if self.double_settings is not None:
for (setting_name, setting_value) in self.double_settings.items():
efel.setDoubleSetting(setting_name, setting_value)
if self.int_settings is not None:
for (setting_name, setting_value) in self.int_settings.items():
efel.setIntSetting(setting_name, setting_value)
if self.string_settings is not None:
for (setting_name, setting_value) in self.string_settings.items():
efel.setStrSetting(setting_name, setting_value)
import efel
peaks = efel.getFeatureValues([efel_trace], ['peak_time'], raise_warnings=raise_warnings)
peak_times = peaks[0]['peak_time']
efel.reset()
return peak_times
|
BluePyOpt
|
positive
|
def perform_new_feature_crossing(cat_input_dict, cross_cats, dataset):
combos = combinations(cross_cats, 2)
combos_encoded_list = []
for (cat1, cat2) in combos:
<DeepExtract>
cross_cat1_cat2 = tf.keras.layers.experimental.preprocessing.CategoryCrossing()([cat_input_dict[cat1], cat_input_dict[cat2]])
hash_cross_cat1_cat2 = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=64)(cross_cat1_cat2)
cat_cross_cat1_cat2 = tf.keras.layers.experimental.preprocessing.CategoryEncoding(num_tokens=64)(hash_cross_cat1_cat2)
cat_combo_encoded = cat_cross_cat1_cat2
</DeepExtract>
combos_encoded_list.append(cat_combo_encoded)
return combos_encoded_list
|
def perform_new_feature_crossing(cat_input_dict, cross_cats, dataset):
combos = combinations(cross_cats, 2)
combos_encoded_list = []
for (cat1, cat2) in combos:
cross_cat1_cat2 = tf.keras.layers.experimental.preprocessing.CategoryCrossing()([cat_input_dict[cat1], cat_input_dict[cat2]])
hash_cross_cat1_cat2 = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=64)(cross_cat1_cat2)
cat_cross_cat1_cat2 = tf.keras.layers.experimental.preprocessing.CategoryEncoding(num_tokens=64)(hash_cross_cat1_cat2)
cat_combo_encoded = cat_cross_cat1_cat2
combos_encoded_list.append(cat_combo_encoded)
return combos_encoded_list
|
deep_autoviml
|
positive
|
def gds_validate_boolean_list(self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
if value not in (True, 1, False, 0):
<DeepExtract>
if node is not None:
'Requires sequence of boolean values (one of True, 1, False, 0)' = '%s (element %s/line %d)' % ('Requires sequence of boolean values (one of True, 1, False, 0)', node.tag, node.sourceline)
raise GDSParseError('Requires sequence of boolean values (one of True, 1, False, 0)')
</DeepExtract>
return values
|
def gds_validate_boolean_list(self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
if value not in (True, 1, False, 0):
if node is not None:
'Requires sequence of boolean values (one of True, 1, False, 0)' = '%s (element %s/line %d)' % ('Requires sequence of boolean values (one of True, 1, False, 0)', node.tag, node.sourceline)
raise GDSParseError('Requires sequence of boolean values (one of True, 1, False, 0)')
return values
|
1g1r-romset-generator
|
positive
|
def print_summary(symbol, shape=None, line_length=120, positions=[0.44, 0.64, 0.74, 1.0]):
"""Convert symbol for detail information.
Parameters
----------
symbol: Symbol
Symbol to be visualized.
shape: dict
A dict of shapes, str->shape (tuple), given input shapes.
line_length: int
Total length of printed lines
positions: list
Relative or absolute positions of log elements in each line.
Returns
------
None
"""
if not isinstance(symbol, Symbol):
raise TypeError('symbol must be Symbol')
show_shape = False
if shape is not None:
show_shape = True
interals = symbol.get_internals()
(_, out_shapes, _) = interals.infer_shape(**shape)
if out_shapes is None:
raise ValueError('Input shape is incomplete')
shape_dict = dict(zip(interals.list_outputs(), out_shapes))
conf = json.loads(symbol.tojson())
nodes = conf['nodes']
heads = set(conf['heads'][0])
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Previous Layer']
def print_row(fields, positions):
"""Print format row.
Parameters
----------
fields: list
Information field.
positions: list
Field length ratio.
Returns
------
None
"""
line = ''
for (i, field) in enumerate(fields):
line += str(field)
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
print('_' * line_length)
<DeepExtract>
line = ''
for (i, field) in enumerate(to_display):
line += str(field)
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
</DeepExtract>
print('=' * line_length)
def print_layer_summary(node, out_shape):
"""print layer information
Parameters
----------
node: dict
Node information.
out_shape: dict
Node shape information.
Returns
------
Node total parameters.
"""
op = node['op']
pre_node = []
pre_filter = 0
if op != 'null':
inputs = node['inputs']
for item in inputs:
input_node = nodes[item[0]]
input_name = input_node['name']
if input_node['op'] != 'null' or item[0] in heads:
pre_node.append(input_name)
if show_shape:
if input_node['op'] != 'null':
key = input_name + '_output'
else:
key = input_name
if key in shape_dict:
shape = shape_dict[key][1:]
pre_filter = pre_filter + int(shape[0])
cur_param = 0
if op == 'Convolution':
if 'no_bias' in node['attrs'] and (isinstance(node['attrs']['no_bias'], (bool, int)) and int(node['attrs']['no_bias'])) or (isinstance(node['attrs']['no_bias'], str) and bool(node['attrs']['no_bias'])):
cur_param = pre_filter * int(node['attrs']['num_filter'])
for k in _str2tuple(node['attrs']['kernel']):
cur_param *= int(k)
else:
cur_param = pre_filter * int(node['attrs']['num_filter'])
for k in _str2tuple(node['attrs']['kernel']):
cur_param *= int(k)
cur_param += int(node['attrs']['num_filter'])
elif op == 'FullyConnected':
if 'no_bias' in node['attrs'] and int(node['attrs']['no_bias']):
cur_param = pre_filter * int(node['attrs']['num_hidden'])
else:
cur_param = (pre_filter + 1) * int(node['attrs']['num_hidden'])
elif op == 'BatchNorm':
key = node['name'] + '_output'
if show_shape:
num_filter = shape_dict[key][1]
cur_param = int(num_filter) * 2
if not pre_node:
first_connection = ''
else:
first_connection = pre_node[0]
fields = [node['name'] + '(' + op + ')', 'x'.join([str(x) for x in out_shape]), cur_param, first_connection]
<DeepExtract>
line = ''
for (i, field) in enumerate(fields):
line += str(field)
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
</DeepExtract>
if len(pre_node) > 1:
for i in range(1, len(pre_node)):
fields = ['', '', '', pre_node[i]]
<DeepExtract>
line = ''
for (i, field) in enumerate(fields):
line += str(field)
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
</DeepExtract>
return cur_param
total_params = 0
for (i, node) in enumerate(nodes):
out_shape = []
op = node['op']
if op == 'null' and i > 0:
continue
if op != 'null' or i in heads:
if show_shape:
if op != 'null':
key = node['name'] + '_output'
else:
key = node['name']
if key in shape_dict:
out_shape = shape_dict[key][1:]
total_params += print_layer_summary(nodes[i], out_shape)
if i == len(nodes) - 1:
print('=' * line_length)
else:
print('_' * line_length)
print('Total params: %s' % total_params)
print('_' * line_length)
|
def print_summary(symbol, shape=None, line_length=120, positions=[0.44, 0.64, 0.74, 1.0]):
"""Convert symbol for detail information.
Parameters
----------
symbol: Symbol
Symbol to be visualized.
shape: dict
A dict of shapes, str->shape (tuple), given input shapes.
line_length: int
Total length of printed lines
positions: list
Relative or absolute positions of log elements in each line.
Returns
------
None
"""
if not isinstance(symbol, Symbol):
raise TypeError('symbol must be Symbol')
show_shape = False
if shape is not None:
show_shape = True
interals = symbol.get_internals()
(_, out_shapes, _) = interals.infer_shape(**shape)
if out_shapes is None:
raise ValueError('Input shape is incomplete')
shape_dict = dict(zip(interals.list_outputs(), out_shapes))
conf = json.loads(symbol.tojson())
nodes = conf['nodes']
heads = set(conf['heads'][0])
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Previous Layer']
def print_row(fields, positions):
"""Print format row.
Parameters
----------
fields: list
Information field.
positions: list
Field length ratio.
Returns
------
None
"""
line = ''
for (i, field) in enumerate(fields):
line += str(field)
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
print('_' * line_length)
line = ''
for (i, field) in enumerate(to_display):
line += str(field)
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
print('=' * line_length)
def print_layer_summary(node, out_shape):
"""print layer information
Parameters
----------
node: dict
Node information.
out_shape: dict
Node shape information.
Returns
------
Node total parameters.
"""
op = node['op']
pre_node = []
pre_filter = 0
if op != 'null':
inputs = node['inputs']
for item in inputs:
input_node = nodes[item[0]]
input_name = input_node['name']
if input_node['op'] != 'null' or item[0] in heads:
pre_node.append(input_name)
if show_shape:
if input_node['op'] != 'null':
key = input_name + '_output'
else:
key = input_name
if key in shape_dict:
shape = shape_dict[key][1:]
pre_filter = pre_filter + int(shape[0])
cur_param = 0
if op == 'Convolution':
if 'no_bias' in node['attrs'] and (isinstance(node['attrs']['no_bias'], (bool, int)) and int(node['attrs']['no_bias'])) or (isinstance(node['attrs']['no_bias'], str) and bool(node['attrs']['no_bias'])):
cur_param = pre_filter * int(node['attrs']['num_filter'])
for k in _str2tuple(node['attrs']['kernel']):
cur_param *= int(k)
else:
cur_param = pre_filter * int(node['attrs']['num_filter'])
for k in _str2tuple(node['attrs']['kernel']):
cur_param *= int(k)
cur_param += int(node['attrs']['num_filter'])
elif op == 'FullyConnected':
if 'no_bias' in node['attrs'] and int(node['attrs']['no_bias']):
cur_param = pre_filter * int(node['attrs']['num_hidden'])
else:
cur_param = (pre_filter + 1) * int(node['attrs']['num_hidden'])
elif op == 'BatchNorm':
key = node['name'] + '_output'
if show_shape:
num_filter = shape_dict[key][1]
cur_param = int(num_filter) * 2
if not pre_node:
first_connection = ''
else:
first_connection = pre_node[0]
fields = [node['name'] + '(' + op + ')', 'x'.join([str(x) for x in out_shape]), cur_param, first_connection]
line = ''
for (i, field) in enumerate(fields):
line += str(field)
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
if len(pre_node) > 1:
for i in range(1, len(pre_node)):
fields = ['', '', '', pre_node[i]]
line = ''
for (i, field) in enumerate(fields):
line += str(field)
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
return cur_param
total_params = 0
for (i, node) in enumerate(nodes):
out_shape = []
op = node['op']
if op == 'null' and i > 0:
continue
if op != 'null' or i in heads:
if show_shape:
if op != 'null':
key = node['name'] + '_output'
else:
key = node['name']
if key in shape_dict:
out_shape = shape_dict[key][1:]
total_params += print_layer_summary(nodes[i], out_shape)
if i == len(nodes) - 1:
print('=' * line_length)
else:
print('_' * line_length)
print('Total params: %s' % total_params)
print('_' * line_length)
|
Deep-Feature-Flow-Segmentation
|
positive
|
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError("An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
<DeepExtract>
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
</DeepExtract>
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
|
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError("An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
|
dtr-prototype
|
positive
|
def helper_test_prediction_accuracy(predict_n, ts, ts_train, ts_val, past_covariates, future_covariates, kwargs_tft):
"""prediction should be almost equal to y_true. Absolute tolarance is set
to 0.2 to give some flexibility"""
absolute_tolarance = 0.2
<DeepExtract>
model = TFTModel(**kwargs_tft)
model.fit(ts_train, past_covariates=past_covariates, future_covariates=future_covariates, val_series=ts_val, val_past_covariates=past_covariates, val_future_covariates=future_covariates, verbose=False)
series = None if isinstance(ts_train, TimeSeries) else ts_train
y_hat = model.predict(n=predict_n, series=series, past_covariates=past_covariates, future_covariates=future_covariates, num_samples=100 if model._is_probabilistic() else 1)
if isinstance(y_hat, TimeSeries):
y_hat = y_hat.quantile_timeseries(0.5) if y_hat.n_samples > 1 else y_hat
else:
y_hat = [ts.quantile_timeseries(0.5) if ts.n_samples > 1 else ts for ts in y_hat]
y_hat = y_hat
</DeepExtract>
y_true = ts[y_hat.start_time():y_hat.end_time()]
self.assertTrue(np.allclose(y_true[1:-1].all_values(), y_hat[1:-1].all_values(), atol=absolute_tolarance))
|
def helper_test_prediction_accuracy(predict_n, ts, ts_train, ts_val, past_covariates, future_covariates, kwargs_tft):
"""prediction should be almost equal to y_true. Absolute tolarance is set
to 0.2 to give some flexibility"""
absolute_tolarance = 0.2
model = TFTModel(**kwargs_tft)
model.fit(ts_train, past_covariates=past_covariates, future_covariates=future_covariates, val_series=ts_val, val_past_covariates=past_covariates, val_future_covariates=future_covariates, verbose=False)
series = None if isinstance(ts_train, TimeSeries) else ts_train
y_hat = model.predict(n=predict_n, series=series, past_covariates=past_covariates, future_covariates=future_covariates, num_samples=100 if model._is_probabilistic() else 1)
if isinstance(y_hat, TimeSeries):
y_hat = y_hat.quantile_timeseries(0.5) if y_hat.n_samples > 1 else y_hat
else:
y_hat = [ts.quantile_timeseries(0.5) if ts.n_samples > 1 else ts for ts in y_hat]
y_hat = y_hat
y_true = ts[y_hat.start_time():y_hat.end_time()]
self.assertTrue(np.allclose(y_true[1:-1].all_values(), y_hat[1:-1].all_values(), atol=absolute_tolarance))
|
darts
|
positive
|
def install_notifier():
"""Extract ``Notify.app`` from the workflow to data directory.
Changes the bundle ID of the installed app and gives it the
workflow's icon.
"""
archive = os.path.join(os.path.dirname(__file__), 'Notify.tgz')
destdir = wf().datadir
app_path = os.path.join(destdir, 'Notify.app')
<DeepExtract>
n = wf().datafile('Notify.app/Contents/MacOS/applet')
</DeepExtract>
log().debug('installing Notify.app to %r ...', destdir)
tgz = tarfile.open(archive, 'r:gz')
tgz.extractall(destdir)
if not os.path.exists(n):
raise RuntimeError('Notify.app could not be installed in ' + destdir)
<DeepExtract>
icon = wf().datafile('Notify.app/Contents/Resources/applet.icns')
</DeepExtract>
workflow_icon = wf().workflowfile('icon.png')
if os.path.exists(icon):
os.unlink(icon)
<DeepExtract>
tempdir = tempfile.mkdtemp(prefix='aw-', dir=wf().datadir)
try:
iconset = os.path.join(tempdir, 'Icon.iconset')
if os.path.exists(iconset):
raise RuntimeError('iconset already exists: ' + iconset)
os.makedirs(iconset)
configs = []
for i in (16, 32, 128, 256, 512):
configs.append(('icon_{0}x{0}.png'.format(i), i))
configs.append(('icon_{0}x{0}@2x.png'.format(i), i * 2))
shutil.copy(workflow_icon, os.path.join(iconset, 'icon_256x256.png'))
shutil.copy(workflow_icon, os.path.join(iconset, 'icon_128x128@2x.png'))
for (name, size) in configs:
outpath = os.path.join(iconset, name)
if os.path.exists(outpath):
continue
convert_image(workflow_icon, outpath, size)
cmd = [b'iconutil', b'-c', b'icns', b'-o', icon, iconset]
retcode = subprocess.call(cmd)
if retcode != 0:
raise RuntimeError('iconset exited with %d' % retcode)
if not os.path.exists(icon):
raise ValueError('generated ICNS file not found: ' + repr(icon))
finally:
try:
shutil.rmtree(tempdir)
except OSError:
pass
</DeepExtract>
if sys.version_info >= (2, 7):
from AppKit import NSWorkspace, NSImage
ws = NSWorkspace.sharedWorkspace()
img = NSImage.alloc().init()
img.initWithContentsOfFile_(icon)
ws.setIcon_forFile_options_(img, app_path, 0)
ip_path = os.path.join(app_path, 'Contents/Info.plist')
bundle_id = '{0}.{1}'.format(wf().bundleid, uuid.uuid4().hex)
data = plistlib.readPlist(ip_path)
log().debug('changing bundle ID to %r', bundle_id)
data['CFBundleIdentifier'] = bundle_id
plistlib.writePlist(data, ip_path)
|
def install_notifier():
"""Extract ``Notify.app`` from the workflow to data directory.
Changes the bundle ID of the installed app and gives it the
workflow's icon.
"""
archive = os.path.join(os.path.dirname(__file__), 'Notify.tgz')
destdir = wf().datadir
app_path = os.path.join(destdir, 'Notify.app')
n = wf().datafile('Notify.app/Contents/MacOS/applet')
log().debug('installing Notify.app to %r ...', destdir)
tgz = tarfile.open(archive, 'r:gz')
tgz.extractall(destdir)
if not os.path.exists(n):
raise RuntimeError('Notify.app could not be installed in ' + destdir)
icon = wf().datafile('Notify.app/Contents/Resources/applet.icns')
workflow_icon = wf().workflowfile('icon.png')
if os.path.exists(icon):
os.unlink(icon)
tempdir = tempfile.mkdtemp(prefix='aw-', dir=wf().datadir)
try:
iconset = os.path.join(tempdir, 'Icon.iconset')
if os.path.exists(iconset):
raise RuntimeError('iconset already exists: ' + iconset)
os.makedirs(iconset)
configs = []
for i in (16, 32, 128, 256, 512):
configs.append(('icon_{0}x{0}.png'.format(i), i))
configs.append(('icon_{0}x{0}@2x.png'.format(i), i * 2))
shutil.copy(workflow_icon, os.path.join(iconset, 'icon_256x256.png'))
shutil.copy(workflow_icon, os.path.join(iconset, 'icon_128x128@2x.png'))
for (name, size) in configs:
outpath = os.path.join(iconset, name)
if os.path.exists(outpath):
continue
convert_image(workflow_icon, outpath, size)
cmd = [b'iconutil', b'-c', b'icns', b'-o', icon, iconset]
retcode = subprocess.call(cmd)
if retcode != 0:
raise RuntimeError('iconset exited with %d' % retcode)
if not os.path.exists(icon):
raise ValueError('generated ICNS file not found: ' + repr(icon))
finally:
try:
shutil.rmtree(tempdir)
except OSError:
pass
if sys.version_info >= (2, 7):
from AppKit import NSWorkspace, NSImage
ws = NSWorkspace.sharedWorkspace()
img = NSImage.alloc().init()
img.initWithContentsOfFile_(icon)
ws.setIcon_forFile_options_(img, app_path, 0)
ip_path = os.path.join(app_path, 'Contents/Info.plist')
bundle_id = '{0}.{1}'.format(wf().bundleid, uuid.uuid4().hex)
data = plistlib.readPlist(ip_path)
log().debug('changing bundle ID to %r', bundle_id)
data['CFBundleIdentifier'] = bundle_id
plistlib.writePlist(data, ip_path)
|
alfred-workflow
|
positive
|
def build(self, req_data: Dict[str, Any], params: Dict[str, Any], storage: Optional[Dict[str, Any]]=None) -> None:
"""Populate some required fields to the request data."""
<DeepExtract>
required_not_passed = {'access_token'} - params.keys()
passed_not_required = params.keys() - {'access_token'}
if required_not_passed:
raise MissingRequiredAuthParams(required_not_passed)
if passed_not_required:
raise InvalidAuthParams(passed_not_required)
</DeepExtract>
req_data['headers'][self.key_name] = params['access_token']
req_data['headers'].update(self.extra)
|
def build(self, req_data: Dict[str, Any], params: Dict[str, Any], storage: Optional[Dict[str, Any]]=None) -> None:
"""Populate some required fields to the request data."""
required_not_passed = {'access_token'} - params.keys()
passed_not_required = params.keys() - {'access_token'}
if required_not_passed:
raise MissingRequiredAuthParams(required_not_passed)
if passed_not_required:
raise InvalidAuthParams(passed_not_required)
req_data['headers'][self.key_name] = params['access_token']
req_data['headers'].update(self.extra)
|
dataprep
|
positive
|
def test_subdir_coverage_console(self):
"""
Assert that when diff-cover is run from a subdirectory it
generates correct reports.
"""
old_cwd = self._mock_getcwd.return_value
self._mock_getcwd.return_value = os.path.join(old_cwd, 'sub')
<DeepExtract>
with open('git_diff_subdir.txt', encoding='utf-8') as git_diff_file:
self._set_git_diff_output(git_diff_file.read(), '')
string_buffer = BytesIO()
self._capture_stdout(string_buffer)
if 'diff-cover' in ['diff-cover', 'coverage.xml'][0]:
code = diff_cover_tool.main(['diff-cover', 'coverage.xml'])
else:
code = diff_quality_tool.main(['diff-cover', 'coverage.xml'])
assert code == expected_status
with open('subdir_coverage_console_report.txt') as expected_file:
report = string_buffer.getvalue()
expected = expected_file.read()
assert expected.strip() == report.strip().decode('utf-8')
</DeepExtract>
self._mock_getcwd.return_value = old_cwd
|
def test_subdir_coverage_console(self):
"""
Assert that when diff-cover is run from a subdirectory it
generates correct reports.
"""
old_cwd = self._mock_getcwd.return_value
self._mock_getcwd.return_value = os.path.join(old_cwd, 'sub')
with open('git_diff_subdir.txt', encoding='utf-8') as git_diff_file:
self._set_git_diff_output(git_diff_file.read(), '')
string_buffer = BytesIO()
self._capture_stdout(string_buffer)
if 'diff-cover' in ['diff-cover', 'coverage.xml'][0]:
code = diff_cover_tool.main(['diff-cover', 'coverage.xml'])
else:
code = diff_quality_tool.main(['diff-cover', 'coverage.xml'])
assert code == expected_status
with open('subdir_coverage_console_report.txt') as expected_file:
report = string_buffer.getvalue()
expected = expected_file.read()
assert expected.strip() == report.strip().decode('utf-8')
self._mock_getcwd.return_value = old_cwd
|
diff_cover
|
positive
|
def _stream_response_xml(self, payload, timeout, headers=None):
if not isinstance(payload, RestrictedElement):
raise ValueError("'payload' %r must be an RestrictedElement" % payload)
global req_id
<DeepExtract>
if isinstance(self, EWSAccountService):
(account, hint) = (self.account, self.account.version)
else:
(account, hint) = (None, self.protocol.version)
</DeepExtract>
<DeepExtract>
from .version import API_VERSIONS
api_versions = [hint.api_version] + [v for v in API_VERSIONS if v != hint.api_version]
</DeepExtract>
got_envelopes = False
for api_version in api_versions:
session = self.protocol.get_session()
try:
req_id += 1
local_req_id = req_id
soap_payload = wrap(content=payload, version=api_version, account=account)
<DeepExtract>
if os.environ.get('TRACE_EWS') is None:
return
now = time.time()
if 'streaming-request' == 'request':
stdout.write(u'REQUEST {} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> {}\n'.format(local_req_id, now))
elif 'streaming-request' == 'response':
stdout.write(u'RESPONSE {} <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< {}\n'.format(local_req_id, now))
elif 'streaming-request' == 'streaming-request':
stdout.write(u'STREAMING REQUEST {} <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< {}\n'.format(local_req_id, now))
elif 'streaming-request' == 'streaming-response':
stdout.write(u'STREAMING RESPONSE {} <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< {}\n'.format(local_req_id, now))
stdout.write(ensure_text(PrettyXmlHandler.prettify_xml(soap_payload) + b'\n'))
</DeepExtract>
(r, session) = post_ratelimited(protocol=self.protocol, session=session, url=self.protocol.service_endpoint, headers=headers, data=soap_payload, allow_redirects=False, stream=True, timeout=timeout)
got_envelopes = False
for envelope in self._parse_envelopes(r):
<DeepExtract>
log.debug('Trying API version %s for account %s', api_version, account)
try:
soap_response_payload = to_xml(envelope)
except ParseError as e:
raise SOAPError('Bad SOAP response: %s' % e)
try:
res = self._get_soap_payload(soap_response=soap_response_payload)
except (ErrorInvalidSchemaVersionForMailboxVersion, ErrorInvalidServerVersion):
if not account:
raise ValueError("'account' should not be None")
log.debug('API version %s was invalid for account %s', api_version, account)
result = None
except ResponseMessageError:
self._update_api_version(hint=hint, api_version=api_version, response=r)
raise
else:
self._update_api_version(hint=hint, api_version=api_version, response=r)
result = res
</DeepExtract>
if result is None:
break
for r in result:
if r.text is not None:
<DeepExtract>
if os.environ.get('TRACE_EWS') is None:
return
now = time.time()
if 'streaming-response' == 'request':
stdout.write(u'REQUEST {} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> {}\n'.format(local_req_id, now))
elif 'streaming-response' == 'response':
stdout.write(u'RESPONSE {} <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< {}\n'.format(local_req_id, now))
elif 'streaming-response' == 'streaming-request':
stdout.write(u'STREAMING REQUEST {} <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< {}\n'.format(local_req_id, now))
elif 'streaming-response' == 'streaming-response':
stdout.write(u'STREAMING RESPONSE {} <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< {}\n'.format(local_req_id, now))
stdout.write(ensure_text(PrettyXmlHandler.prettify_xml(r.text) + b'\n'))
</DeepExtract>
got_envelopes = True
yield result
finally:
self.protocol.release_session(session)
if got_envelopes:
break
if not got_envelopes:
raise ErrorInvalidSchemaVersionForMailboxVersion('Tried versions %s but all were invalid for account %s' % (api_versions, account))
|
def _stream_response_xml(self, payload, timeout, headers=None):
if not isinstance(payload, RestrictedElement):
raise ValueError("'payload' %r must be an RestrictedElement" % payload)
global req_id
if isinstance(self, EWSAccountService):
(account, hint) = (self.account, self.account.version)
else:
(account, hint) = (None, self.protocol.version)
from .version import API_VERSIONS
api_versions = [hint.api_version] + [v for v in API_VERSIONS if v != hint.api_version]
got_envelopes = False
for api_version in api_versions:
session = self.protocol.get_session()
try:
req_id += 1
local_req_id = req_id
soap_payload = wrap(content=payload, version=api_version, account=account)
if os.environ.get('TRACE_EWS') is None:
return
now = time.time()
if 'streaming-request' == 'request':
stdout.write(u'REQUEST {} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> {}\n'.format(local_req_id, now))
elif 'streaming-request' == 'response':
stdout.write(u'RESPONSE {} <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< {}\n'.format(local_req_id, now))
elif 'streaming-request' == 'streaming-request':
stdout.write(u'STREAMING REQUEST {} <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< {}\n'.format(local_req_id, now))
elif 'streaming-request' == 'streaming-response':
stdout.write(u'STREAMING RESPONSE {} <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< {}\n'.format(local_req_id, now))
stdout.write(ensure_text(PrettyXmlHandler.prettify_xml(soap_payload) + b'\n'))
(r, session) = post_ratelimited(protocol=self.protocol, session=session, url=self.protocol.service_endpoint, headers=headers, data=soap_payload, allow_redirects=False, stream=True, timeout=timeout)
got_envelopes = False
for envelope in self._parse_envelopes(r):
log.debug('Trying API version %s for account %s', api_version, account)
try:
soap_response_payload = to_xml(envelope)
except ParseError as e:
raise SOAPError('Bad SOAP response: %s' % e)
try:
res = self._get_soap_payload(soap_response=soap_response_payload)
except (ErrorInvalidSchemaVersionForMailboxVersion, ErrorInvalidServerVersion):
if not account:
raise ValueError("'account' should not be None")
log.debug('API version %s was invalid for account %s', api_version, account)
result = None
except ResponseMessageError:
self._update_api_version(hint=hint, api_version=api_version, response=r)
raise
else:
self._update_api_version(hint=hint, api_version=api_version, response=r)
result = res
if result is None:
break
for r in result:
if r.text is not None:
if os.environ.get('TRACE_EWS') is None:
return
now = time.time()
if 'streaming-response' == 'request':
stdout.write(u'REQUEST {} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> {}\n'.format(local_req_id, now))
elif 'streaming-response' == 'response':
stdout.write(u'RESPONSE {} <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< {}\n'.format(local_req_id, now))
elif 'streaming-response' == 'streaming-request':
stdout.write(u'STREAMING REQUEST {} <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< {}\n'.format(local_req_id, now))
elif 'streaming-response' == 'streaming-response':
stdout.write(u'STREAMING RESPONSE {} <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< {}\n'.format(local_req_id, now))
stdout.write(ensure_text(PrettyXmlHandler.prettify_xml(r.text) + b'\n'))
got_envelopes = True
yield result
finally:
self.protocol.release_session(session)
if got_envelopes:
break
if not got_envelopes:
raise ErrorInvalidSchemaVersionForMailboxVersion('Tried versions %s but all were invalid for account %s' % (api_versions, account))
|
exchangelib
|
positive
|
def _get_transformer_image():
<DeepExtract>
if model_specs['net_type'] == 'rn':
(scale, mean_, std_) = (-1, np.array([123.68, 116.779, 103.939]).reshape((1, 1, 3)), None)
if model_specs['net_type'] in ('rna',):
(scale, mean_, std_) = (1.0 / 255, np.array([0.485, 0.456, 0.406]).reshape((1, 1, 3)), np.array([0.229, 0.224, 0.225]).reshape((1, 1, 3)))
(scale, mean_, std_) = (None, None, None)
</DeepExtract>
transformers = []
if scale > 0:
transformers.append(ts.ColorScale(np.single(scale)))
transformers.append(ts.ColorNormalize(mean_, std_))
return transformers
|
def _get_transformer_image():
if model_specs['net_type'] == 'rn':
(scale, mean_, std_) = (-1, np.array([123.68, 116.779, 103.939]).reshape((1, 1, 3)), None)
if model_specs['net_type'] in ('rna',):
(scale, mean_, std_) = (1.0 / 255, np.array([0.485, 0.456, 0.406]).reshape((1, 1, 3)), np.array([0.229, 0.224, 0.225]).reshape((1, 1, 3)))
(scale, mean_, std_) = (None, None, None)
transformers = []
if scale > 0:
transformers.append(ts.ColorScale(np.single(scale)))
transformers.append(ts.ColorNormalize(mean_, std_))
return transformers
|
CBST
|
positive
|
def to_int32(fragment):
<DeepExtract>
value = to_integer(fragment)
if value in ('NaN', 0, inf):
int32bit = value
posint = (1 if abs(value) == value else -1) * int(floor(abs(value)))
int32bit = posint % 2 ** 32
</DeepExtract>
if int32bit >= 2 ** 31:
return int32bit - 2 ** 32
else:
return int32bit
|
def to_int32(fragment):
value = to_integer(fragment)
if value in ('NaN', 0, inf):
int32bit = value
posint = (1 if abs(value) == value else -1) * int(floor(abs(value)))
int32bit = posint % 2 ** 32
if int32bit >= 2 ** 31:
return int32bit - 2 ** 32
else:
return int32bit
|
calmjs.parse
|
positive
|
def copy_chapters_across_with_fixes(chapter_info, fixed_toc):
comments_html = open('disqus_comments.html').read()
buy_book_div = html.fromstring(open('buy_the_book_banner.html').read())
analytics_div = html.fromstring(open('analytics.html').read())
load_toc_script = open('load_toc.js').read()
for chapter in CHAPTERS:
old_contents = open(chapter).read()
<DeepExtract>
parsed = html.fromstring(old_contents)
links = parsed.cssselect('a[href^=\\#]')
for link in links:
for other_chap in CHAPTERS:
if other_chap == chapter:
continue
chapter_id = chapter_info[other_chap].href_id
href = link.get('href')
targets = ['#' + x for x in chapter_info[other_chap].xrefs]
if href == '#' + chapter_id:
link.set('href', f'/book/{other_chap}')
elif href in targets:
link.set('href', f'/book/{other_chap}{href}')
new_contents = html.tostring(parsed)
</DeepExtract>
<DeepExtract>
parsed = html.fromstring(new_contents)
titles = parsed.cssselect('h2')
if titles and titles[0].text.startswith('Appendix A'):
title = titles[0]
title.text = chapter_info[chapter].chapter_title
new_contents = html.tostring(parsed)
</DeepExtract>
parsed = html.fromstring(new_contents)
body = parsed.cssselect('body')[0]
if parsed.cssselect('#header'):
head = parsed.cssselect('head')[0]
head.append(html.fragment_fromstring('<script>' + load_toc_script + '</script>'))
body.set('class', 'article toc2 toc-left')
body.insert(0, buy_book_div)
body.append(html.fromstring(comments_html.replace('CHAPTER_NAME', chapter.split('.')[0])))
body.append(analytics_div)
fixed_contents = html.tostring(parsed)
with open(DEST / chapter, 'w') as f:
f.write(fixed_contents.decode('utf8'))
with open(DEST / 'toc.html', 'w') as f:
f.write(html.tostring(fixed_toc).decode('utf8'))
|
def copy_chapters_across_with_fixes(chapter_info, fixed_toc):
comments_html = open('disqus_comments.html').read()
buy_book_div = html.fromstring(open('buy_the_book_banner.html').read())
analytics_div = html.fromstring(open('analytics.html').read())
load_toc_script = open('load_toc.js').read()
for chapter in CHAPTERS:
old_contents = open(chapter).read()
parsed = html.fromstring(old_contents)
links = parsed.cssselect('a[href^=\\#]')
for link in links:
for other_chap in CHAPTERS:
if other_chap == chapter:
continue
chapter_id = chapter_info[other_chap].href_id
href = link.get('href')
targets = ['#' + x for x in chapter_info[other_chap].xrefs]
if href == '#' + chapter_id:
link.set('href', f'/book/{other_chap}')
elif href in targets:
link.set('href', f'/book/{other_chap}{href}')
new_contents = html.tostring(parsed)
parsed = html.fromstring(new_contents)
titles = parsed.cssselect('h2')
if titles and titles[0].text.startswith('Appendix A'):
title = titles[0]
title.text = chapter_info[chapter].chapter_title
new_contents = html.tostring(parsed)
parsed = html.fromstring(new_contents)
body = parsed.cssselect('body')[0]
if parsed.cssselect('#header'):
head = parsed.cssselect('head')[0]
head.append(html.fragment_fromstring('<script>' + load_toc_script + '</script>'))
body.set('class', 'article toc2 toc-left')
body.insert(0, buy_book_div)
body.append(html.fromstring(comments_html.replace('CHAPTER_NAME', chapter.split('.')[0])))
body.append(analytics_div)
fixed_contents = html.tostring(parsed)
with open(DEST / chapter, 'w') as f:
f.write(fixed_contents.decode('utf8'))
with open(DEST / 'toc.html', 'w') as f:
f.write(html.tostring(fixed_toc).decode('utf8'))
|
Book-TDD-Web-Dev-Python
|
positive
|
def main():
image_dir = FLAGS.image_file
if os.path.isdir(FLAGS.output_dir):
if len(os.listdir(FLAGS.output_dir)) > 0:
print('WARNING: output folder is not empty')
else:
sys.exit('output path not defined or does not exist')
<DeepExtract>
stats_dict = {}
with open(FLAGS.tiles_stats) as f:
for line in f:
line2 = line.replace('[', '').replace(']', '').split()
if len(line2) > 0:
tilename = '.'.join(line2[0].split('.')[:-1])
cTileRootName = '_'.join(os.path.basename(tilename).split('_')[0:-2])
if cTileRootName not in stats_dict.keys():
stats_dict[cTileRootName] = {}
stats_dict[cTileRootName]['tiles'] = {}
stats_dict[cTileRootName]['xMax'] = 0
stats_dict[cTileRootName]['yMax'] = 0
ixTile = int(os.path.basename(tilename).split('_')[-2])
iyTile = int(os.path.basename(tilename).split('_')[-1].split('.')[0])
stats_dict[cTileRootName]['xMax'] = max(stats_dict[cTileRootName]['xMax'], ixTile)
stats_dict[cTileRootName]['yMax'] = max(stats_dict[cTileRootName]['yMax'], iyTile)
lineProb = line.split('[')[1]
lineProb = lineProb.split(']')[0]
lineProb = lineProb.split()
stats_dict[cTileRootName]['tiles'][tilename] = [str(ixTile), str(iyTile), lineProb]
stats_dict = stats_dict
</DeepExtract>
sub_dirs = []
for item in os.listdir(image_dir):
if os.path.isdir(os.path.join(image_dir, item)):
sub_dirs.append(os.path.join(image_dir, item))
print('sub_dirs:')
print(sub_dirs)
SlideRootName = ''
SlideNames = []
skip = False
filtered_dict = {}
for k in stats_dict.keys():
if FLAGS.slide_filter in k:
filtered_dict[k] = stats_dict[k]
dir_name = 'unknown'
for slide in sorted(filtered_dict.keys()):
NewSlide = True
t = time.time()
ixTile = int(stats_dict[slide]['xMax'])
iyTile = int(stats_dict[slide]['yMax'])
req_xLength = ixTile * (FLAGS.tiles_size - FLAGS.tiles_overlap) + FLAGS.tiles_size
req_yLength = iyTile * (FLAGS.tiles_size - FLAGS.tiles_overlap) + FLAGS.tiles_size
if FLAGS.resample_factor > 0:
req_xLength = int(req_xLength / FLAGS.resample_factor + 1)
req_yLength = int(req_yLength / FLAGS.resample_factor + 1)
WholeSlide_0 = np.zeros([req_xLength, req_yLength, 3])
HeatMap_0 = np.zeros([req_xLength, req_yLength, 3])
HeatMap_bin = np.zeros([req_xLength, req_yLength, 5])
HeatMap_divider = np.zeros([req_xLength, req_yLength, 5])
print('Checking slide ' + slide)
print(req_xLength, req_yLength)
<DeepExtract>
HeatMap_divider = HeatMap_divider * 1.0 + 0.0
HeatMap_0 = HeatMap_0
HeatMap_divider[HeatMap_divider == 0] = 1.0
HeatMap_0 = np.divide(HeatMap_0, HeatMap_divider[:, :, 0:3])
alpha = 0.33
out = HeatMap_0 * 255 * (1.0 - alpha) + WholeSlide_0 * alpha
out = out.transpose((1, 0, 2))
heatmap_path = os.path.join(FLAGS.output_dir, 'heatmaps')
if os.path.isdir(heatmap_path):
pass
else:
os.makedirs(heatmap_path)
filename = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '.jpg')
if NewSlide:
if os.path.isfile(filename):
print(filename + ' has already been processed in the past. skipped.')
skip = True
skip = skip
else:
print(filename + ' is processed for the first times.')
out[out == [0, 0, 0]] = 255
imsave(filename, out)
if NewSlide == False:
HeatMap_bin = np.divide(HeatMap_bin, HeatMap_divider)
ImBin = HeatMap_bin * 0.0
if FLAGS.thresholds is not None:
thresholds = FLAGS.thresholds
thresholds = [float(x) for x in thresholds.split(',')]
ImBinT = ImBin
ImBinT[:, :, 0] = (HeatMap_bin[:, :, 0] - thresholds[0]) / (1 - thresholds[0])
ImBinT[:, :, 1] = (HeatMap_bin[:, :, 1] - thresholds[1]) / (1 - thresholds[1])
ImBinT[:, :, 2] = (HeatMap_bin[:, :, 2] - thresholds[2]) / (1 - thresholds[2])
ImBinT[:, :, 3] = (HeatMap_bin[:, :, 3] - thresholds[3]) / (1 - thresholds[3])
ImBinT[:, :, 4] = (HeatMap_bin[:, :, 4] - thresholds[4]) / (1 - thresholds[4])
Tmax = np.max(ImBinT, 2)
ImBin[:, :, 0] = ImBinT[:, :, 0] == Tmax
ImBin[:, :, 1] = ImBinT[:, :, 1] == Tmax
ImBin[:, :, 2] = ImBinT[:, :, 2] == Tmax
ImBin[:, :, 3] = ImBinT[:, :, 3] == Tmax
ImBin[:, :, 4] = ImBinT[:, :, 4] == Tmax
else:
Tmax = np.max(HeatMap_bin, 2)
ImBin[:, :, 0] = HeatMap_bin[:, :, 0] == Tmax
ImBin[:, :, 1] = HeatMap_bin[:, :, 1] == Tmax
ImBin[:, :, 2] = HeatMap_bin[:, :, 2] == Tmax
ImBin[:, :, 3] = HeatMap_bin[:, :, 3] == Tmax
ImBin[:, :, 4] = HeatMap_bin[:, :, 4] == Tmax
class_rgb = {}
class_rgb[0] = [0, 0, 0]
class_rgb[1] = [1.0, 0, 0]
class_rgb[2] = [1.0, 165.0 / 255.0, 0]
class_rgb[3] = [0, 0, 1.0]
class_rgb[4] = [186.0 / 255.0, 85.0 / 255.0, 211.0 / 255.0]
cl1 = sum(ImBin[HeatMap_divider[:, :, 1] * 1.0 + 0.0 > 0, 1])
cl2 = sum(ImBin[HeatMap_divider[:, :, 1] * 1.0 + 0.0 > 0, 2])
cl3 = sum(ImBin[HeatMap_divider[:, :, 1] * 1.0 + 0.0 > 0, 3])
ImBinf = np.zeros([ImBin.shape[0], ImBin.shape[1], 3])
for rgb in [0, 1, 2]:
ImBinf[:, :, rgb] = ImBin[:, :, 0] * class_rgb[0][rgb] + ImBin[:, :, 1] * class_rgb[1][rgb] + ImBin[:, :, 2] * class_rgb[2][rgb] + ImBin[:, :, 3] * class_rgb[3][rgb] + ImBin[:, :, 4] * class_rgb[4][rgb]
ImBinf[HeatMap_divider[:, :, 0:3] == 0] = 1
ImBinf = ImBinf.transpose((1, 0, 2))
ImBinf = ImBinf * 255.0
print('*************')
filename = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '.csv')
'\n\t\tt, b_tmp= cv2.threshold(np.array(np.uint8(ImBin[:,:,2])),120,255,cv2.THRESH_BINARY_INV)\n\t\tcontours,h = cv2.findContours(b_tmp,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\t\tEach_Tumor_Area = []\n\t\tEach_Tumor_Mean_Dia = []\n\t\tnIt = 0\n\t\tImBin = np.ascontiguousarray(ImBin)\n\t\tfor eachT in contours:\n\t\t\tnIt += 1\n\t\t\t# Each_Tumor_Area.append( (cv2.contourArea(eachT)+len(eachT)) * FLAGS.resample_factor * FLAGS.resample_factor)\n\t\t\tif len(eachT>2):\n\t\t\t\tEach_Tumor_Area.append(cv2.contourArea(eachT) * FLAGS.resample_factor * FLAGS.resample_factor)\n\t\t\telse:\n\t\t\t\tEach_Tumor_Area.append( len(eachT) * FLAGS.resample_factor * FLAGS.resample_factor)\n\t\t\tif len(eachT) >= 5:\n\t\t\t\tellipse = cv2.fitEllipse(eachT)\n\t\t\t\tMinAx = min(ellipse[1]) * FLAGS.resample_factor\n\t\t\t\tMaxAx = max(ellipse[1]) * FLAGS.resample_factor\n\t\t\t\tEach_Tumor_Mean_Dia.append( (MinAx + MaxAx) / 2 )\n\t\t\telse:\n\t\t\t\tEach_Tumor_Mean_Dia.append(np.sqrt( Each_Tumor_Area[-1] / np.pi) )\n\t\t\tM= cv2.moments(eachT)\n\t\t\tif M["m00"] != 0:\n\t\t\t\tcx= int(M[\'m10\']/M[\'m00\'])\n\t\t\t\tcy= int(M[\'m01\']/M[\'m00\'])\n\t\t\telse:\n\t\t\t\tcx = 0\n\t\t\t\tcy = 0\n\t\t\tImBin = cv2.putText(ImBin, text = str(nIt), org=(cx, cy), fontFace= cv2.FONT_HERSHEY_SIMPLEX, fontScale=3, color=(255,211,25), thickness=4, lineType=cv2.LINE_AA)\n\t\t\n\t\tNb_Tumor = len(Each_Tumor_Area)\n\t\timport csv\n\t\twith open(filename, \'w\', newline=\'\') as csvfile:\n\t\t\tcsvwriter = csv.writer(csvfile)\n\t\t\tfields = [\'imageName\', \'Percent_Tumor\', \'Avg_Tumor_Prob\', \'Nb_tumors\', \'Nb_tumors_1000px_Dia_or_more\', \'Nb_tumors_5000px_Dia_or_more\', \'Tumor_areas\', \'Tumor_avg_diam\'] \n\t\t\tcsvwriter.writerow(fields)\n\t\t\trows = [[cTileRootName, str(round(c1/(c1+c3)*100,2)), str(round(Avg_Prob_Class1*100, 2)), str(Nb_Tumor), str((np.asarray(Each_Tumor_Mean_Dia) > 1000).sum()), str((np.asarray(Each_Tumor_Mean_Dia) > 5000).sum()), str(Each_Tumor_Area), str(Each_Tumor_Mean_Dia)]]\n\t\t\tcsvwriter.writerows(rows)\t\n\t\t'
import csv
with open(filename, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
fields = ['imageName', 'Intraparaenchymal', 'Leptomeningeal', 'Non tumor']
csvwriter.writerow(fields)
rows = [[slide, str(round(cl1, 1)), str(round(cl2, 1)), str(round(cl3, 1))]]
csvwriter.writerows(rows)
filename = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '_segmented.jpg')
imsave(filename, ImBinf * 255.0)
filename_tmp = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '_' + 'unknown' + '.jpg')
print(filename_tmp)
if os.path.exists(filename_tmp):
os.remove(filename_tmp)
skip = False
skip = skip
</DeepExtract>
if skip:
print('slide done --')
continue
cc = 0
for tile in stats_dict[slide]['tiles'].keys():
cc += 1
extensions = ['.jpeg', '.jpg']
isError = True
dir_name = 'unknownTMP'
for extension in extensions:
for sub_dir in list(sub_dirs):
try:
test_filename = os.path.join(sub_dir, tile + extension)
im2 = imread(test_filename)
dir_name_old = dir_name
dir_name = os.path.basename(sub_dir)
isError = False
except:
isError = True
if isError == False:
break
if isError == False:
break
if isError == True:
continue
cTileRootName = slide
ixTile = int(stats_dict[slide]['tiles'][tile][0])
iyTile = int(stats_dict[slide]['tiles'][tile][1])
rTile = im2.shape[1]
cTile = im2.shape[0]
xTile = ixTile * (FLAGS.tiles_size - FLAGS.tiles_overlap)
yTile = iyTile * (FLAGS.tiles_size - FLAGS.tiles_overlap)
req_xLength = xTile + rTile
req_yLength = yTile + cTile
if FLAGS.resample_factor > 0:
rTile = int(rTile / FLAGS.resample_factor)
cTile = int(cTile / FLAGS.resample_factor)
if rTile <= 0:
im2s = im2
elif cTile <= 0:
im2s = im2
else:
im2s = np.array(Image.fromarray(im2).resize((cTile, rTile)))
rTile = im2s.shape[1]
cTile = im2s.shape[0]
xTile = int(xTile / FLAGS.resample_factor)
yTile = int(yTile / FLAGS.resample_factor)
req_xLength = xTile + rTile
req_yLength = yTile + cTile
else:
im2s = im2
<DeepExtract>
lineProb = [float(x) for x in stats_dict[slide]['tiles'][tile][2]]
if FLAGS.Cmap == 'CancerType':
NumberOfClasses = len(lineProb)
class_all = []
sum_class = 0
for nC in range(1, NumberOfClasses):
class_all.append(float(lineProb[nC]))
sum_class = sum_class + float(lineProb[nC])
for nC in range(NumberOfClasses - 1):
class_all[nC] = class_all[nC] / sum_class
current_score = max(class_all)
oClass = class_all.index(max(class_all)) + 1
if FLAGS.thresholds is not None:
thresholds = FLAGS.thresholds
thresholds = [float(x) for x in thresholds.split(',')]
if len(thresholds) != len(class_all):
print('Error: There must be one threshold per class:')
probDiff = []
for nC in range(len(class_all)):
probDiff.append(class_all[nC] - thresholds[nC])
oClass = probDiff.index(max(probDiff)) + 1
current_score = class_all[oClass - 1]
score_correction = thresholds[oClass - 1]
else:
score_correction = 1.0 / len(class_all)
if oClass == 1:
c = mcolors.ColorConverter().to_rgb
cmap = make_colormap([c('white'), c('black')])
elif oClass == 2:
c = mcolors.ColorConverter().to_rgb
cmap = make_colormap([c('white'), c('red')])
elif oClass == 3:
c = mcolors.ColorConverter().to_rgb
cmap = make_colormap([c('white'), c('orange')])
elif oClass == 4:
c = mcolors.ColorConverter().to_rgb
cmap = make_colormap([c('white'), c('blue')])
elif oClass == 5:
c = mcolors.ColorConverter().to_rgb
cmap = make_colormap([c('white'), c('mediumorchid')])
else:
cmap = plt.get_cmap('Greens')
(oClass, cmap, current_score, class_prob) = (oClass, cmap, (current_score - score_correction) / (1.0 - score_correction), [class_all[0], class_all[1], class_all[2], class_all[3], class_all[4]])
</DeepExtract>
if current_score < 0:
print('No probability found')
else:
WholeSlide_0[xTile:req_xLength, yTile:req_yLength, :] = np.swapaxes(im2s, 0, 1)
heattile = np.ones([req_xLength - xTile, req_yLength - yTile]) * current_score
heattile = cmap(heattile)
heattile = heattile[:, :, 0:3]
HeatMap_0[xTile:req_xLength, yTile:req_yLength, :] = HeatMap_0[xTile:req_xLength, yTile:req_yLength, :] + heattile
HeatMap_divider[xTile:req_xLength, yTile:req_yLength, :] = HeatMap_divider[xTile:req_xLength, yTile:req_yLength, :] + 1
HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 0] = HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 0] + np.ones([req_xLength - xTile, req_yLength - yTile]) * class_prob[0]
HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 1] = HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 1] + np.ones([req_xLength - xTile, req_yLength - yTile]) * class_prob[1]
HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 2] = HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 2] + np.ones([req_xLength - xTile, req_yLength - yTile]) * class_prob[2]
HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 3] = HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 3] + np.ones([req_xLength - xTile, req_yLength - yTile]) * class_prob[3]
HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 4] = HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 4] + np.ones([req_xLength - xTile, req_yLength - yTile]) * class_prob[4]
if cc % 1000 == 0:
print('tile time (sec): ' + str((time.time() - t) / cc))
NewSlide = False
<DeepExtract>
HeatMap_divider = HeatMap_divider * 1.0 + 0.0
HeatMap_0 = HeatMap_0
HeatMap_divider[HeatMap_divider == 0] = 1.0
HeatMap_0 = np.divide(HeatMap_0, HeatMap_divider[:, :, 0:3])
alpha = 0.33
out = HeatMap_0 * 255 * (1.0 - alpha) + WholeSlide_0 * alpha
out = out.transpose((1, 0, 2))
heatmap_path = os.path.join(FLAGS.output_dir, 'heatmaps')
if os.path.isdir(heatmap_path):
pass
else:
os.makedirs(heatmap_path)
filename = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '.jpg')
if NewSlide:
if os.path.isfile(filename):
print(filename + ' has already been processed in the past. skipped.')
skip = True
skip = skip
else:
print(filename + ' is processed for the first times.')
out[out == [0, 0, 0]] = 255
imsave(filename, out)
if NewSlide == False:
HeatMap_bin = np.divide(HeatMap_bin, HeatMap_divider)
ImBin = HeatMap_bin * 0.0
if FLAGS.thresholds is not None:
thresholds = FLAGS.thresholds
thresholds = [float(x) for x in thresholds.split(',')]
ImBinT = ImBin
ImBinT[:, :, 0] = (HeatMap_bin[:, :, 0] - thresholds[0]) / (1 - thresholds[0])
ImBinT[:, :, 1] = (HeatMap_bin[:, :, 1] - thresholds[1]) / (1 - thresholds[1])
ImBinT[:, :, 2] = (HeatMap_bin[:, :, 2] - thresholds[2]) / (1 - thresholds[2])
ImBinT[:, :, 3] = (HeatMap_bin[:, :, 3] - thresholds[3]) / (1 - thresholds[3])
ImBinT[:, :, 4] = (HeatMap_bin[:, :, 4] - thresholds[4]) / (1 - thresholds[4])
Tmax = np.max(ImBinT, 2)
ImBin[:, :, 0] = ImBinT[:, :, 0] == Tmax
ImBin[:, :, 1] = ImBinT[:, :, 1] == Tmax
ImBin[:, :, 2] = ImBinT[:, :, 2] == Tmax
ImBin[:, :, 3] = ImBinT[:, :, 3] == Tmax
ImBin[:, :, 4] = ImBinT[:, :, 4] == Tmax
else:
Tmax = np.max(HeatMap_bin, 2)
ImBin[:, :, 0] = HeatMap_bin[:, :, 0] == Tmax
ImBin[:, :, 1] = HeatMap_bin[:, :, 1] == Tmax
ImBin[:, :, 2] = HeatMap_bin[:, :, 2] == Tmax
ImBin[:, :, 3] = HeatMap_bin[:, :, 3] == Tmax
ImBin[:, :, 4] = HeatMap_bin[:, :, 4] == Tmax
class_rgb = {}
class_rgb[0] = [0, 0, 0]
class_rgb[1] = [1.0, 0, 0]
class_rgb[2] = [1.0, 165.0 / 255.0, 0]
class_rgb[3] = [0, 0, 1.0]
class_rgb[4] = [186.0 / 255.0, 85.0 / 255.0, 211.0 / 255.0]
cl1 = sum(ImBin[HeatMap_divider[:, :, 1] * 1.0 + 0.0 > 0, 1])
cl2 = sum(ImBin[HeatMap_divider[:, :, 1] * 1.0 + 0.0 > 0, 2])
cl3 = sum(ImBin[HeatMap_divider[:, :, 1] * 1.0 + 0.0 > 0, 3])
ImBinf = np.zeros([ImBin.shape[0], ImBin.shape[1], 3])
for rgb in [0, 1, 2]:
ImBinf[:, :, rgb] = ImBin[:, :, 0] * class_rgb[0][rgb] + ImBin[:, :, 1] * class_rgb[1][rgb] + ImBin[:, :, 2] * class_rgb[2][rgb] + ImBin[:, :, 3] * class_rgb[3][rgb] + ImBin[:, :, 4] * class_rgb[4][rgb]
ImBinf[HeatMap_divider[:, :, 0:3] == 0] = 1
ImBinf = ImBinf.transpose((1, 0, 2))
ImBinf = ImBinf * 255.0
print('*************')
filename = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '.csv')
'\n\t\tt, b_tmp= cv2.threshold(np.array(np.uint8(ImBin[:,:,2])),120,255,cv2.THRESH_BINARY_INV)\n\t\tcontours,h = cv2.findContours(b_tmp,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\t\tEach_Tumor_Area = []\n\t\tEach_Tumor_Mean_Dia = []\n\t\tnIt = 0\n\t\tImBin = np.ascontiguousarray(ImBin)\n\t\tfor eachT in contours:\n\t\t\tnIt += 1\n\t\t\t# Each_Tumor_Area.append( (cv2.contourArea(eachT)+len(eachT)) * FLAGS.resample_factor * FLAGS.resample_factor)\n\t\t\tif len(eachT>2):\n\t\t\t\tEach_Tumor_Area.append(cv2.contourArea(eachT) * FLAGS.resample_factor * FLAGS.resample_factor)\n\t\t\telse:\n\t\t\t\tEach_Tumor_Area.append( len(eachT) * FLAGS.resample_factor * FLAGS.resample_factor)\n\t\t\tif len(eachT) >= 5:\n\t\t\t\tellipse = cv2.fitEllipse(eachT)\n\t\t\t\tMinAx = min(ellipse[1]) * FLAGS.resample_factor\n\t\t\t\tMaxAx = max(ellipse[1]) * FLAGS.resample_factor\n\t\t\t\tEach_Tumor_Mean_Dia.append( (MinAx + MaxAx) / 2 )\n\t\t\telse:\n\t\t\t\tEach_Tumor_Mean_Dia.append(np.sqrt( Each_Tumor_Area[-1] / np.pi) )\n\t\t\tM= cv2.moments(eachT)\n\t\t\tif M["m00"] != 0:\n\t\t\t\tcx= int(M[\'m10\']/M[\'m00\'])\n\t\t\t\tcy= int(M[\'m01\']/M[\'m00\'])\n\t\t\telse:\n\t\t\t\tcx = 0\n\t\t\t\tcy = 0\n\t\t\tImBin = cv2.putText(ImBin, text = str(nIt), org=(cx, cy), fontFace= cv2.FONT_HERSHEY_SIMPLEX, fontScale=3, color=(255,211,25), thickness=4, lineType=cv2.LINE_AA)\n\t\t\n\t\tNb_Tumor = len(Each_Tumor_Area)\n\t\timport csv\n\t\twith open(filename, \'w\', newline=\'\') as csvfile:\n\t\t\tcsvwriter = csv.writer(csvfile)\n\t\t\tfields = [\'imageName\', \'Percent_Tumor\', \'Avg_Tumor_Prob\', \'Nb_tumors\', \'Nb_tumors_1000px_Dia_or_more\', \'Nb_tumors_5000px_Dia_or_more\', \'Tumor_areas\', \'Tumor_avg_diam\'] \n\t\t\tcsvwriter.writerow(fields)\n\t\t\trows = [[cTileRootName, str(round(c1/(c1+c3)*100,2)), str(round(Avg_Prob_Class1*100, 2)), str(Nb_Tumor), str((np.asarray(Each_Tumor_Mean_Dia) > 1000).sum()), str((np.asarray(Each_Tumor_Mean_Dia) > 5000).sum()), str(Each_Tumor_Area), str(Each_Tumor_Mean_Dia)]]\n\t\t\tcsvwriter.writerows(rows)\t\n\t\t'
import csv
with open(filename, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
fields = ['imageName', 'Intraparenchymal', 'Leptomeningeal', 'Non tumor']
csvwriter.writerow(fields)
rows = [[slide, str(round(cl1, 1)), str(round(cl2, 1)), str(round(cl3, 1))]]
csvwriter.writerows(rows)
filename = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '_segmented.jpg')
imsave(filename, ImBinf * 255.0)
filename_tmp = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '_' + 'unknown' + '.jpg')
print(filename_tmp)
if os.path.exists(filename_tmp):
os.remove(filename_tmp)
skip = False
skip = skip
</DeepExtract>
print('slide time (min): ' + str((time.time() - t) / 60))
|
def main():
image_dir = FLAGS.image_file
if os.path.isdir(FLAGS.output_dir):
if len(os.listdir(FLAGS.output_dir)) > 0:
print('WARNING: output folder is not empty')
else:
sys.exit('output path not defined or does not exist')
stats_dict = {}
with open(FLAGS.tiles_stats) as f:
for line in f:
line2 = line.replace('[', '').replace(']', '').split()
if len(line2) > 0:
tilename = '.'.join(line2[0].split('.')[:-1])
cTileRootName = '_'.join(os.path.basename(tilename).split('_')[0:-2])
if cTileRootName not in stats_dict.keys():
stats_dict[cTileRootName] = {}
stats_dict[cTileRootName]['tiles'] = {}
stats_dict[cTileRootName]['xMax'] = 0
stats_dict[cTileRootName]['yMax'] = 0
ixTile = int(os.path.basename(tilename).split('_')[-2])
iyTile = int(os.path.basename(tilename).split('_')[-1].split('.')[0])
stats_dict[cTileRootName]['xMax'] = max(stats_dict[cTileRootName]['xMax'], ixTile)
stats_dict[cTileRootName]['yMax'] = max(stats_dict[cTileRootName]['yMax'], iyTile)
lineProb = line.split('[')[1]
lineProb = lineProb.split(']')[0]
lineProb = lineProb.split()
stats_dict[cTileRootName]['tiles'][tilename] = [str(ixTile), str(iyTile), lineProb]
stats_dict = stats_dict
sub_dirs = []
for item in os.listdir(image_dir):
if os.path.isdir(os.path.join(image_dir, item)):
sub_dirs.append(os.path.join(image_dir, item))
print('sub_dirs:')
print(sub_dirs)
SlideRootName = ''
SlideNames = []
skip = False
filtered_dict = {}
for k in stats_dict.keys():
if FLAGS.slide_filter in k:
filtered_dict[k] = stats_dict[k]
dir_name = 'unknown'
for slide in sorted(filtered_dict.keys()):
NewSlide = True
t = time.time()
ixTile = int(stats_dict[slide]['xMax'])
iyTile = int(stats_dict[slide]['yMax'])
req_xLength = ixTile * (FLAGS.tiles_size - FLAGS.tiles_overlap) + FLAGS.tiles_size
req_yLength = iyTile * (FLAGS.tiles_size - FLAGS.tiles_overlap) + FLAGS.tiles_size
if FLAGS.resample_factor > 0:
req_xLength = int(req_xLength / FLAGS.resample_factor + 1)
req_yLength = int(req_yLength / FLAGS.resample_factor + 1)
WholeSlide_0 = np.zeros([req_xLength, req_yLength, 3])
HeatMap_0 = np.zeros([req_xLength, req_yLength, 3])
HeatMap_bin = np.zeros([req_xLength, req_yLength, 5])
HeatMap_divider = np.zeros([req_xLength, req_yLength, 5])
print('Checking slide ' + slide)
print(req_xLength, req_yLength)
HeatMap_divider = HeatMap_divider * 1.0 + 0.0
HeatMap_0 = HeatMap_0
HeatMap_divider[HeatMap_divider == 0] = 1.0
HeatMap_0 = np.divide(HeatMap_0, HeatMap_divider[:, :, 0:3])
alpha = 0.33
out = HeatMap_0 * 255 * (1.0 - alpha) + WholeSlide_0 * alpha
out = out.transpose((1, 0, 2))
heatmap_path = os.path.join(FLAGS.output_dir, 'heatmaps')
if os.path.isdir(heatmap_path):
pass
else:
os.makedirs(heatmap_path)
filename = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '.jpg')
if NewSlide:
if os.path.isfile(filename):
print(filename + ' has already been processed in the past. skipped.')
skip = True
skip = skip
else:
print(filename + ' is processed for the first times.')
out[out == [0, 0, 0]] = 255
imsave(filename, out)
if NewSlide == False:
HeatMap_bin = np.divide(HeatMap_bin, HeatMap_divider)
ImBin = HeatMap_bin * 0.0
if FLAGS.thresholds is not None:
thresholds = FLAGS.thresholds
thresholds = [float(x) for x in thresholds.split(',')]
ImBinT = ImBin
ImBinT[:, :, 0] = (HeatMap_bin[:, :, 0] - thresholds[0]) / (1 - thresholds[0])
ImBinT[:, :, 1] = (HeatMap_bin[:, :, 1] - thresholds[1]) / (1 - thresholds[1])
ImBinT[:, :, 2] = (HeatMap_bin[:, :, 2] - thresholds[2]) / (1 - thresholds[2])
ImBinT[:, :, 3] = (HeatMap_bin[:, :, 3] - thresholds[3]) / (1 - thresholds[3])
ImBinT[:, :, 4] = (HeatMap_bin[:, :, 4] - thresholds[4]) / (1 - thresholds[4])
Tmax = np.max(ImBinT, 2)
ImBin[:, :, 0] = ImBinT[:, :, 0] == Tmax
ImBin[:, :, 1] = ImBinT[:, :, 1] == Tmax
ImBin[:, :, 2] = ImBinT[:, :, 2] == Tmax
ImBin[:, :, 3] = ImBinT[:, :, 3] == Tmax
ImBin[:, :, 4] = ImBinT[:, :, 4] == Tmax
else:
Tmax = np.max(HeatMap_bin, 2)
ImBin[:, :, 0] = HeatMap_bin[:, :, 0] == Tmax
ImBin[:, :, 1] = HeatMap_bin[:, :, 1] == Tmax
ImBin[:, :, 2] = HeatMap_bin[:, :, 2] == Tmax
ImBin[:, :, 3] = HeatMap_bin[:, :, 3] == Tmax
ImBin[:, :, 4] = HeatMap_bin[:, :, 4] == Tmax
class_rgb = {}
class_rgb[0] = [0, 0, 0]
class_rgb[1] = [1.0, 0, 0]
class_rgb[2] = [1.0, 165.0 / 255.0, 0]
class_rgb[3] = [0, 0, 1.0]
class_rgb[4] = [186.0 / 255.0, 85.0 / 255.0, 211.0 / 255.0]
cl1 = sum(ImBin[HeatMap_divider[:, :, 1] * 1.0 + 0.0 > 0, 1])
cl2 = sum(ImBin[HeatMap_divider[:, :, 1] * 1.0 + 0.0 > 0, 2])
cl3 = sum(ImBin[HeatMap_divider[:, :, 1] * 1.0 + 0.0 > 0, 3])
ImBinf = np.zeros([ImBin.shape[0], ImBin.shape[1], 3])
for rgb in [0, 1, 2]:
ImBinf[:, :, rgb] = ImBin[:, :, 0] * class_rgb[0][rgb] + ImBin[:, :, 1] * class_rgb[1][rgb] + ImBin[:, :, 2] * class_rgb[2][rgb] + ImBin[:, :, 3] * class_rgb[3][rgb] + ImBin[:, :, 4] * class_rgb[4][rgb]
ImBinf[HeatMap_divider[:, :, 0:3] == 0] = 1
ImBinf = ImBinf.transpose((1, 0, 2))
ImBinf = ImBinf * 255.0
print('*************')
filename = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '.csv')
'\n\t\tt, b_tmp= cv2.threshold(np.array(np.uint8(ImBin[:,:,2])),120,255,cv2.THRESH_BINARY_INV)\n\t\tcontours,h = cv2.findContours(b_tmp,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\t\tEach_Tumor_Area = []\n\t\tEach_Tumor_Mean_Dia = []\n\t\tnIt = 0\n\t\tImBin = np.ascontiguousarray(ImBin)\n\t\tfor eachT in contours:\n\t\t\tnIt += 1\n\t\t\t# Each_Tumor_Area.append( (cv2.contourArea(eachT)+len(eachT)) * FLAGS.resample_factor * FLAGS.resample_factor)\n\t\t\tif len(eachT>2):\n\t\t\t\tEach_Tumor_Area.append(cv2.contourArea(eachT) * FLAGS.resample_factor * FLAGS.resample_factor)\n\t\t\telse:\n\t\t\t\tEach_Tumor_Area.append( len(eachT) * FLAGS.resample_factor * FLAGS.resample_factor)\n\t\t\tif len(eachT) >= 5:\n\t\t\t\tellipse = cv2.fitEllipse(eachT)\n\t\t\t\tMinAx = min(ellipse[1]) * FLAGS.resample_factor\n\t\t\t\tMaxAx = max(ellipse[1]) * FLAGS.resample_factor\n\t\t\t\tEach_Tumor_Mean_Dia.append( (MinAx + MaxAx) / 2 )\n\t\t\telse:\n\t\t\t\tEach_Tumor_Mean_Dia.append(np.sqrt( Each_Tumor_Area[-1] / np.pi) )\n\t\t\tM= cv2.moments(eachT)\n\t\t\tif M["m00"] != 0:\n\t\t\t\tcx= int(M[\'m10\']/M[\'m00\'])\n\t\t\t\tcy= int(M[\'m01\']/M[\'m00\'])\n\t\t\telse:\n\t\t\t\tcx = 0\n\t\t\t\tcy = 0\n\t\t\tImBin = cv2.putText(ImBin, text = str(nIt), org=(cx, cy), fontFace= cv2.FONT_HERSHEY_SIMPLEX, fontScale=3, color=(255,211,25), thickness=4, lineType=cv2.LINE_AA)\n\t\t\n\t\tNb_Tumor = len(Each_Tumor_Area)\n\t\timport csv\n\t\twith open(filename, \'w\', newline=\'\') as csvfile:\n\t\t\tcsvwriter = csv.writer(csvfile)\n\t\t\tfields = [\'imageName\', \'Percent_Tumor\', \'Avg_Tumor_Prob\', \'Nb_tumors\', \'Nb_tumors_1000px_Dia_or_more\', \'Nb_tumors_5000px_Dia_or_more\', \'Tumor_areas\', \'Tumor_avg_diam\'] \n\t\t\tcsvwriter.writerow(fields)\n\t\t\trows = [[cTileRootName, str(round(c1/(c1+c3)*100,2)), str(round(Avg_Prob_Class1*100, 2)), str(Nb_Tumor), str((np.asarray(Each_Tumor_Mean_Dia) > 1000).sum()), str((np.asarray(Each_Tumor_Mean_Dia) > 5000).sum()), str(Each_Tumor_Area), str(Each_Tumor_Mean_Dia)]]\n\t\t\tcsvwriter.writerows(rows)\t\n\t\t'
import csv
with open(filename, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
fields = ['imageName', 'Intraparenchymal', 'Leptomeningeal', 'Non tumor']
csvwriter.writerow(fields)
rows = [[slide, str(round(cl1, 1)), str(round(cl2, 1)), str(round(cl3, 1))]]
csvwriter.writerows(rows)
filename = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '_segmented.jpg')
imsave(filename, ImBinf * 255.0)
filename_tmp = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '_' + 'unknown' + '.jpg')
print(filename_tmp)
if os.path.exists(filename_tmp):
os.remove(filename_tmp)
skip = False
skip = skip
if skip:
print('slide done --')
continue
cc = 0
for tile in stats_dict[slide]['tiles'].keys():
cc += 1
extensions = ['.jpeg', '.jpg']
isError = True
dir_name = 'unknownTMP'
for extension in extensions:
for sub_dir in list(sub_dirs):
try:
test_filename = os.path.join(sub_dir, tile + extension)
im2 = imread(test_filename)
dir_name_old = dir_name
dir_name = os.path.basename(sub_dir)
isError = False
except:
isError = True
if isError == False:
break
if isError == False:
break
if isError == True:
continue
cTileRootName = slide
ixTile = int(stats_dict[slide]['tiles'][tile][0])
iyTile = int(stats_dict[slide]['tiles'][tile][1])
rTile = im2.shape[1]
cTile = im2.shape[0]
xTile = ixTile * (FLAGS.tiles_size - FLAGS.tiles_overlap)
yTile = iyTile * (FLAGS.tiles_size - FLAGS.tiles_overlap)
req_xLength = xTile + rTile
req_yLength = yTile + cTile
if FLAGS.resample_factor > 0:
rTile = int(rTile / FLAGS.resample_factor)
cTile = int(cTile / FLAGS.resample_factor)
if rTile <= 0:
im2s = im2
elif cTile <= 0:
im2s = im2
else:
im2s = np.array(Image.fromarray(im2).resize((cTile, rTile)))
rTile = im2s.shape[1]
cTile = im2s.shape[0]
xTile = int(xTile / FLAGS.resample_factor)
yTile = int(yTile / FLAGS.resample_factor)
req_xLength = xTile + rTile
req_yLength = yTile + cTile
else:
im2s = im2
lineProb = [float(x) for x in stats_dict[slide]['tiles'][tile][2]]
if FLAGS.Cmap == 'CancerType':
NumberOfClasses = len(lineProb)
class_all = []
sum_class = 0
for nC in range(1, NumberOfClasses):
class_all.append(float(lineProb[nC]))
sum_class = sum_class + float(lineProb[nC])
for nC in range(NumberOfClasses - 1):
class_all[nC] = class_all[nC] / sum_class
current_score = max(class_all)
oClass = class_all.index(max(class_all)) + 1
if FLAGS.thresholds is not None:
thresholds = FLAGS.thresholds
thresholds = [float(x) for x in thresholds.split(',')]
if len(thresholds) != len(class_all):
print('Error: There must be one threshold per class:')
probDiff = []
for nC in range(len(class_all)):
probDiff.append(class_all[nC] - thresholds[nC])
oClass = probDiff.index(max(probDiff)) + 1
current_score = class_all[oClass - 1]
score_correction = thresholds[oClass - 1]
else:
score_correction = 1.0 / len(class_all)
if oClass == 1:
c = mcolors.ColorConverter().to_rgb
cmap = make_colormap([c('white'), c('black')])
elif oClass == 2:
c = mcolors.ColorConverter().to_rgb
cmap = make_colormap([c('white'), c('red')])
elif oClass == 3:
c = mcolors.ColorConverter().to_rgb
cmap = make_colormap([c('white'), c('orange')])
elif oClass == 4:
c = mcolors.ColorConverter().to_rgb
cmap = make_colormap([c('white'), c('blue')])
elif oClass == 5:
c = mcolors.ColorConverter().to_rgb
cmap = make_colormap([c('white'), c('mediumorchid')])
else:
cmap = plt.get_cmap('Greens')
(oClass, cmap, current_score, class_prob) = (oClass, cmap, (current_score - score_correction) / (1.0 - score_correction), [class_all[0], class_all[1], class_all[2], class_all[3], class_all[4]])
if current_score < 0:
print('No probability found')
else:
WholeSlide_0[xTile:req_xLength, yTile:req_yLength, :] = np.swapaxes(im2s, 0, 1)
heattile = np.ones([req_xLength - xTile, req_yLength - yTile]) * current_score
heattile = cmap(heattile)
heattile = heattile[:, :, 0:3]
HeatMap_0[xTile:req_xLength, yTile:req_yLength, :] = HeatMap_0[xTile:req_xLength, yTile:req_yLength, :] + heattile
HeatMap_divider[xTile:req_xLength, yTile:req_yLength, :] = HeatMap_divider[xTile:req_xLength, yTile:req_yLength, :] + 1
HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 0] = HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 0] + np.ones([req_xLength - xTile, req_yLength - yTile]) * class_prob[0]
HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 1] = HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 1] + np.ones([req_xLength - xTile, req_yLength - yTile]) * class_prob[1]
HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 2] = HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 2] + np.ones([req_xLength - xTile, req_yLength - yTile]) * class_prob[2]
HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 3] = HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 3] + np.ones([req_xLength - xTile, req_yLength - yTile]) * class_prob[3]
HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 4] = HeatMap_bin[xTile:req_xLength, yTile:req_yLength, 4] + np.ones([req_xLength - xTile, req_yLength - yTile]) * class_prob[4]
if cc % 1000 == 0:
print('tile time (sec): ' + str((time.time() - t) / cc))
NewSlide = False
HeatMap_divider = HeatMap_divider * 1.0 + 0.0
HeatMap_0 = HeatMap_0
HeatMap_divider[HeatMap_divider == 0] = 1.0
HeatMap_0 = np.divide(HeatMap_0, HeatMap_divider[:, :, 0:3])
alpha = 0.33
out = HeatMap_0 * 255 * (1.0 - alpha) + WholeSlide_0 * alpha
out = out.transpose((1, 0, 2))
heatmap_path = os.path.join(FLAGS.output_dir, 'heatmaps')
if os.path.isdir(heatmap_path):
pass
else:
os.makedirs(heatmap_path)
filename = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '.jpg')
if NewSlide:
if os.path.isfile(filename):
print(filename + ' has already been processed in the past. skipped.')
skip = True
skip = skip
else:
print(filename + ' is processed for the first times.')
out[out == [0, 0, 0]] = 255
imsave(filename, out)
if NewSlide == False:
HeatMap_bin = np.divide(HeatMap_bin, HeatMap_divider)
ImBin = HeatMap_bin * 0.0
if FLAGS.thresholds is not None:
thresholds = FLAGS.thresholds
thresholds = [float(x) for x in thresholds.split(',')]
ImBinT = ImBin
ImBinT[:, :, 0] = (HeatMap_bin[:, :, 0] - thresholds[0]) / (1 - thresholds[0])
ImBinT[:, :, 1] = (HeatMap_bin[:, :, 1] - thresholds[1]) / (1 - thresholds[1])
ImBinT[:, :, 2] = (HeatMap_bin[:, :, 2] - thresholds[2]) / (1 - thresholds[2])
ImBinT[:, :, 3] = (HeatMap_bin[:, :, 3] - thresholds[3]) / (1 - thresholds[3])
ImBinT[:, :, 4] = (HeatMap_bin[:, :, 4] - thresholds[4]) / (1 - thresholds[4])
Tmax = np.max(ImBinT, 2)
ImBin[:, :, 0] = ImBinT[:, :, 0] == Tmax
ImBin[:, :, 1] = ImBinT[:, :, 1] == Tmax
ImBin[:, :, 2] = ImBinT[:, :, 2] == Tmax
ImBin[:, :, 3] = ImBinT[:, :, 3] == Tmax
ImBin[:, :, 4] = ImBinT[:, :, 4] == Tmax
else:
Tmax = np.max(HeatMap_bin, 2)
ImBin[:, :, 0] = HeatMap_bin[:, :, 0] == Tmax
ImBin[:, :, 1] = HeatMap_bin[:, :, 1] == Tmax
ImBin[:, :, 2] = HeatMap_bin[:, :, 2] == Tmax
ImBin[:, :, 3] = HeatMap_bin[:, :, 3] == Tmax
ImBin[:, :, 4] = HeatMap_bin[:, :, 4] == Tmax
class_rgb = {}
class_rgb[0] = [0, 0, 0]
class_rgb[1] = [1.0, 0, 0]
class_rgb[2] = [1.0, 165.0 / 255.0, 0]
class_rgb[3] = [0, 0, 1.0]
class_rgb[4] = [186.0 / 255.0, 85.0 / 255.0, 211.0 / 255.0]
cl1 = sum(ImBin[HeatMap_divider[:, :, 1] * 1.0 + 0.0 > 0, 1])
cl2 = sum(ImBin[HeatMap_divider[:, :, 1] * 1.0 + 0.0 > 0, 2])
cl3 = sum(ImBin[HeatMap_divider[:, :, 1] * 1.0 + 0.0 > 0, 3])
ImBinf = np.zeros([ImBin.shape[0], ImBin.shape[1], 3])
for rgb in [0, 1, 2]:
ImBinf[:, :, rgb] = ImBin[:, :, 0] * class_rgb[0][rgb] + ImBin[:, :, 1] * class_rgb[1][rgb] + ImBin[:, :, 2] * class_rgb[2][rgb] + ImBin[:, :, 3] * class_rgb[3][rgb] + ImBin[:, :, 4] * class_rgb[4][rgb]
ImBinf[HeatMap_divider[:, :, 0:3] == 0] = 1
ImBinf = ImBinf.transpose((1, 0, 2))
ImBinf = ImBinf * 255.0
print('*************')
filename = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '.csv')
'\n\t\tt, b_tmp= cv2.threshold(np.array(np.uint8(ImBin[:,:,2])),120,255,cv2.THRESH_BINARY_INV)\n\t\tcontours,h = cv2.findContours(b_tmp,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\t\tEach_Tumor_Area = []\n\t\tEach_Tumor_Mean_Dia = []\n\t\tnIt = 0\n\t\tImBin = np.ascontiguousarray(ImBin)\n\t\tfor eachT in contours:\n\t\t\tnIt += 1\n\t\t\t# Each_Tumor_Area.append( (cv2.contourArea(eachT)+len(eachT)) * FLAGS.resample_factor * FLAGS.resample_factor)\n\t\t\tif len(eachT>2):\n\t\t\t\tEach_Tumor_Area.append(cv2.contourArea(eachT) * FLAGS.resample_factor * FLAGS.resample_factor)\n\t\t\telse:\n\t\t\t\tEach_Tumor_Area.append( len(eachT) * FLAGS.resample_factor * FLAGS.resample_factor)\n\t\t\tif len(eachT) >= 5:\n\t\t\t\tellipse = cv2.fitEllipse(eachT)\n\t\t\t\tMinAx = min(ellipse[1]) * FLAGS.resample_factor\n\t\t\t\tMaxAx = max(ellipse[1]) * FLAGS.resample_factor\n\t\t\t\tEach_Tumor_Mean_Dia.append( (MinAx + MaxAx) / 2 )\n\t\t\telse:\n\t\t\t\tEach_Tumor_Mean_Dia.append(np.sqrt( Each_Tumor_Area[-1] / np.pi) )\n\t\t\tM= cv2.moments(eachT)\n\t\t\tif M["m00"] != 0:\n\t\t\t\tcx= int(M[\'m10\']/M[\'m00\'])\n\t\t\t\tcy= int(M[\'m01\']/M[\'m00\'])\n\t\t\telse:\n\t\t\t\tcx = 0\n\t\t\t\tcy = 0\n\t\t\tImBin = cv2.putText(ImBin, text = str(nIt), org=(cx, cy), fontFace= cv2.FONT_HERSHEY_SIMPLEX, fontScale=3, color=(255,211,25), thickness=4, lineType=cv2.LINE_AA)\n\t\t\n\t\tNb_Tumor = len(Each_Tumor_Area)\n\t\timport csv\n\t\twith open(filename, \'w\', newline=\'\') as csvfile:\n\t\t\tcsvwriter = csv.writer(csvfile)\n\t\t\tfields = [\'imageName\', \'Percent_Tumor\', \'Avg_Tumor_Prob\', \'Nb_tumors\', \'Nb_tumors_1000px_Dia_or_more\', \'Nb_tumors_5000px_Dia_or_more\', \'Tumor_areas\', \'Tumor_avg_diam\'] \n\t\t\tcsvwriter.writerow(fields)\n\t\t\trows = [[cTileRootName, str(round(c1/(c1+c3)*100,2)), str(round(Avg_Prob_Class1*100, 2)), str(Nb_Tumor), str((np.asarray(Each_Tumor_Mean_Dia) > 1000).sum()), str((np.asarray(Each_Tumor_Mean_Dia) > 5000).sum()), str(Each_Tumor_Area), str(Each_Tumor_Mean_Dia)]]\n\t\t\tcsvwriter.writerows(rows)\t\n\t\t'
import csv
with open(filename, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
fields = ['imageName', 'Intraparenchymal', 'Leptomeningeal', 'Non tumor']
csvwriter.writerow(fields)
rows = [[slide, str(round(cl1, 1)), str(round(cl2, 1)), str(round(cl3, 1))]]
csvwriter.writerows(rows)
filename = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '_segmented.jpg')
imsave(filename, ImBinf * 255.0)
filename_tmp = os.path.join(heatmap_path, 'heatmap_' + FLAGS.Cmap + '_' + slide + '_' + 'unknown' + '.jpg')
print(filename_tmp)
if os.path.exists(filename_tmp):
os.remove(filename_tmp)
skip = False
skip = skip
print('slide time (min): ' + str((time.time() - t) / 60))
|
DeepPATH
|
positive
|
def dict_literal_handle(self, tokens):
"""Handle {**d1, **d2}."""
if not tokens:
return '{}'
<DeepExtract>
groups = [[]]
has_star = False
has_comma = False
for tok_grp in tokens:
if tok_grp == ',':
has_comma = True
elif len(tok_grp) == 1:
groups[-1].append(tok_grp[0])
elif len(tok_grp) == 2:
internal_assert(not tok_grp[0].lstrip('*'), 'invalid star expr item signifier', tok_grp[0])
has_star = True
groups.append(tok_grp[1])
groups.append([])
else:
raise CoconutInternalException('invalid testlist_star_expr tokens', tokens)
if not groups[-1]:
groups.pop()
(groups, has_star, _) = (groups, has_star, has_comma)
</DeepExtract>
if not has_star:
<DeepExtract>
if not len(groups) == 1 or callable(len(groups) == 1):
internal_assert(len(groups) == 1, msg, item, exc_maker=partial(self.make_internal_syntax_err, 'dict_literal group splitting failed on', tokens))
</DeepExtract>
return '{' + ', '.join(groups[0]) + '}'
elif self.target_info >= (3, 5):
to_literal = []
for g in groups:
if isinstance(g, list):
to_literal.extend(g)
else:
to_literal.append('**' + g)
return '{' + ', '.join(to_literal) + '}'
else:
to_merge = []
for g in groups:
if isinstance(g, list):
to_merge.append('{' + ', '.join(g) + '}')
else:
to_merge.append(g)
return '_coconut_dict_merge(' + ', '.join(to_merge) + ')'
|
def dict_literal_handle(self, tokens):
"""Handle {**d1, **d2}."""
if not tokens:
return '{}'
groups = [[]]
has_star = False
has_comma = False
for tok_grp in tokens:
if tok_grp == ',':
has_comma = True
elif len(tok_grp) == 1:
groups[-1].append(tok_grp[0])
elif len(tok_grp) == 2:
internal_assert(not tok_grp[0].lstrip('*'), 'invalid star expr item signifier', tok_grp[0])
has_star = True
groups.append(tok_grp[1])
groups.append([])
else:
raise CoconutInternalException('invalid testlist_star_expr tokens', tokens)
if not groups[-1]:
groups.pop()
(groups, has_star, _) = (groups, has_star, has_comma)
if not has_star:
if not len(groups) == 1 or callable(len(groups) == 1):
internal_assert(len(groups) == 1, msg, item, exc_maker=partial(self.make_internal_syntax_err, 'dict_literal group splitting failed on', tokens))
return '{' + ', '.join(groups[0]) + '}'
elif self.target_info >= (3, 5):
to_literal = []
for g in groups:
if isinstance(g, list):
to_literal.extend(g)
else:
to_literal.append('**' + g)
return '{' + ', '.join(to_literal) + '}'
else:
to_merge = []
for g in groups:
if isinstance(g, list):
to_merge.append('{' + ', '.join(g) + '}')
else:
to_merge.append(g)
return '_coconut_dict_merge(' + ', '.join(to_merge) + ')'
|
coconut
|
positive
|
def train(self, epochs, batch_size, criterion, criterion_nlu, criterion_nlg, save_epochs=10, teacher_forcing_ratio=0.5, tf_decay_rate=0.9, max_norm=0.25):
self.batches = 0
for idx in range(1, epochs + 1):
epoch_nlg_loss = epoch_nlu_loss = epoch_dual_loss = 0
batch_amount = 0
nlu_scorer = IntentPredSlotFillScorer()
nlg_scorer = SequenceScorer()
'\n pbar = tqdm(\n self.train_data_loader,\n total=len(self.train_data_loader),\n dynamic_ncols=True\n )\n '
pbar = tqdm(zip(self.train_nlg_data_loader, self.train_nlu_data_loader), total=len(self.train_nlg_data_loader), dynamic_ncols=True)
for (batch_nlg, batch_nlu) in pbar:
self.batches += 1
<DeepExtract>
if False:
self.nlg.eval()
else:
self.nlg.train()
attrs = (batch_nlg['slot_key'].clone().detach().to(self.device), torch.tensor(batch_nlg['slot_key_lens']).to(self.device), batch_nlg['slot_value'].clone().detach().to(self.device), torch.tensor(batch_nlg['slot_value_lens']).to(self.device), batch_nlg['intent'].clone().detach().to(self.device))
labels = batch_nlg['target'].clone().detach().to(self.device)
refs = batch_nlg['multi_refs']
(logits, outputs, decisions, semantic_embs) = self.nlg(attrs, _BOS, labels, beam_size=beam_size, tf_ratio=teacher_forcing_ratio if not False else 0.0)
(batch_size, _, seq_length, vocab_size) = logits.size()
outputs_indices = decisions[:, 0].detach().cpu().clone().numpy()
outputs_indices = np.argmax(outputs_indices, axis=-1)
if nlg_scorer:
labels_clone = labels.detach().cpu().numpy()
nlg_scorer.update(labels_clone, refs, outputs_indices)
(nlg_logits, nlg_outputs, nlg_targets) = (logits, outputs, labels)
</DeepExtract>
<DeepExtract>
if False:
self.nlu.eval()
else:
self.nlu.train()
inputs = batch_nlu['inputs'].to(self.device)
targets = batch_nlu['labels'].clone().detach().to(self.device)
intent_targets = batch_nlu['intent'].clone()
(slot_logits, outputs, decisions, intent_logits) = self.nlu(inputs, _BOS, labels=targets, beam_size=beam_size, tf_ratio=teacher_forcing_ratio if not False else 0.0)
outputs_indices = decisions[:, 0].detach().cpu().clone().numpy()
outputs_indices = np.argmax(outputs_indices, axis=-1)
slot_prediction = outputs_indices
if self.with_intent:
intent_prediction = torch.argmax(intent_logits.detach().cpu(), dim=1)
intent_prediction = intent_prediction.clone().numpy()
if nlu_scorer:
targets_clone = targets.detach().cpu().long().numpy()
targets_clone = [self.train_data_engine.untokenize_nlu_slot_seq(target) for target in targets_clone]
slot_prediction = [self.train_data_engine.untokenize_nlu_slot_seq(prediction) for prediction in slot_prediction]
nlu_scorer.update(targets_clone, slot_prediction, intent_labels=batch_nlu['intent'].clone().cpu().numpy() if self.with_intent else None, intent_prediction=intent_prediction if self.with_intent else None)
(nlu_logits, slot_prediction, nlu_targets, intent_logits, intent_prediction, intent_targets) = (slot_logits, outputs_indices, targets, intent_logits, intent_prediction, intent_targets)
</DeepExtract>
attrs = (batch_nlg['slot_key'].cuda(), batch_nlg['slot_key_lens'], batch_nlg['slot_value'].cuda(), batch_nlg['slot_value_lens'], batch_nlg['intent'].cuda())
(nlg_loss, nlu_loss, dual_loss) = criterion(nlg_logits.cpu(), nlg_outputs.cpu(), nlg_targets.cpu(), nlu_logits.cpu(), nlu_targets.cpu(), intent_logits.cpu(), intent_targets.cpu(), attrs)
if _has_inf_or_nan(nlg_loss) or _has_inf_or_nan(nlu_loss):
print('Overflow! Skip this batch %d.' % self.batches)
else:
nlg_loss.backward(retain_graph=True)
nlu_loss.backward(retain_graph=False)
clip_grad_norm_(self.nlg_parameters, 1.0)
clip_grad_norm_(self.nlu_parameters, 1.0)
self.nlg_optimizer.step()
self.nlu_optimizer.step()
epoch_nlu_loss += nlu_loss.item()
epoch_nlg_loss += nlg_loss.item()
epoch_dual_loss += dual_loss.item()
batch_amount += 1
self.nlg_optimizer.zero_grad()
self.nlu_optimizer.zero_grad()
pbar.set_postfix(ULoss='{:.4f}'.format(epoch_nlu_loss / batch_amount if batch_amount > 0 else 0.0), GLoss='{:.3f}'.format(epoch_nlg_loss / batch_amount if batch_amount > 0 else 0.0), DLoss='{:.4f}'.format(epoch_dual_loss / batch_amount if batch_amount > 0 else 0.0))
nlg_scorer.print_avg_scores()
nlu_scorer.print_avg_scores()
if idx % save_epochs == 0:
print_time_info('Epoch {}: save model...'.format(idx))
<DeepExtract>
nlu_path = os.path.join(self.model_dir, 'nlu.ckpt')
nlg_path = os.path.join(self.model_dir, 'nlg.ckpt')
torch.save(self.nlu, nlu_path)
torch.save(self.nlg, nlg_path)
print_time_info('Save model successfully')
</DeepExtract>
<DeepExtract>
filename = self.valid_log_path if False else self.train_log_path
nlu_loss = 'None' if nlu_loss is None else '{:.4f}'.format(nlu_loss)
nlg_loss = 'None' if nlg_loss is None else '{:.3f}'.format(nlg_loss)
if nlu_scorer is not None:
(intent_acc, slot_f1_p_r) = nlu_scorer.get_avg_scores()
intent_acc = '{:.4f}'.format(intent_acc)
slot_f1 = '{:.4f}'.format(slot_f1_p_r[0])
else:
slot_f1 = '-1.0'
if nlg_scorer is not None:
(_, bleu, _, rouge, _, _) = nlg_scorer.get_avg_scores()
bleu = '{:.4f}'.format(bleu)
rouge = ' '.join(['{:.4f}'.format(s) for s in rouge])
else:
(bleu, rouge) = ('-1.0', '-1.0 -1.0 -1.0')
with open(filename, 'a') as file:
file.write(f'{idx},{nlu_loss},{nlg_loss},{intent_acc},{slot_f1},{bleu},{rouge}\n')
</DeepExtract>
<DeepExtract>
nlu_loss = nlg_loss = None
nlu_scorer = nlg_scorer = None
batch_amount = 0
if True:
nlu_scorer = IntentPredSlotFillScorer(intent_acc=self.with_intent)
nlu_loss = 0
for (b_idx, batch) in enumerate(tqdm(self.test_nlu_data_loader)):
with torch.no_grad():
(batch_loss, batch_logits, slot_prediction, intent_logits, intent_prediction, _, _, _) = self.run_test_nlu_batch(batch, criterion_nlu, scorer=nlu_scorer, testing=True, beam_size=sample_size)
nlu_loss += batch_loss.item()
batch_amount += 1
nlu_loss /= batch_amount
nlu_scorer.print_avg_scores()
batch_amount = 0
if True:
nlg_scorer = SequenceScorer()
nlg_loss = 0
for (b_idx, batch) in enumerate(tqdm(self.test_nlg_data_loader)):
with torch.no_grad():
(batch_loss, batch_logits, batch_decode_result, _, _, _, _) = self.run_test_nlg_batch(batch, criterion_nlg, scorer=nlg_scorer, testing=True, teacher_forcing_ratio=0.0, beam_size=sample_size, result_path=os.path.join(os.path.join(self.log_dir, 'validation'), 'test.txt'))
nlg_loss += batch_loss.item()
batch_amount += 1
nlg_loss /= batch_amount
nlg_scorer.print_avg_scores()
self._record_log(epoch=idx, testing=True, nlu_loss=nlu_loss, nlg_loss=nlg_loss, nlu_scorer=nlu_scorer, nlg_scorer=nlg_scorer)
if True:
nlg_scorer.write_avg_scores_to_file(self.test_result_path)
if True:
nlu_scorer.write_avg_scores_to_file(self.test_result_path)
</DeepExtract>
criterion_nlu.epoch_end()
criterion_nlg.epoch_end()
teacher_forcing_ratio *= tf_decay_rate
|
def train(self, epochs, batch_size, criterion, criterion_nlu, criterion_nlg, save_epochs=10, teacher_forcing_ratio=0.5, tf_decay_rate=0.9, max_norm=0.25):
self.batches = 0
for idx in range(1, epochs + 1):
epoch_nlg_loss = epoch_nlu_loss = epoch_dual_loss = 0
batch_amount = 0
nlu_scorer = IntentPredSlotFillScorer()
nlg_scorer = SequenceScorer()
'\n pbar = tqdm(\n self.train_data_loader,\n total=len(self.train_data_loader),\n dynamic_ncols=True\n )\n '
pbar = tqdm(zip(self.train_nlg_data_loader, self.train_nlu_data_loader), total=len(self.train_nlg_data_loader), dynamic_ncols=True)
for (batch_nlg, batch_nlu) in pbar:
self.batches += 1
if False:
self.nlg.eval()
else:
self.nlg.train()
attrs = (batch_nlg['slot_key'].clone().detach().to(self.device), torch.tensor(batch_nlg['slot_key_lens']).to(self.device), batch_nlg['slot_value'].clone().detach().to(self.device), torch.tensor(batch_nlg['slot_value_lens']).to(self.device), batch_nlg['intent'].clone().detach().to(self.device))
labels = batch_nlg['target'].clone().detach().to(self.device)
refs = batch_nlg['multi_refs']
(logits, outputs, decisions, semantic_embs) = self.nlg(attrs, _BOS, labels, beam_size=beam_size, tf_ratio=teacher_forcing_ratio if not False else 0.0)
(batch_size, _, seq_length, vocab_size) = logits.size()
outputs_indices = decisions[:, 0].detach().cpu().clone().numpy()
outputs_indices = np.argmax(outputs_indices, axis=-1)
if nlg_scorer:
labels_clone = labels.detach().cpu().numpy()
nlg_scorer.update(labels_clone, refs, outputs_indices)
(nlg_logits, nlg_outputs, nlg_targets) = (logits, outputs, labels)
if False:
self.nlu.eval()
else:
self.nlu.train()
inputs = batch_nlu['inputs'].to(self.device)
targets = batch_nlu['labels'].clone().detach().to(self.device)
intent_targets = batch_nlu['intent'].clone()
(slot_logits, outputs, decisions, intent_logits) = self.nlu(inputs, _BOS, labels=targets, beam_size=beam_size, tf_ratio=teacher_forcing_ratio if not False else 0.0)
outputs_indices = decisions[:, 0].detach().cpu().clone().numpy()
outputs_indices = np.argmax(outputs_indices, axis=-1)
slot_prediction = outputs_indices
if self.with_intent:
intent_prediction = torch.argmax(intent_logits.detach().cpu(), dim=1)
intent_prediction = intent_prediction.clone().numpy()
if nlu_scorer:
targets_clone = targets.detach().cpu().long().numpy()
targets_clone = [self.train_data_engine.untokenize_nlu_slot_seq(target) for target in targets_clone]
slot_prediction = [self.train_data_engine.untokenize_nlu_slot_seq(prediction) for prediction in slot_prediction]
nlu_scorer.update(targets_clone, slot_prediction, intent_labels=batch_nlu['intent'].clone().cpu().numpy() if self.with_intent else None, intent_prediction=intent_prediction if self.with_intent else None)
(nlu_logits, slot_prediction, nlu_targets, intent_logits, intent_prediction, intent_targets) = (slot_logits, outputs_indices, targets, intent_logits, intent_prediction, intent_targets)
attrs = (batch_nlg['slot_key'].cuda(), batch_nlg['slot_key_lens'], batch_nlg['slot_value'].cuda(), batch_nlg['slot_value_lens'], batch_nlg['intent'].cuda())
(nlg_loss, nlu_loss, dual_loss) = criterion(nlg_logits.cpu(), nlg_outputs.cpu(), nlg_targets.cpu(), nlu_logits.cpu(), nlu_targets.cpu(), intent_logits.cpu(), intent_targets.cpu(), attrs)
if _has_inf_or_nan(nlg_loss) or _has_inf_or_nan(nlu_loss):
print('Overflow! Skip this batch %d.' % self.batches)
else:
nlg_loss.backward(retain_graph=True)
nlu_loss.backward(retain_graph=False)
clip_grad_norm_(self.nlg_parameters, 1.0)
clip_grad_norm_(self.nlu_parameters, 1.0)
self.nlg_optimizer.step()
self.nlu_optimizer.step()
epoch_nlu_loss += nlu_loss.item()
epoch_nlg_loss += nlg_loss.item()
epoch_dual_loss += dual_loss.item()
batch_amount += 1
self.nlg_optimizer.zero_grad()
self.nlu_optimizer.zero_grad()
pbar.set_postfix(ULoss='{:.4f}'.format(epoch_nlu_loss / batch_amount if batch_amount > 0 else 0.0), GLoss='{:.3f}'.format(epoch_nlg_loss / batch_amount if batch_amount > 0 else 0.0), DLoss='{:.4f}'.format(epoch_dual_loss / batch_amount if batch_amount > 0 else 0.0))
nlg_scorer.print_avg_scores()
nlu_scorer.print_avg_scores()
if idx % save_epochs == 0:
print_time_info('Epoch {}: save model...'.format(idx))
nlu_path = os.path.join(self.model_dir, 'nlu.ckpt')
nlg_path = os.path.join(self.model_dir, 'nlg.ckpt')
torch.save(self.nlu, nlu_path)
torch.save(self.nlg, nlg_path)
print_time_info('Save model successfully')
filename = self.valid_log_path if False else self.train_log_path
nlu_loss = 'None' if nlu_loss is None else '{:.4f}'.format(nlu_loss)
nlg_loss = 'None' if nlg_loss is None else '{:.3f}'.format(nlg_loss)
if nlu_scorer is not None:
(intent_acc, slot_f1_p_r) = nlu_scorer.get_avg_scores()
intent_acc = '{:.4f}'.format(intent_acc)
slot_f1 = '{:.4f}'.format(slot_f1_p_r[0])
else:
slot_f1 = '-1.0'
if nlg_scorer is not None:
(_, bleu, _, rouge, _, _) = nlg_scorer.get_avg_scores()
bleu = '{:.4f}'.format(bleu)
rouge = ' '.join(['{:.4f}'.format(s) for s in rouge])
else:
(bleu, rouge) = ('-1.0', '-1.0 -1.0 -1.0')
with open(filename, 'a') as file:
file.write(f'{idx},{nlu_loss},{nlg_loss},{intent_acc},{slot_f1},{bleu},{rouge}\n')
nlu_loss = nlg_loss = None
nlu_scorer = nlg_scorer = None
batch_amount = 0
if True:
nlu_scorer = IntentPredSlotFillScorer(intent_acc=self.with_intent)
nlu_loss = 0
for (b_idx, batch) in enumerate(tqdm(self.test_nlu_data_loader)):
with torch.no_grad():
(batch_loss, batch_logits, slot_prediction, intent_logits, intent_prediction, _, _, _) = self.run_test_nlu_batch(batch, criterion_nlu, scorer=nlu_scorer, testing=True, beam_size=sample_size)
nlu_loss += batch_loss.item()
batch_amount += 1
nlu_loss /= batch_amount
nlu_scorer.print_avg_scores()
batch_amount = 0
if True:
nlg_scorer = SequenceScorer()
nlg_loss = 0
for (b_idx, batch) in enumerate(tqdm(self.test_nlg_data_loader)):
with torch.no_grad():
(batch_loss, batch_logits, batch_decode_result, _, _, _, _) = self.run_test_nlg_batch(batch, criterion_nlg, scorer=nlg_scorer, testing=True, teacher_forcing_ratio=0.0, beam_size=sample_size, result_path=os.path.join(os.path.join(self.log_dir, 'validation'), 'test.txt'))
nlg_loss += batch_loss.item()
batch_amount += 1
nlg_loss /= batch_amount
nlg_scorer.print_avg_scores()
self._record_log(epoch=idx, testing=True, nlu_loss=nlu_loss, nlg_loss=nlg_loss, nlu_scorer=nlu_scorer, nlg_scorer=nlg_scorer)
if True:
nlg_scorer.write_avg_scores_to_file(self.test_result_path)
if True:
nlu_scorer.write_avg_scores_to_file(self.test_result_path)
criterion_nlu.epoch_end()
criterion_nlg.epoch_end()
teacher_forcing_ratio *= tf_decay_rate
|
DuaLUG
|
positive
|
def unpack(self, packet):
"""parse Connection Confirm Packet (header only)"""
try:
<DeepExtract>
try:
header = unpack('!BBH', '!BBHHB'[:4])
except struct.error:
raise S7ProtocolError('Unknown TPKT format')
self.data = '!BBHHB'[4:4 + header[2]]
(size, pdu_type, self.dst_ref, self.src_ref, _) = self
</DeepExtract>
except struct.error:
raise S7ProtocolError('Wrong CC packet format')
if len(packet) != size + 1:
raise S7ProtocolError('Wrong CC packet size')
if pdu_type != 208:
raise S7ProtocolError('Not a CC packet')
return self
|
def unpack(self, packet):
"""parse Connection Confirm Packet (header only)"""
try:
try:
header = unpack('!BBH', '!BBHHB'[:4])
except struct.error:
raise S7ProtocolError('Unknown TPKT format')
self.data = '!BBHHB'[4:4 + header[2]]
(size, pdu_type, self.dst_ref, self.src_ref, _) = self
except struct.error:
raise S7ProtocolError('Wrong CC packet format')
if len(packet) != size + 1:
raise S7ProtocolError('Wrong CC packet size')
if pdu_type != 208:
raise S7ProtocolError('Not a CC packet')
return self
|
conpot
|
positive
|
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
"""
Apply resize on the full-image segmentation.
Args:
segmentation (ndarray): of shape HxW. The array should have integer
or bool dtype.
Returns:
ndarray: resized segmentation.
"""
<DeepExtract>
pass
</DeepExtract>
return segmentation
|
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
"""
Apply resize on the full-image segmentation.
Args:
segmentation (ndarray): of shape HxW. The array should have integer
or bool dtype.
Returns:
ndarray: resized segmentation.
"""
pass
return segmentation
|
DynamicRouting
|
positive
|
def test_can_logout(self):
<DeepExtract>
response = self._attempt_login()
response = self.session.get(response.url)
matches = self.REGEX_KEYCLOAK_LOGIN_ACTION.search(response.text)
auth_url = html.unescape(matches.groups(1)[0])
response = self.session.post(auth_url, data={'username': 'nonadmin', 'password': 'testpassword'})
return response
</DeepExtract>
<DeepExtract>
response = self._get_clusters()
matches = self.REGEX_CSRF_TOKEN.search(response.text)
csrftoken = html.unescape(matches.groups(1)[0])
url = reverse('oidc_logout')
response = self.session.post(f'http://localhost:8000{url}', headers={'X-CSRFToken': csrftoken})
</DeepExtract>
self.assertIn('auth/realms/master/protocol/openid-connect/logout', response.url)
|
def test_can_logout(self):
response = self._attempt_login()
response = self.session.get(response.url)
matches = self.REGEX_KEYCLOAK_LOGIN_ACTION.search(response.text)
auth_url = html.unescape(matches.groups(1)[0])
response = self.session.post(auth_url, data={'username': 'nonadmin', 'password': 'testpassword'})
return response
response = self._get_clusters()
matches = self.REGEX_CSRF_TOKEN.search(response.text)
csrftoken = html.unescape(matches.groups(1)[0])
url = reverse('oidc_logout')
response = self.session.post(f'http://localhost:8000{url}', headers={'X-CSRFToken': csrftoken})
self.assertIn('auth/realms/master/protocol/openid-connect/logout', response.url)
|
cloudman
|
positive
|
def rpn_net(self, feature_pyramid, name):
rpn_delta_boxes_list = []
rpn_scores_list = []
rpn_probs_list = []
with tf.variable_scope(name):
with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY)):
for level in cfgs.LEVEL:
if cfgs.SHARE_NET:
reuse_flag = None if level == 'P3' else True
scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'rpn_classification', 'rpn_regression']
else:
reuse_flag = None
scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level, 'rpn_classification_' + level, 'rpn_regression_' + level]
<DeepExtract>
rpn_conv2d_3x3 = feature_pyramid[level]
for i in range(4):
rpn_conv2d_3x3 = slim.conv2d(inputs=rpn_conv2d_3x3, num_outputs=256, kernel_size=[3, 3], stride=1, activation_fn=tf.nn.relu, weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER, scope='{}_{}'.format(scope_list[0], i), reuse=reuse_flag)
rpn_box_scores = slim.conv2d(rpn_conv2d_3x3, num_outputs=cfgs.CLASS_NUM * self.num_anchors_per_location, kernel_size=[3, 3], stride=1, weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.FINAL_CONV_BIAS_INITIALIZER, scope=scope_list[2], activation_fn=None, reuse=reuse_flag)
rpn_box_scores = tf.reshape(rpn_box_scores, [-1, cfgs.CLASS_NUM], name='rpn_{}_classification_reshape'.format(level))
rpn_box_probs = tf.sigmoid(rpn_box_scores, name='rpn_{}_classification_sigmoid'.format(level))
(rpn_box_scores, rpn_box_probs) = (rpn_box_scores, rpn_box_probs)
</DeepExtract>
<DeepExtract>
rpn_delta_boxes = feature_pyramid[level]
for i in range(4):
rpn_delta_boxes = slim.conv2d(inputs=rpn_delta_boxes, num_outputs=256, kernel_size=[3, 3], weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER, stride=1, activation_fn=tf.nn.relu, scope='{}_{}'.format(scope_list[1], i), reuse=reuse_flag)
rpn_delta_boxes = slim.conv2d(rpn_delta_boxes, num_outputs=5 * self.num_anchors_per_location, kernel_size=[3, 3], stride=1, weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER, scope=scope_list[3], activation_fn=None, reuse=reuse_flag)
rpn_delta_boxes = tf.reshape(rpn_delta_boxes, [-1, 5], name='rpn_{}_regression_reshape'.format(level))
rpn_delta_boxes = rpn_delta_boxes
</DeepExtract>
rpn_scores_list.append(rpn_box_scores)
rpn_probs_list.append(rpn_box_probs)
rpn_delta_boxes_list.append(rpn_delta_boxes)
return (rpn_delta_boxes_list, rpn_scores_list, rpn_probs_list)
|
def rpn_net(self, feature_pyramid, name):
rpn_delta_boxes_list = []
rpn_scores_list = []
rpn_probs_list = []
with tf.variable_scope(name):
with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY)):
for level in cfgs.LEVEL:
if cfgs.SHARE_NET:
reuse_flag = None if level == 'P3' else True
scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'rpn_classification', 'rpn_regression']
else:
reuse_flag = None
scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level, 'rpn_classification_' + level, 'rpn_regression_' + level]
rpn_conv2d_3x3 = feature_pyramid[level]
for i in range(4):
rpn_conv2d_3x3 = slim.conv2d(inputs=rpn_conv2d_3x3, num_outputs=256, kernel_size=[3, 3], stride=1, activation_fn=tf.nn.relu, weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER, scope='{}_{}'.format(scope_list[0], i), reuse=reuse_flag)
rpn_box_scores = slim.conv2d(rpn_conv2d_3x3, num_outputs=cfgs.CLASS_NUM * self.num_anchors_per_location, kernel_size=[3, 3], stride=1, weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.FINAL_CONV_BIAS_INITIALIZER, scope=scope_list[2], activation_fn=None, reuse=reuse_flag)
rpn_box_scores = tf.reshape(rpn_box_scores, [-1, cfgs.CLASS_NUM], name='rpn_{}_classification_reshape'.format(level))
rpn_box_probs = tf.sigmoid(rpn_box_scores, name='rpn_{}_classification_sigmoid'.format(level))
(rpn_box_scores, rpn_box_probs) = (rpn_box_scores, rpn_box_probs)
rpn_delta_boxes = feature_pyramid[level]
for i in range(4):
rpn_delta_boxes = slim.conv2d(inputs=rpn_delta_boxes, num_outputs=256, kernel_size=[3, 3], weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER, stride=1, activation_fn=tf.nn.relu, scope='{}_{}'.format(scope_list[1], i), reuse=reuse_flag)
rpn_delta_boxes = slim.conv2d(rpn_delta_boxes, num_outputs=5 * self.num_anchors_per_location, kernel_size=[3, 3], stride=1, weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER, scope=scope_list[3], activation_fn=None, reuse=reuse_flag)
rpn_delta_boxes = tf.reshape(rpn_delta_boxes, [-1, 5], name='rpn_{}_regression_reshape'.format(level))
rpn_delta_boxes = rpn_delta_boxes
rpn_scores_list.append(rpn_box_scores)
rpn_probs_list.append(rpn_box_probs)
rpn_delta_boxes_list.append(rpn_delta_boxes)
return (rpn_delta_boxes_list, rpn_scores_list, rpn_probs_list)
|
DCL_RetinaNet_Tensorflow
|
positive
|
@property
def bacnet_properties(self):
if not self.properties.bacnet_properties:
<DeepExtract>
try:
res = self.properties.device.properties.network.readMultiple('{} {} {} all'.format(self.properties.device.properties.address, self.properties.type, str(self.properties.address)), vendor_id=self.properties.device.properties.vendor_id, show_property_name=True)
for each in res:
if not each:
continue
(v, prop) = each
self.properties.bacnet_properties[prop] = v
except Exception as e:
raise Exception('Problem reading : {} | {}'.format(self.properties.name, e))
</DeepExtract>
return self.properties.bacnet_properties
|
@property
def bacnet_properties(self):
if not self.properties.bacnet_properties:
try:
res = self.properties.device.properties.network.readMultiple('{} {} {} all'.format(self.properties.device.properties.address, self.properties.type, str(self.properties.address)), vendor_id=self.properties.device.properties.vendor_id, show_property_name=True)
for each in res:
if not each:
continue
(v, prop) = each
self.properties.bacnet_properties[prop] = v
except Exception as e:
raise Exception('Problem reading : {} | {}'.format(self.properties.name, e))
return self.properties.bacnet_properties
|
BAC0
|
positive
|
def execute(self, context):
settings = context.user_preferences.addons[__name__].preferences
merge_hide = settings.merge_hide
merge_position = settings.merge_position
do_hide = False
do_hide_shader = False
if merge_hide == 'ALWAYS':
do_hide = True
do_hide_shader = True
elif merge_hide == 'NON_SHADER':
do_hide = True
tree_type = context.space_data.node_tree.type
if tree_type == 'COMPOSITING':
node_type = 'CompositorNode'
elif tree_type == 'SHADER':
node_type = 'ShaderNode'
elif tree_type == 'TEXTURE':
node_type = 'TextureNode'
<DeepExtract>
tree = context.space_data.node_tree
if tree.nodes.active:
while tree.nodes.active != context.active_node:
tree = tree.nodes.active.node_tree
(nodes, links) = (tree.nodes, tree.links)
</DeepExtract>
mode = self.mode
merge_type = self.merge_type
if (merge_type == 'ZCOMBINE' or merge_type == 'ALPHAOVER') and tree_type != 'COMPOSITING':
merge_type = 'MIX'
mode = 'MIX'
selected_mix = []
selected_shader = []
selected_math = []
selected_z = []
selected_alphaover = []
for (i, node) in enumerate(nodes):
if node.select and node.outputs:
if merge_type == 'AUTO':
for (type, types_list, dst) in (('SHADER', ('MIX', 'ADD'), selected_shader), ('RGBA', [t[0] for t in blend_types], selected_mix), ('VALUE', [t[0] for t in operations], selected_math)):
output_type = node.outputs[0].type
valid_mode = mode in types_list
if output_type != 'SHADER' and mode == 'MIX':
output_type = 'RGBA'
valid_mode = True
if output_type == type and valid_mode:
dst.append([i, node.location.x, node.location.y, node.dimensions.x, node.hide])
else:
for (type, types_list, dst) in (('SHADER', ('MIX', 'ADD'), selected_shader), ('MIX', [t[0] for t in blend_types], selected_mix), ('MATH', [t[0] for t in operations], selected_math), ('ZCOMBINE', ('MIX',), selected_z), ('ALPHAOVER', ('MIX',), selected_alphaover)):
if merge_type == type and mode in types_list:
dst.append([i, node.location.x, node.location.y, node.dimensions.x, node.hide])
if selected_mix and selected_math and (merge_type == 'AUTO'):
selected_mix += selected_math
selected_math = []
for nodes_list in [selected_mix, selected_shader, selected_math, selected_z, selected_alphaover]:
if nodes_list:
count_before = len(nodes)
nodes_list.sort(key=lambda k: k[1], reverse=True)
loc_x = nodes_list[0][1] + nodes_list[0][3] + 70
nodes_list.sort(key=lambda k: k[2], reverse=True)
if merge_position == 'CENTER':
loc_y = (nodes_list[len(nodes_list) - 1][2] + nodes_list[len(nodes_list) - 2][2]) / 2
if nodes_list[len(nodes_list) - 1][-1] == True:
if do_hide:
loc_y += 40
else:
loc_y += 80
else:
loc_y = nodes_list[len(nodes_list) - 1][2]
offset_y = 100
if not do_hide:
offset_y = 200
if nodes_list == selected_shader and (not do_hide_shader):
offset_y = 150.0
the_range = len(nodes_list) - 1
if len(nodes_list) == 1:
the_range = 1
for i in range(the_range):
if nodes_list == selected_mix:
add_type = node_type + 'MixRGB'
add = nodes.new(add_type)
add.blend_type = mode
if mode != 'MIX':
add.inputs[0].default_value = 1.0
add.show_preview = False
add.hide = do_hide
if do_hide:
loc_y = loc_y - 50
first = 1
second = 2
add.width_hidden = 100.0
elif nodes_list == selected_math:
add_type = node_type + 'Math'
add = nodes.new(add_type)
add.operation = mode
add.hide = do_hide
if do_hide:
loc_y = loc_y - 50
first = 0
second = 1
add.width_hidden = 100.0
elif nodes_list == selected_shader:
if mode == 'MIX':
add_type = node_type + 'MixShader'
add = nodes.new(add_type)
add.hide = do_hide_shader
if do_hide_shader:
loc_y = loc_y - 50
first = 1
second = 2
add.width_hidden = 100.0
elif mode == 'ADD':
add_type = node_type + 'AddShader'
add = nodes.new(add_type)
add.hide = do_hide_shader
if do_hide_shader:
loc_y = loc_y - 50
first = 0
second = 1
add.width_hidden = 100.0
elif nodes_list == selected_z:
add = nodes.new('CompositorNodeZcombine')
add.show_preview = False
add.hide = do_hide
if do_hide:
loc_y = loc_y - 50
first = 0
second = 2
add.width_hidden = 100.0
elif nodes_list == selected_alphaover:
add = nodes.new('CompositorNodeAlphaOver')
add.show_preview = False
add.hide = do_hide
if do_hide:
loc_y = loc_y - 50
first = 1
second = 2
add.width_hidden = 100.0
add.location = (loc_x, loc_y)
loc_y += offset_y
add.select = True
count_adds = i + 1
count_after = len(nodes)
index = count_after - 1
first_selected = nodes[nodes_list[0][0]]
last_add = nodes[count_before]
if len(nodes_list) == 2:
if not first_selected.outputs[0].links:
second_selected = nodes[nodes_list[1][0]]
for ss_link in second_selected.outputs[0].links:
invalid_i = [n[0] for n in selected_mix + selected_math + selected_shader + selected_z]
if ss_link.to_node not in [nodes[i] for i in invalid_i]:
links.new(last_add.outputs[0], ss_link.to_socket)
for fs_link in first_selected.outputs[0].links:
invalid_i = [n[0] for n in selected_mix + selected_math + selected_shader + selected_z]
if fs_link.to_node not in [nodes[i] for i in invalid_i]:
links.new(last_add.outputs[0], fs_link.to_socket)
node_to = nodes[count_after - 1]
links.new(first_selected.outputs[0], node_to.inputs[first])
if node_to.type == 'ZCOMBINE':
for fs_out in first_selected.outputs:
if fs_out != first_selected.outputs[0] and fs_out.name in ('Z', 'Depth'):
links.new(fs_out, node_to.inputs[1])
break
for i in range(count_adds):
if i < count_adds - 1:
node_from = nodes[index]
node_to = nodes[index - 1]
node_to_input_i = first
node_to_z_i = 1
links.new(node_from.outputs[0], node_to.inputs[node_to_input_i])
if node_to.type == 'ZCOMBINE':
for from_out in node_from.outputs:
if from_out != node_from.outputs[0] and from_out.name in ('Z', 'Depth'):
links.new(from_out, node_to.inputs[node_to_z_i])
if len(nodes_list) > 1:
node_from = nodes[nodes_list[i + 1][0]]
node_to = nodes[index]
node_to_input_i = second
node_to_z_i = 3
links.new(node_from.outputs[0], node_to.inputs[node_to_input_i])
if node_to.type == 'ZCOMBINE':
for from_out in node_from.outputs:
if from_out != node_from.outputs[0] and from_out.name in ('Z', 'Depth'):
links.new(from_out, node_to.inputs[node_to_z_i])
index -= 1
nodes.active = last_add
for (i, x, y, dx, h) in nodes_list:
nodes[i].select = False
return {'FINISHED'}
|
def execute(self, context):
settings = context.user_preferences.addons[__name__].preferences
merge_hide = settings.merge_hide
merge_position = settings.merge_position
do_hide = False
do_hide_shader = False
if merge_hide == 'ALWAYS':
do_hide = True
do_hide_shader = True
elif merge_hide == 'NON_SHADER':
do_hide = True
tree_type = context.space_data.node_tree.type
if tree_type == 'COMPOSITING':
node_type = 'CompositorNode'
elif tree_type == 'SHADER':
node_type = 'ShaderNode'
elif tree_type == 'TEXTURE':
node_type = 'TextureNode'
tree = context.space_data.node_tree
if tree.nodes.active:
while tree.nodes.active != context.active_node:
tree = tree.nodes.active.node_tree
(nodes, links) = (tree.nodes, tree.links)
mode = self.mode
merge_type = self.merge_type
if (merge_type == 'ZCOMBINE' or merge_type == 'ALPHAOVER') and tree_type != 'COMPOSITING':
merge_type = 'MIX'
mode = 'MIX'
selected_mix = []
selected_shader = []
selected_math = []
selected_z = []
selected_alphaover = []
for (i, node) in enumerate(nodes):
if node.select and node.outputs:
if merge_type == 'AUTO':
for (type, types_list, dst) in (('SHADER', ('MIX', 'ADD'), selected_shader), ('RGBA', [t[0] for t in blend_types], selected_mix), ('VALUE', [t[0] for t in operations], selected_math)):
output_type = node.outputs[0].type
valid_mode = mode in types_list
if output_type != 'SHADER' and mode == 'MIX':
output_type = 'RGBA'
valid_mode = True
if output_type == type and valid_mode:
dst.append([i, node.location.x, node.location.y, node.dimensions.x, node.hide])
else:
for (type, types_list, dst) in (('SHADER', ('MIX', 'ADD'), selected_shader), ('MIX', [t[0] for t in blend_types], selected_mix), ('MATH', [t[0] for t in operations], selected_math), ('ZCOMBINE', ('MIX',), selected_z), ('ALPHAOVER', ('MIX',), selected_alphaover)):
if merge_type == type and mode in types_list:
dst.append([i, node.location.x, node.location.y, node.dimensions.x, node.hide])
if selected_mix and selected_math and (merge_type == 'AUTO'):
selected_mix += selected_math
selected_math = []
for nodes_list in [selected_mix, selected_shader, selected_math, selected_z, selected_alphaover]:
if nodes_list:
count_before = len(nodes)
nodes_list.sort(key=lambda k: k[1], reverse=True)
loc_x = nodes_list[0][1] + nodes_list[0][3] + 70
nodes_list.sort(key=lambda k: k[2], reverse=True)
if merge_position == 'CENTER':
loc_y = (nodes_list[len(nodes_list) - 1][2] + nodes_list[len(nodes_list) - 2][2]) / 2
if nodes_list[len(nodes_list) - 1][-1] == True:
if do_hide:
loc_y += 40
else:
loc_y += 80
else:
loc_y = nodes_list[len(nodes_list) - 1][2]
offset_y = 100
if not do_hide:
offset_y = 200
if nodes_list == selected_shader and (not do_hide_shader):
offset_y = 150.0
the_range = len(nodes_list) - 1
if len(nodes_list) == 1:
the_range = 1
for i in range(the_range):
if nodes_list == selected_mix:
add_type = node_type + 'MixRGB'
add = nodes.new(add_type)
add.blend_type = mode
if mode != 'MIX':
add.inputs[0].default_value = 1.0
add.show_preview = False
add.hide = do_hide
if do_hide:
loc_y = loc_y - 50
first = 1
second = 2
add.width_hidden = 100.0
elif nodes_list == selected_math:
add_type = node_type + 'Math'
add = nodes.new(add_type)
add.operation = mode
add.hide = do_hide
if do_hide:
loc_y = loc_y - 50
first = 0
second = 1
add.width_hidden = 100.0
elif nodes_list == selected_shader:
if mode == 'MIX':
add_type = node_type + 'MixShader'
add = nodes.new(add_type)
add.hide = do_hide_shader
if do_hide_shader:
loc_y = loc_y - 50
first = 1
second = 2
add.width_hidden = 100.0
elif mode == 'ADD':
add_type = node_type + 'AddShader'
add = nodes.new(add_type)
add.hide = do_hide_shader
if do_hide_shader:
loc_y = loc_y - 50
first = 0
second = 1
add.width_hidden = 100.0
elif nodes_list == selected_z:
add = nodes.new('CompositorNodeZcombine')
add.show_preview = False
add.hide = do_hide
if do_hide:
loc_y = loc_y - 50
first = 0
second = 2
add.width_hidden = 100.0
elif nodes_list == selected_alphaover:
add = nodes.new('CompositorNodeAlphaOver')
add.show_preview = False
add.hide = do_hide
if do_hide:
loc_y = loc_y - 50
first = 1
second = 2
add.width_hidden = 100.0
add.location = (loc_x, loc_y)
loc_y += offset_y
add.select = True
count_adds = i + 1
count_after = len(nodes)
index = count_after - 1
first_selected = nodes[nodes_list[0][0]]
last_add = nodes[count_before]
if len(nodes_list) == 2:
if not first_selected.outputs[0].links:
second_selected = nodes[nodes_list[1][0]]
for ss_link in second_selected.outputs[0].links:
invalid_i = [n[0] for n in selected_mix + selected_math + selected_shader + selected_z]
if ss_link.to_node not in [nodes[i] for i in invalid_i]:
links.new(last_add.outputs[0], ss_link.to_socket)
for fs_link in first_selected.outputs[0].links:
invalid_i = [n[0] for n in selected_mix + selected_math + selected_shader + selected_z]
if fs_link.to_node not in [nodes[i] for i in invalid_i]:
links.new(last_add.outputs[0], fs_link.to_socket)
node_to = nodes[count_after - 1]
links.new(first_selected.outputs[0], node_to.inputs[first])
if node_to.type == 'ZCOMBINE':
for fs_out in first_selected.outputs:
if fs_out != first_selected.outputs[0] and fs_out.name in ('Z', 'Depth'):
links.new(fs_out, node_to.inputs[1])
break
for i in range(count_adds):
if i < count_adds - 1:
node_from = nodes[index]
node_to = nodes[index - 1]
node_to_input_i = first
node_to_z_i = 1
links.new(node_from.outputs[0], node_to.inputs[node_to_input_i])
if node_to.type == 'ZCOMBINE':
for from_out in node_from.outputs:
if from_out != node_from.outputs[0] and from_out.name in ('Z', 'Depth'):
links.new(from_out, node_to.inputs[node_to_z_i])
if len(nodes_list) > 1:
node_from = nodes[nodes_list[i + 1][0]]
node_to = nodes[index]
node_to_input_i = second
node_to_z_i = 3
links.new(node_from.outputs[0], node_to.inputs[node_to_input_i])
if node_to.type == 'ZCOMBINE':
for from_out in node_from.outputs:
if from_out != node_from.outputs[0] and from_out.name in ('Z', 'Depth'):
links.new(from_out, node_to.inputs[node_to_z_i])
index -= 1
nodes.active = last_add
for (i, x, y, dx, h) in nodes_list:
nodes[i].select = False
return {'FINISHED'}
|
blender-architecture-scripts
|
positive
|
def write_nginx_pattern(self):
"""Write nginx build pattern to spec file."""
<DeepExtract>
self._write_strip('%prep')
self.write_prep_prepend()
prefix = self.content.prefixes[self.url]
if self.config.default_pattern == 'R':
prefix = self.content.tarball_prefix
self._write_strip('%setup -q -n ' + prefix)
else:
self._write_strip('%setup -q -n ' + prefix)
for archive in self.config.sources['archive']:
extract_cmd = 'tar xf {}'
if archive.endswith('.zip'):
extract_cmd = 'unzip -q {}'
self._write_strip('cd %{_builddir}')
archive_file = os.path.basename(archive)
if self.config.archive_details.get(archive + 'prefix'):
self._write_strip(extract_cmd.format('%{_sourcedir}/' + archive_file))
else:
fake_prefix = os.path.splitext(os.path.basename(archive))[0]
self._write_strip('mkdir -p {}'.format(fake_prefix))
self._write_strip('cd {}'.format(fake_prefix))
self._write_strip(extract_cmd.format('%{_sourcedir}/' + archive_file))
self._write_strip('cd %{_builddir}/' + prefix)
for (archive, destination) in zip(self.config.sources['archive'], self.config.sources['destination']):
if destination.startswith(':'):
continue
if self.config.archive_details[archive + 'prefix'] == self.content.tarball_prefix:
print('Archive {} already unpacked in {}; ignoring destination'.format(archive, self.content.tarball_prefix))
else:
self._write_strip('mkdir -p {}'.format(destination))
archive_prefix = self.config.archive_details[archive + 'prefix']
if not archive_prefix:
archive_prefix = os.path.splitext(os.path.basename(archive))[0]
self._write_strip('cp -r %{{_builddir}}/{0}/* %{{_builddir}}/{1}/{2}'.format(archive_prefix, self.content.tarball_prefix, destination))
self.apply_patches()
if self.config.config_opts['cargo_vendor']:
if self.config.subdir:
self._write_strip('pushd ' + self.config.subdir)
self._write_strip('mkdir -p .cargo')
self._write_strip("echo '[source.crates-io]' >> .cargo/config.toml")
self._write_strip('echo \'replace-with = "vendored-sources"\' >> .cargo/config.toml')
self._write_strip("echo '[source.vendored-sources]' >> .cargo/config.toml")
self._write_strip('echo \'directory = "vendor"\' >> .cargo/config.toml')
if self.config.subdir:
self._write_strip('popd')
if self.config.default_pattern == 'distutils3' or self.config.default_pattern == 'pyproject':
self._write_strip('pushd ..')
self._write_strip('cp -a {} buildavx2'.format(self.content.tarball_prefix))
self._write_strip('popd')
elif self.config.default_pattern != 'cmake':
if self.config.config_opts['32bit']:
self._write_strip('pushd ..')
self._write_strip('cp -a {} build32'.format(self.content.tarball_prefix))
self._write_strip('popd')
if self.config.config_opts['use_avx2']:
self._write_strip('pushd ..')
self._write_strip('cp -a {} buildavx2'.format(self.content.tarball_prefix))
self._write_strip('popd')
if self.config.config_opts['use_avx512']:
self._write_strip('pushd ..')
self._write_strip('cp -a {} buildavx512'.format(self.content.tarball_prefix))
self._write_strip('popd')
if self.config.config_opts['openmpi']:
self._write_strip('pushd ..')
self._write_strip('cp -a {} build-openmpi'.format(self.content.tarball_prefix))
self._write_strip('popd')
self._write_strip('\n')
</DeepExtract>
<DeepExtract>
self.specfile.write_strip('%build')
</DeepExtract>
<DeepExtract>
if self.config.build_prepend:
self._write_strip('## build_prepend content')
for line in self.config.build_prepend:
self._write_strip('{}\n'.format(line))
self._write_strip('## build_prepend end')
</DeepExtract>
<DeepExtract>
self._write_strip('export http_proxy=http://127.0.0.1:9/')
self._write_strip('export https_proxy=http://127.0.0.1:9/')
self._write_strip('export no_proxy=localhost,127.0.0.1,0.0.0.0')
</DeepExtract>
<DeepExtract>
self.specfile.write_strip('nginx-module configure')
</DeepExtract>
<DeepExtract>
self.specfile.write_strip('nginx-module build')
</DeepExtract>
<DeepExtract>
if self.config.build_append:
self._write_strip('## build_append content')
for line in self.config.build_append:
self._write_strip('{}\n'.format(line))
self._write_strip('## build_append end')
</DeepExtract>
<DeepExtract>
self.specfile.write_strip('\n')
</DeepExtract>
<DeepExtract>
self.specfile.write_strip('%install')
</DeepExtract>
<DeepExtract>
if self.config.install_prepend:
self._write_strip('## install_prepend content')
for line in self.config.install_prepend:
self._write_strip('{}\n'.format(line))
self._write_strip('## install_prepend end')
</DeepExtract>
<DeepExtract>
if len(self.license_files) > 0:
self._write_strip('mkdir -p %{buildroot}/usr/share/package-licenses/' + self.name)
for lfile in self.license_files:
file2 = self.hashes[lfile]
lfile = lfile.replace(self.version, '%{version}')
self._write_strip('cp ' + '%{_builddir}/' + lfile + ' %{buildroot}/usr/share/package-licenses/' + self.name + '/' + file2 + ' || :\n')
</DeepExtract>
<DeepExtract>
self.specfile.write_strip('nginx-module install %{buildroot}')
</DeepExtract>
<DeepExtract>
self.specfile.write_strip('\n')
</DeepExtract>
|
def write_nginx_pattern(self):
"""Write nginx build pattern to spec file."""
self._write_strip('%prep')
self.write_prep_prepend()
prefix = self.content.prefixes[self.url]
if self.config.default_pattern == 'R':
prefix = self.content.tarball_prefix
self._write_strip('%setup -q -n ' + prefix)
else:
self._write_strip('%setup -q -n ' + prefix)
for archive in self.config.sources['archive']:
extract_cmd = 'tar xf {}'
if archive.endswith('.zip'):
extract_cmd = 'unzip -q {}'
self._write_strip('cd %{_builddir}')
archive_file = os.path.basename(archive)
if self.config.archive_details.get(archive + 'prefix'):
self._write_strip(extract_cmd.format('%{_sourcedir}/' + archive_file))
else:
fake_prefix = os.path.splitext(os.path.basename(archive))[0]
self._write_strip('mkdir -p {}'.format(fake_prefix))
self._write_strip('cd {}'.format(fake_prefix))
self._write_strip(extract_cmd.format('%{_sourcedir}/' + archive_file))
self._write_strip('cd %{_builddir}/' + prefix)
for (archive, destination) in zip(self.config.sources['archive'], self.config.sources['destination']):
if destination.startswith(':'):
continue
if self.config.archive_details[archive + 'prefix'] == self.content.tarball_prefix:
print('Archive {} already unpacked in {}; ignoring destination'.format(archive, self.content.tarball_prefix))
else:
self._write_strip('mkdir -p {}'.format(destination))
archive_prefix = self.config.archive_details[archive + 'prefix']
if not archive_prefix:
archive_prefix = os.path.splitext(os.path.basename(archive))[0]
self._write_strip('cp -r %{{_builddir}}/{0}/* %{{_builddir}}/{1}/{2}'.format(archive_prefix, self.content.tarball_prefix, destination))
self.apply_patches()
if self.config.config_opts['cargo_vendor']:
if self.config.subdir:
self._write_strip('pushd ' + self.config.subdir)
self._write_strip('mkdir -p .cargo')
self._write_strip("echo '[source.crates-io]' >> .cargo/config.toml")
self._write_strip('echo \'replace-with = "vendored-sources"\' >> .cargo/config.toml')
self._write_strip("echo '[source.vendored-sources]' >> .cargo/config.toml")
self._write_strip('echo \'directory = "vendor"\' >> .cargo/config.toml')
if self.config.subdir:
self._write_strip('popd')
if self.config.default_pattern == 'distutils3' or self.config.default_pattern == 'pyproject':
self._write_strip('pushd ..')
self._write_strip('cp -a {} buildavx2'.format(self.content.tarball_prefix))
self._write_strip('popd')
elif self.config.default_pattern != 'cmake':
if self.config.config_opts['32bit']:
self._write_strip('pushd ..')
self._write_strip('cp -a {} build32'.format(self.content.tarball_prefix))
self._write_strip('popd')
if self.config.config_opts['use_avx2']:
self._write_strip('pushd ..')
self._write_strip('cp -a {} buildavx2'.format(self.content.tarball_prefix))
self._write_strip('popd')
if self.config.config_opts['use_avx512']:
self._write_strip('pushd ..')
self._write_strip('cp -a {} buildavx512'.format(self.content.tarball_prefix))
self._write_strip('popd')
if self.config.config_opts['openmpi']:
self._write_strip('pushd ..')
self._write_strip('cp -a {} build-openmpi'.format(self.content.tarball_prefix))
self._write_strip('popd')
self._write_strip('\n')
self.specfile.write_strip('%build')
if self.config.build_prepend:
self._write_strip('## build_prepend content')
for line in self.config.build_prepend:
self._write_strip('{}\n'.format(line))
self._write_strip('## build_prepend end')
self._write_strip('export http_proxy=http://127.0.0.1:9/')
self._write_strip('export https_proxy=http://127.0.0.1:9/')
self._write_strip('export no_proxy=localhost,127.0.0.1,0.0.0.0')
self.specfile.write_strip('nginx-module configure')
self.specfile.write_strip('nginx-module build')
if self.config.build_append:
self._write_strip('## build_append content')
for line in self.config.build_append:
self._write_strip('{}\n'.format(line))
self._write_strip('## build_append end')
self.specfile.write_strip('\n')
self.specfile.write_strip('%install')
if self.config.install_prepend:
self._write_strip('## install_prepend content')
for line in self.config.install_prepend:
self._write_strip('{}\n'.format(line))
self._write_strip('## install_prepend end')
if len(self.license_files) > 0:
self._write_strip('mkdir -p %{buildroot}/usr/share/package-licenses/' + self.name)
for lfile in self.license_files:
file2 = self.hashes[lfile]
lfile = lfile.replace(self.version, '%{version}')
self._write_strip('cp ' + '%{_builddir}/' + lfile + ' %{buildroot}/usr/share/package-licenses/' + self.name + '/' + file2 + ' || :\n')
self.specfile.write_strip('nginx-module install %{buildroot}')
self.specfile.write_strip('\n')
|
autospec
|
positive
|
def __init__(self, dim=3):
assert dim == 3
centers = numpy.array([[0.9, 0.9, 0.9], [0.9, 0.9, 1], [0.9, 1, 0.9], [1, 0.9, 0.9], [1, 1, 1], [1, 0, 0], [0.5, 0, 0], [0, 1, 0], [0, 0.7, 0], [0, 0, 0], [0.4, 0.3, 0.6], [0.7, 0.7, 0.7], [0.7, 0.7, 1], [1, 0.7, 0.7], [0.7, 1, 0.7]])
e_mat = 0.8 * numpy.array([[9.5, 9.5, 9.5], [9.5, 9.5, 9.5], [9.5, 9.5, 9.5], [9.5, 9.5, 9.5], [9.5, 9.5, 9.5], [1, 0.5, 1], [2, 0.5, 1], [0.5, 0.5, 0.5], [0.5, 1, 0.5], [1, 1, 1], [2, 2, 3.5], [8.5, 8.5, 8.5], [8.5, 8.5, 8.5], [8.5, 8.5, 8.5], [8.5, 8.5, 8.5]])
coefs = numpy.array([4, 4, 4, 4, -12, 1, 3, -2, 5, -2, 1, -2, -2, -2, -2])
def kernel(x):
<DeepExtract>
if dist_type == 1:
ret_val = numpy.array([[numpy.sum(numpy.abs((xpt - center) * evec)) for (evec, center) in lzip(numpy.sqrt(e_mat), centers)] for xpt in x])
elif dist_type == 2:
ret_val = numpy.array([[numpy.dot((xpt - center) * evec, xpt - center) for (evec, center) in lzip(e_mat, centers)] for xpt in x])
elif dist_type == 'inf':
ret_val = numpy.array([[numpy.max(numpy.abs((xpt - center) * evec)) for (evec, center) in lzip(numpy.sqrt(e_mat), centers)] for xpt in x])
else:
raise ValueError('Unrecognized distance type {0}'.format(dist_type))
r2 = ret_val
</DeepExtract>
return numpy.exp(-r2)
super(McCourt13, self).__init__(dim, kernel, e_mat, coefs, centers)
self.min_loc = [1, 1, 1]
self.fmin = 1.49048296359
self.fmax = 5.15444049449
self.classifiers = ['bound_min']
|
def __init__(self, dim=3):
assert dim == 3
centers = numpy.array([[0.9, 0.9, 0.9], [0.9, 0.9, 1], [0.9, 1, 0.9], [1, 0.9, 0.9], [1, 1, 1], [1, 0, 0], [0.5, 0, 0], [0, 1, 0], [0, 0.7, 0], [0, 0, 0], [0.4, 0.3, 0.6], [0.7, 0.7, 0.7], [0.7, 0.7, 1], [1, 0.7, 0.7], [0.7, 1, 0.7]])
e_mat = 0.8 * numpy.array([[9.5, 9.5, 9.5], [9.5, 9.5, 9.5], [9.5, 9.5, 9.5], [9.5, 9.5, 9.5], [9.5, 9.5, 9.5], [1, 0.5, 1], [2, 0.5, 1], [0.5, 0.5, 0.5], [0.5, 1, 0.5], [1, 1, 1], [2, 2, 3.5], [8.5, 8.5, 8.5], [8.5, 8.5, 8.5], [8.5, 8.5, 8.5], [8.5, 8.5, 8.5]])
coefs = numpy.array([4, 4, 4, 4, -12, 1, 3, -2, 5, -2, 1, -2, -2, -2, -2])
def kernel(x):
if dist_type == 1:
ret_val = numpy.array([[numpy.sum(numpy.abs((xpt - center) * evec)) for (evec, center) in lzip(numpy.sqrt(e_mat), centers)] for xpt in x])
elif dist_type == 2:
ret_val = numpy.array([[numpy.dot((xpt - center) * evec, xpt - center) for (evec, center) in lzip(e_mat, centers)] for xpt in x])
elif dist_type == 'inf':
ret_val = numpy.array([[numpy.max(numpy.abs((xpt - center) * evec)) for (evec, center) in lzip(numpy.sqrt(e_mat), centers)] for xpt in x])
else:
raise ValueError('Unrecognized distance type {0}'.format(dist_type))
r2 = ret_val
return numpy.exp(-r2)
super(McCourt13, self).__init__(dim, kernel, e_mat, coefs, centers)
self.min_loc = [1, 1, 1]
self.fmin = 1.49048296359
self.fmax = 5.15444049449
self.classifiers = ['bound_min']
|
evalset
|
positive
|
def forward_train(self, img, img_metas, chargrid_map, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None, **kwargs):
input_img = chargrid_map if self.use_chargrid else img
losses = super().forward_train(img=input_img, img_metas=img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, gt_bboxes_ignore=gt_bboxes_ignore, gt_masks=gt_masks, proposals=proposals, **kwargs)
if sum([box.shape[0] for box in gt_bboxes]) == 0:
return losses
img_feat = self.extract_feat(input_img)
recog_rois = bbox2roi(gt_bboxes)
infor_feats = self.ie_roi_extractor(img_feat[:self.ie_roi_extractor.num_inputs], recog_rois)
<DeepExtract>
pooling = nn.AdaptiveAvgPool2d((1, 1))
max_length = max([per_b.size(0) for per_b in gt_bboxes])
batch_size = len(gt_bboxes)
batched_feat = []
batched_label = []
last_idx = 0
for i in range(batch_size):
b_s = gt_bboxes[i].size(0)
feat_size = list(infor_feats.size())
feat_size[0] = max_length - b_s
batched_feat.append(torch.cat((infor_feats[last_idx:last_idx + b_s], infor_feats.new_full(feat_size, 0)), 0))
if gt_labels is not None:
per_label = gt_labels[i]
label_size = list(per_label.size())
label_size[0] = max_length - b_s
batched_label.append(torch.cat((per_label, per_label.new_full(label_size, 255)), 0))
last_idx += b_s
feat = torch.cat(batched_feat, 0)
feat = pooling(feat).squeeze(2).squeeze(2)
feat = feat.view(batch_size, -1, feat.size(-1))
(feat, batched_label) = (feat, batched_label)
</DeepExtract>
cls_pred = self.ie_cls_head(feat)
cls_pred = cls_pred.view(-1, cls_pred.size(-1))
tmp_labels = torch.cat(batched_label, 0).view(-1)
valid_mask = tmp_labels != 255
info_loss = self.ie_cls_head.loss(cls_pred[valid_mask], tmp_labels[valid_mask], prefix='infor_')
losses.update(info_loss)
return losses
|
def forward_train(self, img, img_metas, chargrid_map, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None, **kwargs):
input_img = chargrid_map if self.use_chargrid else img
losses = super().forward_train(img=input_img, img_metas=img_metas, gt_bboxes=gt_bboxes, gt_labels=gt_labels, gt_bboxes_ignore=gt_bboxes_ignore, gt_masks=gt_masks, proposals=proposals, **kwargs)
if sum([box.shape[0] for box in gt_bboxes]) == 0:
return losses
img_feat = self.extract_feat(input_img)
recog_rois = bbox2roi(gt_bboxes)
infor_feats = self.ie_roi_extractor(img_feat[:self.ie_roi_extractor.num_inputs], recog_rois)
pooling = nn.AdaptiveAvgPool2d((1, 1))
max_length = max([per_b.size(0) for per_b in gt_bboxes])
batch_size = len(gt_bboxes)
batched_feat = []
batched_label = []
last_idx = 0
for i in range(batch_size):
b_s = gt_bboxes[i].size(0)
feat_size = list(infor_feats.size())
feat_size[0] = max_length - b_s
batched_feat.append(torch.cat((infor_feats[last_idx:last_idx + b_s], infor_feats.new_full(feat_size, 0)), 0))
if gt_labels is not None:
per_label = gt_labels[i]
label_size = list(per_label.size())
label_size[0] = max_length - b_s
batched_label.append(torch.cat((per_label, per_label.new_full(label_size, 255)), 0))
last_idx += b_s
feat = torch.cat(batched_feat, 0)
feat = pooling(feat).squeeze(2).squeeze(2)
feat = feat.view(batch_size, -1, feat.size(-1))
(feat, batched_label) = (feat, batched_label)
cls_pred = self.ie_cls_head(feat)
cls_pred = cls_pred.view(-1, cls_pred.size(-1))
tmp_labels = torch.cat(batched_label, 0).view(-1)
valid_mask = tmp_labels != 255
info_loss = self.ie_cls_head.loss(cls_pred[valid_mask], tmp_labels[valid_mask], prefix='infor_')
losses.update(info_loss)
return losses
|
DAVAR-Lab-OCR
|
positive
|
@mock.patch.object(bb.BigBenchJsonTaskFetcher, 'get_bigbench_json_task_or_subtask', autospec=True)
def test_additional_metrics(self, mock_get_json_task_or_subtask):
<DeepExtract>
examples = [{'input': f'{prefix} input {i}.', 'target': f'{prefix} target {i}.'} for i in range(16)]
</DeepExtract>
<DeepExtract>
task_data = _create_task_data(name='test_generative_task', examples=examples, description='test generative task', preferred_score='exact_str_match', metrics=['exact_str_match'])
</DeepExtract>
mock_get_json_task_or_subtask.return_value = json_task.JsonTask(task_data=task_data, shot_list=[0])
def additional_metric(targets, predictions):
del targets, predictions
return {'my_score': 42.0}
seqio_task_name = task_api.register_seqio_task('test_task', '/foo', bb.BigBenchTaskType.GENERATIVE, vocabs.T5_DEFAULT_VOCAB, 0, bigbench_subtask_name=None, additional_metrics=[additional_metric])
evaluator = seqio.Evaluator(seqio_task_name, seqio.EncDecFeatureConverter(pack=False), eval_split='all')
vocab = vocabs.T5_DEFAULT_VOCAB.vocabulary
(all_metrics, _, _) = evaluator.evaluate(compute_metrics=True, step=None, predict_fn=lambda ds: [(0, vocab.encode('foo'))] * len(list(ds)), score_fn=lambda ds: [(0, -0.6931471805599453)] * len(list(ds)))
results = all_metrics.result()
self.assertDictEqual(results, {'bigbench:test_task.gen.t5_default_vocab.0_shot.all_examples': {'exact_str_match': 0.0, 'my_score': 42.0}})
|
@mock.patch.object(bb.BigBenchJsonTaskFetcher, 'get_bigbench_json_task_or_subtask', autospec=True)
def test_additional_metrics(self, mock_get_json_task_or_subtask):
examples = [{'input': f'{prefix} input {i}.', 'target': f'{prefix} target {i}.'} for i in range(16)]
task_data = _create_task_data(name='test_generative_task', examples=examples, description='test generative task', preferred_score='exact_str_match', metrics=['exact_str_match'])
mock_get_json_task_or_subtask.return_value = json_task.JsonTask(task_data=task_data, shot_list=[0])
def additional_metric(targets, predictions):
del targets, predictions
return {'my_score': 42.0}
seqio_task_name = task_api.register_seqio_task('test_task', '/foo', bb.BigBenchTaskType.GENERATIVE, vocabs.T5_DEFAULT_VOCAB, 0, bigbench_subtask_name=None, additional_metrics=[additional_metric])
evaluator = seqio.Evaluator(seqio_task_name, seqio.EncDecFeatureConverter(pack=False), eval_split='all')
vocab = vocabs.T5_DEFAULT_VOCAB.vocabulary
(all_metrics, _, _) = evaluator.evaluate(compute_metrics=True, step=None, predict_fn=lambda ds: [(0, vocab.encode('foo'))] * len(list(ds)), score_fn=lambda ds: [(0, -0.6931471805599453)] * len(list(ds)))
results = all_metrics.result()
self.assertDictEqual(results, {'bigbench:test_task.gen.t5_default_vocab.0_shot.all_examples': {'exact_str_match': 0.0, 'my_score': 42.0}})
|
BIG-bench
|
positive
|
def make_scene_dataset(ds_name, n_frames=None):
if ds_name == 'tless.primesense.train':
<DeepExtract>
ds_dir = BOP_DS_DIR / 'tless'
ds = BOPDataset(ds_dir, split='train_primesense')
ds = ds
</DeepExtract>
elif ds_name == 'tless.primesense.test':
<DeepExtract>
ds_dir = BOP_DS_DIR / 'tless'
ds = BOPDataset(ds_dir, split='test_primesense')
ds = ds
</DeepExtract>
elif ds_name == 'tless.primesense.test.bop19':
<DeepExtract>
ds_dir = BOP_DS_DIR / 'tless'
ds = BOPDataset(ds_dir, split='test_primesense')
ds = ds
</DeepExtract>
<DeepExtract>
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
</DeepExtract>
elif ds_name == 'ycbv.train.real':
ds_dir = BOP_DS_DIR / 'ycbv'
ds = BOPDataset(ds_dir, split='train_real')
elif ds_name == 'ycbv.train.synt':
ds_dir = BOP_DS_DIR / 'ycbv'
ds = BOPDataset(ds_dir, split='train_synt')
elif ds_name == 'ycbv.test':
ds_dir = BOP_DS_DIR / 'ycbv'
ds = BOPDataset(ds_dir, split='test')
elif ds_name == 'ycbv.test.keyframes':
ds_dir = BOP_DS_DIR / 'ycbv'
ds = BOPDataset(ds_dir, split='test')
keyframes_path = ds_dir / 'keyframe.txt'
ls = keyframes_path.read_text().split('\n')[:-1]
frame_index = ds.frame_index
ids = []
for l_n in ls:
(scene_id, view_id) = l_n.split('/')
(scene_id, view_id) = (int(scene_id), int(view_id))
mask = (frame_index['scene_id'] == scene_id) & (frame_index['view_id'] == view_id)
ids.append(np.where(mask)[0].item())
ds.frame_index = frame_index.iloc[ids].reset_index(drop=True)
elif ds_name == 'hb.bop19':
ds_dir = BOP_DS_DIR / 'hb'
ds = BOPDataset(ds_dir, split='test_primesense')
<DeepExtract>
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
</DeepExtract>
elif ds_name == 'icbin.bop19':
ds_dir = BOP_DS_DIR / 'icbin'
ds = BOPDataset(ds_dir, split='test')
<DeepExtract>
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
</DeepExtract>
elif ds_name == 'itodd.bop19':
ds_dir = BOP_DS_DIR / 'itodd'
ds = BOPDataset(ds_dir, split='test')
<DeepExtract>
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
</DeepExtract>
elif ds_name == 'lmo.bop19':
ds_dir = BOP_DS_DIR / 'lmo'
ds = BOPDataset(ds_dir, split='test')
<DeepExtract>
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
</DeepExtract>
elif ds_name == 'tless.bop19':
ds_dir = BOP_DS_DIR / 'tless'
ds = BOPDataset(ds_dir, split='test_primesense')
<DeepExtract>
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
</DeepExtract>
elif ds_name == 'tudl.bop19':
ds_dir = BOP_DS_DIR / 'tudl'
ds = BOPDataset(ds_dir, split='test')
<DeepExtract>
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
</DeepExtract>
elif ds_name == 'ycbv.bop19':
ds_dir = BOP_DS_DIR / 'ycbv'
ds = BOPDataset(ds_dir, split='test')
<DeepExtract>
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
</DeepExtract>
elif ds_name == 'hb.pbr':
ds_dir = BOP_DS_DIR / 'hb'
ds = BOPDataset(ds_dir, split='train_pbr')
elif ds_name == 'icbin.pbr':
ds_dir = BOP_DS_DIR / 'icbin'
ds = BOPDataset(ds_dir, split='train_pbr')
elif ds_name == 'itodd.pbr':
ds_dir = BOP_DS_DIR / 'itodd'
ds = BOPDataset(ds_dir, split='train_pbr')
elif ds_name == 'lm.pbr':
ds_dir = BOP_DS_DIR / 'lm'
ds = BOPDataset(ds_dir, split='train_pbr')
elif ds_name == 'tless.pbr':
ds_dir = BOP_DS_DIR / 'tless'
ds = BOPDataset(ds_dir, split='train_pbr')
elif ds_name == 'tudl.pbr':
ds_dir = BOP_DS_DIR / 'tudl'
ds = BOPDataset(ds_dir, split='train_pbr')
elif ds_name == 'ycbv.pbr':
ds_dir = BOP_DS_DIR / 'ycbv'
ds = BOPDataset(ds_dir, split='train_pbr')
elif ds_name == 'hb.val':
ds_dir = BOP_DS_DIR / 'hb'
ds = BOPDataset(ds_dir, split='val_primesense')
elif ds_name == 'itodd.val':
ds_dir = BOP_DS_DIR / 'itodd'
ds = BOPDataset(ds_dir, split='val')
elif ds_name == 'tudl.train.real':
ds_dir = BOP_DS_DIR / 'tudl'
ds = BOPDataset(ds_dir, split='train_real')
elif 'synthetic.' in ds_name:
from .synthetic_dataset import SyntheticSceneDataset
assert '.train' in ds_name or '.val' in ds_name
is_train = 'train' in ds_name.split('.')[-1]
ds_name = ds_name.split('.')[1]
ds = SyntheticSceneDataset(ds_dir=LOCAL_DATA_DIR / 'synt_datasets' / ds_name, train=is_train)
else:
raise ValueError(ds_name)
if n_frames is not None:
ds.frame_index = ds.frame_index.iloc[:n_frames].reset_index(drop=True)
ds.name = ds_name
return ds
|
def make_scene_dataset(ds_name, n_frames=None):
if ds_name == 'tless.primesense.train':
ds_dir = BOP_DS_DIR / 'tless'
ds = BOPDataset(ds_dir, split='train_primesense')
ds = ds
elif ds_name == 'tless.primesense.test':
ds_dir = BOP_DS_DIR / 'tless'
ds = BOPDataset(ds_dir, split='test_primesense')
ds = ds
elif ds_name == 'tless.primesense.test.bop19':
ds_dir = BOP_DS_DIR / 'tless'
ds = BOPDataset(ds_dir, split='test_primesense')
ds = ds
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
elif ds_name == 'ycbv.train.real':
ds_dir = BOP_DS_DIR / 'ycbv'
ds = BOPDataset(ds_dir, split='train_real')
elif ds_name == 'ycbv.train.synt':
ds_dir = BOP_DS_DIR / 'ycbv'
ds = BOPDataset(ds_dir, split='train_synt')
elif ds_name == 'ycbv.test':
ds_dir = BOP_DS_DIR / 'ycbv'
ds = BOPDataset(ds_dir, split='test')
elif ds_name == 'ycbv.test.keyframes':
ds_dir = BOP_DS_DIR / 'ycbv'
ds = BOPDataset(ds_dir, split='test')
keyframes_path = ds_dir / 'keyframe.txt'
ls = keyframes_path.read_text().split('\n')[:-1]
frame_index = ds.frame_index
ids = []
for l_n in ls:
(scene_id, view_id) = l_n.split('/')
(scene_id, view_id) = (int(scene_id), int(view_id))
mask = (frame_index['scene_id'] == scene_id) & (frame_index['view_id'] == view_id)
ids.append(np.where(mask)[0].item())
ds.frame_index = frame_index.iloc[ids].reset_index(drop=True)
elif ds_name == 'hb.bop19':
ds_dir = BOP_DS_DIR / 'hb'
ds = BOPDataset(ds_dir, split='test_primesense')
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
elif ds_name == 'icbin.bop19':
ds_dir = BOP_DS_DIR / 'icbin'
ds = BOPDataset(ds_dir, split='test')
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
elif ds_name == 'itodd.bop19':
ds_dir = BOP_DS_DIR / 'itodd'
ds = BOPDataset(ds_dir, split='test')
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
elif ds_name == 'lmo.bop19':
ds_dir = BOP_DS_DIR / 'lmo'
ds = BOPDataset(ds_dir, split='test')
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
elif ds_name == 'tless.bop19':
ds_dir = BOP_DS_DIR / 'tless'
ds = BOPDataset(ds_dir, split='test_primesense')
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
elif ds_name == 'tudl.bop19':
ds_dir = BOP_DS_DIR / 'tudl'
ds = BOPDataset(ds_dir, split='test')
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
elif ds_name == 'ycbv.bop19':
ds_dir = BOP_DS_DIR / 'ycbv'
ds = BOPDataset(ds_dir, split='test')
targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
targets = remap_bop_targets(targets)
targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
assert len(index) == len(targets)
ds.frame_index = index
ds = ds
elif ds_name == 'hb.pbr':
ds_dir = BOP_DS_DIR / 'hb'
ds = BOPDataset(ds_dir, split='train_pbr')
elif ds_name == 'icbin.pbr':
ds_dir = BOP_DS_DIR / 'icbin'
ds = BOPDataset(ds_dir, split='train_pbr')
elif ds_name == 'itodd.pbr':
ds_dir = BOP_DS_DIR / 'itodd'
ds = BOPDataset(ds_dir, split='train_pbr')
elif ds_name == 'lm.pbr':
ds_dir = BOP_DS_DIR / 'lm'
ds = BOPDataset(ds_dir, split='train_pbr')
elif ds_name == 'tless.pbr':
ds_dir = BOP_DS_DIR / 'tless'
ds = BOPDataset(ds_dir, split='train_pbr')
elif ds_name == 'tudl.pbr':
ds_dir = BOP_DS_DIR / 'tudl'
ds = BOPDataset(ds_dir, split='train_pbr')
elif ds_name == 'ycbv.pbr':
ds_dir = BOP_DS_DIR / 'ycbv'
ds = BOPDataset(ds_dir, split='train_pbr')
elif ds_name == 'hb.val':
ds_dir = BOP_DS_DIR / 'hb'
ds = BOPDataset(ds_dir, split='val_primesense')
elif ds_name == 'itodd.val':
ds_dir = BOP_DS_DIR / 'itodd'
ds = BOPDataset(ds_dir, split='val')
elif ds_name == 'tudl.train.real':
ds_dir = BOP_DS_DIR / 'tudl'
ds = BOPDataset(ds_dir, split='train_real')
elif 'synthetic.' in ds_name:
from .synthetic_dataset import SyntheticSceneDataset
assert '.train' in ds_name or '.val' in ds_name
is_train = 'train' in ds_name.split('.')[-1]
ds_name = ds_name.split('.')[1]
ds = SyntheticSceneDataset(ds_dir=LOCAL_DATA_DIR / 'synt_datasets' / ds_name, train=is_train)
else:
raise ValueError(ds_name)
if n_frames is not None:
ds.frame_index = ds.frame_index.iloc[:n_frames].reset_index(drop=True)
ds.name = ds_name
return ds
|
cosypose
|
positive
|
def search_packages(self, names):
packages = []
pkg_name = None
pkg_version = None
pkg_arch = None
names = [name.split('=', maxsplit=1)[0] for name in names]
<DeepExtract>
if not names:
output = None
command = '{apt-cache} show %s' % ' '.join(names)
command = command.format(**self._deps)
self.logger.debug(command)
_proc = subprocess.run(command, stdout=subprocess.PIPE, shell=True, env=self._get_environment())
shell.assert_successful_result(_proc)
output = _proc
</DeepExtract>
for line in output.stdout.decode('utf-8').splitlines():
if line.startswith('Package:'):
pkg_name = line.split(' ', maxsplit=2)[1]
if line.startswith('Architecture'):
pkg_arch = line.split(' ', maxsplit=2)[1]
if line.startswith('Version:'):
pkg_version = line.split(' ', maxsplit=2)[1]
if not line and pkg_name:
packages.append(Package(pkg_name, pkg_version, pkg_arch))
pkg_name = None
pkg_arch = None
pkg_version = None
if pkg_name:
packages.append(Package(pkg_name, pkg_version, pkg_arch))
return packages
|
def search_packages(self, names):
packages = []
pkg_name = None
pkg_version = None
pkg_arch = None
names = [name.split('=', maxsplit=1)[0] for name in names]
if not names:
output = None
command = '{apt-cache} show %s' % ' '.join(names)
command = command.format(**self._deps)
self.logger.debug(command)
_proc = subprocess.run(command, stdout=subprocess.PIPE, shell=True, env=self._get_environment())
shell.assert_successful_result(_proc)
output = _proc
for line in output.stdout.decode('utf-8').splitlines():
if line.startswith('Package:'):
pkg_name = line.split(' ', maxsplit=2)[1]
if line.startswith('Architecture'):
pkg_arch = line.split(' ', maxsplit=2)[1]
if line.startswith('Version:'):
pkg_version = line.split(' ', maxsplit=2)[1]
if not line and pkg_name:
packages.append(Package(pkg_name, pkg_version, pkg_arch))
pkg_name = None
pkg_arch = None
pkg_version = None
if pkg_name:
packages.append(Package(pkg_name, pkg_version, pkg_arch))
return packages
|
appimage-builder
|
positive
|
def tryPositionsWithTwoBondsLP(self, acc, donor, newname, loc1, loc2):
"""
Try placing an LP on a tetrahedral geometry with
two existing bonds. If this isn't a hydrogen bond
it can return - otherwise ensure that the H(D)-A-LP
angle is minimized.
"""
global donorhatom, donorhatom
bestangle = 180.0
bestcoords = []
residue = acc.residue
if not self.isHbond(donor, acc):
return 0
for donorhatom in donor.bonds:
if donorhatom.isHydrogen() and self.getHbondangle(acc, donor, donorhatom) < ANGLE_CUTOFF:
break
residue.createAtom(newname, loc1)
newatom = residue.getAtom(newname)
angle = abs(self.getHbondangle(donorhatom, acc, newatom))
if angle < bestangle:
bestangle = angle
bestcoords = loc1
newatom.x = loc2[0]
newatom.y = loc2[1]
newatom.z = loc2[2]
<DeepExtract>
angle = 0.0
atom2Coords = acc.getCoords()
coords1 = subtract(newatom.getCoords(), atom2Coords)
coords2 = subtract(donorhatom.getCoords(), atom2Coords)
norm1 = normalize(coords1)
norm2 = normalize(coords2)
dotted = dot(norm1, norm2)
if dotted > 1.0:
dotted = 1.0
rad = abs(math.acos(dotted))
angle = rad * 180.0 / math.pi
if angle > 180.0:
angle = 360.0 - angle
angle = angle
</DeepExtract>
if angle < bestangle:
bestcoords = loc2
if bestangle > ANGLE_CUTOFF * 2.0:
residue.removeAtom(newname)
return 0
newatom.x = bestcoords[0]
newatom.y = bestcoords[1]
newatom.z = bestcoords[2]
self.routines.cells.addCell(newatom)
if newatom not in acc.bonds:
acc.bonds.append(newatom)
if acc not in newatom.bonds:
newatom.bonds.append(acc)
return 1
|
def tryPositionsWithTwoBondsLP(self, acc, donor, newname, loc1, loc2):
"""
Try placing an LP on a tetrahedral geometry with
two existing bonds. If this isn't a hydrogen bond
it can return - otherwise ensure that the H(D)-A-LP
angle is minimized.
"""
global donorhatom, donorhatom
bestangle = 180.0
bestcoords = []
residue = acc.residue
if not self.isHbond(donor, acc):
return 0
for donorhatom in donor.bonds:
if donorhatom.isHydrogen() and self.getHbondangle(acc, donor, donorhatom) < ANGLE_CUTOFF:
break
residue.createAtom(newname, loc1)
newatom = residue.getAtom(newname)
angle = abs(self.getHbondangle(donorhatom, acc, newatom))
if angle < bestangle:
bestangle = angle
bestcoords = loc1
newatom.x = loc2[0]
newatom.y = loc2[1]
newatom.z = loc2[2]
angle = 0.0
atom2Coords = acc.getCoords()
coords1 = subtract(newatom.getCoords(), atom2Coords)
coords2 = subtract(donorhatom.getCoords(), atom2Coords)
norm1 = normalize(coords1)
norm2 = normalize(coords2)
dotted = dot(norm1, norm2)
if dotted > 1.0:
dotted = 1.0
rad = abs(math.acos(dotted))
angle = rad * 180.0 / math.pi
if angle > 180.0:
angle = 360.0 - angle
angle = angle
if angle < bestangle:
bestcoords = loc2
if bestangle > ANGLE_CUTOFF * 2.0:
residue.removeAtom(newname)
return 0
newatom.x = bestcoords[0]
newatom.y = bestcoords[1]
newatom.z = bestcoords[2]
self.routines.cells.addCell(newatom)
if newatom not in acc.bonds:
acc.bonds.append(newatom)
if acc not in newatom.bonds:
newatom.bonds.append(acc)
return 1
|
BioBlender21
|
positive
|
def __init__(self):
self.headers = default_headers()
self.auth = None
self.proxies = {}
self.hooks = default_hooks()
self.params = {}
self.stream = False
self.verify = True
self.cert = None
self.max_redirects = DEFAULT_REDIRECT_LIMIT
self.trust_env = True
self.cookies = cookiejar_from_dict({})
self.adapters = OrderedDict()
<DeepExtract>
self.adapters['https://'] = HTTPAdapter()
keys_to_move = [k for k in self.adapters if len(k) < len('https://')]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
</DeepExtract>
<DeepExtract>
self.adapters['http://'] = HTTPAdapter()
keys_to_move = [k for k in self.adapters if len(k) < len('http://')]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
</DeepExtract>
|
def __init__(self):
self.headers = default_headers()
self.auth = None
self.proxies = {}
self.hooks = default_hooks()
self.params = {}
self.stream = False
self.verify = True
self.cert = None
self.max_redirects = DEFAULT_REDIRECT_LIMIT
self.trust_env = True
self.cookies = cookiejar_from_dict({})
self.adapters = OrderedDict()
self.adapters['https://'] = HTTPAdapter()
keys_to_move = [k for k in self.adapters if len(k) < len('https://')]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
self.adapters['http://'] = HTTPAdapter()
keys_to_move = [k for k in self.adapters if len(k) < len('http://')]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
|
acousticbrainz-client
|
positive
|
def testPrintRawUtf8String(self):
message = unittest_pb2.TestAllTypes()
message.repeated_string.append('\\u00fc\\ua71f')
text = text_format.MessageToString(message, as_utf8=True)
<DeepExtract>
self.assertMultiLineEqual(text, 'repeated_string: "üê\x9c\x9f"\n')
</DeepExtract>
parsed_message = unittest_pb2.TestAllTypes()
text_format.Parse(text, parsed_message)
self.assertEquals(message, parsed_message, '\n%s != %s' % (message, parsed_message))
|
def testPrintRawUtf8String(self):
message = unittest_pb2.TestAllTypes()
message.repeated_string.append('\\u00fc\\ua71f')
text = text_format.MessageToString(message, as_utf8=True)
self.assertMultiLineEqual(text, 'repeated_string: "üê\x9c\x9f"\n')
parsed_message = unittest_pb2.TestAllTypes()
text_format.Parse(text, parsed_message)
self.assertEquals(message, parsed_message, '\n%s != %s' % (message, parsed_message))
|
botchallenge
|
positive
|
def get_context(self, context):
context['brand_name'] = self.relate_obj.get_brand_name()
context['rel_objs'] = self.relate_obj.to_objs
if len(self.relate_obj.to_objs) == 1:
context['rel_obj'] = self.relate_obj.to_objs[0]
if 'add_url' in context:
<DeepExtract>
context['add_url'] = context['add_url'] + ('&' if context['add_url'].find('?') > 0 else '?') + '%s=%s' % self._get_relate_params()
</DeepExtract>
return context
|
def get_context(self, context):
context['brand_name'] = self.relate_obj.get_brand_name()
context['rel_objs'] = self.relate_obj.to_objs
if len(self.relate_obj.to_objs) == 1:
context['rel_obj'] = self.relate_obj.to_objs[0]
if 'add_url' in context:
context['add_url'] = context['add_url'] + ('&' if context['add_url'].find('?') > 0 else '?') + '%s=%s' % self._get_relate_params()
return context
|
CTF_AWD_Platform
|
positive
|
def ac2mol(mol, atom_connectivity, atoms: List[int], charge: int, allow_charged_fragments: bool=True, use_graph: bool=True) -> Optional[list]:
"""
Args:
mol (RDMol) An rdkit molecule object instance.
atom_connectivity (np.ndarray): Atom connectivity.
atoms (List[int]): Entries are integer atomic symbols.
charge (int): The molecular charge.
allow_charged_fragments (bool, optional): Whether to allow charged fragments.
use_graph (bool, optional): Whether to use the graph representation of the molecule.
Returns:
List[RDMol]: Respective RDKit Molecule object instances.
"""
<DeepExtract>
global atomic_valence
global atomic_valence_electrons
valences_list_of_lists = []
ac_valence = list(atom_connectivity.sum(axis=1))
for (i, (atomic_num, valence)) in enumerate(zip(atoms, ac_valence)):
possible_valence = [x for x in atomic_valence[atomic_num] if x >= valence]
if not possible_valence:
logger.warning(f'Valence of atom {i} is {valence}, which bigger than the allowed max {max(atomic_valence[atomic_num])}. Stopping')
(bond_orders, atomic_valence_electrons_) = (None, None)
valences_list_of_lists.append(possible_valence)
valences_list = itertools.product(*valences_list_of_lists)
best_bo = atom_connectivity.copy()
for valences in valences_list:
(unsaturated_atoms, du_from_ac) = get_ua(valences, ac_valence)
check_len = len(unsaturated_atoms) == 0
if check_len:
check_bo = bo_is_ok(atom_connectivity, atom_connectivity, charge, du_from_ac, atomic_valence_electrons, atoms, valences, allow_charged_fragments=allow_charged_fragments)
else:
check_bo = None
if check_len and check_bo:
(bond_orders, atomic_valence_electrons_) = (atom_connectivity, atomic_valence_electrons)
ua_pairs_list = get_ua_pairs(unsaturated_atoms, atom_connectivity, use_graph=use_graph)
for ua_pairs in ua_pairs_list:
bond_orders = get_bo(atom_connectivity, du_from_ac, valences, ua_pairs, use_graph=use_graph)
status = bo_is_ok(bond_orders, atom_connectivity, charge, du_from_ac, atomic_valence_electrons, atoms, valences, allow_charged_fragments=allow_charged_fragments)
charge_ok = is_charge_ok(bond_orders, charge, atomic_valence_electrons, atoms, allow_charged_fragments=allow_charged_fragments)
if status:
(bond_orders, atomic_valence_electrons_) = (bond_orders, atomic_valence_electrons)
elif bond_orders.sum() >= best_bo.sum() and valences_not_too_large(bond_orders, valences) and charge_ok:
best_bo = bond_orders.copy()
(bond_orders, atomic_valence_electrons_) = (best_bo, atomic_valence_electrons)
</DeepExtract>
if bond_orders is None or atomic_valence_electrons_ is None:
return None
<DeepExtract>
l1 = len(bond_orders)
l2 = len(atoms)
bo_valences = list(bond_orders.sum(axis=1))
if l1 != l2:
logger.warning(f'sizes of adjMat ({l1:d}) and Atoms {l2:d} differ.')
mol = None
rw_mol = Chem.RWMol(mol)
bond_type_dict = {1: Chem.BondType.SINGLE, 2: Chem.BondType.DOUBLE, 3: Chem.BondType.TRIPLE}
for i in range(l1):
for j in range(i + 1, l1):
bond_orders = int(round(bond_orders[i, j]))
if bond_orders == 0:
continue
bt = bond_type_dict.get(bond_orders, Chem.BondType.SINGLE)
rw_mol.AddBond(i, j, bt)
mol = rw_mol.GetMol()
if allow_charged_fragments:
mol = set_atomic_charges(mol, atoms, atomic_valence_electrons_, bo_valences, bond_orders, charge)
else:
mol = set_atomic_radicals(mol, atoms, atomic_valence_electrons_, bo_valences)
mol = mol
</DeepExtract>
if mol is None:
return None
if Chem.GetFormalCharge(mol) != charge:
return []
mols = rdchem.ResonanceMolSupplier(mol, Chem.UNCONSTRAINED_CATIONS, Chem.UNCONSTRAINED_ANIONS)
mols = [mol for mol in mols]
return mols
|
def ac2mol(mol, atom_connectivity, atoms: List[int], charge: int, allow_charged_fragments: bool=True, use_graph: bool=True) -> Optional[list]:
"""
Args:
mol (RDMol) An rdkit molecule object instance.
atom_connectivity (np.ndarray): Atom connectivity.
atoms (List[int]): Entries are integer atomic symbols.
charge (int): The molecular charge.
allow_charged_fragments (bool, optional): Whether to allow charged fragments.
use_graph (bool, optional): Whether to use the graph representation of the molecule.
Returns:
List[RDMol]: Respective RDKit Molecule object instances.
"""
global atomic_valence
global atomic_valence_electrons
valences_list_of_lists = []
ac_valence = list(atom_connectivity.sum(axis=1))
for (i, (atomic_num, valence)) in enumerate(zip(atoms, ac_valence)):
possible_valence = [x for x in atomic_valence[atomic_num] if x >= valence]
if not possible_valence:
logger.warning(f'Valence of atom {i} is {valence}, which bigger than the allowed max {max(atomic_valence[atomic_num])}. Stopping')
(bond_orders, atomic_valence_electrons_) = (None, None)
valences_list_of_lists.append(possible_valence)
valences_list = itertools.product(*valences_list_of_lists)
best_bo = atom_connectivity.copy()
for valences in valences_list:
(unsaturated_atoms, du_from_ac) = get_ua(valences, ac_valence)
check_len = len(unsaturated_atoms) == 0
if check_len:
check_bo = bo_is_ok(atom_connectivity, atom_connectivity, charge, du_from_ac, atomic_valence_electrons, atoms, valences, allow_charged_fragments=allow_charged_fragments)
else:
check_bo = None
if check_len and check_bo:
(bond_orders, atomic_valence_electrons_) = (atom_connectivity, atomic_valence_electrons)
ua_pairs_list = get_ua_pairs(unsaturated_atoms, atom_connectivity, use_graph=use_graph)
for ua_pairs in ua_pairs_list:
bond_orders = get_bo(atom_connectivity, du_from_ac, valences, ua_pairs, use_graph=use_graph)
status = bo_is_ok(bond_orders, atom_connectivity, charge, du_from_ac, atomic_valence_electrons, atoms, valences, allow_charged_fragments=allow_charged_fragments)
charge_ok = is_charge_ok(bond_orders, charge, atomic_valence_electrons, atoms, allow_charged_fragments=allow_charged_fragments)
if status:
(bond_orders, atomic_valence_electrons_) = (bond_orders, atomic_valence_electrons)
elif bond_orders.sum() >= best_bo.sum() and valences_not_too_large(bond_orders, valences) and charge_ok:
best_bo = bond_orders.copy()
(bond_orders, atomic_valence_electrons_) = (best_bo, atomic_valence_electrons)
if bond_orders is None or atomic_valence_electrons_ is None:
return None
l1 = len(bond_orders)
l2 = len(atoms)
bo_valences = list(bond_orders.sum(axis=1))
if l1 != l2:
logger.warning(f'sizes of adjMat ({l1:d}) and Atoms {l2:d} differ.')
mol = None
rw_mol = Chem.RWMol(mol)
bond_type_dict = {1: Chem.BondType.SINGLE, 2: Chem.BondType.DOUBLE, 3: Chem.BondType.TRIPLE}
for i in range(l1):
for j in range(i + 1, l1):
bond_orders = int(round(bond_orders[i, j]))
if bond_orders == 0:
continue
bt = bond_type_dict.get(bond_orders, Chem.BondType.SINGLE)
rw_mol.AddBond(i, j, bt)
mol = rw_mol.GetMol()
if allow_charged_fragments:
mol = set_atomic_charges(mol, atoms, atomic_valence_electrons_, bo_valences, bond_orders, charge)
else:
mol = set_atomic_radicals(mol, atoms, atomic_valence_electrons_, bo_valences)
mol = mol
if mol is None:
return None
if Chem.GetFormalCharge(mol) != charge:
return []
mols = rdchem.ResonanceMolSupplier(mol, Chem.UNCONSTRAINED_CATIONS, Chem.UNCONSTRAINED_ANIONS)
mols = [mol for mol in mols]
return mols
|
ARC
|
positive
|
def _create_(cls, class_name, names=None, module=None, type=None, start=1):
"""Convenience method to create a new Enum class.
`names` can be:
* A string containing member names, separated either with spaces or
commas. Values are auto-numbered from 1.
* An iterable of member names. Values are auto-numbered from 1.
* An iterable of (member name, value) pairs.
* A mapping of member name -> value.
"""
if pyver < 3.0:
if isinstance(class_name, unicode):
try:
class_name = class_name.encode('ascii')
except UnicodeEncodeError:
raise TypeError('%r is not representable in ASCII' % class_name)
metacls = cls.__class__
if type is None:
bases = (cls,)
else:
bases = (type, cls)
classdict = metacls.__prepare__(class_name, bases)
_order_ = []
if isinstance(names, basestring):
names = names.replace(',', ' ').split()
if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
names = [(e, i + start) for (i, e) in enumerate(names)]
item = None
for item in names:
if isinstance(item, basestring):
(member_name, member_value) = (item, names[item])
else:
(member_name, member_value) = item
classdict[member_name] = member_value
_order_.append(member_name)
if not isinstance(item, basestring):
classdict['_order_'] = ' '.join(_order_)
enum_class = metacls.__new__(metacls, class_name, bases, classdict)
if module is None:
try:
module = _sys._getframe(2).f_globals['__name__']
except (AttributeError, ValueError):
pass
if module is None:
<DeepExtract>
def _break_on_call_reduce(self, protocol=None):
raise TypeError('%r cannot be pickled' % self)
enum_class.__reduce_ex__ = _break_on_call_reduce
enum_class.__module__ = '<unknown>'
</DeepExtract>
else:
enum_class.__module__ = module
return enum_class
|
def _create_(cls, class_name, names=None, module=None, type=None, start=1):
"""Convenience method to create a new Enum class.
`names` can be:
* A string containing member names, separated either with spaces or
commas. Values are auto-numbered from 1.
* An iterable of member names. Values are auto-numbered from 1.
* An iterable of (member name, value) pairs.
* A mapping of member name -> value.
"""
if pyver < 3.0:
if isinstance(class_name, unicode):
try:
class_name = class_name.encode('ascii')
except UnicodeEncodeError:
raise TypeError('%r is not representable in ASCII' % class_name)
metacls = cls.__class__
if type is None:
bases = (cls,)
else:
bases = (type, cls)
classdict = metacls.__prepare__(class_name, bases)
_order_ = []
if isinstance(names, basestring):
names = names.replace(',', ' ').split()
if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
names = [(e, i + start) for (i, e) in enumerate(names)]
item = None
for item in names:
if isinstance(item, basestring):
(member_name, member_value) = (item, names[item])
else:
(member_name, member_value) = item
classdict[member_name] = member_value
_order_.append(member_name)
if not isinstance(item, basestring):
classdict['_order_'] = ' '.join(_order_)
enum_class = metacls.__new__(metacls, class_name, bases, classdict)
if module is None:
try:
module = _sys._getframe(2).f_globals['__name__']
except (AttributeError, ValueError):
pass
if module is None:
def _break_on_call_reduce(self, protocol=None):
raise TypeError('%r cannot be pickled' % self)
enum_class.__reduce_ex__ = _break_on_call_reduce
enum_class.__module__ = '<unknown>'
else:
enum_class.__module__ = module
return enum_class
|
Deviot
|
positive
|