| before | after | repo | type |
|---|---|---|---|
def __call__(self, value, serializer_field):
field_name = serializer_field.source_attrs[-1]
instance = getattr(serializer_field.parent, 'instance', None)
queryset = self.queryset
queryset = self.filter_queryset(value, queryset, field_name)
<DeepExtract>
if instance is not None:
queryset = queryset.filter(pk__ne=instance.pk)
queryset = queryset
</DeepExtract>
if queryset.first():
raise ValidationError(self.message.format())
|
def __call__(self, value, serializer_field):
field_name = serializer_field.source_attrs[-1]
instance = getattr(serializer_field.parent, 'instance', None)
queryset = self.queryset
queryset = self.filter_queryset(value, queryset, field_name)
if instance is not None:
queryset = queryset.filter(pk__ne=instance.pk)
queryset = queryset
if queryset.first():
raise ValidationError(self.message.format())
|
django-rest-framework-mongoengine
|
positive
|
def configure_yarn_repo(os_type, arch_type):
comment = 'configure_yarn'
if os_type == OSType.LINUX:
<DeepExtract>
step = comment + ': add key'
stdout_lines = []
if not step:
step = inspect.currentframe().f_back.f_code.co_name
if step:
print(' ' * 110, end='\r')
print('\x1b[1m%s\x1b[0m...\n' % step, end='\r')
p = subprocess.Popen('curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -', stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
for line in iter(p.stdout.readline, b''):
stdout_lines.append(line.decode('utf-8'))
sys.stdout.write('%s\r' % line.decode('utf-8')[:-1].rstrip())
sys.stdout.flush()
sys.stdout.write('\x1b[2K')
sys.stdout.write('\x1b[1A')
p.communicate()
if p.returncode != 0:
if step:
sys.stdout.write('\x1b[1m%s\x1b[0m... [ \x1b[93mFAILED\x1b[0m ]\n' % step)
for line in stdout_lines:
sys.stdout.write(f'{line}\n')
raise CalledProcessError(p.returncode, 'curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -')
if step:
sys.stdout.write('\x1b[1m%s\x1b[0m... [ \x1b[92mOK\x1b[0m ]\n' % step)
</DeepExtract>
<DeepExtract>
step = comment + ': add repo'
stdout_lines = []
if not step:
step = inspect.currentframe().f_back.f_code.co_name
if step:
print(' ' * 110, end='\r')
print('\x1b[1m%s\x1b[0m...\n' % step, end='\r')
p = subprocess.Popen('echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list', stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
for line in iter(p.stdout.readline, b''):
stdout_lines.append(line.decode('utf-8'))
sys.stdout.write('%s\r' % line.decode('utf-8')[:-1].rstrip())
sys.stdout.flush()
sys.stdout.write('\x1b[2K')
sys.stdout.write('\x1b[1A')
p.communicate()
if p.returncode != 0:
if step:
sys.stdout.write('\x1b[1m%s\x1b[0m... [ \x1b[93mFAILED\x1b[0m ]\n' % step)
for line in stdout_lines:
sys.stdout.write(f'{line}\n')
raise CalledProcessError(p.returncode, 'echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list')
if step:
sys.stdout.write('\x1b[1m%s\x1b[0m... [ \x1b[92mOK\x1b[0m ]\n' % step)
return (p.returncode, stdout_lines)
</DeepExtract>
|
def configure_yarn_repo(os_type, arch_type):
comment = 'configure_yarn'
if os_type == OSType.LINUX:
step = comment + ': add key'
stdout_lines = []
if not step:
step = inspect.currentframe().f_back.f_code.co_name
if step:
print(' ' * 110, end='\r')
print('\x1b[1m%s\x1b[0m...\n' % step, end='\r')
p = subprocess.Popen('curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -', stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
for line in iter(p.stdout.readline, b''):
stdout_lines.append(line.decode('utf-8'))
sys.stdout.write('%s\r' % line.decode('utf-8')[:-1].rstrip())
sys.stdout.flush()
sys.stdout.write('\x1b[2K')
sys.stdout.write('\x1b[1A')
p.communicate()
if p.returncode != 0:
if step:
sys.stdout.write('\x1b[1m%s\x1b[0m... [ \x1b[93mFAILED\x1b[0m ]\n' % step)
for line in stdout_lines:
sys.stdout.write(f'{line}\n')
raise CalledProcessError(p.returncode, 'curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -')
if step:
sys.stdout.write('\x1b[1m%s\x1b[0m... [ \x1b[92mOK\x1b[0m ]\n' % step)
step = comment + ': add repo'
stdout_lines = []
if not step:
step = inspect.currentframe().f_back.f_code.co_name
if step:
print(' ' * 110, end='\r')
print('\x1b[1m%s\x1b[0m...\n' % step, end='\r')
p = subprocess.Popen('echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list', stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
for line in iter(p.stdout.readline, b''):
stdout_lines.append(line.decode('utf-8'))
sys.stdout.write('%s\r' % line.decode('utf-8')[:-1].rstrip())
sys.stdout.flush()
sys.stdout.write('\x1b[2K')
sys.stdout.write('\x1b[1A')
p.communicate()
if p.returncode != 0:
if step:
sys.stdout.write('\x1b[1m%s\x1b[0m... [ \x1b[93mFAILED\x1b[0m ]\n' % step)
for line in stdout_lines:
sys.stdout.write(f'{line}\n')
raise CalledProcessError(p.returncode, 'echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list')
if step:
sys.stdout.write('\x1b[1m%s\x1b[0m... [ \x1b[92mOK\x1b[0m ]\n' % step)
return (p.returncode, stdout_lines)
|
aimmo
|
positive
|
def sync_sslcertkey_bindings(self):
log('ModuleExecutor.sync_sslcertkey_bindings()')
bound_lbvserver = None
result = self.fetcher.get('sslvserver_sslcertkey_binding', self.module.params['name'])
if result['nitro_errorcode'] in [258, 461, 1544]:
bound_sslcertkeys = []
elif result['nitro_errorcode'] != 0:
raise NitroException(errorcode=result['nitro_errorcode'], message=result.get('nitro_message'), severity=result.get('nitro_severity'))
elif 'sslvserver_sslcertkey_binding' in result['data']:
bound_sslcertkeys = result['data']['sslvserver_sslcertkey_binding']
else:
bound_sslcertkeys = []
configured_sslcertkey = self.module.params.get('ssl_certkey')
found_configured = False
for binding in bound_sslcertkeys:
if binding['certkeyname'] != configured_sslcertkey:
self.module_result['changed'] = True
self.prepared_list.append('Delete ssl_certkey binding %s' % binding['certkeyname'])
if not self.module.check_mode:
<DeepExtract>
log('ModuleExecutor.delete_sslcertkey_binding()')
args = {'certkeyname': binding['certkeyname']}
result = self.fetcher.delete(resource='sslvserver_sslcertkey_binding', id=self.module.params['name'], args=args)
</DeepExtract>
else:
found_configured = True
if configured_sslcertkey is not None and (not found_configured):
self.module_result['changed'] = True
self.prepared_list.append('Add ssl_certkey binding %s' % configured_sslcertkey)
if not self.module.check_mode:
<DeepExtract>
log('ModuleExecutor.add_sslcertkey_binding()')
put_data = {'sslvserver_sslcertkey_binding': {'vservername': self.module.params['name'], 'certkeyname': configured_sslcertkey}}
log('put data %s' % put_data)
result = self.fetcher.put(put_data=put_data, resource='sslvserver_sslcertkey_binding')
log('result of put: %s' % result)
if result['nitro_errorcode'] != 0:
raise NitroException(errorcode=result['nitro_errorcode'], message=result.get('nitro_message'), severity=result.get('nitro_severity'))
</DeepExtract>
pass
|
def sync_sslcertkey_bindings(self):
log('ModuleExecutor.sync_sslcertkey_bindings()')
bound_lbvserver = None
result = self.fetcher.get('sslvserver_sslcertkey_binding', self.module.params['name'])
if result['nitro_errorcode'] in [258, 461, 1544]:
bound_sslcertkeys = []
elif result['nitro_errorcode'] != 0:
raise NitroException(errorcode=result['nitro_errorcode'], message=result.get('nitro_message'), severity=result.get('nitro_severity'))
elif 'sslvserver_sslcertkey_binding' in result['data']:
bound_sslcertkeys = result['data']['sslvserver_sslcertkey_binding']
else:
bound_sslcertkeys = []
configured_sslcertkey = self.module.params.get('ssl_certkey')
found_configured = False
for binding in bound_sslcertkeys:
if binding['certkeyname'] != configured_sslcertkey:
self.module_result['changed'] = True
self.prepared_list.append('Delete ssl_certkey binding %s' % binding['certkeyname'])
if not self.module.check_mode:
log('ModuleExecutor.delete_sslcertkey_binding()')
args = {'certkeyname': binding['certkeyname']}
result = self.fetcher.delete(resource='sslvserver_sslcertkey_binding', id=self.module.params['name'], args=args)
else:
found_configured = True
if configured_sslcertkey is not None and (not found_configured):
self.module_result['changed'] = True
self.prepared_list.append('Add ssl_certkey binding %s' % configured_sslcertkey)
if not self.module.check_mode:
log('ModuleExecutor.add_sslcertkey_binding()')
put_data = {'sslvserver_sslcertkey_binding': {'vservername': self.module.params['name'], 'certkeyname': configured_sslcertkey}}
log('put data %s' % put_data)
result = self.fetcher.put(put_data=put_data, resource='sslvserver_sslcertkey_binding')
log('result of put: %s' % result)
if result['nitro_errorcode'] != 0:
raise NitroException(errorcode=result['nitro_errorcode'], message=result.get('nitro_message'), severity=result.get('nitro_severity'))
pass
|
citrix-adc-ansible-modules
|
positive
|
def forward(self, prev_output_tokens, encoder_out_dict):
encoder_out = encoder_out_dict['encoder']['encoder_out']
trained_encoder_out = encoder_out_dict['pretrained'] if self.pretrained else None
<DeepExtract>
(encoder_a, encoder_b) = encoder_out
encoder_a = encoder_a.transpose(0, 1).contiguous()
encoder_b = encoder_b.transpose(0, 1).contiguous()
result = (encoder_a, encoder_b)
(encoder_a, encoder_b) = result
</DeepExtract>
positions = self.embed_positions(prev_output_tokens)
x = self.embed_tokens(prev_output_tokens) + positions
x = F.dropout(x, p=self.dropout, training=self.training)
target_embedding = x.transpose(0, 1)
x = self.fc1(x)
x = x.transpose(0, 1)
avg_attn_scores = None
for (proj, conv, attention, selfattention, attproj) in zip(self.projections, self.convolutions, self.attention, self.selfattention, self.attproj):
residual = x if proj is None else proj(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = conv(x)
x = F.glu(x, dim=2)
if attention is not None:
r = x
(x, attn_scores) = attention(attproj(x) + target_embedding, encoder_a, encoder_b)
x = x + r
if not self.training and self.need_attn:
if avg_attn_scores is None:
avg_attn_scores = attn_scores
else:
avg_attn_scores.add_(attn_scores)
if selfattention is not None:
x = selfattention(x)
x = (x + residual) * math.sqrt(0.5)
x = x.transpose(0, 1)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
if not self.pretrained:
x = self.fc3(x)
if self.pretrained:
(trained_x, _) = self.pretrained_decoder.forward(prev_output_tokens, trained_encoder_out)
y = torch.cat([x, self.pretrained_outputs['out']], dim=-1)
gate1 = self.gate1(y)
gate2 = self.gate2(y)
gated_x1 = gate1 * x
gated_x2 = gate2 * self.pretrained_outputs['out']
fusion = torch.cat([gated_x1, gated_x2], dim=-1)
fusion = self.joining(fusion)
fusion_output = self.fc3(fusion)
return (fusion_output, avg_attn_scores)
else:
return (x, avg_attn_scores)
|
def forward(self, prev_output_tokens, encoder_out_dict):
encoder_out = encoder_out_dict['encoder']['encoder_out']
trained_encoder_out = encoder_out_dict['pretrained'] if self.pretrained else None
(encoder_a, encoder_b) = encoder_out
encoder_a = encoder_a.transpose(0, 1).contiguous()
encoder_b = encoder_b.transpose(0, 1).contiguous()
result = (encoder_a, encoder_b)
(encoder_a, encoder_b) = result
positions = self.embed_positions(prev_output_tokens)
x = self.embed_tokens(prev_output_tokens) + positions
x = F.dropout(x, p=self.dropout, training=self.training)
target_embedding = x.transpose(0, 1)
x = self.fc1(x)
x = x.transpose(0, 1)
avg_attn_scores = None
for (proj, conv, attention, selfattention, attproj) in zip(self.projections, self.convolutions, self.attention, self.selfattention, self.attproj):
residual = x if proj is None else proj(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = conv(x)
x = F.glu(x, dim=2)
if attention is not None:
r = x
(x, attn_scores) = attention(attproj(x) + target_embedding, encoder_a, encoder_b)
x = x + r
if not self.training and self.need_attn:
if avg_attn_scores is None:
avg_attn_scores = attn_scores
else:
avg_attn_scores.add_(attn_scores)
if selfattention is not None:
x = selfattention(x)
x = (x + residual) * math.sqrt(0.5)
x = x.transpose(0, 1)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
if not self.pretrained:
x = self.fc3(x)
if self.pretrained:
(trained_x, _) = self.pretrained_decoder.forward(prev_output_tokens, trained_encoder_out)
y = torch.cat([x, self.pretrained_outputs['out']], dim=-1)
gate1 = self.gate1(y)
gate2 = self.gate2(y)
gated_x1 = gate1 * x
gated_x2 = gate2 * self.pretrained_outputs['out']
fusion = torch.cat([gated_x1, gated_x2], dim=-1)
fusion = self.joining(fusion)
fusion_output = self.fc3(fusion)
return (fusion_output, avg_attn_scores)
else:
return (x, avg_attn_scores)
|
Abstractive-Text-Summarization
|
positive
|
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
<DeepExtract>
u.scheme = u.scheme or 'http'
u.port = u.port or port_by_scheme.get(u.scheme, 80)
pool_key = (u.scheme, u.host, u.port)
with self.pools.lock:
pool = self.pools.get(pool_key)
if pool is None:
pool = self._new_pool(u.scheme, u.host, u.port)
self.pools[pool_key] = pool
conn = pool
</DeepExtract>
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
if self.proxy is not None and u.scheme == 'http':
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
redirect_location = urljoin(url, redirect_location)
if response.status == 303:
method = 'GET'
log.info('Redirecting %s -> %s' % (url, redirect_location))
kw['retries'] = kw.get('retries', 3) - 1
kw['redirect'] = redirect
return self.urlopen(method, redirect_location, **kw)
|
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
u.scheme = u.scheme or 'http'
u.port = u.port or port_by_scheme.get(u.scheme, 80)
pool_key = (u.scheme, u.host, u.port)
with self.pools.lock:
pool = self.pools.get(pool_key)
if pool is None:
pool = self._new_pool(u.scheme, u.host, u.port)
self.pools[pool_key] = pool
conn = pool
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
if self.proxy is not None and u.scheme == 'http':
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
redirect_location = urljoin(url, redirect_location)
if response.status == 303:
method = 'GET'
log.info('Redirecting %s -> %s' % (url, redirect_location))
kw['retries'] = kw.get('retries', 3) - 1
kw['redirect'] = redirect
return self.urlopen(method, redirect_location, **kw)
|
acousticbrainz-client
|
positive
|
def get_updates(self, params, constraints, loss):
<DeepExtract>
grads = T.grad(loss, params)
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = T.sqrt(sum([T.sum(g ** 2) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
if hasattr(self, 'clipvalue') and self.clipvalue > 0:
grads = [T.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
grads = grads
</DeepExtract>
accumulators = [shared_zeros(p.get_value().shape) for p in params]
self.updates = []
for (p, g, a, c) in zip(params, grads, accumulators, constraints):
new_a = a + g ** 2
self.updates.append((a, new_a))
new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)
self.updates.append((p, c(new_p)))
return self.updates
|
def get_updates(self, params, constraints, loss):
grads = T.grad(loss, params)
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = T.sqrt(sum([T.sum(g ** 2) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
if hasattr(self, 'clipvalue') and self.clipvalue > 0:
grads = [T.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
grads = grads
accumulators = [shared_zeros(p.get_value().shape) for p in params]
self.updates = []
for (p, g, a, c) in zip(params, grads, accumulators, constraints):
new_a = a + g ** 2
self.updates.append((a, new_a))
new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)
self.updates.append((p, c(new_p)))
return self.updates
|
deep-coref
|
positive
|
def test_load_config_passed(self):
name = 'setup_two_arguments'
<DeepExtract>
plugin_manager = self.plugin_manager
# The assert_* flags below were keyword arguments of the extracted helper; True is assumed here.
assert_commands_is_empty = True
assert_callbacks_is_empty = True
assert_blacklist_is_empty = True
failed_plugins = plugin_manager.load(name)
if not isinstance(name, list):
name = [name]
assert failed_plugins == []
assert len(list(plugin_manager.plugins.keys())) == len(name)
for name in name:
class_ = 'Test'
name_pieces = name.split('_')
for name_piece in name_pieces:
class_ += name_piece[0].upper() + name_piece[1:].lower()
class_ += 'Plugin'
assert name in list(plugin_manager.plugins.keys())
assert plugin_manager.plugins[name]['name'] == name
assert isinstance(plugin_manager.plugins[name]['instance'], object)
if assert_commands_is_empty:
assert plugin_manager.plugins[name]['commands'] == []
if assert_callbacks_is_empty:
assert plugin_manager.plugins[name]['callbacks'] == []
assert plugin_manager.plugins[name]['callback_ids'] == {}
if False:
assert plugin_manager.plugins[name]['config'] is None
if assert_blacklist_is_empty:
assert plugin_manager.plugins[name]['blacklist'] == []
</DeepExtract>
assert self.plugin_manager.plugins[name]['instance'].cardinal is self.cardinal
assert self.plugin_manager.plugins[name]['instance'].config == {'test': True}
|
def test_load_config_passed(self):
name = 'setup_two_arguments'
plugin_manager = self.plugin_manager
# The assert_* flags below were keyword arguments of the extracted helper; True is assumed here.
assert_commands_is_empty = True
assert_callbacks_is_empty = True
assert_blacklist_is_empty = True
failed_plugins = plugin_manager.load(name)
if not isinstance(name, list):
name = [name]
assert failed_plugins == []
assert len(list(plugin_manager.plugins.keys())) == len(name)
for name in name:
class_ = 'Test'
name_pieces = name.split('_')
for name_piece in name_pieces:
class_ += name_piece[0].upper() + name_piece[1:].lower()
class_ += 'Plugin'
assert name in list(plugin_manager.plugins.keys())
assert plugin_manager.plugins[name]['name'] == name
assert isinstance(plugin_manager.plugins[name]['instance'], object)
if assert_commands_is_empty:
assert plugin_manager.plugins[name]['commands'] == []
if assert_callbacks_is_empty:
assert plugin_manager.plugins[name]['callbacks'] == []
assert plugin_manager.plugins[name]['callback_ids'] == {}
if False:
assert plugin_manager.plugins[name]['config'] is None
if assert_blacklist_is_empty:
assert plugin_manager.plugins[name]['blacklist'] == []
assert self.plugin_manager.plugins[name]['instance'].cardinal is self.cardinal
assert self.plugin_manager.plugins[name]['instance'].config == {'test': True}
|
Cardinal
|
positive
|
def test_execute_delete_control_id(tmp_path: pathlib.Path) -> None:
"""Test execute delete control id."""
<DeepExtract>
_test_init(tmp_path)
(config, section) = _get_config_section(tmp_path, 'test-csv-to-oscal-cd-bp.config')
</DeepExtract>
section['component-definition'] = 'tests/data/csv/component-definitions/bp/component-definition.json'
<DeepExtract>
rows = []
csv_path = pathlib.Path('tests/data/csv/bp.sample.v2.csv')
with open(csv_path, 'r', newline='') as f:
csv_reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for row in csv_reader:
rows.append(row)
rows = rows
</DeepExtract>
assert rows[2][10] == 'sc-7_smt.a sc-7_smt.b sc-7.3 sc-7.4_smt.a sc-7.5 ia-3'
rows[2][10] = rows[2][10].replace(' ia-3', '')
with mock.patch('trestle.tasks.csv_to_oscal_cd.csv.reader') as mock_csv_reader:
mock_csv_reader.return_value = rows
tgt = csv_to_oscal_cd.CsvToOscalComponentDefinition(section)
retval = tgt.execute()
assert retval == TaskOutcome.SUCCESS
fp = pathlib.Path(tmp_path) / 'component-definition.json'
cd = ComponentDefinition.oscal_read(fp)
component = cd.components[0]
control_implementation = component.control_implementations[0]
implemented_requirements = control_implementation.implemented_requirements
assert len(implemented_requirements) == 20
assert implemented_requirements[3].control_id == 'sc-7.5'
assert implemented_requirements[4].control_id == 'ac-6'
|
def test_execute_delete_control_id(tmp_path: pathlib.Path) -> None:
"""Test execute delete control id."""
_test_init(tmp_path)
(config, section) = _get_config_section(tmp_path, 'test-csv-to-oscal-cd-bp.config')
section['component-definition'] = 'tests/data/csv/component-definitions/bp/component-definition.json'
rows = []
csv_path = pathlib.Path('tests/data/csv/bp.sample.v2.csv')
with open(csv_path, 'r', newline='') as f:
csv_reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for row in csv_reader:
rows.append(row)
rows = rows
assert rows[2][10] == 'sc-7_smt.a sc-7_smt.b sc-7.3 sc-7.4_smt.a sc-7.5 ia-3'
rows[2][10] = rows[2][10].replace(' ia-3', '')
with mock.patch('trestle.tasks.csv_to_oscal_cd.csv.reader') as mock_csv_reader:
mock_csv_reader.return_value = rows
tgt = csv_to_oscal_cd.CsvToOscalComponentDefinition(section)
retval = tgt.execute()
assert retval == TaskOutcome.SUCCESS
fp = pathlib.Path(tmp_path) / 'component-definition.json'
cd = ComponentDefinition.oscal_read(fp)
component = cd.components[0]
control_implementation = component.control_implementations[0]
implemented_requirements = control_implementation.implemented_requirements
assert len(implemented_requirements) == 20
assert implemented_requirements[3].control_id == 'sc-7.5'
assert implemented_requirements[4].control_id == 'ac-6'
|
compliance-trestle
|
positive
|
def setup_device(self, rack):
""" Set up macros and rack chain params """
MacrobatParameterRackTemplate.setup_device(self, rack)
<DeepExtract>
rack_chains = {}
if self._track and self._track.devices:
for d in self._track.devices:
if d.class_name.endswith('GroupDevice') and (not d.class_name.startswith('Midi')):
for chain_index in range(len(d.chains)):
c = d.chains[chain_index]
rack_chains[str(chain_index + 1)] = {'VOL': c.mixer_device.volume, 'PAN': c.mixer_device.panning, 'MUTE': c.mixer_device.chain_activator}
break
self._rack = rack_chains
</DeepExtract>
if self._rack:
param_name = self._parent.get_name(rack.name[12:].strip())
for index in range(1, 9):
chain_to_edit = {}
macro = rack.parameters[index]
param = None
if macro.is_enabled:
chain_name = self._parent.get_name(macro.name)
if self._rack.__contains__(chain_name):
chain_to_edit = self._rack[chain_name]
if chain_to_edit.__contains__(param_name):
param = chain_to_edit[param_name]
if param and param.is_enabled:
m_listener = lambda index=index: self.macro_changed(index)
macro.add_value_listener(m_listener)
p_listener = lambda index=index: self.param_changed(index)
param.add_value_listener(p_listener)
self._param_macros[index] = (macro, param)
if IS_LIVE_9:
self._tasks.add(self.get_initial_value)
else:
self._get_initial_value = True
|
def setup_device(self, rack):
""" Set up macros and rack chain params """
MacrobatParameterRackTemplate.setup_device(self, rack)
rack_chains = {}
if self._track and self._track.devices:
for d in self._track.devices:
if d.class_name.endswith('GroupDevice') and (not d.class_name.startswith('Midi')):
for chain_index in range(len(d.chains)):
c = d.chains[chain_index]
rack_chains[str(chain_index + 1)] = {'VOL': c.mixer_device.volume, 'PAN': c.mixer_device.panning, 'MUTE': c.mixer_device.chain_activator}
break
self._rack = rack_chains
if self._rack:
param_name = self._parent.get_name(rack.name[12:].strip())
for index in range(1, 9):
chain_to_edit = {}
macro = rack.parameters[index]
param = None
if macro.is_enabled:
chain_name = self._parent.get_name(macro.name)
if self._rack.__contains__(chain_name):
chain_to_edit = self._rack[chain_name]
if chain_to_edit.__contains__(param_name):
param = chain_to_edit[param_name]
if param and param.is_enabled:
m_listener = lambda index=index: self.macro_changed(index)
macro.add_value_listener(m_listener)
p_listener = lambda index=index: self.param_changed(index)
param.add_value_listener(p_listener)
self._param_macros[index] = (macro, param)
if IS_LIVE_9:
self._tasks.add(self.get_initial_value)
else:
self._get_initial_value = True
|
clyphx-live10
|
positive
|
def generate(self, dimensions, n_samples, random_state=None):
"""Creates samples from Sobol' set.
Parameters
----------
dimensions : list, shape (n_dims,)
List of search space dimensions.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
n_samples : int
The order of the Sobol' sequence. Defines the number of samples.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
Returns
-------
sample : array_like (n_samples, dim)
Sobol' set.
"""
total_n_samples = self.num_generated + n_samples
if not total_n_samples & total_n_samples - 1 == 0:
warnings.warn("The balance properties of Sobol' points require n to be a power of 2. {0} points have been previously generated, then: n={0}+{1}={2}. ".format(self.num_generated, n_samples, total_n_samples))
if self.skip != 0 and total_n_samples > self.skip:
raise ValueError(f'{self.skip} points have been skipped: generating {n_samples} more points would cause the sequence to repeat.')
rng = check_random_state(random_state)
space = Space(dimensions)
n_dim = space.n_dims
transformer = space.get_transformer()
space.set_transformer('normalize')
r = np.full((n_samples, n_dim), np.nan)
seed = self.skip
for j in range(n_samples):
<DeepExtract>
if n_dim != self.dim_num_save:
self.init(n_dim)
seed = int(np.floor(seed))
if seed < 0:
seed = 0
pos_lo0 = 1
if seed == 0:
self.lastq = np.zeros(n_dim)
elif seed == self.seed_save + 1:
pos_lo0 = _bit_lo0(seed)
elif seed <= self.seed_save:
self.seed_save = 0
self.lastq = np.zeros(n_dim)
for seed_temp in range(int(self.seed_save), int(seed)):
pos_lo0 = _bit_lo0(seed_temp)
for i in range(1, n_dim + 1):
self.lastq[i - 1] = np.bitwise_xor(int(self.lastq[i - 1]), int(self.v[i - 1, pos_lo0 - 1]))
pos_lo0 = _bit_lo0(seed)
elif self.seed_save + 1 < seed:
for seed_temp in range(int(self.seed_save + 1), int(seed)):
pos_lo0 = _bit_lo0(seed_temp)
for i in range(1, n_dim + 1):
self.lastq[i - 1] = np.bitwise_xor(int(self.lastq[i - 1]), int(self.v[i - 1, pos_lo0 - 1]))
pos_lo0 = _bit_lo0(seed)
if self.maxcol < pos_lo0:
raise ValueError(f'I4_SOBOL - Fatal error!\n Too many calls!\n MAXCOL = {self.maxcol}\n L = {pos_lo0}\n')
quasi = np.zeros(n_dim)
for i in range(1, n_dim + 1):
quasi[i - 1] = self.lastq[i - 1] * self.recipd
self.lastq[i - 1] = np.bitwise_xor(int(self.lastq[i - 1]), int(self.v[i - 1, pos_lo0 - 1]))
self.seed_save = seed
seed += 1
(r[j, 0:n_dim], seed) = [quasi, seed]
</DeepExtract>
if self.randomize:
<DeepExtract>
rng = check_random_state(rng)
shift = np.repeat(rng.rand(1, r.shape[1]), r.shape[0], axis=0)
r = (r + shift) % 1
</DeepExtract>
r = space.inverse_transform(r)
space.set_transformer(transformer)
self.num_generated += n_samples
return r
|
def generate(self, dimensions, n_samples, random_state=None):
"""Creates samples from Sobol' set.
Parameters
----------
dimensions : list, shape (n_dims,)
List of search space dimensions.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
n_samples : int
The order of the Sobol' sequence. Defines the number of samples.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
Returns
-------
sample : array_like (n_samples, dim)
Sobol' set.
"""
total_n_samples = self.num_generated + n_samples
if not total_n_samples & total_n_samples - 1 == 0:
warnings.warn("The balance properties of Sobol' points require n to be a power of 2. {0} points have been previously generated, then: n={0}+{1}={2}. ".format(self.num_generated, n_samples, total_n_samples))
if self.skip != 0 and total_n_samples > self.skip:
raise ValueError(f'{self.skip} points have been skipped: generating {n_samples} more points would cause the sequence to repeat.')
rng = check_random_state(random_state)
space = Space(dimensions)
n_dim = space.n_dims
transformer = space.get_transformer()
space.set_transformer('normalize')
r = np.full((n_samples, n_dim), np.nan)
seed = self.skip
for j in range(n_samples):
if n_dim != self.dim_num_save:
self.init(n_dim)
seed = int(np.floor(seed))
if seed < 0:
seed = 0
pos_lo0 = 1
if seed == 0:
self.lastq = np.zeros(n_dim)
elif seed == self.seed_save + 1:
pos_lo0 = _bit_lo0(seed)
elif seed <= self.seed_save:
self.seed_save = 0
self.lastq = np.zeros(n_dim)
for seed_temp in range(int(self.seed_save), int(seed)):
pos_lo0 = _bit_lo0(seed_temp)
for i in range(1, n_dim + 1):
self.lastq[i - 1] = np.bitwise_xor(int(self.lastq[i - 1]), int(self.v[i - 1, pos_lo0 - 1]))
pos_lo0 = _bit_lo0(seed)
elif self.seed_save + 1 < seed:
for seed_temp in range(int(self.seed_save + 1), int(seed)):
pos_lo0 = _bit_lo0(seed_temp)
for i in range(1, n_dim + 1):
self.lastq[i - 1] = np.bitwise_xor(int(self.lastq[i - 1]), int(self.v[i - 1, pos_lo0 - 1]))
pos_lo0 = _bit_lo0(seed)
if self.maxcol < pos_lo0:
raise ValueError(f'I4_SOBOL - Fatal error!\n Too many calls!\n MAXCOL = {self.maxcol}\n L = {pos_lo0}\n')
quasi = np.zeros(n_dim)
for i in range(1, n_dim + 1):
quasi[i - 1] = self.lastq[i - 1] * self.recipd
self.lastq[i - 1] = np.bitwise_xor(int(self.lastq[i - 1]), int(self.v[i - 1, pos_lo0 - 1]))
self.seed_save = seed
seed += 1
(r[j, 0:n_dim], seed) = [quasi, seed]
if self.randomize:
rng = check_random_state(rng)
shift = np.repeat(rng.rand(1, r.shape[1]), r.shape[0], axis=0)
r = (r + shift) % 1
r = space.inverse_transform(r)
space.set_transformer(transformer)
self.num_generated += n_samples
return r
|
deephyper
|
positive
|
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
input_files = []
for input_pattern in FLAGS.input_file.split(','):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info('*** Reading from input files ***')
for input_file in input_files:
tf.logging.info(' %s', input_file)
rng = random.Random(FLAGS.random_seed)
<DeepExtract>
all_documents = [[]]
for input_file in input_files:
with tf.gfile.GFile(input_file, 'r') as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(FLAGS.dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(create_instances_from_document(all_documents, document_index, FLAGS.max_seq_length, FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq, vocab_words, rng))
rng.shuffle(instances)
instances = instances
</DeepExtract>
output_files = FLAGS.output_file.split(',')
tf.logging.info('*** Writing to output files ***')
for output_file in output_files:
tf.logging.info(' %s', output_file)
<DeepExtract>
writers = []
for output_file in output_files:
writers.append(tf.python_io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
for (inst_index, instance) in enumerate(instances):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= FLAGS.max_seq_length
while len(input_ids) < FLAGS.max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == FLAGS.max_seq_length
assert len(input_mask) == FLAGS.max_seq_length
assert len(segment_ids) == FLAGS.max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < FLAGS.max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features = collections.OrderedDict()
features['input_ids'] = create_int_feature(input_ids)
features['input_mask'] = create_int_feature(input_mask)
features['segment_ids'] = create_int_feature(segment_ids)
features['masked_lm_positions'] = create_int_feature(masked_lm_positions)
features['masked_lm_ids'] = create_int_feature(masked_lm_ids)
features['masked_lm_weights'] = create_float_feature(masked_lm_weights)
features['next_sentence_labels'] = create_int_feature([next_sentence_label])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 20:
tf.logging.info('*** Example ***')
tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in instance.tokens]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
tf.logging.info('%s: %s' % (feature_name, ' '.join([str(x) for x in values])))
for writer in writers:
writer.close()
tf.logging.info('Wrote %d total instances', total_written)
</DeepExtract>
|
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
input_files = []
for input_pattern in FLAGS.input_file.split(','):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info('*** Reading from input files ***')
for input_file in input_files:
tf.logging.info(' %s', input_file)
rng = random.Random(FLAGS.random_seed)
all_documents = [[]]
for input_file in input_files:
with tf.gfile.GFile(input_file, 'r') as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(FLAGS.dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(create_instances_from_document(all_documents, document_index, FLAGS.max_seq_length, FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq, vocab_words, rng))
rng.shuffle(instances)
instances = instances
output_files = FLAGS.output_file.split(',')
tf.logging.info('*** Writing to output files ***')
for output_file in output_files:
tf.logging.info(' %s', output_file)
writers = []
for output_file in output_files:
writers.append(tf.python_io.TFRecordWriter(output_file))
writer_index = 0
total_written = 0
for (inst_index, instance) in enumerate(instances):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= FLAGS.max_seq_length
while len(input_ids) < FLAGS.max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == FLAGS.max_seq_length
assert len(input_mask) == FLAGS.max_seq_length
assert len(segment_ids) == FLAGS.max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < FLAGS.max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features = collections.OrderedDict()
features['input_ids'] = create_int_feature(input_ids)
features['input_mask'] = create_int_feature(input_mask)
features['segment_ids'] = create_int_feature(segment_ids)
features['masked_lm_positions'] = create_int_feature(masked_lm_positions)
features['masked_lm_ids'] = create_int_feature(masked_lm_ids)
features['masked_lm_weights'] = create_float_feature(masked_lm_weights)
features['next_sentence_labels'] = create_int_feature([next_sentence_label])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writers[writer_index].write(tf_example.SerializeToString())
writer_index = (writer_index + 1) % len(writers)
total_written += 1
if inst_index < 20:
tf.logging.info('*** Example ***')
tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in instance.tokens]))
for feature_name in features.keys():
feature = features[feature_name]
values = []
if feature.int64_list.value:
values = feature.int64_list.value
elif feature.float_list.value:
values = feature.float_list.value
tf.logging.info('%s: %s' % (feature_name, ' '.join([str(x) for x in values])))
for writer in writers:
writer.close()
tf.logging.info('Wrote %d total instances', total_written)
|
ChineseEHRBert
|
positive
|
def test_find(self):
hunter = StringHunter(self.data, self.addr)
with self.subTest('Any string @ 0'):
result = hunter.find(None, min_len=5, max_len=35)
<DeepExtract>
exp_off = self.expected[0][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[0][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
with self.subTest('Any string @ 297'):
result = hunter.find(None, min_len=3, max_len=49, start=297)
<DeepExtract>
exp_off = self.expected[3][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[3][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
with self.subTest('Any string @ 332 w/ no bounds'):
result = hunter.find(None, start=332)
<DeepExtract>
exp_off = self.expected[4][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[4][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
result = hunter.find(None, start=332, min_len=-1)
<DeepExtract>
exp_off = self.expected[4][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[4][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
result = hunter.find(None, start=332, max_len=-1)
<DeepExtract>
exp_off = self.expected[4][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[4][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
result = hunter.find(None, start=332, min_len=-1, max_len=-1)
<DeepExtract>
exp_off = self.expected[4][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[4][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
with self.subTest('Any string @ 332 w/ bounds: (min=48, max=100)'):
result = hunter.find(None, min_len=48, max_len=100, start=332)
<DeepExtract>
exp_off = self.expected[4][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[4][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
with self.subTest('Any string @ 332 w/ bounds (min=48, max=48)'):
result = hunter.find(None, min_len=48, max_len=48, start=332)
<DeepExtract>
exp_off = self.expected[4][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[4][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
with self.subTest('Overconstrained - no results'):
with self.assertRaises(HunterResultNotFound):
result = hunter.find(None, min_len=49, start=332)
<DeepExtract>
exp_off = self.expected[4][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[4][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
with self.subTest('Custom pattern, str'):
result = hunter.find('[a-zA-Z ]+world!?')
<DeepExtract>
exp_off = self.expected[1][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[1][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
with self.subTest('Custom pattern, bytes'):
result = hunter.find(b'([a-zA-Z ]+world!?|0123456789)')
<DeepExtract>
exp_off = self.expected[1][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[1][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
result = hunter.find(b'([a-zA-Z ]+world!?|0123456789)', start=300)
<DeepExtract>
exp_off = self.expected[-1][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[-1][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
with self.subTest('Start offset=100'):
hunter = StringHunter(self.data, self.addr, start_offset=100)
result = hunter.find(None)
<DeepExtract>
exp_off = self.expected[1][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[1][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
with self.subTest('Start offset=100, End offset=145'):
hunter = StringHunter(self.data, self.addr, start_offset=100, end_offset=145)
result = hunter.find(None)
<DeepExtract>
exp_off = self.expected[1][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[1][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
hunter = StringHunter(self.data, self.addr, start_offset=100, end_offset=145)
result = hunter.find(None, min_len=29, max_len=29)
<DeepExtract>
exp_off = self.expected[1][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[1][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
with self.subTest('Start offset=100, End offset=144'):
with self.assertRaises(HunterResultNotFound):
hunter = StringHunter(self.data, self.addr, start_offset=100, end_offset=144)
result = hunter.find(None)
<DeepExtract>
exp_off = self.expected[1][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[1][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
</DeepExtract>
|
def test_find(self):
hunter = StringHunter(self.data, self.addr)
with self.subTest('Any string @ 0'):
result = hunter.find(None, min_len=5, max_len=35)
exp_off = self.expected[0][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[0][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
with self.subTest('Any string @ 297'):
result = hunter.find(None, min_len=3, max_len=49, start=297)
exp_off = self.expected[3][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[3][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
with self.subTest('Any string @ 332 w/ no bounds'):
result = hunter.find(None, start=332)
exp_off = self.expected[4][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[4][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
result = hunter.find(None, start=332, min_len=-1)
exp_off = self.expected[4][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[4][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
result = hunter.find(None, start=332, max_len=-1)
exp_off = self.expected[4][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[4][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
result = hunter.find(None, start=332, min_len=-1, max_len=-1)
exp_off = self.expected[4][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[4][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
with self.subTest('Any string @ 332 w/ bounds: (min=48, max=100)'):
result = hunter.find(None, min_len=48, max_len=100, start=332)
exp_off = self.expected[4][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[4][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
with self.subTest('Any string @ 332 w/ bounds (min=48, max=48)'):
result = hunter.find(None, min_len=48, max_len=48, start=332)
exp_off = self.expected[4][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[4][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
with self.subTest('Overconstrained - no results'):
with self.assertRaises(HunterResultNotFound):
result = hunter.find(None, min_len=49, start=332)
exp_off = self.expected[4][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[4][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
with self.subTest('Custom pattern, str'):
result = hunter.find('[a-zA-Z ]+world!?')
exp_off = self.expected[1][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[1][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
with self.subTest('Custom pattern, bytes'):
result = hunter.find(b'([a-zA-Z ]+world!?|0123456789)')
exp_off = self.expected[1][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[1][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
result = hunter.find(b'([a-zA-Z ]+world!?|0123456789)', start=300)
exp_off = self.expected[-1][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[-1][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
with self.subTest('Start offset=100'):
hunter = StringHunter(self.data, self.addr, start_offset=100)
result = hunter.find(None)
exp_off = self.expected[1][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[1][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
with self.subTest('Start offset=100, End offset=145'):
hunter = StringHunter(self.data, self.addr, start_offset=100, end_offset=145)
result = hunter.find(None)
exp_off = self.expected[1][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[1][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
hunter = StringHunter(self.data, self.addr, start_offset=100, end_offset=145)
result = hunter.find(None, min_len=29, max_len=29)
exp_off = self.expected[1][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[1][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
with self.subTest('Start offset=100, End offset=144'):
with self.assertRaises(HunterResultNotFound):
hunter = StringHunter(self.data, self.addr, start_offset=100, end_offset=144)
result = hunter.find(None)
exp_off = self.expected[1][0]
exp_addr = self.addr + exp_off
exp_len = len(self.expected[1][1])
self.assertTrue(isinstance(result, dict))
self.assertEqual(result['src_off'], exp_off)
self.assertEqual(result['src_addr'], exp_addr)
self.assertEqual(result['src_size'], exp_len)
|
depthcharge
|
positive
|
def matrix_to_quaternion(matrix):
"""
Convert rotations given as rotation matrices to quaternions.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
Returns:
quaternions with real part first, as tensor of shape (..., 4).
"""
if matrix.size(-1) != 3 or matrix.size(-2) != 3:
raise ValueError(f'Invalid rotation matrix shape f{matrix.shape}.')
m00 = matrix[..., 0, 0]
m11 = matrix[..., 1, 1]
m22 = matrix[..., 2, 2]
o0 = 0.5 * _sqrt_positive_part(1 + m00 + m11 + m22)
x = 0.5 * _sqrt_positive_part(1 + m00 - m11 - m22)
y = 0.5 * _sqrt_positive_part(1 - m00 + m11 - m22)
z = 0.5 * _sqrt_positive_part(1 - m00 - m11 + m22)
<DeepExtract>
signs_differ = (x < 0) != (matrix[..., 2, 1] - matrix[..., 1, 2] < 0)
o1 = torch.where(signs_differ, -x, x)
</DeepExtract>
<DeepExtract>
signs_differ = (y < 0) != (matrix[..., 0, 2] - matrix[..., 2, 0] < 0)
o2 = torch.where(signs_differ, -y, y)
</DeepExtract>
<DeepExtract>
signs_differ = (z < 0) != (matrix[..., 1, 0] - matrix[..., 0, 1] < 0)
o3 = torch.where(signs_differ, -z, z)
</DeepExtract>
return torch.stack((o0, o1, o2, o3), -1)
|
def matrix_to_quaternion(matrix):
"""
Convert rotations given as rotation matrices to quaternions.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
Returns:
quaternions with real part first, as tensor of shape (..., 4).
"""
if matrix.size(-1) != 3 or matrix.size(-2) != 3:
raise ValueError(f'Invalid rotation matrix shape f{matrix.shape}.')
m00 = matrix[..., 0, 0]
m11 = matrix[..., 1, 1]
m22 = matrix[..., 2, 2]
o0 = 0.5 * _sqrt_positive_part(1 + m00 + m11 + m22)
x = 0.5 * _sqrt_positive_part(1 + m00 - m11 - m22)
y = 0.5 * _sqrt_positive_part(1 - m00 + m11 - m22)
z = 0.5 * _sqrt_positive_part(1 - m00 - m11 + m22)
signs_differ = (x < 0) != (matrix[..., 2, 1] - matrix[..., 1, 2] < 0)
o1 = torch.where(signs_differ, -x, x)
signs_differ = (y < 0) != (matrix[..., 0, 2] - matrix[..., 2, 0] < 0)
o2 = torch.where(signs_differ, -y, y)
signs_differ = (z < 0) != (matrix[..., 1, 0] - matrix[..., 0, 1] < 0)
o3 = torch.where(signs_differ, -z, z)
return torch.stack((o0, o1, o2, o3), -1)
|
dro-sfm
|
positive
|
def calc_portfolio_vector(self, ticker=None):
"""
Calculate portfolio position vector
:return:
"""
portfolio = np.empty(len(self.symbols), dtype=np.dtype(Decimal))
<DeepExtract>
portval = dec_zero
for symbol in self._crypto:
portval = self.get_crypto(symbol).fma(self.get_open_price(symbol, ticker), portval)
portval = dec_con.add(self.fiat, portval)
portval = portval
</DeepExtract>
if not ticker:
ticker = self.tapi.returnTicker()
<DeepExtract>
try:
balance = self.tapi.returnBalances()
filtered_balance = {}
for symbol in self.symbols:
filtered_balance[symbol] = convert_to.decimal(balance[symbol])
balance = filtered_balance
except Exception as e:
try:
Logger.error(LiveTradingEnvironment.get_balance, self.parse_error(e, balance))
except Exception:
Logger.error(LiveTradingEnvironment.get_balance, self.parse_error(e))
raise e
</DeepExtract>
for (i, pair) in enumerate(self.pairs):
portfolio[i] = safe_div(dec_con.multiply(balance[pair.split('_')[1]], convert_to.decimal(ticker[pair]['last'])), portval)
portfolio[-1] = safe_div(balance[self._fiat], portval)
return convert_to.decimal(portfolio)
|
def calc_portfolio_vector(self, ticker=None):
"""
Calculate portfolio position vector
:return:
"""
portfolio = np.empty(len(self.symbols), dtype=np.dtype(Decimal))
portval = dec_zero
for symbol in self._crypto:
portval = self.get_crypto(symbol).fma(self.get_open_price(symbol, ticker), portval)
portval = dec_con.add(self.fiat, portval)
portval = portval
if not ticker:
ticker = self.tapi.returnTicker()
try:
balance = self.tapi.returnBalances()
filtered_balance = {}
for symbol in self.symbols:
filtered_balance[symbol] = convert_to.decimal(balance[symbol])
balance = filtered_balance
except Exception as e:
try:
Logger.error(LiveTradingEnvironment.get_balance, self.parse_error(e, balance))
except Exception:
Logger.error(LiveTradingEnvironment.get_balance, self.parse_error(e))
raise e
for (i, pair) in enumerate(self.pairs):
portfolio[i] = safe_div(dec_con.multiply(balance[pair.split('_')[1]], convert_to.decimal(ticker[pair]['last'])), portval)
portfolio[-1] = safe_div(balance[self._fiat], portval)
return convert_to.decimal(portfolio)
|
cryptotrader
|
positive
|
def testEndPointsV2(self):
"""Test the end points of a tiny v2 bottleneck network."""
blocks = [resnet_v2.resnet_v2_block('block1', base_depth=1, num_units=2, stride=2), resnet_v2.resnet_v2_block('block2', base_depth=2, num_units=2, stride=1)]
<DeepExtract>
if None in [2, 32, 16, 3]:
inputs = tf.placeholder(tf.float32, (2, 32, 16, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(32), [32, 1]) + np.reshape(np.arange(16), [1, 16]), [1, 32, 16, 1]), [2, 1, 1, 3]))
</DeepExtract>
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
<DeepExtract>
with tf.variable_scope('tiny', values=[inputs]):
with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride=None)
end_points = slim.utils.convert_collection_to_dict('end_points')
(_, end_points) = (net, end_points)
</DeepExtract>
expected = ['tiny/block1/unit_1/bottleneck_v2/shortcut', 'tiny/block1/unit_1/bottleneck_v2/conv1', 'tiny/block1/unit_1/bottleneck_v2/conv2', 'tiny/block1/unit_1/bottleneck_v2/conv3', 'tiny/block1/unit_2/bottleneck_v2/conv1', 'tiny/block1/unit_2/bottleneck_v2/conv2', 'tiny/block1/unit_2/bottleneck_v2/conv3', 'tiny/block2/unit_1/bottleneck_v2/shortcut', 'tiny/block2/unit_1/bottleneck_v2/conv1', 'tiny/block2/unit_1/bottleneck_v2/conv2', 'tiny/block2/unit_1/bottleneck_v2/conv3', 'tiny/block2/unit_2/bottleneck_v2/conv1', 'tiny/block2/unit_2/bottleneck_v2/conv2', 'tiny/block2/unit_2/bottleneck_v2/conv3']
self.assertItemsEqual(expected, end_points)
|
def testEndPointsV2(self):
"""Test the end points of a tiny v2 bottleneck network."""
blocks = [resnet_v2.resnet_v2_block('block1', base_depth=1, num_units=2, stride=2), resnet_v2.resnet_v2_block('block2', base_depth=2, num_units=2, stride=1)]
if None in [2, 32, 16, 3]:
inputs = tf.placeholder(tf.float32, (2, 32, 16, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(32), [32, 1]) + np.reshape(np.arange(16), [1, 16]), [1, 32, 16, 1]), [2, 1, 1, 3]))
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with tf.variable_scope('tiny', values=[inputs]):
with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = slim.utils.convert_collection_to_dict('end_points')
(_, end_points) = (net, end_points)
expected = ['tiny/block1/unit_1/bottleneck_v2/shortcut', 'tiny/block1/unit_1/bottleneck_v2/conv1', 'tiny/block1/unit_1/bottleneck_v2/conv2', 'tiny/block1/unit_1/bottleneck_v2/conv3', 'tiny/block1/unit_2/bottleneck_v2/conv1', 'tiny/block1/unit_2/bottleneck_v2/conv2', 'tiny/block1/unit_2/bottleneck_v2/conv3', 'tiny/block2/unit_1/bottleneck_v2/shortcut', 'tiny/block2/unit_1/bottleneck_v2/conv1', 'tiny/block2/unit_1/bottleneck_v2/conv2', 'tiny/block2/unit_1/bottleneck_v2/conv3', 'tiny/block2/unit_2/bottleneck_v2/conv1', 'tiny/block2/unit_2/bottleneck_v2/conv2', 'tiny/block2/unit_2/bottleneck_v2/conv3']
self.assertItemsEqual(expected, end_points)
|
caad_18
|
positive
|
def _scale_enum(anchor, scales):
"""
Enumerate a set of anchors for each scale wrt an anchor.
"""
<DeepExtract>
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
(w, h, x_ctr, y_ctr) = (w, h, x_ctr, y_ctr)
</DeepExtract>
ws = w * scales
hs = h * scales
<DeepExtract>
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))
anchors = anchors
</DeepExtract>
return anchors
|
def _scale_enum(anchor, scales):
"""
Enumerate a set of anchors for each scale wrt an anchor.
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
(w, h, x_ctr, y_ctr) = (w, h, x_ctr, y_ctr)
ws = w * scales
hs = h * scales
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))
anchors = anchors
return anchors
|
DRN
|
positive
|
def get_cocos2dx_path(self):
cocos2d_x = self._cp.get('paths', 'cocos2d_x')
<DeepExtract>
if len(cocos2d_x) == 0:
cocos2d_x = None
cocos2d_x = os.path.expanduser(cocos2d_x)
cocos2d_x = os.path.abspath(os.path.join(self.cocos2d_path, cocos2d_x))
if not os.path.isdir(cocos2d_x):
Logging.warning(MultiLanguage.get_string('COCOS_WARNING_INVALID_DIR_IN_INI_FMT', cocos2d_x))
cocos2d_x = None
cocos2d_x = cocos2d_x
</DeepExtract>
return cocos2d_x
|
def get_cocos2dx_path(self):
cocos2d_x = self._cp.get('paths', 'cocos2d_x')
if len(cocos2d_x) == 0:
cocos2d_x = None
cocos2d_x = os.path.expanduser(cocos2d_x)
cocos2d_x = os.path.abspath(os.path.join(self.cocos2d_path, cocos2d_x))
if not os.path.isdir(cocos2d_x):
Logging.warning(MultiLanguage.get_string('COCOS_WARNING_INVALID_DIR_IN_INI_FMT', cocos2d_x))
cocos2d_x = None
cocos2d_x = cocos2d_x
return cocos2d_x
|
cocos2d-console
|
positive
|
def adjusted_mutual_information(first_partition: object, second_partition: object) -> MatchingResult:
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.adjusted_mutual_information(louvain_communities,leiden_communities)
:Reference:
1. Vinh, N. X., Epps, J., & Bailey, J. (2010). `Information theoretic measures for clusterings comparison: Variants, properties, normalization and correction for chance. <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf/>`_ Journal of Machine Learning Research, 11(Oct), 2837-2854.
"""
<DeepExtract>
nodes_first = {node: None for community in first_partition.communities for node in community}
nodes_second = {node: None for community in second_partition.communities for node in community}
if len(set(nodes_first.keys()) ^ set(nodes_second.keys())) != 0:
raise ValueError('Both partitions should cover the same node set')
</DeepExtract>
<DeepExtract>
if first_partition.overlap or second_partition.overlap:
raise ValueError('Not defined for overlapping partitions')
</DeepExtract>
first_partition_c = [x[1] for x in sorted([(node, nid) for (nid, cluster) in enumerate(first_partition.communities) for node in cluster], key=lambda x: x[0])]
second_partition_c = [x[1] for x in sorted([(node, nid) for (nid, cluster) in enumerate(second_partition.communities) for node in cluster], key=lambda x: x[0])]
from sklearn.metrics import adjusted_mutual_info_score
return MatchingResult(score=adjusted_mutual_info_score(first_partition_c, second_partition_c))
|
def adjusted_mutual_information(first_partition: object, second_partition: object) -> MatchingResult:
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.adjusted_mutual_information(louvain_communities,leiden_communities)
:Reference:
1. Vinh, N. X., Epps, J., & Bailey, J. (2010). `Information theoretic measures for clusterings comparison: Variants, properties, normalization and correction for chance. <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf/>`_ Journal of Machine Learning Research, 11(Oct), 2837-2854.
"""
nodes_first = {node: None for community in first_partition.communities for node in community}
nodes_second = {node: None for community in second_partition.communities for node in community}
if len(set(nodes_first.keys()) ^ set(nodes_second.keys())) != 0:
raise ValueError('Both partitions should cover the same node set')
if first_partition.overlap or second_partition.overlap:
raise ValueError('Not defined for overlapping partitions')
first_partition_c = [x[1] for x in sorted([(node, nid) for (nid, cluster) in enumerate(first_partition.communities) for node in cluster], key=lambda x: x[0])]
second_partition_c = [x[1] for x in sorted([(node, nid) for (nid, cluster) in enumerate(second_partition.communities) for node in cluster], key=lambda x: x[0])]
from sklearn.metrics import adjusted_mutual_info_score
return MatchingResult(score=adjusted_mutual_info_score(first_partition_c, second_partition_c))
|
cdlib
|
positive
|
def play(self) -> None:
"""Plays the first player in the queue."""
if self.first is not None:
self._display.modified_episodes.append(self.first.episode)
progress = self.first.episode.progress
if progress is None or progress == 0:
self.first.play()
else:
<DeepExtract>
progress = self.first.episode.progress
if progress is not None and progress != 0:
resume_point = self.first.episode.progress / constants.MILLISECONDS_IN_SECOND
if self.first.state == 0:
resume_point -= self._resume_rewind
self.first.play_from(resume_point)
</DeepExtract>
self.first.set_volume(self.volume)
self.first.set_rate(self.speed)
|
def play(self) -> None:
"""Plays the first player in the queue."""
if self.first is not None:
self._display.modified_episodes.append(self.first.episode)
progress = self.first.episode.progress
if progress is None or progress == 0:
self.first.play()
else:
progress = self.first.episode.progress
if progress is not None and progress != 0:
resume_point = self.first.episode.progress / constants.MILLISECONDS_IN_SECOND
if self.first.state == 0:
resume_point -= self._resume_rewind
self.first.play_from(resume_point)
self.first.set_volume(self.volume)
self.first.set_rate(self.speed)
|
castero
|
positive
|
def __init__(self, in_chans, n_classes, input_window_samples, final_conv_length, n_filters_time=25, n_filters_spat=25, filter_time_length=10, pool_time_length=3, pool_time_stride=3, n_filters_2=50, filter_length_2=10, n_filters_3=100, filter_length_3=10, n_filters_4=200, filter_length_4=10, first_conv_nonlin=elu, first_pool_mode='max', first_pool_nonlin=identity, later_conv_nonlin=elu, later_pool_mode='max', later_pool_nonlin=identity, drop_prob=0.5, split_first_layer=True, batch_norm=True, batch_norm_alpha=0.1, stride_before_pool=False):
super().__init__()
if final_conv_length == 'auto':
assert input_window_samples is not None
self.in_chans = in_chans
self.n_classes = n_classes
self.input_window_samples = input_window_samples
self.final_conv_length = final_conv_length
self.n_filters_time = n_filters_time
self.n_filters_spat = n_filters_spat
self.filter_time_length = filter_time_length
self.pool_time_length = pool_time_length
self.pool_time_stride = pool_time_stride
self.n_filters_2 = n_filters_2
self.filter_length_2 = filter_length_2
self.n_filters_3 = n_filters_3
self.filter_length_3 = filter_length_3
self.n_filters_4 = n_filters_4
self.filter_length_4 = filter_length_4
self.first_nonlin = first_conv_nonlin
self.first_pool_mode = first_pool_mode
self.first_pool_nonlin = first_pool_nonlin
self.later_conv_nonlin = later_conv_nonlin
self.later_pool_mode = later_pool_mode
self.later_pool_nonlin = later_pool_nonlin
self.drop_prob = drop_prob
self.split_first_layer = split_first_layer
self.batch_norm = batch_norm
self.batch_norm_alpha = batch_norm_alpha
self.stride_before_pool = stride_before_pool
if self.stride_before_pool:
conv_stride = self.pool_time_stride
pool_stride = 1
else:
conv_stride = 1
pool_stride = self.pool_time_stride
self.add_module('ensuredims', Ensure4d())
pool_class_dict = dict(max=nn.MaxPool2d, mean=AvgPool2dWithConv)
first_pool_class = pool_class_dict[self.first_pool_mode]
later_pool_class = pool_class_dict[self.later_pool_mode]
if self.split_first_layer:
self.add_module('dimshuffle', Expression(transpose_time_to_spat))
self.add_module('conv_time', nn.Conv2d(1, self.n_filters_time, (self.filter_time_length, 1), stride=1))
self.add_module('conv_spat', nn.Conv2d(self.n_filters_time, self.n_filters_spat, (1, self.in_chans), stride=(conv_stride, 1), bias=not self.batch_norm))
n_filters_conv = self.n_filters_spat
else:
self.add_module('conv_time', nn.Conv2d(self.in_chans, self.n_filters_time, (self.filter_time_length, 1), stride=(conv_stride, 1), bias=not self.batch_norm))
n_filters_conv = self.n_filters_time
if self.batch_norm:
self.add_module('bnorm', nn.BatchNorm2d(n_filters_conv, momentum=self.batch_norm_alpha, affine=True, eps=1e-05))
self.add_module('conv_nonlin', Expression(self.first_nonlin))
self.add_module('pool', first_pool_class(kernel_size=(self.pool_time_length, 1), stride=(pool_stride, 1)))
self.add_module('pool_nonlin', Expression(self.first_pool_nonlin))
def add_conv_pool_block(model, n_filters_before, n_filters, filter_length, block_nr):
suffix = '_{:d}'.format(block_nr)
self.add_module('drop' + suffix, nn.Dropout(p=self.drop_prob))
self.add_module('conv' + suffix, nn.Conv2d(n_filters_before, n_filters, (filter_length, 1), stride=(conv_stride, 1), bias=not self.batch_norm))
if self.batch_norm:
self.add_module('bnorm' + suffix, nn.BatchNorm2d(n_filters, momentum=self.batch_norm_alpha, affine=True, eps=1e-05))
self.add_module('nonlin' + suffix, Expression(self.later_conv_nonlin))
self.add_module('pool' + suffix, later_pool_class(kernel_size=(self.pool_time_length, 1), stride=(pool_stride, 1)))
self.add_module('pool_nonlin' + suffix, Expression(self.later_pool_nonlin))
<DeepExtract>
suffix = '_{:d}'.format(2)
self.add_module('drop' + suffix, nn.Dropout(p=self.drop_prob))
self.add_module('conv' + suffix, nn.Conv2d(n_filters_conv, self.n_filters_2, (self.filter_length_2, 1), stride=(conv_stride, 1), bias=not self.batch_norm))
if self.batch_norm:
self.add_module('bnorm' + suffix, nn.BatchNorm2d(self.n_filters_2, momentum=self.batch_norm_alpha, affine=True, eps=1e-05))
self.add_module('nonlin' + suffix, Expression(self.later_conv_nonlin))
self.add_module('pool' + suffix, later_pool_class(kernel_size=(self.pool_time_length, 1), stride=(pool_stride, 1)))
self.add_module('pool_nonlin' + suffix, Expression(self.later_pool_nonlin))
</DeepExtract>
<DeepExtract>
suffix = '_{:d}'.format(3)
self.add_module('drop' + suffix, nn.Dropout(p=self.drop_prob))
self.add_module('conv' + suffix, nn.Conv2d(self.n_filters_2, self.n_filters_3, (self.filter_length_3, 1), stride=(conv_stride, 1), bias=not self.batch_norm))
if self.batch_norm:
self.add_module('bnorm' + suffix, nn.BatchNorm2d(self.n_filters_3, momentum=self.batch_norm_alpha, affine=True, eps=1e-05))
self.add_module('nonlin' + suffix, Expression(self.later_conv_nonlin))
self.add_module('pool' + suffix, later_pool_class(kernel_size=(self.pool_time_length, 1), stride=(pool_stride, 1)))
self.add_module('pool_nonlin' + suffix, Expression(self.later_pool_nonlin))
</DeepExtract>
<DeepExtract>
suffix = '_{:d}'.format(4)
self.add_module('drop' + suffix, nn.Dropout(p=self.drop_prob))
self.add_module('conv' + suffix, nn.Conv2d(self.n_filters_3, self.n_filters_4, (self.filter_length_4, 1), stride=(conv_stride, 1), bias=not self.batch_norm))
if self.batch_norm:
self.add_module('bnorm' + suffix, nn.BatchNorm2d(self.n_filters_4, momentum=self.batch_norm_alpha, affine=True, eps=1e-05))
self.add_module('nonlin' + suffix, Expression(self.later_conv_nonlin))
self.add_module('pool' + suffix, later_pool_class(kernel_size=(self.pool_time_length, 1), stride=(pool_stride, 1)))
self.add_module('pool_nonlin' + suffix, Expression(self.later_pool_nonlin))
</DeepExtract>
self.eval()
if self.final_conv_length == 'auto':
out = self(np_to_th(np.ones((1, self.in_chans, self.input_window_samples, 1), dtype=np.float32)))
n_out_time = out.cpu().data.numpy().shape[2]
self.final_conv_length = n_out_time
self.add_module('conv_classifier', nn.Conv2d(self.n_filters_4, self.n_classes, (self.final_conv_length, 1), bias=True))
self.add_module('softmax', nn.LogSoftmax(dim=1))
self.add_module('squeeze', Expression(squeeze_final_output))
init.xavier_uniform_(self.conv_time.weight, gain=1)
if self.split_first_layer or not self.batch_norm:
init.constant_(self.conv_time.bias, 0)
if self.split_first_layer:
init.xavier_uniform_(self.conv_spat.weight, gain=1)
if not self.batch_norm:
init.constant_(self.conv_spat.bias, 0)
if self.batch_norm:
init.constant_(self.bnorm.weight, 1)
init.constant_(self.bnorm.bias, 0)
param_dict = dict(list(self.named_parameters()))
for block_nr in range(2, 5):
conv_weight = param_dict['conv_{:d}.weight'.format(block_nr)]
init.xavier_uniform_(conv_weight, gain=1)
if not self.batch_norm:
conv_bias = param_dict['conv_{:d}.bias'.format(block_nr)]
init.constant_(conv_bias, 0)
else:
bnorm_weight = param_dict['bnorm_{:d}.weight'.format(block_nr)]
bnorm_bias = param_dict['bnorm_{:d}.bias'.format(block_nr)]
init.constant_(bnorm_weight, 1)
init.constant_(bnorm_bias, 0)
init.xavier_uniform_(self.conv_classifier.weight, gain=1)
init.constant_(self.conv_classifier.bias, 0)
self.eval()
|
def __init__(self, in_chans, n_classes, input_window_samples, final_conv_length, n_filters_time=25, n_filters_spat=25, filter_time_length=10, pool_time_length=3, pool_time_stride=3, n_filters_2=50, filter_length_2=10, n_filters_3=100, filter_length_3=10, n_filters_4=200, filter_length_4=10, first_conv_nonlin=elu, first_pool_mode='max', first_pool_nonlin=identity, later_conv_nonlin=elu, later_pool_mode='max', later_pool_nonlin=identity, drop_prob=0.5, split_first_layer=True, batch_norm=True, batch_norm_alpha=0.1, stride_before_pool=False):
super().__init__()
if final_conv_length == 'auto':
assert input_window_samples is not None
self.in_chans = in_chans
self.n_classes = n_classes
self.input_window_samples = input_window_samples
self.final_conv_length = final_conv_length
self.n_filters_time = n_filters_time
self.n_filters_spat = n_filters_spat
self.filter_time_length = filter_time_length
self.pool_time_length = pool_time_length
self.pool_time_stride = pool_time_stride
self.n_filters_2 = n_filters_2
self.filter_length_2 = filter_length_2
self.n_filters_3 = n_filters_3
self.filter_length_3 = filter_length_3
self.n_filters_4 = n_filters_4
self.filter_length_4 = filter_length_4
self.first_nonlin = first_conv_nonlin
self.first_pool_mode = first_pool_mode
self.first_pool_nonlin = first_pool_nonlin
self.later_conv_nonlin = later_conv_nonlin
self.later_pool_mode = later_pool_mode
self.later_pool_nonlin = later_pool_nonlin
self.drop_prob = drop_prob
self.split_first_layer = split_first_layer
self.batch_norm = batch_norm
self.batch_norm_alpha = batch_norm_alpha
self.stride_before_pool = stride_before_pool
if self.stride_before_pool:
conv_stride = self.pool_time_stride
pool_stride = 1
else:
conv_stride = 1
pool_stride = self.pool_time_stride
self.add_module('ensuredims', Ensure4d())
pool_class_dict = dict(max=nn.MaxPool2d, mean=AvgPool2dWithConv)
first_pool_class = pool_class_dict[self.first_pool_mode]
later_pool_class = pool_class_dict[self.later_pool_mode]
if self.split_first_layer:
self.add_module('dimshuffle', Expression(transpose_time_to_spat))
self.add_module('conv_time', nn.Conv2d(1, self.n_filters_time, (self.filter_time_length, 1), stride=1))
self.add_module('conv_spat', nn.Conv2d(self.n_filters_time, self.n_filters_spat, (1, self.in_chans), stride=(conv_stride, 1), bias=not self.batch_norm))
n_filters_conv = self.n_filters_spat
else:
self.add_module('conv_time', nn.Conv2d(self.in_chans, self.n_filters_time, (self.filter_time_length, 1), stride=(conv_stride, 1), bias=not self.batch_norm))
n_filters_conv = self.n_filters_time
if self.batch_norm:
self.add_module('bnorm', nn.BatchNorm2d(n_filters_conv, momentum=self.batch_norm_alpha, affine=True, eps=1e-05))
self.add_module('conv_nonlin', Expression(self.first_nonlin))
self.add_module('pool', first_pool_class(kernel_size=(self.pool_time_length, 1), stride=(pool_stride, 1)))
self.add_module('pool_nonlin', Expression(self.first_pool_nonlin))
def add_conv_pool_block(model, n_filters_before, n_filters, filter_length, block_nr):
suffix = '_{:d}'.format(block_nr)
self.add_module('drop' + suffix, nn.Dropout(p=self.drop_prob))
self.add_module('conv' + suffix, nn.Conv2d(n_filters_before, n_filters, (filter_length, 1), stride=(conv_stride, 1), bias=not self.batch_norm))
if self.batch_norm:
self.add_module('bnorm' + suffix, nn.BatchNorm2d(n_filters, momentum=self.batch_norm_alpha, affine=True, eps=1e-05))
self.add_module('nonlin' + suffix, Expression(self.later_conv_nonlin))
self.add_module('pool' + suffix, later_pool_class(kernel_size=(self.pool_time_length, 1), stride=(pool_stride, 1)))
self.add_module('pool_nonlin' + suffix, Expression(self.later_pool_nonlin))
suffix = '_{:d}'.format(2)
self.add_module('drop' + suffix, nn.Dropout(p=self.drop_prob))
self.add_module('conv' + suffix, nn.Conv2d(n_filters_conv, self.n_filters_2, (self.filter_length_2, 1), stride=(conv_stride, 1), bias=not self.batch_norm))
if self.batch_norm:
self.add_module('bnorm' + suffix, nn.BatchNorm2d(self.n_filters_2, momentum=self.batch_norm_alpha, affine=True, eps=1e-05))
self.add_module('nonlin' + suffix, Expression(self.later_conv_nonlin))
self.add_module('pool' + suffix, later_pool_class(kernel_size=(self.pool_time_length, 1), stride=(pool_stride, 1)))
self.add_module('pool_nonlin' + suffix, Expression(self.later_pool_nonlin))
suffix = '_{:d}'.format(3)
self.add_module('drop' + suffix, nn.Dropout(p=self.drop_prob))
self.add_module('conv' + suffix, nn.Conv2d(self.n_filters_2, self.n_filters_3, (self.filter_length_3, 1), stride=(conv_stride, 1), bias=not self.batch_norm))
if self.batch_norm:
self.add_module('bnorm' + suffix, nn.BatchNorm2d(self.n_filters_3, momentum=self.batch_norm_alpha, affine=True, eps=1e-05))
self.add_module('nonlin' + suffix, Expression(self.later_conv_nonlin))
self.add_module('pool' + suffix, later_pool_class(kernel_size=(self.pool_time_length, 1), stride=(pool_stride, 1)))
self.add_module('pool_nonlin' + suffix, Expression(self.later_pool_nonlin))
suffix = '_{:d}'.format(4)
self.add_module('drop' + suffix, nn.Dropout(p=self.drop_prob))
self.add_module('conv' + suffix, nn.Conv2d(self.n_filters_3, self.n_filters_4, (self.filter_length_4, 1), stride=(conv_stride, 1), bias=not self.batch_norm))
if self.batch_norm:
self.add_module('bnorm' + suffix, nn.BatchNorm2d(self.n_filters_4, momentum=self.batch_norm_alpha, affine=True, eps=1e-05))
self.add_module('nonlin' + suffix, Expression(self.later_conv_nonlin))
self.add_module('pool' + suffix, later_pool_class(kernel_size=(self.pool_time_length, 1), stride=(pool_stride, 1)))
self.add_module('pool_nonlin' + suffix, Expression(self.later_pool_nonlin))
self.eval()
if self.final_conv_length == 'auto':
out = self(np_to_th(np.ones((1, self.in_chans, self.input_window_samples, 1), dtype=np.float32)))
n_out_time = out.cpu().data.numpy().shape[2]
self.final_conv_length = n_out_time
self.add_module('conv_classifier', nn.Conv2d(self.n_filters_4, self.n_classes, (self.final_conv_length, 1), bias=True))
self.add_module('softmax', nn.LogSoftmax(dim=1))
self.add_module('squeeze', Expression(squeeze_final_output))
init.xavier_uniform_(self.conv_time.weight, gain=1)
if self.split_first_layer or not self.batch_norm:
init.constant_(self.conv_time.bias, 0)
if self.split_first_layer:
init.xavier_uniform_(self.conv_spat.weight, gain=1)
if not self.batch_norm:
init.constant_(self.conv_spat.bias, 0)
if self.batch_norm:
init.constant_(self.bnorm.weight, 1)
init.constant_(self.bnorm.bias, 0)
param_dict = dict(list(self.named_parameters()))
for block_nr in range(2, 5):
conv_weight = param_dict['conv_{:d}.weight'.format(block_nr)]
init.xavier_uniform_(conv_weight, gain=1)
if not self.batch_norm:
conv_bias = param_dict['conv_{:d}.bias'.format(block_nr)]
init.constant_(conv_bias, 0)
else:
bnorm_weight = param_dict['bnorm_{:d}.weight'.format(block_nr)]
bnorm_bias = param_dict['bnorm_{:d}.bias'.format(block_nr)]
init.constant_(bnorm_weight, 1)
init.constant_(bnorm_bias, 0)
init.xavier_uniform_(self.conv_classifier.weight, gain=1)
init.constant_(self.conv_classifier.bias, 0)
self.eval()
|
braindecode
|
positive
|
def recompute_stats(self):
with self.lock:
local_count = self.local_count.copy()
local_sum = self.local_sum.copy()
local_sumsq = self.local_sumsq.copy()
self.local_count[...] = 0
self.local_sum[...] = 0
self.local_sumsq[...] = 0
<DeepExtract>
local_sum[...] = self._mpi_average(local_sum)
local_sumsq[...] = self._mpi_average(local_sumsq)
local_count[...] = self._mpi_average(local_count)
(synced_sum, synced_sumsq, synced_count) = (local_sum, local_sumsq, local_count)
</DeepExtract>
self.sess.run(self.update_op, feed_dict={self.count_pl: synced_count, self.sum_pl: synced_sum, self.sumsq_pl: synced_sumsq})
self.sess.run(self.recompute_op)
|
def recompute_stats(self):
with self.lock:
local_count = self.local_count.copy()
local_sum = self.local_sum.copy()
local_sumsq = self.local_sumsq.copy()
self.local_count[...] = 0
self.local_sum[...] = 0
self.local_sumsq[...] = 0
local_sum[...] = self._mpi_average(local_sum)
local_sumsq[...] = self._mpi_average(local_sumsq)
local_count[...] = self._mpi_average(local_count)
(synced_sum, synced_sumsq, synced_count) = (local_sum, local_sumsq, local_count)
self.sess.run(self.update_op, feed_dict={self.count_pl: synced_count, self.sum_pl: synced_sum, self.sumsq_pl: synced_sumsq})
self.sess.run(self.recompute_op)
|
CHER
|
positive
|
def concat(adatas: Union[Collection[AnnData], 'typing.Mapping[str, AnnData]'], *, axis: Literal[0, 1]=0, join: Literal['inner', 'outer']='inner', merge: Union[StrategiesLiteral, Callable, None]=None, uns_merge: Union[StrategiesLiteral, Callable, None]=None, label: Optional[str]=None, keys: Optional[Collection]=None, index_unique: Optional[str]=None, fill_value: Optional[Any]=None, pairwise: bool=False) -> AnnData:
"""Concatenates AnnData objects along an axis.
See the :doc:`concatenation <../concatenation>` section in the docs for a more in-depth description.
Params
------
adatas
The objects to be concatenated. If a Mapping is passed, keys are used for the `keys`
argument and values are concatenated.
axis
Which axis to concatenate along.
join
How to align values when concatenating. If "outer", the union of the other axis
is taken. If "inner", the intersection. See :doc:`concatenation <../concatenation>`
for more.
merge
How elements not aligned to the axis being concatenated along are selected.
Currently implemented strategies include:
* `None`: No elements are kept.
* `"same"`: Elements that are the same in each of the objects.
* `"unique"`: Elements for which there is only one possible value.
* `"first"`: The first element seen at each from each position.
* `"only"`: Elements that show up in only one of the objects.
uns_merge
How the elements of `.uns` are selected. Uses the same set of strategies as
the `merge` argument, except applied recursively.
label
Column in axis annotation (i.e. `.obs` or `.var`) to place batch information in.
If it's None, no column is added.
keys
Names for each object being added. These values are used for column values for
`label` or appended to the index if `index_unique` is not `None`. Defaults to
incrementing integer labels.
index_unique
Whether to make the index unique by using the keys. If provided, this
is the delimiter between "{orig_idx}{index_unique}{key}". When `None`,
the original indices are kept.
fill_value
When `join="outer"`, this is the value that will be used to fill the introduced
indices. By default, sparse arrays are padded with zeros, while dense arrays and
DataFrames are padded with missing values.
pairwise
Whether pairwise elements along the concatenated dimension should be included.
This is False by default, since the resulting arrays are often not meaningful.
Notes
-----
.. warning::
If you use `join='outer'` this fills 0s for sparse data when
variables are absent in a batch. Use this with care. Dense data is
filled with `NaN`.
Examples
--------
Preparing example objects
>>> import anndata as ad, pandas as pd, numpy as np
>>> from scipy import sparse
>>> a = ad.AnnData(
... X=sparse.csr_matrix(np.array([[0, 1], [2, 3]])),
... obs=pd.DataFrame({"group": ["a", "b"]}, index=["s1", "s2"]),
... var=pd.DataFrame(index=["var1", "var2"]),
... varm={"ones": np.ones((2, 5)), "rand": np.random.randn(2, 3), "zeros": np.zeros((2, 5))},
... uns={"a": 1, "b": 2, "c": {"c.a": 3, "c.b": 4}},
... )
>>> b = ad.AnnData(
... X=sparse.csr_matrix(np.array([[4, 5, 6], [7, 8, 9]])),
... obs=pd.DataFrame({"group": ["b", "c"], "measure": [1.2, 4.3]}, index=["s3", "s4"]),
... var=pd.DataFrame(index=["var1", "var2", "var3"]),
... varm={"ones": np.ones((3, 5)), "rand": np.random.randn(3, 5)},
... uns={"a": 1, "b": 3, "c": {"c.b": 4}},
... )
>>> c = ad.AnnData(
... X=sparse.csr_matrix(np.array([[10, 11], [12, 13]])),
... obs=pd.DataFrame({"group": ["a", "b"]}, index=["s1", "s2"]),
... var=pd.DataFrame(index=["var3", "var4"]),
... uns={"a": 1, "b": 4, "c": {"c.a": 3, "c.b": 4, "c.c": 5}},
... )
Concatenating along different axes
>>> ad.concat([a, b]).to_df()
var1 var2
s1 0 1
s2 2 3
s3 4 5
s4 7 8
>>> ad.concat([a, c], axis=1).to_df()
var1 var2 var3 var4
s1 0 1 10 11
s2 2 3 12 13
Inner and outer joins
>>> inner = ad.concat([a, b]) # Joining on intersection of variables
>>> inner
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
>>> (inner.obs_names, inner.var_names) # doctest: +NORMALIZE_WHITESPACE
(Index(['s1', 's2', 's3', 's4'], dtype='object'),
Index(['var1', 'var2'], dtype='object'))
>>> outer = ad.concat([a, b], join="outer") # Joining on union of variables
>>> outer
AnnData object with n_obs × n_vars = 4 × 3
obs: 'group', 'measure'
>>> outer.var_names
Index(['var1', 'var2', 'var3'], dtype='object')
>>> outer.to_df() # Sparse arrays are padded with zeroes by default
var1 var2 var3
s1 0 1 0
s2 2 3 0
s3 4 5 6
s4 7 8 9
Keeping track of source objects
>>> ad.concat({"a": a, "b": b}, label="batch").obs
group batch
s1 a a
s2 b a
s3 b b
s4 c b
>>> ad.concat([a, b], label="batch", keys=["a", "b"]).obs # Equivalent to previous
group batch
s1 a a
s2 b a
s3 b b
s4 c b
>>> ad.concat({"a": a, "b": b}, index_unique="-").obs
group
s1-a a
s2-a b
s3-b b
s4-b c
Combining values not aligned to axis of concatenation
>>> ad.concat([a, b], merge="same")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'ones'
>>> ad.concat([a, b], merge="unique")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'ones', 'zeros'
>>> ad.concat([a, b], merge="first")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'ones', 'rand', 'zeros'
>>> ad.concat([a, b], merge="only")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'zeros'
The same merge strategies can be used for elements in `.uns`
>>> dict(ad.concat([a, b, c], uns_merge="same").uns)
{'a': 1, 'c': {'c.b': 4}}
>>> dict(ad.concat([a, b, c], uns_merge="unique").uns)
{'a': 1, 'c': {'c.a': 3, 'c.b': 4, 'c.c': 5}}
>>> dict(ad.concat([a, b, c], uns_merge="only").uns)
{'c': {'c.c': 5}}
>>> dict(ad.concat([a, b, c], uns_merge="first").uns)
{'a': 1, 'b': 2, 'c': {'c.a': 3, 'c.b': 4, 'c.c': 5}}
"""
<DeepExtract>
if not isinstance(merge, Callable):
merge = MERGE_STRATEGIES[merge]
merge = merge
</DeepExtract>
<DeepExtract>
if not isinstance(uns_merge, Callable):
uns_merge = MERGE_STRATEGIES[uns_merge]
uns_merge = uns_merge
</DeepExtract>
if isinstance(adatas, Mapping):
if keys is not None:
raise TypeError('Cannot specify categories in both mapping keys and using `keys`. Only specify this once.')
(keys, adatas) = (list(adatas.keys()), list(adatas.values()))
else:
adatas = list(adatas)
if keys is None:
keys = np.arange(len(adatas)).astype(str)
<DeepExtract>
_dims = ('obs', 'var')
if dim is None and axis is None or (dim is not None and axis is not None):
raise ValueError(f'Must pass exactly one of `dim` or `axis`. Got: dim={dim}, axis={axis}.')
elif dim is not None and dim not in _dims:
raise ValueError(f"`dim` must be one of ('obs', 'var'), was {dim}")
elif axis is not None and axis not in (0, 1):
raise ValueError(f'`axis` must be either 0 or 1, was {axis}')
if dim is not None:
(axis, dim) = (_dims.index(dim), dim)
else:
(axis, dim) = (axis, _dims[axis])
</DeepExtract>
<DeepExtract>
_dims = ('obs', 'var')
if dim is None and 1 - axis is None or (dim is not None and 1 - axis is not None):
raise ValueError(f'Must pass exactly one of `dim` or `axis`. Got: dim={dim}, axis={1 - axis}.')
elif dim is not None and dim not in _dims:
raise ValueError(f"`dim` must be one of ('obs', 'var'), was {dim}")
elif 1 - axis is not None and 1 - axis not in (0, 1):
raise ValueError(f'`axis` must be either 0 or 1, was {1 - axis}')
if dim is not None:
(alt_axis, alt_dim) = (_dims.index(dim), dim)
else:
(alt_axis, alt_dim) = (1 - axis, _dims[1 - axis])
</DeepExtract>
label_col = pd.Categorical.from_codes(np.repeat(np.arange(len(adatas)), [a.shape[axis] for a in adatas]), categories=keys)
concat_indices = pd.concat([pd.Series(dim_indices(a, axis=axis)) for a in adatas], ignore_index=True)
if index_unique is not None:
concat_indices = concat_indices.str.cat(label_col.map(str), sep=index_unique)
concat_indices = pd.Index(concat_indices)
<DeepExtract>
if join == 'inner':
alt_indices = reduce(lambda x, y: x.intersection(y), [dim_indices(a, axis=alt_axis) for a in adatas])
elif join == 'outer':
alt_indices = reduce(lambda x, y: x.union(y), [dim_indices(a, axis=alt_axis) for a in adatas])
else:
raise ValueError()
</DeepExtract>
reindexers = [gen_reindexer(alt_indices, dim_indices(a, axis=alt_axis)) for a in adatas]
concat_annot = pd.concat(unify_categorical_dtypes([getattr(a, dim) for a in adatas]), join=join, ignore_index=True)
concat_annot.index = concat_indices
if label is not None:
concat_annot[label] = label_col
<DeepExtract>
[getattr(a, alt_dim) for a in adatas] = [df.reindex(index=alt_indices) for df in [getattr(a, alt_dim) for a in adatas]]
new_df = pd.DataFrame(merge([getattr(a, alt_dim) for a in adatas]), index=alt_indices)
alt_annot = new_df
</DeepExtract>
<DeepExtract>
Xs = [a.X for a in adatas]
if all((X is None for X in Xs)):
X = None
elif any((X is None for X in Xs)):
raise NotImplementedError("Some (but not all) of the AnnData's to be concatenated had no .X value. Concatenation is currently only implemented for cases where all or none of the AnnData's have .X assigned.")
else:
X = concat_arrays(Xs, reindexers, axis=axis, fill_value=fill_value)
</DeepExtract>
if join == 'inner':
<DeepExtract>
result = {}
for k in intersect_keys([a.layers for a in adatas]):
els = [m[k] for m in [a.layers for a in adatas]]
if reindexers is None:
cur_reindexers = gen_inner_reindexers(els, new_index=index, axis=axis)
else:
cur_reindexers = reindexers
result[k] = concat_arrays(els, cur_reindexers, index=index, axis=axis)
layers = result
</DeepExtract>
<DeepExtract>
result = {}
for k in intersect_keys([getattr(a, f'{dim}m') for a in adatas]):
els = [m[k] for m in [getattr(a, f'{dim}m') for a in adatas]]
if reindexers is None:
cur_reindexers = gen_inner_reindexers(els, new_index=concat_indices, axis=axis)
else:
cur_reindexers = reindexers
result[k] = concat_arrays(els, cur_reindexers, index=concat_indices, axis=axis)
concat_mapping = result
</DeepExtract>
if pairwise:
<DeepExtract>
result = {}
for k in intersect_keys([getattr(a, f'{dim}p') for a in adatas]):
els = [m.get(k, sparse.csr_matrix((s, s), dtype=bool)) for (m, s) in zip([getattr(a, f'{dim}p') for a in adatas], [a.shape[axis] for a in adatas])]
result[k] = sparse.block_diag(els, format='csr')
concat_pairwise = result
</DeepExtract>
else:
concat_pairwise = {}
elif join == 'outer':
<DeepExtract>
result = {}
ns = [m.parent.shape[axis] for m in [a.layers for a in adatas]]
for k in union_keys([a.layers for a in adatas]):
els = [m.get(k, MissingVal) for m in [a.layers for a in adatas]]
if reindexers is None:
cur_reindexers = gen_outer_reindexers(els, ns, new_index=index, axis=axis)
else:
cur_reindexers = reindexers
result[k] = concat_arrays([el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)], cur_reindexers, axis=axis, index=index, fill_value=fill_value)
layers = result
</DeepExtract>
<DeepExtract>
result = {}
ns = [m.parent.shape[axis] for m in [getattr(a, f'{dim}m') for a in adatas]]
for k in union_keys([getattr(a, f'{dim}m') for a in adatas]):
els = [m.get(k, MissingVal) for m in [getattr(a, f'{dim}m') for a in adatas]]
if reindexers is None:
cur_reindexers = gen_outer_reindexers(els, ns, new_index=concat_indices, axis=axis)
else:
cur_reindexers = reindexers
result[k] = concat_arrays([el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)], cur_reindexers, axis=axis, index=concat_indices, fill_value=fill_value)
concat_mapping = result
</DeepExtract>
if pairwise:
<DeepExtract>
result = {}
for k in union_keys([getattr(a, f'{dim}p') for a in adatas]):
els = [m.get(k, sparse.csr_matrix((s, s), dtype=bool)) for (m, s) in zip([getattr(a, f'{dim}p') for a in adatas], [a.shape[axis] for a in adatas])]
result[k] = sparse.block_diag(els, format='csr')
concat_pairwise = result
</DeepExtract>
else:
concat_pairwise = {}
alt_mapping = merge([{k: r(v, axis=0) for (k, v) in getattr(a, f'{alt_dim}m').items()} for (r, a) in zip(reindexers, adatas)])
alt_pairwise = merge([{k: r(r(v, axis=0), axis=1) for (k, v) in getattr(a, f'{alt_dim}p').items()} for (r, a) in zip(reindexers, adatas)])
uns = uns_merge([a.uns for a in adatas])
raw = None
has_raw = [a.raw is not None for a in adatas]
if all(has_raw):
<DeepExtract>
merge = resolve_merge_strategy(merge)
uns_merge = resolve_merge_strategy(uns_merge)
if isinstance([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas], Mapping):
if keys is not None:
raise TypeError('Cannot specify categories in both mapping keys and using `keys`. Only specify this once.')
(keys, [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]) = (list([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas].keys()), list([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas].values()))
else:
[AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas] = list([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas])
if keys is None:
keys = np.arange(len([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas])).astype(str)
(axis, dim) = _resolve_dim(axis=axis)
(alt_axis, alt_dim) = _resolve_dim(axis=1 - axis)
label_col = pd.Categorical.from_codes(np.repeat(np.arange(len([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas])), [a.shape[axis] for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]]), categories=keys)
concat_indices = pd.concat([pd.Series(dim_indices(a, axis=axis)) for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], ignore_index=True)
if index_unique is not None:
concat_indices = concat_indices.str.cat(label_col.map(str), sep=index_unique)
concat_indices = pd.Index(concat_indices)
alt_indices = merge_indices([dim_indices(a, axis=alt_axis) for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], join=join)
reindexers = [gen_reindexer(alt_indices, dim_indices(a, axis=alt_axis)) for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]]
concat_annot = pd.concat(unify_categorical_dtypes([getattr(a, dim) for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]]), join=join, ignore_index=True)
concat_annot.index = concat_indices
if label is not None:
concat_annot[label] = label_col
alt_annot = merge_dataframes([getattr(a, alt_dim) for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], alt_indices, merge)
X = concat_Xs([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas], reindexers, axis=axis, fill_value=fill_value)
if join == 'inner':
layers = inner_concat_aligned_mapping([a.layers for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], axis=axis, reindexers=reindexers)
concat_mapping = inner_concat_aligned_mapping([getattr(a, f'{dim}m') for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], index=concat_indices)
if pairwise:
concat_pairwise = concat_pairwise_mapping(mappings=[getattr(a, f'{dim}p') for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], shapes=[a.shape[axis] for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], join_keys=intersect_keys)
else:
concat_pairwise = {}
elif join == 'outer':
layers = outer_concat_aligned_mapping([a.layers for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], reindexers, axis=axis, fill_value=fill_value)
concat_mapping = outer_concat_aligned_mapping([getattr(a, f'{dim}m') for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], index=concat_indices, fill_value=fill_value)
if pairwise:
concat_pairwise = concat_pairwise_mapping(mappings=[getattr(a, f'{dim}p') for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], shapes=[a.shape[axis] for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], join_keys=union_keys)
else:
concat_pairwise = {}
alt_mapping = merge([{k: r(v, axis=0) for (k, v) in getattr(a, f'{alt_dim}m').items()} for (r, a) in zip(reindexers, [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas])])
alt_pairwise = merge([{k: r(r(v, axis=0), axis=1) for (k, v) in getattr(a, f'{alt_dim}p').items()} for (r, a) in zip(reindexers, [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas])])
uns = uns_merge([a.uns for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]])
raw = None
has_raw = [a.raw is not None for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]]
if all(has_raw):
raw = concat([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], join=join, label=label, keys=keys, index_unique=index_unique, fill_value=fill_value, axis=axis)
elif any(has_raw):
warn('Only some AnnData objects have `.raw` attribute, not concatenating `.raw` attributes.', UserWarning)
raw = AnnData(**{'X': X, 'layers': layers, dim: concat_annot, alt_dim: alt_annot, f'{dim}m': concat_mapping, f'{alt_dim}m': alt_mapping, f'{dim}p': concat_pairwise, f'{alt_dim}p': alt_pairwise, 'uns': uns, 'raw': raw})
</DeepExtract>
elif any(has_raw):
warn('Only some AnnData objects have `.raw` attribute, not concatenating `.raw` attributes.', UserWarning)
return AnnData(**{'X': X, 'layers': layers, dim: concat_annot, alt_dim: alt_annot, f'{dim}m': concat_mapping, f'{alt_dim}m': alt_mapping, f'{dim}p': concat_pairwise, f'{alt_dim}p': alt_pairwise, 'uns': uns, 'raw': raw})
|
def concat(adatas: Union[Collection[AnnData], 'typing.Mapping[str, AnnData]'], *, axis: Literal[0, 1]=0, join: Literal['inner', 'outer']='inner', merge: Union[StrategiesLiteral, Callable, None]=None, uns_merge: Union[StrategiesLiteral, Callable, None]=None, label: Optional[str]=None, keys: Optional[Collection]=None, index_unique: Optional[str]=None, fill_value: Optional[Any]=None, pairwise: bool=False) -> AnnData:
"""Concatenates AnnData objects along an axis.
See the :doc:`concatenation <../concatenation>` section in the docs for a more in-depth description.
Params
------
adatas
The objects to be concatenated. If a Mapping is passed, keys are used for the `keys`
argument and values are concatenated.
axis
Which axis to concatenate along.
join
How to align values when concatenating. If "outer", the union of the other axis
is taken. If "inner", the intersection. See :doc:`concatenation <../concatenation>`
for more.
merge
How elements not aligned to the axis being concatenated along are selected.
Currently implemented strategies include:
* `None`: No elements are kept.
* `"same"`: Elements that are the same in each of the objects.
* `"unique"`: Elements for which there is only one possible value.
* `"first"`: The first element seen at each from each position.
* `"only"`: Elements that show up in only one of the objects.
uns_merge
How the elements of `.uns` are selected. Uses the same set of strategies as
the `merge` argument, except applied recursively.
label
Column in axis annotation (i.e. `.obs` or `.var`) to place batch information in.
If it's None, no column is added.
keys
Names for each object being added. These values are used for column values for
`label` or appended to the index if `index_unique` is not `None`. Defaults to
incrementing integer labels.
index_unique
Whether to make the index unique by using the keys. If provided, this
is the delimiter between "{orig_idx}{index_unique}{key}". When `None`,
the original indices are kept.
fill_value
When `join="outer"`, this is the value that will be used to fill the introduced
indices. By default, sparse arrays are padded with zeros, while dense arrays and
DataFrames are padded with missing values.
pairwise
Whether pairwise elements along the concatenated dimension should be included.
This is False by default, since the resulting arrays are often not meaningful.
Notes
-----
.. warning::
If you use `join='outer'` this fills 0s for sparse data when
variables are absent in a batch. Use this with care. Dense data is
filled with `NaN`.
Examples
--------
Preparing example objects
>>> import anndata as ad, pandas as pd, numpy as np
>>> from scipy import sparse
>>> a = ad.AnnData(
... X=sparse.csr_matrix(np.array([[0, 1], [2, 3]])),
... obs=pd.DataFrame({"group": ["a", "b"]}, index=["s1", "s2"]),
... var=pd.DataFrame(index=["var1", "var2"]),
... varm={"ones": np.ones((2, 5)), "rand": np.random.randn(2, 3), "zeros": np.zeros((2, 5))},
... uns={"a": 1, "b": 2, "c": {"c.a": 3, "c.b": 4}},
... )
>>> b = ad.AnnData(
... X=sparse.csr_matrix(np.array([[4, 5, 6], [7, 8, 9]])),
... obs=pd.DataFrame({"group": ["b", "c"], "measure": [1.2, 4.3]}, index=["s3", "s4"]),
... var=pd.DataFrame(index=["var1", "var2", "var3"]),
... varm={"ones": np.ones((3, 5)), "rand": np.random.randn(3, 5)},
... uns={"a": 1, "b": 3, "c": {"c.b": 4}},
... )
>>> c = ad.AnnData(
... X=sparse.csr_matrix(np.array([[10, 11], [12, 13]])),
... obs=pd.DataFrame({"group": ["a", "b"]}, index=["s1", "s2"]),
... var=pd.DataFrame(index=["var3", "var4"]),
... uns={"a": 1, "b": 4, "c": {"c.a": 3, "c.b": 4, "c.c": 5}},
... )
Concatenating along different axes
>>> ad.concat([a, b]).to_df()
var1 var2
s1 0 1
s2 2 3
s3 4 5
s4 7 8
>>> ad.concat([a, c], axis=1).to_df()
var1 var2 var3 var4
s1 0 1 10 11
s2 2 3 12 13
Inner and outer joins
>>> inner = ad.concat([a, b]) # Joining on intersection of variables
>>> inner
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
>>> (inner.obs_names, inner.var_names) # doctest: +NORMALIZE_WHITESPACE
(Index(['s1', 's2', 's3', 's4'], dtype='object'),
Index(['var1', 'var2'], dtype='object'))
>>> outer = ad.concat([a, b], join="outer") # Joining on union of variables
>>> outer
AnnData object with n_obs × n_vars = 4 × 3
obs: 'group', 'measure'
>>> outer.var_names
Index(['var1', 'var2', 'var3'], dtype='object')
>>> outer.to_df() # Sparse arrays are padded with zeroes by default
var1 var2 var3
s1 0 1 0
s2 2 3 0
s3 4 5 6
s4 7 8 9
Keeping track of source objects
>>> ad.concat({"a": a, "b": b}, label="batch").obs
group batch
s1 a a
s2 b a
s3 b b
s4 c b
>>> ad.concat([a, b], label="batch", keys=["a", "b"]).obs # Equivalent to previous
group batch
s1 a a
s2 b a
s3 b b
s4 c b
>>> ad.concat({"a": a, "b": b}, index_unique="-").obs
group
s1-a a
s2-a b
s3-b b
s4-b c
Combining values not aligned to axis of concatenation
>>> ad.concat([a, b], merge="same")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'ones'
>>> ad.concat([a, b], merge="unique")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'ones', 'zeros'
>>> ad.concat([a, b], merge="first")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'ones', 'rand', 'zeros'
>>> ad.concat([a, b], merge="only")
AnnData object with n_obs × n_vars = 4 × 2
obs: 'group'
varm: 'zeros'
The same merge strategies can be used for elements in `.uns`
>>> dict(ad.concat([a, b, c], uns_merge="same").uns)
{'a': 1, 'c': {'c.b': 4}}
>>> dict(ad.concat([a, b, c], uns_merge="unique").uns)
{'a': 1, 'c': {'c.a': 3, 'c.b': 4, 'c.c': 5}}
>>> dict(ad.concat([a, b, c], uns_merge="only").uns)
{'c': {'c.c': 5}}
>>> dict(ad.concat([a, b, c], uns_merge="first").uns)
{'a': 1, 'b': 2, 'c': {'c.a': 3, 'c.b': 4, 'c.c': 5}}
"""
if not isinstance(merge, Callable):
merge = MERGE_STRATEGIES[merge]
merge = merge
if not isinstance(uns_merge, Callable):
uns_merge = MERGE_STRATEGIES[uns_merge]
uns_merge = uns_merge
if isinstance(adatas, Mapping):
if keys is not None:
raise TypeError('Cannot specify categories in both mapping keys and using `keys`. Only specify this once.')
(keys, adatas) = (list(adatas.keys()), list(adatas.values()))
else:
adatas = list(adatas)
if keys is None:
keys = np.arange(len(adatas)).astype(str)
_dims = ('obs', 'var')
if dim is None and axis is None or (dim is not None and axis is not None):
raise ValueError(f'Must pass exactly one of `dim` or `axis`. Got: dim={dim}, axis={axis}.')
elif dim is not None and dim not in _dims:
raise ValueError(f"`dim` must be one of ('obs', 'var'), was {dim}")
elif axis is not None and axis not in (0, 1):
raise ValueError(f'`axis` must be either 0 or 1, was {axis}')
if dim is not None:
(axis, dim) = (_dims.index(dim), dim)
else:
(axis, dim) = (axis, _dims[axis])
_dims = ('obs', 'var')
if dim is None and 1 - axis is None or (dim is not None and 1 - axis is not None):
raise ValueError(f'Must pass exactly one of `dim` or `axis`. Got: dim={dim}, axis={1 - axis}.')
elif dim is not None and dim not in _dims:
raise ValueError(f"`dim` must be one of ('obs', 'var'), was {dim}")
elif 1 - axis is not None and 1 - axis not in (0, 1):
raise ValueError(f'`axis` must be either 0 or 1, was {1 - axis}')
if dim is not None:
(alt_axis, alt_dim) = (_dims.index(dim), dim)
else:
(alt_axis, alt_dim) = (1 - axis, _dims[1 - axis])
label_col = pd.Categorical.from_codes(np.repeat(np.arange(len(adatas)), [a.shape[axis] for a in adatas]), categories=keys)
concat_indices = pd.concat([pd.Series(dim_indices(a, axis=axis)) for a in adatas], ignore_index=True)
if index_unique is not None:
concat_indices = concat_indices.str.cat(label_col.map(str), sep=index_unique)
concat_indices = pd.Index(concat_indices)
if join == 'inner':
alt_indices = reduce(lambda x, y: x.intersection(y), [dim_indices(a, axis=alt_axis) for a in adatas])
elif join == 'outer':
alt_indices = reduce(lambda x, y: x.union(y), [dim_indices(a, axis=alt_axis) for a in adatas])
else:
raise ValueError()
reindexers = [gen_reindexer(alt_indices, dim_indices(a, axis=alt_axis)) for a in adatas]
concat_annot = pd.concat(unify_categorical_dtypes([getattr(a, dim) for a in adatas]), join=join, ignore_index=True)
concat_annot.index = concat_indices
if label is not None:
concat_annot[label] = label_col
[getattr(a, alt_dim) for a in adatas] = [df.reindex(index=alt_indices) for df in [getattr(a, alt_dim) for a in adatas]]
new_df = pd.DataFrame(merge([getattr(a, alt_dim) for a in adatas]), index=alt_indices)
alt_annot = new_df
Xs = [a.X for a in adatas]
if all((X is None for X in Xs)):
X = None
elif any((X is None for X in Xs)):
raise NotImplementedError("Some (but not all) of the AnnData's to be concatenated had no .X value. Concatenation is currently only implemented for cases where all or none of the AnnData's have .X assigned.")
else:
X = concat_arrays(Xs, reindexers, axis=axis, fill_value=fill_value)
if join == 'inner':
result = {}
for k in intersect_keys([a.layers for a in adatas]):
els = [m[k] for m in [a.layers for a in adatas]]
if reindexers is None:
cur_reindexers = gen_inner_reindexers(els, new_index=index, axis=axis)
else:
cur_reindexers = reindexers
result[k] = concat_arrays(els, cur_reindexers, index=index, axis=axis)
layers = result
result = {}
for k in intersect_keys([getattr(a, f'{dim}m') for a in adatas]):
els = [m[k] for m in [getattr(a, f'{dim}m') for a in adatas]]
if reindexers is None:
cur_reindexers = gen_inner_reindexers(els, new_index=concat_indices, axis=axis)
else:
cur_reindexers = reindexers
result[k] = concat_arrays(els, cur_reindexers, index=concat_indices, axis=axis)
concat_mapping = result
if pairwise:
result = {}
for k in intersect_keys([getattr(a, f'{dim}p') for a in adatas]):
els = [m.get(k, sparse.csr_matrix((s, s), dtype=bool)) for (m, s) in zip([getattr(a, f'{dim}p') for a in adatas], [a.shape[axis] for a in adatas])]
result[k] = sparse.block_diag(els, format='csr')
concat_pairwise = result
else:
concat_pairwise = {}
elif join == 'outer':
result = {}
ns = [m.parent.shape[axis] for m in [a.layers for a in adatas]]
for k in union_keys([a.layers for a in adatas]):
els = [m.get(k, MissingVal) for m in [a.layers for a in adatas]]
if reindexers is None:
cur_reindexers = gen_outer_reindexers(els, ns, new_index=index, axis=axis)
else:
cur_reindexers = reindexers
result[k] = concat_arrays([el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)], cur_reindexers, axis=axis, index=index, fill_value=fill_value)
layers = result
result = {}
ns = [m.parent.shape[axis] for m in [getattr(a, f'{dim}m') for a in adatas]]
for k in union_keys([getattr(a, f'{dim}m') for a in adatas]):
els = [m.get(k, MissingVal) for m in [getattr(a, f'{dim}m') for a in adatas]]
if reindexers is None:
cur_reindexers = gen_outer_reindexers(els, ns, new_index=concat_indices, axis=axis)
else:
cur_reindexers = reindexers
result[k] = concat_arrays([el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)], cur_reindexers, axis=axis, index=concat_indices, fill_value=fill_value)
concat_mapping = result
if pairwise:
result = {}
for k in union_keys([getattr(a, f'{dim}p') for a in adatas]):
els = [m.get(k, sparse.csr_matrix((s, s), dtype=bool)) for (m, s) in zip([getattr(a, f'{dim}p') for a in adatas], [a.shape[axis] for a in adatas])]
result[k] = sparse.block_diag(els, format='csr')
concat_pairwise = result
else:
concat_pairwise = {}
alt_mapping = merge([{k: r(v, axis=0) for (k, v) in getattr(a, f'{alt_dim}m').items()} for (r, a) in zip(reindexers, adatas)])
alt_pairwise = merge([{k: r(r(v, axis=0), axis=1) for (k, v) in getattr(a, f'{alt_dim}p').items()} for (r, a) in zip(reindexers, adatas)])
uns = uns_merge([a.uns for a in adatas])
raw = None
has_raw = [a.raw is not None for a in adatas]
if all(has_raw):
merge = resolve_merge_strategy(merge)
uns_merge = resolve_merge_strategy(uns_merge)
if isinstance([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas], Mapping):
if keys is not None:
raise TypeError('Cannot specify categories in both mapping keys and using `keys`. Only specify this once.')
(keys, [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]) = (list([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas].keys()), list([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas].values()))
else:
[AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas] = list([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas])
if keys is None:
keys = np.arange(len([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas])).astype(str)
(axis, dim) = _resolve_dim(axis=axis)
(alt_axis, alt_dim) = _resolve_dim(axis=1 - axis)
label_col = pd.Categorical.from_codes(np.repeat(np.arange(len([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas])), [a.shape[axis] for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]]), categories=keys)
concat_indices = pd.concat([pd.Series(dim_indices(a, axis=axis)) for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], ignore_index=True)
if index_unique is not None:
concat_indices = concat_indices.str.cat(label_col.map(str), sep=index_unique)
concat_indices = pd.Index(concat_indices)
alt_indices = merge_indices([dim_indices(a, axis=alt_axis) for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], join=join)
reindexers = [gen_reindexer(alt_indices, dim_indices(a, axis=alt_axis)) for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]]
concat_annot = pd.concat(unify_categorical_dtypes([getattr(a, dim) for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]]), join=join, ignore_index=True)
concat_annot.index = concat_indices
if label is not None:
concat_annot[label] = label_col
alt_annot = merge_dataframes([getattr(a, alt_dim) for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], alt_indices, merge)
X = concat_Xs([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas], reindexers, axis=axis, fill_value=fill_value)
if join == 'inner':
layers = inner_concat_aligned_mapping([a.layers for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], axis=axis, reindexers=reindexers)
concat_mapping = inner_concat_aligned_mapping([getattr(a, f'{dim}m') for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], index=concat_indices)
if pairwise:
concat_pairwise = concat_pairwise_mapping(mappings=[getattr(a, f'{dim}p') for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], shapes=[a.shape[axis] for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], join_keys=intersect_keys)
else:
concat_pairwise = {}
elif join == 'outer':
layers = outer_concat_aligned_mapping([a.layers for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], reindexers, axis=axis, fill_value=fill_value)
concat_mapping = outer_concat_aligned_mapping([getattr(a, f'{dim}m') for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], index=concat_indices, fill_value=fill_value)
if pairwise:
concat_pairwise = concat_pairwise_mapping(mappings=[getattr(a, f'{dim}p') for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], shapes=[a.shape[axis] for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], join_keys=union_keys)
else:
concat_pairwise = {}
alt_mapping = merge([{k: r(v, axis=0) for (k, v) in getattr(a, f'{alt_dim}m').items()} for (r, a) in zip(reindexers, [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas])])
alt_pairwise = merge([{k: r(r(v, axis=0), axis=1) for (k, v) in getattr(a, f'{alt_dim}p').items()} for (r, a) in zip(reindexers, [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas])])
uns = uns_merge([a.uns for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]])
raw = None
has_raw = [a.raw is not None for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]]
if all(has_raw):
raw = concat([AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in [AnnData(X=a.raw.X, obs=pd.DataFrame(index=a.obs_names), var=a.raw.var, varm=a.raw.varm) for a in adatas]], join=join, label=label, keys=keys, index_unique=index_unique, fill_value=fill_value, axis=axis)
elif any(has_raw):
warn('Only some AnnData objects have `.raw` attribute, not concatenating `.raw` attributes.', UserWarning)
raw = AnnData(**{'X': X, 'layers': layers, dim: concat_annot, alt_dim: alt_annot, f'{dim}m': concat_mapping, f'{alt_dim}m': alt_mapping, f'{dim}p': concat_pairwise, f'{alt_dim}p': alt_pairwise, 'uns': uns, 'raw': raw})
elif any(has_raw):
warn('Only some AnnData objects have `.raw` attribute, not concatenating `.raw` attributes.', UserWarning)
return AnnData(**{'X': X, 'layers': layers, dim: concat_annot, alt_dim: alt_annot, f'{dim}m': concat_mapping, f'{alt_dim}m': alt_mapping, f'{dim}p': concat_pairwise, f'{alt_dim}p': alt_pairwise, 'uns': uns, 'raw': raw})
|
anndata
|
positive
|
def prep_workspace():
global TEST_DIR
if TEST_DIR is None or not os.path.isdir(TEST_DIR):
TEST_DIR = tempfile.mkdtemp()
logging.debug('temp dir is: %s' % TEST_DIR)
else:
<DeepExtract>
global TEST_DIR
if TEST_DIR is None:
return
if os.path.isdir(TEST_DIR):
RemoveTree(TEST_DIR)
TEST_DIR = None
</DeepExtract>
TEST_DIR = tempfile.mkdtemp()
|
def prep_workspace():
global TEST_DIR
if TEST_DIR is None or not os.path.isdir(TEST_DIR):
TEST_DIR = tempfile.mkdtemp()
logging.debug('temp dir is: %s' % TEST_DIR)
else:
global TEST_DIR
if TEST_DIR is None:
return
if os.path.isdir(TEST_DIR):
RemoveTree(TEST_DIR)
TEST_DIR = None
TEST_DIR = tempfile.mkdtemp()
|
edk2-pytool-extensions
|
positive
|
def test_monitor_will_add_new_workers_when_all_workers_are_working(self):
messages = [createWorkerStatusMessage('tcp://127.0.0.1:1', 'en-GB', 'WORKING', 1).SerializeToString()]
<DeepExtract>
self.poller.add_messages([{'master': message} for message in messages])
self.monitor.run()
</DeepExtract>
expected_messages = [{'en-GB': +1}]
self.assertEqual(expected_messages, self.scale_workers.scaling_history)
|
def test_monitor_will_add_new_workers_when_all_workers_are_working(self):
messages = [createWorkerStatusMessage('tcp://127.0.0.1:1', 'en-GB', 'WORKING', 1).SerializeToString()]
self.poller.add_messages([{'master': message} for message in messages])
self.monitor.run()
expected_messages = [{'en-GB': +1}]
self.assertEqual(expected_messages, self.scale_workers.scaling_history)
|
cloud-asr
|
positive
|
def y():
try:
<DeepExtract>
raise Exception('z')
</DeepExtract>
except Exception:
raise ArithmeticError('y')
|
def y():
try:
raise Exception('z')
except Exception:
raise ArithmeticError('y')
|
bugsnag-python
|
positive
|
def run_recursively_get_imports(lib, recur=recursively_get_imports, fast=False, **kwargs):
<DeepExtract>
rtn = {'libnames': DEFAULT_LIBNAMES, 'non_recursive_libnames': tuple(), 'ocaml_dirnames': tuple(), 'log': DEFAULT_LOG, 'coqc': 'coqc', 'coq_makefile': 'coq_makefile', 'coqdep': 'coqdep', 'walk_tree': True, 'coqc_args': tuple()}
rtn.update(kwargs)
if for_makefile:
if 'make_coqc' in rtn.keys():
rtn['coqc'] = rtn['make_coqc']
if 'passing_make_coqc' in rtn.keys():
rtn['passing_coqc'] = rtn['passing_make_coqc']
kwargs = rtn
</DeepExtract>
<DeepExtract>
kwargs = fill_kwargs(kwargs)
filename = filename_of_lib(lib, **kwargs)
if os.path.isfile(filename):
lib = lib_of_filename(filename, **kwargs)
else:
lib = lib
</DeepExtract>
<DeepExtract>
kwargs = fill_kwargs(kwargs)
glob_name = filename_of_lib_helper(lib, libnames=tuple(kwargs['libnames']), non_recursive_libnames=tuple(kwargs['non_recursive_libnames']), ext='.glob')
</DeepExtract>
<DeepExtract>
kwargs = fill_kwargs(kwargs)
v_name = filename_of_lib_helper(lib, libnames=tuple(kwargs['libnames']), non_recursive_libnames=tuple(kwargs['non_recursive_libnames']), ext='.v')
</DeepExtract>
if os.path.isfile(v_name):
<DeepExtract>
kwargs = fill_kwargs(kwargs)
lib = norm_libname(lib, **kwargs)
glob_name = filename_of_lib(lib, ext='.glob', **kwargs)
v_name = filename_of_lib(lib, ext='.v', **kwargs)
if not fast:
get_require_dict(lib, **kwargs)
if lib in lib_imports_slow.keys():
imports = tuple((k for (k, v) in sorted(lib_imports_slow[lib].items(), key=lambda kv: kv[1])))
if lib not in lib_imports_fast.keys():
contents = get_file(v_name, **kwargs)
imports_string = re.sub('\\s+', ' ', ' '.join(IMPORT_LINE_REG.findall(contents))).strip()
lib_imports_fast[lib] = tuple(sorted(set((norm_libname(i, **kwargs) for i in imports_string.split(' ') if i != ''))))
imports = lib_imports_fast[lib]
</DeepExtract>
if not fast:
<DeepExtract>
kwargs = fill_kwargs(kwargs)
existing_logical_names = [i for i in imports if os.path.isfile(filename_of_lib(i, ext='.v', **kwargs))]
if len(existing_logical_names) == 0:
return
filenames_vo_v_glob = [(filename_of_lib(i, ext='.vo', **kwargs), filename_of_lib(i, ext='.v', **kwargs), filename_of_lib(i, ext='.glob', **kwargs)) for i in existing_logical_names]
filenames_vo_v_glob = [(vo_name, v_name, glob_name) for (vo_name, v_name, glob_name) in filenames_vo_v_glob if not (os.path.isfile(glob_name) and os.path.getmtime(glob_name) > os.path.getmtime(v_name))]
for (vo_name, v_name, glob_name) in filenames_vo_v_glob:
if os.path.isfile(glob_name) and (not os.path.getmtime(glob_name) > os.path.getmtime(v_name)):
if os.path.getmtime(v_name) > time.time():
kwargs['log']('WARNING: The file %s comes from the future! (%d > %d)' % (v_name, os.path.getmtime(v_name), time.time()), level=LOG_ALWAYS)
remove_if_local(glob_name, **kwargs)
if os.path.exists(vo_name) and os.path.getmtime(vo_name) >= os.path.getmtime(v_name):
make_one_glob_file(v_name, **kwargs)
filenames_vo_v_glob = [(vo_name, v_name, glob_name) for (vo_name, v_name, glob_name) in filenames_vo_v_glob if not (os.path.exists(vo_name) and os.path.getmtime(vo_name) >= os.path.getmtime(v_name))]
filenames_v = [v_name for (vo_name, v_name, glob_name) in filenames_vo_v_glob]
filenames_glob = [glob_name for (vo_name, v_name, glob_name) in filenames_vo_v_glob]
if len(filenames_vo_v_glob) == 0:
return
extra_filenames_v = get_all_v_files('.', filenames_v) if kwargs['walk_tree'] else []
(stdout_make, stderr_make) = run_coq_makefile_and_make(tuple(sorted(list(filenames_v) + list(extra_filenames_v))), filenames_glob, **kwargs)
</DeepExtract>
imports_list = [recur(k, fast=fast, **kwargs) for k in imports]
return merge_imports(tuple(map(tuple, imports_list + [[lib]])), **kwargs)
return [lib]
|
def run_recursively_get_imports(lib, recur=recursively_get_imports, fast=False, **kwargs):
rtn = {'libnames': DEFAULT_LIBNAMES, 'non_recursive_libnames': tuple(), 'ocaml_dirnames': tuple(), 'log': DEFAULT_LOG, 'coqc': 'coqc', 'coq_makefile': 'coq_makefile', 'coqdep': 'coqdep', 'walk_tree': True, 'coqc_args': tuple()}
rtn.update(kwargs)
if for_makefile:
if 'make_coqc' in rtn.keys():
rtn['coqc'] = rtn['make_coqc']
if 'passing_make_coqc' in rtn.keys():
rtn['passing_coqc'] = rtn['passing_make_coqc']
kwargs = rtn
kwargs = fill_kwargs(kwargs)
filename = filename_of_lib(lib, **kwargs)
if os.path.isfile(filename):
lib = lib_of_filename(filename, **kwargs)
else:
lib = lib
kwargs = fill_kwargs(kwargs)
glob_name = filename_of_lib_helper(lib, libnames=tuple(kwargs['libnames']), non_recursive_libnames=tuple(kwargs['non_recursive_libnames']), ext='.glob')
kwargs = fill_kwargs(kwargs)
v_name = filename_of_lib_helper(lib, libnames=tuple(kwargs['libnames']), non_recursive_libnames=tuple(kwargs['non_recursive_libnames']), ext='.v')
if os.path.isfile(v_name):
kwargs = fill_kwargs(kwargs)
lib = norm_libname(lib, **kwargs)
glob_name = filename_of_lib(lib, ext='.glob', **kwargs)
v_name = filename_of_lib(lib, ext='.v', **kwargs)
if not fast:
get_require_dict(lib, **kwargs)
if lib in lib_imports_slow.keys():
imports = tuple((k for (k, v) in sorted(lib_imports_slow[lib].items(), key=lambda kv: kv[1])))
if lib not in lib_imports_fast.keys():
contents = get_file(v_name, **kwargs)
imports_string = re.sub('\\s+', ' ', ' '.join(IMPORT_LINE_REG.findall(contents))).strip()
lib_imports_fast[lib] = tuple(sorted(set((norm_libname(i, **kwargs) for i in imports_string.split(' ') if i != ''))))
imports = lib_imports_fast[lib]
if not fast:
kwargs = fill_kwargs(kwargs)
existing_logical_names = [i for i in imports if os.path.isfile(filename_of_lib(i, ext='.v', **kwargs))]
if len(existing_logical_names) == 0:
return
filenames_vo_v_glob = [(filename_of_lib(i, ext='.vo', **kwargs), filename_of_lib(i, ext='.v', **kwargs), filename_of_lib(i, ext='.glob', **kwargs)) for i in existing_logical_names]
filenames_vo_v_glob = [(vo_name, v_name, glob_name) for (vo_name, v_name, glob_name) in filenames_vo_v_glob if not (os.path.isfile(glob_name) and os.path.getmtime(glob_name) > os.path.getmtime(v_name))]
for (vo_name, v_name, glob_name) in filenames_vo_v_glob:
if os.path.isfile(glob_name) and (not os.path.getmtime(glob_name) > os.path.getmtime(v_name)):
if os.path.getmtime(v_name) > time.time():
kwargs['log']('WARNING: The file %s comes from the future! (%d > %d)' % (v_name, os.path.getmtime(v_name), time.time()), level=LOG_ALWAYS)
remove_if_local(glob_name, **kwargs)
if os.path.exists(vo_name) and os.path.getmtime(vo_name) >= os.path.getmtime(v_name):
make_one_glob_file(v_name, **kwargs)
filenames_vo_v_glob = [(vo_name, v_name, glob_name) for (vo_name, v_name, glob_name) in filenames_vo_v_glob if not (os.path.exists(vo_name) and os.path.getmtime(vo_name) >= os.path.getmtime(v_name))]
filenames_v = [v_name for (vo_name, v_name, glob_name) in filenames_vo_v_glob]
filenames_glob = [glob_name for (vo_name, v_name, glob_name) in filenames_vo_v_glob]
if len(filenames_vo_v_glob) == 0:
return
extra_filenames_v = get_all_v_files('.', filenames_v) if kwargs['walk_tree'] else []
(stdout_make, stderr_make) = run_coq_makefile_and_make(tuple(sorted(list(filenames_v) + list(extra_filenames_v))), filenames_glob, **kwargs)
imports_list = [recur(k, fast=fast, **kwargs) for k in imports]
return merge_imports(tuple(map(tuple, imports_list + [[lib]])), **kwargs)
return [lib]
|
coq-tools
|
positive
|
def restart_proc(self):
<DeepExtract>
session = {'session': self._app_window.session_manager.session.session_type, 'package': self._package, 'user_script': self._app_window.console_panel.get_js_console().function_content}
</DeepExtract>
<DeepExtract>
self.database = Database()
self.io = IO(self)
self._pid = 0
self._package = None
self._process = None
self._script = None
self._spawned = False
self._resumed = False
self.java_available = False
self._device = None
self._process = None
self._script = None
self.breakpoints = {}
self.java_breakpoints = {}
self.module_initialization_breakpoints = {}
self.java_class_initialization_breakpoints = {}
self.context_tid = 0
</DeepExtract>
self._app_window.session_manager._session = None
self._app_window.session_stopped()
self._app_window._restore_session(session)
|
def restart_proc(self):
session = {'session': self._app_window.session_manager.session.session_type, 'package': self._package, 'user_script': self._app_window.console_panel.get_js_console().function_content}
self.database = Database()
self.io = IO(self)
self._pid = 0
self._package = None
self._process = None
self._script = None
self._spawned = False
self._resumed = False
self.java_available = False
self._device = None
self._process = None
self._script = None
self.breakpoints = {}
self.java_breakpoints = {}
self.module_initialization_breakpoints = {}
self.java_class_initialization_breakpoints = {}
self.context_tid = 0
self._app_window.session_manager._session = None
self._app_window.session_stopped()
self._app_window._restore_session(session)
|
Dwarf
|
positive
|
def visit_Softmax(self, operation: operations.Softmax) -> None:
<DeepExtract>
for value in operation.__dict__.values():
if isinstance(value, Operation):
self.visit(value)
elif isinstance(value, (list, tuple)):
for sub_value in value:
if isinstance(sub_value, Operation):
self.visit(sub_value)
return operation
</DeepExtract>
<DeepExtract>
op_id = self.get_op_id(operation)
print(f'{op_id:32s}', end=': ')
</DeepExtract>
print(f'Softmax({self.get_op_id(operation.x)}, axis={operation.axis})')
|
def visit_Softmax(self, operation: operations.Softmax) -> None:
for value in operation.__dict__.values():
if isinstance(value, Operation):
self.visit(value)
elif isinstance(value, (list, tuple)):
for sub_value in value:
if isinstance(sub_value, Operation):
self.visit(sub_value)
return operation
op_id = self.get_op_id(operation)
print(f'{op_id:32s}', end=': ')
print(f'Softmax({self.get_op_id(operation.x)}, axis={operation.axis})')
|
DNNV
|
positive
|
def prepare_targets(self, proposals, targets):
labels = []
regression_targets = []
for (proposals_per_image, targets_per_image) in zip(proposals, targets):
<DeepExtract>
match_quality_matrix = boxlist_iou(targets_per_image, proposals_per_image)
matched_idxs = self.proposal_matcher(match_quality_matrix)
targets_per_image = targets_per_image.copy_with_fields('labels')
matched_targets = targets_per_image[matched_idxs.clamp(min=0)]
matched_targets.add_field('matched_idxs', matched_idxs)
matched_targets = matched_targets
</DeepExtract>
matched_idxs = matched_targets.get_field('matched_idxs')
labels_per_image = matched_targets.get_field('labels')
labels_per_image = labels_per_image.to(dtype=torch.int64)
bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[bg_inds] = 0
ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
labels_per_image[ignore_inds] = -1
regression_targets_per_image = self.box_coder.encode(matched_targets.bbox, proposals_per_image.bbox)
labels.append(labels_per_image)
regression_targets.append(regression_targets_per_image)
return (labels, regression_targets)
|
def prepare_targets(self, proposals, targets):
labels = []
regression_targets = []
for (proposals_per_image, targets_per_image) in zip(proposals, targets):
match_quality_matrix = boxlist_iou(targets_per_image, proposals_per_image)
matched_idxs = self.proposal_matcher(match_quality_matrix)
targets_per_image = targets_per_image.copy_with_fields('labels')
matched_targets = targets_per_image[matched_idxs.clamp(min=0)]
matched_targets.add_field('matched_idxs', matched_idxs)
matched_targets = matched_targets
matched_idxs = matched_targets.get_field('matched_idxs')
labels_per_image = matched_targets.get_field('labels')
labels_per_image = labels_per_image.to(dtype=torch.int64)
bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[bg_inds] = 0
ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
labels_per_image[ignore_inds] = -1
regression_targets_per_image = self.box_coder.encode(matched_targets.bbox, proposals_per_image.bbox)
labels.append(labels_per_image)
regression_targets.append(regression_targets_per_image)
return (labels, regression_targets)
|
DetNAS
|
positive
|
def _rename(self, old_name, new_name):
""" Renames an object in this context. """
old_filename = self._name_to_filename_map[old_name]
old_file = File(join(self.path, old_filename))
<DeepExtract>
if old_name in self._cache:
obj = self._cache[old_name]
else:
path = join(self.path, self._name_to_filename_map[old_name])
for serializer in self._get_object_serializers():
if serializer.can_load(path):
try:
state = serializer.load(path)
except:
state = File(path)
logger.exception('Error loading resource at %s' % path)
break
else:
if os.path.isdir(path):
state = self._context_factory(old_name, path)
elif os.path.isfile(path):
state = File(path)
else:
raise ValueError('unrecognized file for %s' % old_name)
obj = naming_manager.get_object_instance(state, old_name, self)
self._cache[old_name] = obj
obj = obj
</DeepExtract>
if old_file.is_folder:
new_filename = new_name
new_file = File(join(self.path, new_filename))
old_file.move(new_file)
obj.path = new_file.path
self._cache[new_name] = obj
del self._cache[old_name]
obj.refresh()
elif isinstance(obj, File):
new_filename = new_name
new_file = File(join(self.path, new_filename))
old_file.move(new_file)
obj.path = new_file.path
self._cache[new_name] = obj
del self._cache[old_name]
else:
new_filename = new_name + old_file.ext
new_file = File(join(self.path, new_filename))
old_file.delete()
if old_name in self._cache:
self._cache[new_name] = self._cache[old_name]
del self._cache[old_name]
<DeepExtract>
self._bind(new_name, obj)
</DeepExtract>
del self._name_to_filename_map[old_name]
self._name_to_filename_map[new_name] = new_filename
if old_name in self._attributes:
self._attributes[new_name] = self._attributes[old_name]
del self._attributes[old_name]
<DeepExtract>
path = join(self.path, self.ATTRIBUTES_FILE)
f = open(path, 'wb')
pickle.dump(self._attributes, f, 1)
f.close()
</DeepExtract>
|
def _rename(self, old_name, new_name):
""" Renames an object in this context. """
old_filename = self._name_to_filename_map[old_name]
old_file = File(join(self.path, old_filename))
if old_name in self._cache:
obj = self._cache[old_name]
else:
path = join(self.path, self._name_to_filename_map[old_name])
for serializer in self._get_object_serializers():
if serializer.can_load(path):
try:
state = serializer.load(path)
except:
state = File(path)
logger.exception('Error loading resource at %s' % path)
break
else:
if os.path.isdir(path):
state = self._context_factory(old_name, path)
elif os.path.isfile(path):
state = File(path)
else:
raise ValueError('unrecognized file for %s' % old_name)
obj = naming_manager.get_object_instance(state, old_name, self)
self._cache[old_name] = obj
obj = obj
if old_file.is_folder:
new_filename = new_name
new_file = File(join(self.path, new_filename))
old_file.move(new_file)
obj.path = new_file.path
self._cache[new_name] = obj
del self._cache[old_name]
obj.refresh()
elif isinstance(obj, File):
new_filename = new_name
new_file = File(join(self.path, new_filename))
old_file.move(new_file)
obj.path = new_file.path
self._cache[new_name] = obj
del self._cache[old_name]
else:
new_filename = new_name + old_file.ext
new_file = File(join(self.path, new_filename))
old_file.delete()
if old_name in self._cache:
self._cache[new_name] = self._cache[old_name]
del self._cache[old_name]
self._bind(new_name, obj)
del self._name_to_filename_map[old_name]
self._name_to_filename_map[new_name] = new_filename
if old_name in self._attributes:
self._attributes[new_name] = self._attributes[old_name]
del self._attributes[old_name]
path = join(self.path, self.ATTRIBUTES_FILE)
f = open(path, 'wb')
pickle.dump(self._attributes, f, 1)
f.close()
|
apptools
|
positive
|
def getTransactionSettings(self):
"""
Return distributed transaction settings currently in force.
:return: A settings object.
:rtype: TransactionSettings
"""
<DeepExtract>
if dedicated and (not self.has_dedicated_mini_repository):
self.mini_repository = copy.copy(self.mini_repository)
self.has_dedicated_mini_repository = True
client = self.mini_repository
</DeepExtract>
return client.transaction_settings
|
def getTransactionSettings(self):
"""
Return distributed transaction settings currently in force.
:return: A settings object.
:rtype: TransactionSettings
"""
if dedicated and (not self.has_dedicated_mini_repository):
self.mini_repository = copy.copy(self.mini_repository)
self.has_dedicated_mini_repository = True
client = self.mini_repository
return client.transaction_settings
|
agraph-python
|
positive
|
def __call__(self, imgs):
<DeepExtract>
(t, h, w, c) = imgs.shape
(th, tw) = self.size
if w == tw and h == th:
(i, j, h, w) = (0, 0, h, w)
i = random.randint(0, h - th) if h != th else 0
j = random.randint(0, w - tw) if w != tw else 0
(i, j, h, w) = (i, j, th, tw)
</DeepExtract>
imgs = imgs[:, i:i + h, j:j + w, :]
return imgs
|
def __call__(self, imgs):
(t, h, w, c) = imgs.shape
(th, tw) = self.size
if w == tw and h == th:
(i, j, h, w) = (0, 0, h, w)
i = random.randint(0, h - th) if h != th else 0
j = random.randint(0, w - tw) if w != tw else 0
(i, j, h, w) = (i, j, th, tw)
imgs = imgs[:, i:i + h, j:j + w, :]
return imgs
|
deep-smoke-machine
|
positive
|
def visit_Import(self, node):
<DeepExtract>
self.new_lines = max(self.new_lines, n)
</DeepExtract>
for item in node.names:
<DeepExtract>
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append('import ')
</DeepExtract>
<DeepExtract>
f = self.get_visitor(item)
if f is not None:
return f(item)
return self.generic_visit(item)
</DeepExtract>
|
def visit_Import(self, node):
self.new_lines = max(self.new_lines, n)
for item in node.names:
if self.new_lines:
if self.result:
self.result.append('\n' * self.new_lines)
self.result.append(self.indent_with * self.indentation)
self.new_lines = 0
self.result.append('import ')
f = self.get_visitor(item)
if f is not None:
return f(item)
return self.generic_visit(item)
|
atsf4g-co
|
positive
|
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
<DeepExtract>
new_x_shape = mixed_query_layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
mixed_query_layer = mixed_query_layer.view(*new_x_shape)
query_layer = mixed_query_layer.permute(0, 2, 1, 3)
</DeepExtract>
<DeepExtract>
new_x_shape = mixed_key_layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
mixed_key_layer = mixed_key_layer.view(*new_x_shape)
key_layer = mixed_key_layer.permute(0, 2, 1, 3)
</DeepExtract>
<DeepExtract>
new_x_shape = mixed_value_layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
mixed_value_layer = mixed_value_layer.view(*new_x_shape)
value_layer = mixed_value_layer.permute(0, 2, 1, 3)
</DeepExtract>
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
|
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
new_x_shape = mixed_query_layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
mixed_query_layer = mixed_query_layer.view(*new_x_shape)
query_layer = mixed_query_layer.permute(0, 2, 1, 3)
new_x_shape = mixed_key_layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
mixed_key_layer = mixed_key_layer.view(*new_x_shape)
key_layer = mixed_key_layer.permute(0, 2, 1, 3)
new_x_shape = mixed_value_layer.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
mixed_value_layer = mixed_value_layer.view(*new_x_shape)
value_layer = mixed_value_layer.permute(0, 2, 1, 3)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
|
AAAI_2020_CommonsenseQA
|
positive
|
def test_defaults(self):
<DeepExtract>
msg = '{title:{fill}^{width}}'.format(title='simulate screen edges', fill='-', width=width - 2)
print('{edge}{msg}{edge}'.format(edge='|', msg=msg))
</DeepExtract>
format = MenuFormatBuilder()
items = [MenuItem('This is Item 1'), MenuItem('This is Item 2'), MenuItem('This is Item 3')]
Screen().printf(format.format(title='This is My Title', subtitle='This is My Little Subtitle', items=items))
|
def test_defaults(self):
msg = '{title:{fill}^{width}}'.format(title='simulate screen edges', fill='-', width=width - 2)
print('{edge}{msg}{edge}'.format(edge='|', msg=msg))
format = MenuFormatBuilder()
items = [MenuItem('This is Item 1'), MenuItem('This is Item 2'), MenuItem('This is Item 3')]
Screen().printf(format.format(title='This is My Title', subtitle='This is My Little Subtitle', items=items))
|
console-menu
|
positive
|
def write_predictions_extended(all_examples, all_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, orig_data_file, start_n_top, end_n_top, version_2_with_negative, tokenizer, verbose_logging):
""" XLNet write prediction logic (more complex than Bert's).
Write final predictions to the json file and log-odds of null if needed.
Requires utils_squad_evaluate.py
"""
_PrelimPrediction = collections.namedtuple('PrelimPrediction', ['feature_index', 'start_index', 'end_index', 'start_log_prob', 'end_log_prob'])
_NbestPrediction = collections.namedtuple('NbestPrediction', ['text', 'start_log_prob', 'end_log_prob'])
logger.info('Writing predictions to: %s', output_prediction_file)
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
score_null = 1000000
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
cur_null_score = result.cls_logits
score_null = min(score_null, cur_null_score)
for i in range(start_n_top):
for j in range(end_n_top):
start_log_prob = result.start_top_log_probs[i]
start_index = result.start_top_index[i]
j_index = i * end_n_top + j
end_log_prob = result.end_top_log_probs[j_index]
end_index = result.end_top_index[j_index]
if start_index >= feature.paragraph_len - 1:
continue
if end_index >= feature.paragraph_len - 1:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(_PrelimPrediction(feature_index=feature_index, start_index=start_index, end_index=end_index, start_log_prob=start_log_prob, end_log_prob=end_log_prob))
prelim_predictions = sorted(prelim_predictions, key=lambda x: x.start_log_prob + x.end_log_prob, reverse=True)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
tok_tokens = feature.tokens[pred.start_index:pred.end_index + 1]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:orig_doc_end + 1]
tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
tok_text = tok_text.strip()
tok_text = ' '.join(tok_text.split())
orig_text = ' '.join(orig_tokens)
<DeepExtract>
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == ' ':
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = ''.join(ns_chars)
final_text = (ns_text, ns_to_s_map)
tokenizer = BasicTokenizer(do_lower_case=tokenizer.do_lower_case)
tok_text = ' '.join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(tok_text)
if start_position == -1:
if verbose_logging:
logger.info("Unable to find text: '%s' in '%s'" % (tok_text, orig_text))
final_text = orig_text
end_position = start_position + len(tok_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text)
final_text = orig_text
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
final_text = orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
final_text = orig_text
output_text = orig_text[orig_start_position:orig_end_position + 1]
final_text = output_text
</DeepExtract>
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(_NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob))
if not nbest:
nbest.append(_NbestPrediction(text='', start_log_prob=-1000000.0, end_log_prob=-1000000.0))
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_log_prob + entry.end_log_prob)
if not best_non_null_entry:
best_non_null_entry = entry
<DeepExtract>
if not total_scores:
probs = []
max_score = None
for score in total_scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in total_scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
probs = probs
</DeepExtract>
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output['text'] = entry.text
output['probability'] = probs[i]
output['start_log_prob'] = entry.start_log_prob
output['end_log_prob'] = entry.end_log_prob
nbest_json.append(output)
assert len(nbest_json) >= 1
assert best_non_null_entry is not None
score_diff = score_null
scores_diff_json[example.qas_id] = score_diff
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, 'w') as writer:
writer.write(json.dumps(all_predictions, indent=4) + '\n')
with open(output_nbest_file, 'w') as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + '\n')
if version_2_with_negative:
with open(output_null_log_odds_file, 'w') as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + '\n')
with open(orig_data_file, 'r', encoding='utf-8') as reader:
orig_data = json.load(reader)['data']
qid_to_has_ans = make_qid_to_has_ans(orig_data)
has_ans_qids = [k for (k, v) in qid_to_has_ans.items() if v]
no_ans_qids = [k for (k, v) in qid_to_has_ans.items() if not v]
(exact_raw, f1_raw) = get_raw_scores(orig_data, all_predictions)
out_eval = {}
find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans)
return out_eval
|
def write_predictions_extended(all_examples, all_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, orig_data_file, start_n_top, end_n_top, version_2_with_negative, tokenizer, verbose_logging):
""" XLNet write prediction logic (more complex than Bert's).
Write final predictions to the json file and log-odds of null if needed.
Requires utils_squad_evaluate.py
"""
_PrelimPrediction = collections.namedtuple('PrelimPrediction', ['feature_index', 'start_index', 'end_index', 'start_log_prob', 'end_log_prob'])
_NbestPrediction = collections.namedtuple('NbestPrediction', ['text', 'start_log_prob', 'end_log_prob'])
logger.info('Writing predictions to: %s', output_prediction_file)
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
score_null = 1000000
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
cur_null_score = result.cls_logits
score_null = min(score_null, cur_null_score)
for i in range(start_n_top):
for j in range(end_n_top):
start_log_prob = result.start_top_log_probs[i]
start_index = result.start_top_index[i]
j_index = i * end_n_top + j
end_log_prob = result.end_top_log_probs[j_index]
end_index = result.end_top_index[j_index]
if start_index >= feature.paragraph_len - 1:
continue
if end_index >= feature.paragraph_len - 1:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(_PrelimPrediction(feature_index=feature_index, start_index=start_index, end_index=end_index, start_log_prob=start_log_prob, end_log_prob=end_log_prob))
prelim_predictions = sorted(prelim_predictions, key=lambda x: x.start_log_prob + x.end_log_prob, reverse=True)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
tok_tokens = feature.tokens[pred.start_index:pred.end_index + 1]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:orig_doc_end + 1]
tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
tok_text = tok_text.strip()
tok_text = ' '.join(tok_text.split())
orig_text = ' '.join(orig_tokens)
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == ' ':
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = ''.join(ns_chars)
final_text = (ns_text, ns_to_s_map)
tokenizer = BasicTokenizer(do_lower_case=tokenizer.do_lower_case)
tok_text = ' '.join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(tok_text)
if start_position == -1:
if verbose_logging:
logger.info("Unable to find text: '%s' in '%s'" % (tok_text, orig_text))
final_text = orig_text
end_position = start_position + len(tok_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text)
final_text = orig_text
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
final_text = orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
final_text = orig_text
output_text = orig_text[orig_start_position:orig_end_position + 1]
final_text = output_text
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(_NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob))
if not nbest:
nbest.append(_NbestPrediction(text='', start_log_prob=-1000000.0, end_log_prob=-1000000.0))
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_log_prob + entry.end_log_prob)
if not best_non_null_entry:
best_non_null_entry = entry
if not total_scores:
probs = []
max_score = None
for score in total_scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in total_scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
probs = probs
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output['text'] = entry.text
output['probability'] = probs[i]
output['start_log_prob'] = entry.start_log_prob
output['end_log_prob'] = entry.end_log_prob
nbest_json.append(output)
assert len(nbest_json) >= 1
assert best_non_null_entry is not None
score_diff = score_null
scores_diff_json[example.qas_id] = score_diff
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, 'w') as writer:
writer.write(json.dumps(all_predictions, indent=4) + '\n')
with open(output_nbest_file, 'w') as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + '\n')
if version_2_with_negative:
with open(output_null_log_odds_file, 'w') as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + '\n')
with open(orig_data_file, 'r', encoding='utf-8') as reader:
orig_data = json.load(reader)['data']
qid_to_has_ans = make_qid_to_has_ans(orig_data)
has_ans_qids = [k for (k, v) in qid_to_has_ans.items() if v]
no_ans_qids = [k for (k, v) in qid_to_has_ans.items() if not v]
(exact_raw, f1_raw) = get_raw_scores(orig_data, all_predictions)
out_eval = {}
find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans)
return out_eval
|
AAAI_2020_CommonsenseQA
|
positive
|
def test_save_lecture_with_update(self):
kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
<DeepExtract>
client = Client()
client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
client = client
</DeepExtract>
response = client.post('/teacher/course/1/save_lecture', {'lecture_id': 1, 'title': 'Blade vs Evil', 'week_num': 1, 'lecture_num': 1, 'description': 'Video of a fight', 'youtube_url': '', 'preferred_service': settings.YOUTUBE_VIDEO_PLAYER}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'saved')
self.assertEqual(array['status'], 'success')
|
def test_save_lecture_with_update(self):
kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
client = Client()
client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
client = client
response = client.post('/teacher/course/1/save_lecture', {'lecture_id': 1, 'title': 'Blade vs Evil', 'week_num': 1, 'lecture_num': 1, 'description': 'Video of a fight', 'youtube_url': '', 'preferred_service': settings.YOUTUBE_VIDEO_PLAYER}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'saved')
self.assertEqual(array['status'], 'success')
|
academicstoday-django
|
positive
|
def test_dogleg_rosen(self):
<DeepExtract>
args = {'x1': Ch(-120.0), 'x2': Ch(-100.0)}
r1 = Ch(lambda x1, x2: (x2 - x1 ** 2.0) * 10.0, args)
r2 = Ch(lambda x1: x1 * -1.0 + 1, args)
func = [r1, r2]
(obj, freevars) = (func, [args['x1'], args['x2']])
</DeepExtract>
minimize(fun=obj, x0=freevars, method='dogleg', options={'maxiter': 337, 'disp': False})
self.assertTrue(freevars[0].r[0] == 1.0)
self.assertTrue(freevars[1].r[0] == 1.0)
|
def test_dogleg_rosen(self):
args = {'x1': Ch(-120.0), 'x2': Ch(-100.0)}
r1 = Ch(lambda x1, x2: (x2 - x1 ** 2.0) * 10.0, args)
r2 = Ch(lambda x1: x1 * -1.0 + 1, args)
func = [r1, r2]
(obj, freevars) = (func, [args['x1'], args['x2']])
minimize(fun=obj, x0=freevars, method='dogleg', options={'maxiter': 337, 'disp': False})
self.assertTrue(freevars[0].r[0] == 1.0)
self.assertTrue(freevars[1].r[0] == 1.0)
|
chumpy
|
positive
|
def fail(self, value):
"""
Checks that the value **does not** respect this contract.
Raises an exception if it does.
:raise: ValueError
"""
try:
<DeepExtract>
context = self.check_contract({}, value, silent=False)
</DeepExtract>
except ContractNotRespected:
pass
else:
msg = 'I did not expect that this value would satisfy this contract.\n'
msg += '- value: %s\n' % describe_value(value)
msg += '- contract: %s\n' % self
msg += '- context: %r' % context
raise ValueError(msg)
|
def fail(self, value):
"""
Checks that the value **does not** respect this contract.
Raises an exception if it does.
:raise: ValueError
"""
try:
context = self.check_contract({}, value, silent=False)
except ContractNotRespected:
pass
else:
msg = 'I did not expect that this value would satisfy this contract.\n'
msg += '- value: %s\n' % describe_value(value)
msg += '- contract: %s\n' % self
msg += '- context: %r' % context
raise ValueError(msg)
|
contracts
|
positive
|
def _install(tarball):
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
<DeepExtract>
import copy
import operator
from tarfile import ExtractError
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 448
self.extract(tarinfo, tar)
if sys.version_info < (2, 4):
def sorter(dir1, dir2):
return cmp(dir1.name, dir2.name)
directories.sort(sorter)
directories.reverse()
else:
directories.sort(key=operator.attrgetter('name'), reverse=True)
for tarinfo in directories:
dirpath = os.path.join(tar, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError:
e = sys.exc_info()[1]
if self.errorlevel > 1:
raise
else:
self._dbg(1, 'tarfile: %s' % e)
</DeepExtract>
tar.close()
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
log.warn('Installing Distribute')
if not _python_cmd('setup.py', 'install'):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
finally:
os.chdir(old_wd)
|
def _install(tarball):
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
import copy
import operator
from tarfile import ExtractError
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 448
self.extract(tarinfo, tar)
if sys.version_info < (2, 4):
def sorter(dir1, dir2):
return cmp(dir1.name, dir2.name)
directories.sort(sorter)
directories.reverse()
else:
directories.sort(key=operator.attrgetter('name'), reverse=True)
for tarinfo in directories:
dirpath = os.path.join(tar, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError:
e = sys.exc_info()[1]
if self.errorlevel > 1:
raise
else:
self._dbg(1, 'tarfile: %s' % e)
tar.close()
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
log.warn('Installing Distribute')
if not _python_cmd('setup.py', 'install'):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
finally:
os.chdir(old_wd)
|
Dust
|
positive
|
def update_route53_pointer(x_hosted_zone, validation_setting, route53_stack, target_cert, acm_stack, settings):
"""
Iterates over each HostedZone and upon finding the right one
:param ecs_composex.route53.route53_zone.HostedZone x_hosted_zone: List of HostedZones defined
:param troposphere.certificatemanager.DomainValidationOption validation_setting:
:param XStack route53_stack:
:param ComposeXStack acm_stack:
:param ecs_composex.common.settings.ComposeXSettings settings:
"""
if x_hosted_zone.cfn_resource and route53_stack.title not in acm_stack.DependsOn:
acm_stack.DependsOn.append(route53_stack.title)
validate_domain_name(validation_setting.DomainName, x_hosted_zone.zone_name)
if x_hosted_zone.cfn_resource:
<DeepExtract>
zone_id_attribute = x_hosted_zone.attributes_outputs[PUBLIC_DNS_ZONE_ID]
add_parameters(acm_stack.stack_template, [zone_id_attribute['ImportParameter']])
acm_stack.Parameters.update({zone_id_attribute['ImportParameter'].title: zone_id_attribute['ImportValue']})
setattr(target_cert, 'HostedZoneId', Ref(zone_id_attribute['ImportParameter']))
</DeepExtract>
elif x_hosted_zone.mappings:
<DeepExtract>
add_update_mapping(acm_stack.stack_template, x_hosted_zone.module.mapping_key, settings.mappings[x_hosted_zone.module.mapping_key])
zone_id_attribute = x_hosted_zone.attributes_outputs[PUBLIC_DNS_ZONE_ID]
setattr(validation_setting, 'HostedZoneId', zone_id_attribute['ImportValue'])
</DeepExtract>
else:
raise RuntimeError('Failed to associate route53 zone to acm validation option')
|
def update_route53_pointer(x_hosted_zone, validation_setting, route53_stack, target_cert, acm_stack, settings):
"""
Iterates over each HostedZone and upon finding the right one
:param ecs_composex.route53.route53_zone.HostedZone x_hosted_zone: List of HostedZones defined
:param troposphere.certificatemanager.DomainValidationOption validation_setting:
:param XStack route53_stack:
:param ComposeXStack acm_stack:
:param ecs_composex.common.settings.ComposeXSettings settings:
"""
if x_hosted_zone.cfn_resource and route53_stack.title not in acm_stack.DependsOn:
acm_stack.DependsOn.append(route53_stack.title)
validate_domain_name(validation_setting.DomainName, x_hosted_zone.zone_name)
if x_hosted_zone.cfn_resource:
zone_id_attribute = x_hosted_zone.attributes_outputs[PUBLIC_DNS_ZONE_ID]
add_parameters(acm_stack.stack_template, [zone_id_attribute['ImportParameter']])
acm_stack.Parameters.update({zone_id_attribute['ImportParameter'].title: zone_id_attribute['ImportValue']})
setattr(target_cert, 'HostedZoneId', Ref(zone_id_attribute['ImportParameter']))
elif x_hosted_zone.mappings:
add_update_mapping(acm_stack.stack_template, x_hosted_zone.module.mapping_key, settings.mappings[x_hosted_zone.module.mapping_key])
zone_id_attribute = x_hosted_zone.attributes_outputs[PUBLIC_DNS_ZONE_ID]
setattr(validation_setting, 'HostedZoneId', zone_id_attribute['ImportValue'])
else:
raise RuntimeError('Failed to associate route53 zone to acm validation option')
|
ecs_composex
|
positive
|
def proxymode():
global choice2
choice2 = input('Proxy y/n: ')
if choice2 == 'y':
<DeepExtract>
global choice3
choice3 = input('Start 1: ')
if choice3 == '0':
choicedownproxy()
elif choice3 == '1':
choicedownsocks()
else:
print('You mistyped, try again.')
choiceproxysocks()
</DeepExtract>
else:
<DeepExtract>
global threads
try:
threads = int(input('Insert number of threads (1000): '))
except ValueError:
threads = 1000
print('1000 threads selected.\n')
multiplication()
</DeepExtract>
|
def proxymode():
global choice2
choice2 = input('Proxy y/n: ')
if choice2 == 'y':
global choice3
choice3 = input('Start 1: ')
if choice3 == '0':
choicedownproxy()
elif choice3 == '1':
choicedownsocks()
else:
print('You mistyped, try again.')
choiceproxysocks()
else:
global threads
try:
threads = int(input('Insert number of threads (1000): '))
except ValueError:
threads = 1000
print('1000 threads selected.\n')
multiplication()
|
DDOS-RootSec
|
positive
|
def prep_data(plot_data, cache_file, only_executed, clipped_start_time, clipped_end_time, plot_params_dict, compute_impact_stats=False):
""" Prepares and caches POV market impact experiment output files for further aggregation and processing.
:param plot_data: Data structure holding paths to relevant ABIDES output files, see e.g. plot_configs/single_day/pov_single_day_config.example.json["PLOT_DATA"]
:param cache_file: Path to file where processed data will be cached.
:param only_executed: Switch as to only include transacted volume as opposed to including limit orders.
:param clipped_start_time: Starting time at which to clip data in format 'HH:MM:SS'
:param clipped_end_time: Finishing time at which to clip data in format 'HH:MM:SS'
:param compute_impact_statistics: Switch whether to compute impact statistics
:type plot_data: list
:type cache_file: str
:type only_executed: bool
:type clipped_start_time: str
:type clipped_end_time: str
:type compute_impact_stats: bool
:return:
"""
out_data = []
for data_dict in plot_data:
print(f"Processing data for POV {data_dict['participation_of_volume']}")
abides_orderbook_df = make_orderbook_for_analysis(data_dict['no_execution_exchange_path'], data_dict['no_execution_orderbook_path'], num_levels=1)
abides_execution_orderbook_df = make_orderbook_for_analysis(data_dict['yes_execution_exchange_path'], data_dict['yes_execution_orderbook_path'], num_levels=1)
if only_executed:
abides_orderbook_df = abides_orderbook_df.loc[abides_orderbook_df['TYPE'] == 'ORDER_EXECUTED']
abides_execution_orderbook_df = abides_execution_orderbook_df.loc[abides_execution_orderbook_df['TYPE'] == 'ORDER_EXECUTED']
historical_date = pd.Timestamp(abides_orderbook_df.index[0].date())
start = historical_date + pd.to_timedelta(clipped_start_time)
end = historical_date + pd.to_timedelta(clipped_end_time)
<DeepExtract>
if not check_date_in_string(plot_params_dict['shade_start_datetime']):
shade_start_time = historical_date + pd.to_timedelta(plot_params_dict['shade_start_datetime'])
shade_start_time = shade_start_time.strftime('%Y-%m-%d %H:%M:%S')
else:
shade_start_time = plot_params_dict['shade_start_datetime']
if not check_date_in_string(plot_params_dict['shade_end_datetime']):
shade_end_time = historical_date + pd.to_timedelta(plot_params_dict['shade_end_datetime'])
shade_end_time = shade_end_time.strftime('%Y-%m-%d %H:%M:%S')
else:
shade_end_time = plot_params_dict['shade_end_datetime']
(shade_start_time, shade_end_time) = (shade_start_time, shade_end_time)
</DeepExtract>
abides_orderbook_df = clip_times(abides_orderbook_df, start, end)
abides_execution_orderbook_df = clip_times(abides_execution_orderbook_df, start, end)
date_str = historical_date.strftime('%Y%m%d')
pov = data_dict['participation_of_volume']
seed = data_dict['seed']
if compute_impact_stats:
stats_dict = compute_impact_statistics(abides_orderbook_df, abides_execution_orderbook_df, shade_start_time, shade_end_time, date_str=date_str, pov=pov, seed=seed, experiment_name=plot_params_dict['experiment_name'], execution_agent_name=plot_params_dict['execution_agent_name'], log_dir=plot_params_dict['log_dir'], spread_lookback=plot_params_dict['spread_lookback'])
print(f"Statistics for participation of volume at level {100 * data_dict['participation_of_volume']}%")
print('Statistics:')
pprint(stats_dict)
out_data.append({'no_execution_df': abides_orderbook_df, 'yes_execution_df': abides_execution_orderbook_df, 'impact_statistics': stats_dict, 'pov': data_dict['participation_of_volume']})
else:
out_data.append({'no_execution_df': abides_orderbook_df, 'yes_execution_df': abides_execution_orderbook_df, 'pov': data_dict['participation_of_volume']})
with open(cache_file, 'wb') as f:
pickle.dump(out_data, f)
return out_data
|
def prep_data(plot_data, cache_file, only_executed, clipped_start_time, clipped_end_time, plot_params_dict, compute_impact_stats=False):
""" Prepares and caches POV market impact experiment output files for further aggregation and processing.
:param plot_data: Data structure holding paths to relevant ABIDES output files, see e.g. plot_configs/single_day/pov_single_day_config.example.json["PLOT_DATA"]
:param cache_file: Path to file where processed data will be cached.
:param only_executed: Switch as to only include transacted volume as opposed to including limit orders.
:param clipped_start_time: Starting time at which to clip data in format 'HH:MM:SS'
:param clipped_end_time: Finishing time at which to clip data in format 'HH:MM:SS'
:param compute_impact_statistics: Switch whether to compute impact statistics
:type plot_data: list
:type cache_file: str
:type only_executed: bool
:type clipped_start_time: str
:type clipped_end_time: str
:type compute_impact_stats: bool
:return:
"""
out_data = []
for data_dict in plot_data:
print(f"Processing data for POV {data_dict['participation_of_volume']}")
abides_orderbook_df = make_orderbook_for_analysis(data_dict['no_execution_exchange_path'], data_dict['no_execution_orderbook_path'], num_levels=1)
abides_execution_orderbook_df = make_orderbook_for_analysis(data_dict['yes_execution_exchange_path'], data_dict['yes_execution_orderbook_path'], num_levels=1)
if only_executed:
abides_orderbook_df = abides_orderbook_df.loc[abides_orderbook_df['TYPE'] == 'ORDER_EXECUTED']
abides_execution_orderbook_df = abides_execution_orderbook_df.loc[abides_execution_orderbook_df['TYPE'] == 'ORDER_EXECUTED']
historical_date = pd.Timestamp(abides_orderbook_df.index[0].date())
start = historical_date + pd.to_timedelta(clipped_start_time)
end = historical_date + pd.to_timedelta(clipped_end_time)
if not check_date_in_string(plot_params_dict['shade_start_datetime']):
shade_start_time = historical_date + pd.to_timedelta(plot_params_dict['shade_start_datetime'])
shade_start_time = shade_start_time.strftime('%Y-%m-%d %H:%M:%S')
else:
shade_start_time = plot_params_dict['shade_start_datetime']
if not check_date_in_string(plot_params_dict['shade_end_datetime']):
shade_end_time = historical_date + pd.to_timedelta(plot_params_dict['shade_end_datetime'])
shade_end_time = shade_end_time.strftime('%Y-%m-%d %H:%M:%S')
else:
shade_end_time = plot_params_dict['shade_end_datetime']
(shade_start_time, shade_end_time) = (shade_start_time, shade_end_time)
abides_orderbook_df = clip_times(abides_orderbook_df, start, end)
abides_execution_orderbook_df = clip_times(abides_execution_orderbook_df, start, end)
date_str = historical_date.strftime('%Y%m%d')
pov = data_dict['participation_of_volume']
seed = data_dict['seed']
if compute_impact_stats:
stats_dict = compute_impact_statistics(abides_orderbook_df, abides_execution_orderbook_df, shade_start_time, shade_end_time, date_str=date_str, pov=pov, seed=seed, experiment_name=plot_params_dict['experiment_name'], execution_agent_name=plot_params_dict['execution_agent_name'], log_dir=plot_params_dict['log_dir'], spread_lookback=plot_params_dict['spread_lookback'])
print(f"Statistics for participation of volume at level {100 * data_dict['participation_of_volume']}%")
print('Statistics:')
pprint(stats_dict)
out_data.append({'no_execution_df': abides_orderbook_df, 'yes_execution_df': abides_execution_orderbook_df, 'impact_statistics': stats_dict, 'pov': data_dict['participation_of_volume']})
else:
out_data.append({'no_execution_df': abides_orderbook_df, 'yes_execution_df': abides_execution_orderbook_df, 'pov': data_dict['participation_of_volume']})
with open(cache_file, 'wb') as f:
pickle.dump(out_data, f)
return out_data
|
abides
|
positive
|
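The prep_data pair above clips each orderbook DataFrame to a [start, end] window built from the session date plus 'HH:MM:SS' offsets. The sketch below illustrates that kind of clipping on a DatetimeIndex-ed DataFrame; it is a stand-in written for this note, not the abides clip_times utility itself, which may differ.

import pandas as pd

# Illustrative stand-in for the time-window clipping used by prep_data above.
def clip_to_window(df, start_str, end_str):
    session_date = pd.Timestamp(df.index[0].date())
    start = session_date + pd.to_timedelta(start_str)
    end = session_date + pd.to_timedelta(end_str)
    return df.loc[(df.index >= start) & (df.index <= end)]

if __name__ == '__main__':
    idx = pd.date_range('2020-06-01 09:30:00', periods=5, freq='H')
    demo = pd.DataFrame({'MID_PRICE': [100.0, 100.5, 101.0, 100.8, 101.2]}, index=idx)
    print(clip_to_window(demo, '10:00:00', '12:30:00'))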
def create(self):
"""Initialize the recipe data in `self.recipe_dir`"""
<DeepExtract>
super(LanguageModel, self).check_parameters()
self._check_level()
self._check_order()
self._check_silence_probability()
self._check_position_dependent()
</DeepExtract>
self.a2k.setup_phones()
self.a2k.setup_silences()
self.a2k.setup_variants()
text = self.a2k.setup_text()
lm_text = os.path.join(self.a2k._local_path(), 'lm_text.txt')
if self.level == 'word':
shutil.copy(text, lm_text)
self.a2k.setup_lexicon()
else:
with utils.open_utf8(lm_text, 'w') as out:
for (k, v) in sorted(self.corpus.phonemize_text().items()):
out.write(u'{} {}\n'.format(k, v))
self.a2k.setup_phone_lexicon()
self.a2k.setup_kaldi_folders()
self.a2k.setup_machine_specific_scripts()
<DeepExtract>
local = os.path.join(self.recipe_dir, 'local')
if not os.path.isdir(local):
os.makedirs(local)
share = pkg_resources.resource_filename(pkg_resources.Requirement.parse('abkhazia'), 'abkhazia/share')
for target in ('prepare_lang_wpdpl.sh', 'validate_lang_wpdpl.pl'):
shutil.copy(os.path.join(share, target), os.path.join(local, target))
</DeepExtract>
|
def create(self):
"""Initialize the recipe data in `self.recipe_dir`"""
super(LanguageModel, self).check_parameters()
self._check_level()
self._check_order()
self._check_silence_probability()
self._check_position_dependent()
self.a2k.setup_phones()
self.a2k.setup_silences()
self.a2k.setup_variants()
text = self.a2k.setup_text()
lm_text = os.path.join(self.a2k._local_path(), 'lm_text.txt')
if self.level == 'word':
shutil.copy(text, lm_text)
self.a2k.setup_lexicon()
else:
with utils.open_utf8(lm_text, 'w') as out:
for (k, v) in sorted(self.corpus.phonemize_text().items()):
out.write(u'{} {}\n'.format(k, v))
self.a2k.setup_phone_lexicon()
self.a2k.setup_kaldi_folders()
self.a2k.setup_machine_specific_scripts()
local = os.path.join(self.recipe_dir, 'local')
if not os.path.isdir(local):
os.makedirs(local)
share = pkg_resources.resource_filename(pkg_resources.Requirement.parse('abkhazia'), 'abkhazia/share')
for target in ('prepare_lang_wpdpl.sh', 'validate_lang_wpdpl.pl'):
shutil.copy(os.path.join(share, target), os.path.join(local, target))
|
abkhazia
|
positive
|
def cdf(self, X, Y):
""" Conditional cumulated probability density function P(Y < y | x) of the underlying probability model
Args:
X: x to be conditioned on - numpy array of shape (n_points, ndim_x)
Y: y target values for which the cdf shall be evaluated - numpy array of shape (n_points, ndim_y)
Returns:
P(Y < y | x) cumulated density values for the provided X and Y - numpy array of shape (n_points, )
"""
(X, Y) = self._handle_input_dimensionality(X, Y)
<DeepExtract>
mean = np.expand_dims(self.mu + np.mean(self.mu_slope * X), axis=-1)
</DeepExtract>
return np.squeeze(stats.norm.cdf((Y - mean) / self._std(X)))
|
def cdf(self, X, Y):
""" Conditional cumulated probability density function P(Y < y | x) of the underlying probability model
Args:
X: x to be conditioned on - numpy array of shape (n_points, ndim_x)
Y: y target values for which the cdf shall be evaluated - numpy array of shape (n_points, ndim_y)
Returns:
P(Y < y | x) cumulated density values for the provided X and Y - numpy array of shape (n_points, )
"""
(X, Y) = self._handle_input_dimensionality(X, Y)
mean = np.expand_dims(self.mu + np.mean(self.mu_slope * X), axis=-1)
return np.squeeze(stats.norm.cdf((Y - mean) / self._std(X)))
|
Conditional_Density_Estimation
|
positive
|
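The cdf pair above evaluates P(Y < y | x) for a conditional Gaussian by standardising y against an x-dependent mean. A hedged numeric sketch of that formula, assuming a simple linear mean m(x) = mu + mu_slope * x and a constant standard deviation (the estimator's own _std(X) may differ):

import numpy as np
from scipy import stats

# Assumed toy parameters, for illustration only.
mu, mu_slope, std = 0.0, 0.5, 1.0
x = np.array([1.0, 2.0])
y = np.array([0.3, 1.5])

cond_mean = mu + mu_slope * x               # linear conditional mean m(x)
p = stats.norm.cdf((y - cond_mean) / std)   # P(Y < y | x) = Phi((y - m(x)) / s)
print(p)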
def withdraw(self, *args, **kwargs):
<DeepExtract>
if not self.has['withdraw']:
raise NotImplementedError('{}: method not implemented: {}'.format(self.id, 'withdraw'))
</DeepExtract>
raise NotImplementedError('BacktestExchange does not support method withdraw')
|
def withdraw(self, *args, **kwargs):
if not self.has['withdraw']:
raise NotImplementedError('{}: method not implemented: {}'.format(self.id, 'withdraw'))
raise NotImplementedError('BacktestExchange does not support method withdraw')
|
btrccts
|
positive
|
def open(self, config_file: Optional[str]=None, camera_type: str='FullAreaGray8', **kwargs) -> None:
""""""
super().open(config_file=config_file, camera_type=camera_type, **kwargs)
self.cap.serialWrite('TAGM=5\r\n')
<DeepExtract>
self.cap.serialWrite('BA={}\r\n'.format(format_to_num['8 bits']))
</DeepExtract>
self.cap.set(Cl.FG_CAMERA_LINK_CAMTYP, 208)
self.cap.set(Cl.FG_SENSORREADOUT, 0)
|
def open(self, config_file: Optional[str]=None, camera_type: str='FullAreaGray8', **kwargs) -> None:
""""""
super().open(config_file=config_file, camera_type=camera_type, **kwargs)
self.cap.serialWrite('TAGM=5\r\n')
self.cap.serialWrite('BA={}\r\n'.format(format_to_num['8 bits']))
self.cap.set(Cl.FG_CAMERA_LINK_CAMTYP, 208)
self.cap.set(Cl.FG_SENSORREADOUT, 0)
|
crappy
|
positive
|
def get_user_pv(fs_name: str, plugin_id: str, context: 'Context', team_context: 'TeamContext', vars: Dict[str, Optional[str]]) -> None:
for i in range(0, 15):
<DeepExtract>
try:
output = subprocess.check_output(f'kubectl get pvc -n {team_context.name} {fs_name} -o json > /tmp/pvc.json', stderr=subprocess.STDOUT, shell=True, timeout=29, universal_newlines=True)
except subprocess.CalledProcessError as exc:
_logger.debug('Command failed with exit code {}, stderr: {}'.format(exc.returncode, exc.output))
raise Exception(exc.output)
return output
</DeepExtract>
with open('/tmp/pvc.json', 'r') as f:
pvc = json.load(f)
if 'spec' in pvc and 'volumeName' in pvc['spec'] and pvc['spec']['volumeName']:
volumeName = pvc['spec']['volumeName']
<DeepExtract>
try:
output = subprocess.check_output(f'kubectl get pv {volumeName} -o json > /tmp/pv.json', stderr=subprocess.STDOUT, shell=True, timeout=29, universal_newlines=True)
except subprocess.CalledProcessError as exc:
_logger.debug('Command failed with exit code {}, stderr: {}'.format(exc.returncode, exc.output))
raise Exception(exc.output)
return output
</DeepExtract>
with open('/tmp/pv.json', 'r') as f:
team_pv = json.load(f)
_logger.debug('team pv: %s', json.dumps(team_pv, sort_keys=True, indent=4))
if 'spec' in team_pv:
vars['dnsname'] = team_pv['spec']['csi']['volumeAttributes']['dnsname']
vars['mountname'] = team_pv['spec']['csi']['volumeAttributes']['mountname']
vars['csiProvisionerIdentity'] = team_pv['spec']['csi']['volumeAttributes']['storage.kubernetes.io/csiProvisionerIdentity']
vars['volumeHandle'] = team_pv['spec']['csi']['volumeHandle']
_logger.info(f'FSX Volume is {volumeName}')
break
_logger.info('FSX Volume not ready. Waiting a min')
time.sleep(60)
kubectl.write_kubeconfig(context=context)
else:
raise Exception(f'FSX Volume is not ready for plugin {plugin_id}')
|
def get_user_pv(fs_name: str, plugin_id: str, context: 'Context', team_context: 'TeamContext', vars: Dict[str, Optional[str]]) -> None:
for i in range(0, 15):
try:
output = subprocess.check_output(f'kubectl get pvc -n {team_context.name} {fs_name} -o json > /tmp/pvc.json', stderr=subprocess.STDOUT, shell=True, timeout=29, universal_newlines=True)
except subprocess.CalledProcessError as exc:
_logger.debug('Command failed with exit code {}, stderr: {}'.format(exc.returncode, exc.output))
raise Exception(exc.output)
return output
with open('/tmp/pvc.json', 'r') as f:
pvc = json.load(f)
if 'spec' in pvc and 'volumeName' in pvc['spec'] and pvc['spec']['volumeName']:
volumeName = pvc['spec']['volumeName']
try:
output = subprocess.check_output(f'kubectl get pv {volumeName} -o json > /tmp/pv.json', stderr=subprocess.STDOUT, shell=True, timeout=29, universal_newlines=True)
except subprocess.CalledProcessError as exc:
_logger.debug('Command failed with exit code {}, stderr: {}'.format(exc.returncode, exc.output))
raise Exception(exc.output)
return output
with open('/tmp/pv.json', 'r') as f:
team_pv = json.load(f)
_logger.debug('team pv: %s', json.dumps(team_pv, sort_keys=True, indent=4))
if 'spec' in team_pv:
vars['dnsname'] = team_pv['spec']['csi']['volumeAttributes']['dnsname']
vars['mountname'] = team_pv['spec']['csi']['volumeAttributes']['mountname']
vars['csiProvisionerIdentity'] = team_pv['spec']['csi']['volumeAttributes']['storage.kubernetes.io/csiProvisionerIdentity']
vars['volumeHandle'] = team_pv['spec']['csi']['volumeHandle']
_logger.info(f'FSX Volume is {volumeName}')
break
_logger.info('FSX Volume not ready. Waiting a min')
time.sleep(60)
kubectl.write_kubeconfig(context=context)
else:
raise Exception(f'FSX Volume is not ready for plugin {plugin_id}')
|
aws-orbit-workbench
|
positive
|
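get_user_pv above is a poll-until-ready loop: it shells out to kubectl, inspects the PVC/PV JSON, and sleeps a minute between attempts, giving up after 15 tries. A generic sketch of that retry pattern; the callable, attempt count and delay are placeholders and not part of the Orbit Workbench API.

import time

def wait_for(check, attempts=15, delay=60):
    """Call `check()` until it returns a truthy value or attempts run out."""
    for _ in range(attempts):
        result = check()
        if result:
            return result
        time.sleep(delay)
    raise TimeoutError('resource never became ready')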
def getSVG(shape, opts=None, view_vector=(-1.75, 1.1, 5.0)):
"""
Export a shape to SVG
"""
d = {'width': 800, 'height': 240, 'marginLeft': 200, 'marginTop': 20}
if opts:
d.update(opts)
<DeepExtract>
bb = shape.BoundBox
dimList = [bb.XLength, bb.YLength, bb.ZLength]
if max(dimList) > 10:
uom = UNITS.MM
if min(dimList) < 0.1:
uom = UNITS.IN
if sum(dimList) < 10:
uom = UNITS.IN
uom = UNITS.MM
</DeepExtract>
width = float(d['width'])
height = float(d['height'])
marginLeft = float(d['marginLeft'])
marginTop = float(d['marginTop'])
viewVector = FreeCAD.Base.Vector(view_vector)
(visibleG0, visibleG1, hiddenG0, hiddenG1) = Drawing.project(shape, viewVector)
<DeepExtract>
hiddenPaths = []
visiblePaths = []
if len(Drawing.projectToSVG(shape, viewVector, 'ShowHiddenLines')) > 0:
fullDoc = '<root>%s</root>' % Drawing.projectToSVG(shape, viewVector, 'ShowHiddenLines')
e = ET.ElementTree(ET.fromstring(fullDoc))
segments = e.findall('.//g')
for s in segments:
paths = s.findall('path')
if s.get('stroke-width') == '0.15':
mylist = hiddenPaths
else:
mylist = visiblePaths
for p in paths:
mylist.append(p.get('d'))
(hiddenPaths, visiblePaths) = (hiddenPaths, visiblePaths)
else:
(hiddenPaths, visiblePaths) = ([], [])
</DeepExtract>
bb = visibleG0.BoundBox
bb.add(visibleG1.BoundBox)
bb.add(hiddenG0.BoundBox)
bb.add(hiddenG1.BoundBox)
unitScale = min(width / bb.XLength * 0.75, height / bb.YLength * 0.75)
(xTranslate, yTranslate) = (0 - bb.XMin + marginLeft / unitScale, 0 - bb.YMax - marginTop / unitScale)
hiddenContent = ''
for p in hiddenPaths:
hiddenContent += PATHTEMPLATE % p
visibleContent = ''
for p in visiblePaths:
visibleContent += PATHTEMPLATE % p
svg = SVG_TEMPLATE % {'unitScale': str(unitScale), 'strokeWidth': str(1.0 / unitScale), 'hiddenContent': hiddenContent, 'visibleContent': visibleContent, 'xTranslate': str(xTranslate), 'yTranslate': str(yTranslate), 'width': str(width), 'height': str(height), 'textboxY': str(height - 30), 'uom': str(uom)}
return svg
|
def getSVG(shape, opts=None, view_vector=(-1.75, 1.1, 5.0)):
"""
Export a shape to SVG
"""
d = {'width': 800, 'height': 240, 'marginLeft': 200, 'marginTop': 20}
if opts:
d.update(opts)
bb = shape.BoundBox
dimList = [bb.XLength, bb.YLength, bb.ZLength]
if max(dimList) > 10:
uom = UNITS.MM
if min(dimList) < 0.1:
uom = UNITS.IN
if sum(dimList) < 10:
uom = UNITS.IN
uom = UNITS.MM
width = float(d['width'])
height = float(d['height'])
marginLeft = float(d['marginLeft'])
marginTop = float(d['marginTop'])
viewVector = FreeCAD.Base.Vector(view_vector)
(visibleG0, visibleG1, hiddenG0, hiddenG1) = Drawing.project(shape, viewVector)
hiddenPaths = []
visiblePaths = []
if len(Drawing.projectToSVG(shape, viewVector, 'ShowHiddenLines')) > 0:
fullDoc = '<root>%s</root>' % Drawing.projectToSVG(shape, viewVector, 'ShowHiddenLines')
e = ET.ElementTree(ET.fromstring(fullDoc))
segments = e.findall('.//g')
for s in segments:
paths = s.findall('path')
if s.get('stroke-width') == '0.15':
mylist = hiddenPaths
else:
mylist = visiblePaths
for p in paths:
mylist.append(p.get('d'))
(hiddenPaths, visiblePaths) = (hiddenPaths, visiblePaths)
else:
(hiddenPaths, visiblePaths) = ([], [])
bb = visibleG0.BoundBox
bb.add(visibleG1.BoundBox)
bb.add(hiddenG0.BoundBox)
bb.add(hiddenG1.BoundBox)
unitScale = min(width / bb.XLength * 0.75, height / bb.YLength * 0.75)
(xTranslate, yTranslate) = (0 - bb.XMin + marginLeft / unitScale, 0 - bb.YMax - marginTop / unitScale)
hiddenContent = ''
for p in hiddenPaths:
hiddenContent += PATHTEMPLATE % p
visibleContent = ''
for p in visiblePaths:
visibleContent += PATHTEMPLATE % p
svg = SVG_TEMPLATE % {'unitScale': str(unitScale), 'strokeWidth': str(1.0 / unitScale), 'hiddenContent': hiddenContent, 'visibleContent': visibleContent, 'xTranslate': str(xTranslate), 'yTranslate': str(yTranslate), 'width': str(width), 'height': str(height), 'textboxY': str(height - 30), 'uom': str(uom)}
return svg
|
cadquery
|
positive
|
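getSVG above fits the projected geometry into the requested viewport by deriving a scale and a translation from the combined bounding box. The same arithmetic, pulled out into a standalone sketch with the bounding box passed as plain floats rather than a FreeCAD BoundBox:

# Fit-to-viewport arithmetic as used in getSVG; the 0.75 factor keeps a margin
# inside the viewport, and the translations reposition the box within the margins.
def fit_to_viewport(xmin, xmax, ymin, ymax, width, height, margin_left, margin_top):
    unit_scale = min(width / (xmax - xmin) * 0.75, height / (ymax - ymin) * 0.75)
    x_translate = -xmin + margin_left / unit_scale
    y_translate = -ymax - margin_top / unit_scale
    return unit_scale, x_translate, y_translate

print(fit_to_viewport(-10.0, 10.0, -5.0, 5.0, 800, 240, 200, 20))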
def prepare_session(self, master, init_op=None, saver=None, checkpoint_dir=None, checkpoint_filename_with_path=None, wait_for_checkpoint=False, max_wait_secs=7200, config=None, init_feed_dict=None, init_fn=None):
"""Creates a `Session`. Makes sure the model is ready to be used.
Creates a `Session` on 'master'. If a `saver` object is passed in, and
`checkpoint_dir` points to a directory containing valid checkpoint
files, then it will try to recover the model from checkpoint. If
no checkpoint files are available, and `wait_for_checkpoint` is
`True`, then the process would check every `recovery_wait_secs`,
up to `max_wait_secs`, for recovery to succeed.
If the model cannot be recovered successfully then it is initialized by
running the `init_op` and calling `init_fn` if they are provided.
The `local_init_op` is also run after init_op and init_fn, regardless of
whether the model was recovered successfully, but only if
`ready_for_local_init_op` passes.
If the model is recovered from a checkpoint it is assumed that all
global variables have been initialized, in particular neither `init_op`
nor `init_fn` will be executed.
It is an error if the model cannot be recovered and no `init_op`
or `init_fn` or `local_init_op` are passed.
Args:
master: `String` representation of the TensorFlow master to use.
init_op: Optional `Operation` used to initialize the model.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
init_feed_dict: Optional dictionary that maps `Tensor` objects to feed
values. This feed dictionary is passed to the session `run()` call when
running the init op.
init_fn: Optional callable used to initialize the model. Called after the
optional `init_op` is called. The callable must accept one argument,
the session being initialized.
Returns:
A `Session` object that can be used to drive the model.
Raises:
RuntimeError: If the model cannot be initialized or recovered.
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
<DeepExtract>
self._target = master
sess = session.Session(self._target, graph=self._graph, config=config)
if checkpoint_dir and checkpoint_filename_with_path:
print('the supervisor got both checkpoint_dir={} and full_path={}, will restore from the latter'.format(checkpoint_dir, checkpoint_filename_with_path))
if not saver or not (checkpoint_dir or checkpoint_filename_with_path):
(sess, is_loaded_from_checkpoint) = (sess, False)
if checkpoint_filename_with_path:
saver.restore(sess, checkpoint_filename_with_path)
(sess, is_loaded_from_checkpoint) = (sess, True)
if not hasattr(self, 'auto_continue'):
print('auto_continue is not allowed, so no need to try to recover from the checkpoint')
(sess, is_loaded_from_checkpoint) = (sess, False)
wait_time = 0
ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
while not ckpt or not ckpt.model_checkpoint_path:
if wait_for_checkpoint and wait_time < max_wait_secs:
logging.info('Waiting for checkpoint to be available.')
time.sleep(self._recovery_wait_secs)
wait_time += self._recovery_wait_secs
ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
else:
(sess, is_loaded_from_checkpoint) = (sess, False)
saver.restore(sess, ckpt.model_checkpoint_path)
saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths)
(sess, is_loaded_from_checkpoint) = (sess, True)
</DeepExtract>
print('is_loaded_from_checkpoint:', is_loaded_from_checkpoint)
if not is_loaded_from_checkpoint:
if init_op is None and (not init_fn) and (self._local_init_op is None):
raise RuntimeError('Model is not initialized and no init_op or init_fn or local_init_op was given')
if init_op is not None:
print('running the init_op')
sess.run(init_op, feed_dict=init_feed_dict)
print('done the init_op')
if init_fn:
init_fn(sess)
(local_init_success, msg) = self._try_run_local_init_op(sess)
if not local_init_success:
raise RuntimeError('Init operations did not make model ready for local_init. Init op: %s, init fn: %s, error: %s' % (_maybe_name(init_op), init_fn, msg))
print('done local_init_op')
(is_ready, msg) = self._model_ready(sess)
if not is_ready:
raise RuntimeError('Init operations did not make model ready. Init op: %s, init fn: %s, local_init_op: %s, error: %s' % (_maybe_name(init_op), init_fn, self._local_init_op, msg))
print('model ready')
return sess
|
def prepare_session(self, master, init_op=None, saver=None, checkpoint_dir=None, checkpoint_filename_with_path=None, wait_for_checkpoint=False, max_wait_secs=7200, config=None, init_feed_dict=None, init_fn=None):
"""Creates a `Session`. Makes sure the model is ready to be used.
Creates a `Session` on 'master'. If a `saver` object is passed in, and
`checkpoint_dir` points to a directory containing valid checkpoint
files, then it will try to recover the model from checkpoint. If
no checkpoint files are available, and `wait_for_checkpoint` is
`True`, then the process would check every `recovery_wait_secs`,
up to `max_wait_secs`, for recovery to succeed.
If the model cannot be recovered successfully then it is initialized by
running the `init_op` and calling `init_fn` if they are provided.
The `local_init_op` is also run after init_op and init_fn, regardless of
whether the model was recovered successfully, but only if
`ready_for_local_init_op` passes.
If the model is recovered from a checkpoint it is assumed that all
global variables have been initialized, in particular neither `init_op`
nor `init_fn` will be executed.
It is an error if the model cannot be recovered and no `init_op`
or `init_fn` or `local_init_op` are passed.
Args:
master: `String` representation of the TensorFlow master to use.
init_op: Optional `Operation` used to initialize the model.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
init_feed_dict: Optional dictionary that maps `Tensor` objects to feed
values. This feed dictionary is passed to the session `run()` call when
running the init op.
init_fn: Optional callable used to initialize the model. Called after the
optional `init_op` is called. The callable must accept one argument,
the session being initialized.
Returns:
A `Session` object that can be used to drive the model.
Raises:
RuntimeError: If the model cannot be initialized or recovered.
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
self._target = master
sess = session.Session(self._target, graph=self._graph, config=config)
if checkpoint_dir and checkpoint_filename_with_path:
print('the supervisor got both checkpoint_dir={} and full_path={}, will restore from the latter'.format(checkpoint_dir, checkpoint_filename_with_path))
if not saver or not (checkpoint_dir or checkpoint_filename_with_path):
(sess, is_loaded_from_checkpoint) = (sess, False)
if checkpoint_filename_with_path:
saver.restore(sess, checkpoint_filename_with_path)
(sess, is_loaded_from_checkpoint) = (sess, True)
if not hasattr(self, 'auto_continue'):
print('auto_continue is not allowed, so no need to try to recover from the checkpoint')
(sess, is_loaded_from_checkpoint) = (sess, False)
wait_time = 0
ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
while not ckpt or not ckpt.model_checkpoint_path:
if wait_for_checkpoint and wait_time < max_wait_secs:
logging.info('Waiting for checkpoint to be available.')
time.sleep(self._recovery_wait_secs)
wait_time += self._recovery_wait_secs
ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
else:
(sess, is_loaded_from_checkpoint) = (sess, False)
saver.restore(sess, ckpt.model_checkpoint_path)
saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths)
(sess, is_loaded_from_checkpoint) = (sess, True)
print('is_loaded_from_checkpoint:', is_loaded_from_checkpoint)
if not is_loaded_from_checkpoint:
if init_op is None and (not init_fn) and (self._local_init_op is None):
raise RuntimeError('Model is not initialized and no init_op or init_fn or local_init_op was given')
if init_op is not None:
print('running the init_op')
sess.run(init_op, feed_dict=init_feed_dict)
print('done the init_op')
if init_fn:
init_fn(sess)
(local_init_success, msg) = self._try_run_local_init_op(sess)
if not local_init_success:
raise RuntimeError('Init operations did not make model ready for local_init. Init op: %s, init fn: %s, error: %s' % (_maybe_name(init_op), init_fn, msg))
print('done local_init_op')
(is_ready, msg) = self._model_ready(sess)
if not is_ready:
raise RuntimeError('Init operations did not make model ready. Init op: %s, init fn: %s, local_init_op: %s, error: %s' % (_maybe_name(init_op), init_fn, self._local_init_op, msg))
print('model ready')
return sess
|
AOFP
|
positive
|
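prepare_session above encodes a restore-or-initialize policy: recover from a checkpoint when one is usable, otherwise fall back to the init op or init function, and fail loudly when neither path is available. Stripped of the TensorFlow specifics, the policy reduces to the sketch below; the names are illustrative placeholders, not the original API.

import os

def restore_or_initialize(checkpoint_path, restore_fn, init_fn):
    # A recovered model skips initialization entirely.
    if checkpoint_path and os.path.exists(checkpoint_path):
        restore_fn(checkpoint_path)
        return True
    if init_fn is None:
        raise RuntimeError('model is not initialized and no init_fn was given')
    init_fn()
    return False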
def ratelimit_key(group, request):
<DeepExtract>
ip = request.META.get('HTTP_CF_CONNECTING_IP', '').strip()
if not ip:
ip = request.META.get('HTTP_X_FORWARDED_FOR', '').strip()
if not ip:
ip = request.META.get('REMOTE_ADDR', '').strip()
else:
ip = ip.split(',')[0]
ip = ip
</DeepExtract>
request.META.get('HTTP_USER_AGENT', '')
key = ip
return key
|
def ratelimit_key(group, request):
ip = request.META.get('HTTP_CF_CONNECTING_IP', '').strip()
if not ip:
ip = request.META.get('HTTP_X_FORWARDED_FOR', '').strip()
if not ip:
ip = request.META.get('REMOTE_ADDR', '').strip()
else:
ip = ip.split(',')[0]
ip = ip
request.META.get('HTTP_USER_AGENT', '')
key = ip
return key
|
brasil.io
|
positive
|
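ratelimit_key above derives the rate-limit key from the client IP, preferring Cloudflare's CF-Connecting-IP header, then the first hop of X-Forwarded-For, then REMOTE_ADDR. The same precedence, written against a plain dict of WSGI META keys for illustration:

def client_ip(meta):
    ip = meta.get('HTTP_CF_CONNECTING_IP', '').strip()
    if ip:
        return ip
    ip = meta.get('HTTP_X_FORWARDED_FOR', '').strip()
    if ip:
        return ip.split(',')[0].strip()   # first entry is the original client
    return meta.get('REMOTE_ADDR', '').strip()

print(client_ip({'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.1'}))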
def setup_albumart(self):
"""
Construct the AlbumArt widget and add it to the overlay, but only actually
show it if we're a) Not running a plug-in, and b) The user wants it via
preferences.
Initially invisible, regardless - its visibility is controlled via its
own positioning timer.
"""
self.albumart_widget = AlbumArt(None, status.screen.get_mouse_monitor())
<DeepExtract>
self.overlay.add_overlay(self.albumart_widget)
</DeepExtract>
self.floaters.append(self.albumart_widget)
if settings.get_show_albumart():
self.albumart_widget.start_positioning()
|
def setup_albumart(self):
"""
Construct the AlbumArt widget and add it to the overlay, but only actually
show it if we're a) Not running a plug-in, and b) The user wants it via
preferences.
Initially invisible, regardless - its visibility is controlled via its
own positioning timer.
"""
self.albumart_widget = AlbumArt(None, status.screen.get_mouse_monitor())
self.overlay.add_overlay(self.albumart_widget)
self.floaters.append(self.albumart_widget)
if settings.get_show_albumart():
self.albumart_widget.start_positioning()
|
cinnamon-screensaver
|
positive
|
def process_bedpe(path):
if path.endswith('.bgz'):
bgz_file = path
else:
bgz_file = path + '.bgz'
<DeepExtract>
if not osp.exists(bgz_file):
cmd = f'sort -k1,1 -k4,4 -k2,2n -k5,5n {path} | bgzip > {bgz_file}'
subp.check_call(cmd, shell=True)
</DeepExtract>
if not osp.exists(f'{bgz_file}.px2'):
<DeepExtract>
cmd = f'pairix -f -s 1 -d 4 -b 2 -e 3 -u 5 -v 6 {bgz_file}'.split(' ')
subp.check_call(cmd)
</DeepExtract>
return bgz_file
|
def process_bedpe(path):
if path.endswith('.bgz'):
bgz_file = path
else:
bgz_file = path + '.bgz'
if not osp.exists(bgz_file):
cmd = f'sort -k1,1 -k4,4 -k2,2n -k5,5n {path} | bgzip > {bgz_file}'
subp.check_call(cmd, shell=True)
if not osp.exists(f'{bgz_file}.px2'):
cmd = f'pairix -f -s 1 -d 4 -b 2 -e 3 -u 5 -v 6 {bgz_file}'.split(' ')
subp.check_call(cmd)
return bgz_file
|
CoolBox
|
positive
|
def get_ori_diff_no_xaxis(self):
<DeepExtract>
ori_diff = self.get_finger_ori() - quat2euler(self.sim.data.get_body_xquat('knob_link'))
</DeepExtract>
ori_diff[0] = 0
return ori_diff
|
def get_ori_diff_no_xaxis(self):
ori_diff = self.get_finger_ori() - quat2euler(self.sim.data.get_body_xquat('knob_link'))
ori_diff[0] = 0
return ori_diff
|
DoorGym
|
positive
|
def __init__(self, groups=1, base_width=4, **kwargs):
super(ResNeXt, self).__init__(**kwargs)
self.groups = groups
self.base_width = base_width
self.inplanes = 64
self.res_layers = []
for (i, num_blocks) in enumerate(self.stage_blocks):
stride = self.strides[i]
dilation = self.dilations[i]
dcn = self.dcn if self.stage_with_dcn[i] else None
gcb = self.gcb if self.stage_with_gcb[i] else None
planes = 64 * 2 ** i
<DeepExtract>
downsample = None
if stride != 1 or self.inplanes != planes * self.block.expansion:
downsample = nn.Sequential(build_conv_layer(self.conv_cfg, self.inplanes, planes * self.block.expansion, kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, planes * self.block.expansion)[1])
layers = []
layers.append(self.block(inplanes=self.inplanes, planes=planes, stride=stride, dilation=dilation, downsample=downsample, groups=self.groups, base_width=self.base_width, style=self.style, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, gcb=gcb))
self.inplanes = planes * self.block.expansion
for i in range(1, num_blocks):
layers.append(self.block(inplanes=self.inplanes, planes=planes, stride=1, dilation=dilation, groups=self.groups, base_width=self.base_width, style=self.style, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, gcb=gcb))
res_layer = nn.Sequential(*layers)
</DeepExtract>
self.inplanes = planes * self.block.expansion
layer_name = 'layer{}'.format(i + 1)
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
|
def __init__(self, groups=1, base_width=4, **kwargs):
super(ResNeXt, self).__init__(**kwargs)
self.groups = groups
self.base_width = base_width
self.inplanes = 64
self.res_layers = []
for (i, num_blocks) in enumerate(self.stage_blocks):
stride = self.strides[i]
dilation = self.dilations[i]
dcn = self.dcn if self.stage_with_dcn[i] else None
gcb = self.gcb if self.stage_with_gcb[i] else None
planes = 64 * 2 ** i
downsample = None
if stride != 1 or self.inplanes != planes * self.block.expansion:
downsample = nn.Sequential(build_conv_layer(self.conv_cfg, self.inplanes, planes * self.block.expansion, kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, planes * self.block.expansion)[1])
layers = []
layers.append(self.block(inplanes=self.inplanes, planes=planes, stride=stride, dilation=dilation, downsample=downsample, groups=self.groups, base_width=self.base_width, style=self.style, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, gcb=gcb))
self.inplanes = planes * self.block.expansion
for i in range(1, num_blocks):
layers.append(self.block(inplanes=self.inplanes, planes=planes, stride=1, dilation=dilation, groups=self.groups, base_width=self.base_width, style=self.style, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, gcb=gcb))
res_layer = nn.Sequential(*layers)
self.inplanes = planes * self.block.expansion
layer_name = 'layer{}'.format(i + 1)
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
|
EfficientDet-bifpn
|
positive
|
def quotient_representation(self, subgroup_id) -> e2cnn.group.Representation:
"""
Builds the quotient representation of the group with respect to the subgroup identified by the
input ``subgroup_id``.
Similar to :meth:`~e2cnn.group.Group.regular_representation`, the quotient representation
:math:`\\rho_\\text{quot}^{G/H}` of :math:`G` w.r.t. a subgroup :math:`H` acts on :math:`\\R^{|G|/|H|}` by
permuting its axes.
Labeling the axes by the cosets :math:`gH` in the quotient space :math:`G/H`, it can be defined via its action
:math:`\\rho_\\text{quot}^{G/H}(\\tilde{g})e_{gH}=e_{\\tilde{g}gH}`.
Regular and trivial representations are two specific cases of quotient representations obtained by choosing
:math:`H=\\{e\\}` or :math:`H=G`, respectively.
Vectors in the representation space :math:`\\R^{|G|/|H|}` can be viewed as scalar functions on the quotient
space :math:`G/H`.
The quotient representation :math:`\\rho_\\text{quot}^{G/H}` can also be defined as the
:meth:`~e2cnn.group.Group.induced_representation` from the trivial representation of the subgroup :math:`H`.
Args:
subgroup_id: identifier of the subgroup
Returns:
the quotient representation of the group
"""
name = f'quotient[{subgroup_id}]'
if name not in self.representations:
<DeepExtract>
pass
</DeepExtract>
<DeepExtract>
supported_nonlinearities = []
if 'pointwise' in subgroup.trivial_representation.supported_nonlinearities:
supported_nonlinearities.append('pointwise')
if 'concatenated' in subgroup.trivial_representation.supported_nonlinearities:
supported_nonlinearities.append('concatenated')
if 'gated' in subgroup.trivial_representation.supported_nonlinearities:
supported_nonlinearities.append('gated')
for nl in subgroup.trivial_representation.supported_nonlinearities:
if nl.startswith('induced_gated'):
supported_nonlinearities.append(nl)
break
else:
supported_nonlinearities.append(f'induced_gated_{subgroup.trivial_representation.size}')
if 'norm' in subgroup.trivial_representation.supported_nonlinearities:
supported_nonlinearities.append('norm')
for nl in subgroup.trivial_representation.supported_nonlinearities:
if nl.startswith('induced_norm'):
supported_nonlinearities.append(nl)
break
else:
supported_nonlinearities.append(f'induced_norm_{subgroup.trivial_representation.size}')
if 'gate' in subgroup.trivial_representation.supported_nonlinearities or 'induced_gate' in subgroup.trivial_representation.supported_nonlinearities:
supported_nonlinearities.append('induced_gate')
supported_nonlinearities = supported_nonlinearities
</DeepExtract>
<DeepExtract>
assert subgroup.trivial_representation.irreducible
if self.order() < 0:
raise ValueError(f'Only finite group are supported for induction but you tried to induce to the group {self.name} which has an infinite number of elements')
else:
(irreps, change_of_basis, change_of_basis_inv) = e2cnn.group.representation.build_induced_representation(self, subgroup_id, subgroup.trivial_representation)
</DeepExtract>
self.representations[name] = e2cnn.group.Representation(self, name, [r.name for r in irreps], change_of_basis, supported_nonlinearities, change_of_basis_inv=change_of_basis_inv)
return self.representations[name]
|
def quotient_representation(self, subgroup_id) -> e2cnn.group.Representation:
"""
Builds the quotient representation of the group with respect to the subgroup identified by the
input ``subgroup_id``.
Similar to :meth:`~e2cnn.group.Group.regular_representation`, the quotient representation
:math:`\\rho_\\text{quot}^{G/H}` of :math:`G` w.r.t. a subgroup :math:`H` acts on :math:`\\R^{|G|/|H|}` by
permuting its axes.
Labeling the axes by the cosets :math:`gH` in the quotient space :math:`G/H`, it can be defined via its action
:math:`\\rho_\\text{quot}^{G/H}(\\tilde{g})e_{gH}=e_{\\tilde{g}gH}`.
Regular and trivial representations are two specific cases of quotient representations obtained by choosing
:math:`H=\\{e\\}` or :math:`H=G`, respectively.
Vectors in the representation space :math:`\\R^{|G|/|H|}` can be viewed as scalar functions on the quotient
space :math:`G/H`.
The quotient representation :math:`\\rho_\\text{quot}^{G/H}` can also be defined as the
:meth:`~e2cnn.group.Group.induced_representation` from the trivial representation of the subgroup :math:`H`.
Args:
subgroup_id: identifier of the subgroup
Returns:
the quotient representation of the group
"""
name = f'quotient[{subgroup_id}]'
if name not in self.representations:
pass
supported_nonlinearities = []
if 'pointwise' in subgroup.trivial_representation.supported_nonlinearities:
supported_nonlinearities.append('pointwise')
if 'concatenated' in subgroup.trivial_representation.supported_nonlinearities:
supported_nonlinearities.append('concatenated')
if 'gated' in subgroup.trivial_representation.supported_nonlinearities:
supported_nonlinearities.append('gated')
for nl in subgroup.trivial_representation.supported_nonlinearities:
if nl.startswith('induced_gated'):
supported_nonlinearities.append(nl)
break
else:
supported_nonlinearities.append(f'induced_gated_{subgroup.trivial_representation.size}')
if 'norm' in subgroup.trivial_representation.supported_nonlinearities:
supported_nonlinearities.append('norm')
for nl in subgroup.trivial_representation.supported_nonlinearities:
if nl.startswith('induced_norm'):
supported_nonlinearities.append(nl)
break
else:
supported_nonlinearities.append(f'induced_norm_{subgroup.trivial_representation.size}')
if 'gate' in subgroup.trivial_representation.supported_nonlinearities or 'induced_gate' in subgroup.trivial_representation.supported_nonlinearities:
supported_nonlinearities.append('induced_gate')
supported_nonlinearities = supported_nonlinearities
assert subgroup.trivial_representation.irreducible
if self.order() < 0:
raise ValueError(f'Only finite group are supported for induction but you tried to induce to the group {self.name} which has an infinite number of elements')
else:
(irreps, change_of_basis, change_of_basis_inv) = e2cnn.group.representation.build_induced_representation(self, subgroup_id, subgroup.trivial_representation)
self.representations[name] = e2cnn.group.Representation(self, name, [r.name for r in irreps], change_of_basis, supported_nonlinearities, change_of_basis_inv=change_of_basis_inv)
return self.representations[name]
|
e2cnn
|
positive
|
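The quotient_representation docstring above describes rho_quot^{G/H} as a permutation action on the |G|/|H| cosets, with the regular (H = {e}) and trivial (H = G) representations as the two extremes. A small illustrative sketch, not the e2cnn API: for the cyclic group C_N and its order-M subgroup (M dividing N), the quotient representation permutes the N/M cosets cyclically.

import numpy as np

def cyclic_quotient_rep(N, M):
    """Permutation matrices of the C_N quotient representation w.r.t. its
    order-M subgroup; the dimension is |G|/|H| = N // M."""
    assert N % M == 0
    k = N // M
    rho = {}
    for g in range(N):                        # group element: rotation by g steps
        P = np.zeros((k, k))
        for coset in range(k):
            P[(coset + g) % k, coset] = 1.0   # e_{gH} axes are shifted cyclically
        rho[g] = P
    return rho

rep = cyclic_quotient_rep(N=4, M=2)           # C4 / C2 is 2-dimensional
print(rep[1])                                 # the generator swaps the two cosets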
def formfield_for_foreignkey(self, db_field, request, **kwargs):
"""
Override Foreign Key Query set on Add/Edit admin-page of Carrier Rate
"""
if db_field.name == 'prefix':
<DeepExtract>
q = Prefix.objects.extra(select={'prefix': 'prefix', 'destination': 'destination', 'ascii_prefix': 'prefix'}, tables=['dialcode_prefix'])
q.group_by = ['prefix']
q = q.extra(order_by=['ascii_prefix', 'prefix', 'destination'])
kwargs['queryset'] = q
</DeepExtract>
return super(VoIPCarrierRateAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
|
def formfield_for_foreignkey(self, db_field, request, **kwargs):
"""
Override Foreign Key Query set on Add/Edit admin-page of Carrier Rate
"""
if db_field.name == 'prefix':
q = Prefix.objects.extra(select={'prefix': 'prefix', 'destination': 'destination', 'ascii_prefix': 'prefix'}, tables=['dialcode_prefix'])
q.group_by = ['prefix']
q = q.extra(order_by=['ascii_prefix', 'prefix', 'destination'])
kwargs['queryset'] = q
return super(VoIPCarrierRateAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
|
cdr-stats
|
positive
|
@stub_and_esp32_function_only
def flash_defl_finish(self, reboot=False):
"""Leave compressed flash mode and run/reboot"""
if not reboot and (not self.IS_STUB):
return
pkt = struct.pack('<I', int(not reboot))
<DeepExtract>
(val, pkt) = self.command(self.ESP_FLASH_DEFL_END, pkt, chk, timeout=timeout)
if len(pkt) < self.STATUS_BYTES_LENGTH:
raise FatalError('Failed to %s. Only got %d byte status response.' % ('leave compressed flash mode', len(pkt)))
status_bytes = pkt[-self.STATUS_BYTES_LENGTH:]
if byte(status_bytes, 0) != 0:
raise FatalError.WithResult('Failed to %s' % 'leave compressed flash mode', status_bytes)
if len(pkt) > self.STATUS_BYTES_LENGTH:
return pkt[:-self.STATUS_BYTES_LENGTH]
else:
return val
</DeepExtract>
self.in_bootloader = False
|
@stub_and_esp32_function_only
def flash_defl_finish(self, reboot=False):
"""Leave compressed flash mode and run/reboot"""
if not reboot and (not self.IS_STUB):
return
pkt = struct.pack('<I', int(not reboot))
(val, pkt) = self.command(self.ESP_FLASH_DEFL_END, pkt, chk, timeout=timeout)
if len(pkt) < self.STATUS_BYTES_LENGTH:
raise FatalError('Failed to %s. Only got %d byte status response.' % ('leave compressed flash mode', len(pkt)))
status_bytes = pkt[-self.STATUS_BYTES_LENGTH:]
if byte(status_bytes, 0) != 0:
raise FatalError.WithResult('Failed to %s' % 'leave compressed flash mode', status_bytes)
if len(pkt) > self.STATUS_BYTES_LENGTH:
return pkt[:-self.STATUS_BYTES_LENGTH]
else:
return val
self.in_bootloader = False
|
esptool
|
positive
|
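The inlined response check above treats each reply as a payload followed by a fixed number of status bytes, where a non-zero first status byte means the command failed. A toy parse of that layout; the 2-byte status length is an assumption made for this example, since the real value depends on the chip and stub.

STATUS_BYTES_LENGTH = 2   # assumed for this example

def split_status(pkt: bytes):
    payload, status = pkt[:-STATUS_BYTES_LENGTH], pkt[-STATUS_BYTES_LENGTH:]
    if status[0] != 0:
        raise RuntimeError('command failed, status bytes: %r' % (status,))
    return payload

print(split_status(b'\x12\x34\x00\x00'))   # -> b'\x12\x34'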
def im_detect_keypoints(model, im_scale, boxes):
"""Infer instance keypoint poses. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
im_scales (list): image blob scales as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
Returns:
pred_heatmaps (ndarray): R x J x M x M array of keypoint location
logits (softmax inputs) for each of the J keypoint types output
by the network (must be processed by keypoint_results to convert
into point predictions in the original image coordinate space)
"""
M = cfg.KRCNN.HEATMAP_SIZE
if boxes.shape[0] == 0:
pred_heatmaps = np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)
return pred_heatmaps
inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
if cfg.FPN.MULTILEVEL_ROIS:
<DeepExtract>
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvls = fpn.map_rois_to_fpn_levels(inputs['keypoint_rois'][:, 1:5], lvl_min, lvl_max)
fpn.add_multilevel_roi_blobs(inputs, 'keypoint_rois', inputs['keypoint_rois'], lvls, lvl_min, lvl_max)
</DeepExtract>
for (k, v) in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v)
workspace.RunNet(model.keypoint_net.Proto().name)
pred_heatmaps = workspace.FetchBlob(core.ScopedName('kps_score')).squeeze()
if pred_heatmaps.ndim == 3:
pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
return pred_heatmaps
|
def im_detect_keypoints(model, im_scale, boxes):
"""Infer instance keypoint poses. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
im_scales (list): image blob scales as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
Returns:
pred_heatmaps (ndarray): R x J x M x M array of keypoint location
logits (softmax inputs) for each of the J keypoint types output
by the network (must be processed by keypoint_results to convert
into point predictions in the original image coordinate space)
"""
M = cfg.KRCNN.HEATMAP_SIZE
if boxes.shape[0] == 0:
pred_heatmaps = np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)
return pred_heatmaps
inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
if cfg.FPN.MULTILEVEL_ROIS:
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvls = fpn.map_rois_to_fpn_levels(inputs['keypoint_rois'][:, 1:5], lvl_min, lvl_max)
fpn.add_multilevel_roi_blobs(inputs, 'keypoint_rois', inputs['keypoint_rois'], lvls, lvl_min, lvl_max)
for (k, v) in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v)
workspace.RunNet(model.keypoint_net.Proto().name)
pred_heatmaps = workspace.FetchBlob(core.ScopedName('kps_score')).squeeze()
if pred_heatmaps.ndim == 3:
pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
return pred_heatmaps
|
AC-FPN
|
positive
|
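im_detect_keypoints above returns an R x J x M x M block of keypoint heatmaps and restores the leading axis when a single detection gets squeezed away. A minimal shape check of that squeeze/expand handling; 17 keypoints and M = 56 are just example values.

import numpy as np

heatmaps = np.zeros((1, 17, 56, 56)).squeeze()   # single box -> (17, 56, 56)
if heatmaps.ndim == 3:
    heatmaps = np.expand_dims(heatmaps, axis=0)  # restore the R axis
print(heatmaps.shape)                            # (1, 17, 56, 56)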
def moveZ(Z, F):
global lastDrillPos
if F <= 0:
print('ERROR: F <= 0')
<DeepExtract>
sendLine('G1 Z' + floats(Z) + ' F' + floats(F) + '\n')
waitForOK('G1 Z' + floats(Z) + ' F' + floats(F) + '\n', timeoutResend)
</DeepExtract>
if Emulate:
dist = abs(Z - lastDrillPos[2])
speed = float(F) / 60.0
time.sleep(float(dist) / speed)
lastDrillPos = [lastDrillPos[0], lastDrillPos[1], Z]
|
def moveZ(Z, F):
global lastDrillPos
if F <= 0:
print('ERROR: F <= 0')
sendLine('G1 Z' + floats(Z) + ' F' + floats(F) + '\n')
waitForOK('G1 Z' + floats(Z) + ' F' + floats(F) + '\n', timeoutResend)
if Emulate:
dist = abs(Z - lastDrillPos[2])
speed = float(F) / 60.0
time.sleep(float(dist) / speed)
lastDrillPos = [lastDrillPos[0], lastDrillPos[1], Z]
|
Cyclone-PCB-Factory
|
positive
|
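In the emulated branch of moveZ above, the wait time comes from the feed rate: F is in millimetres per minute, so a move of dist millimetres takes dist / (F / 60) seconds. A quick check of that arithmetic:

def move_seconds(dist_mm, feed_mm_per_min):
    return dist_mm / (feed_mm_per_min / 60.0)

print(move_seconds(10.0, 300.0))   # 10 mm at 300 mm/min -> 2.0 s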
def __build_external_schema_tables_query(catalog: typing.Optional[str], schema_pattern: typing.Optional[str], table_name_pattern: typing.Optional[str], types: list) -> typing.Tuple[str, typing.Tuple[str, ...]]:
sql: str = "SELECT * FROM (SELECT CAST(current_database() AS VARCHAR(124)) AS TABLE_CAT, schemaname AS table_schem, tablename AS TABLE_NAME, 'EXTERNAL TABLE' AS TABLE_TYPE, NULL AS REMARKS, '' as TYPE_CAT, '' as TYPE_SCHEM, '' as TYPE_NAME, '' AS SELF_REFERENCING_COL_NAME, '' AS REF_GENERATION FROM svv_external_tables) WHERE true "
<DeepExtract>
filter_clause: str = ''
use_schemas: str = 'SCHEMAS'
filter_clause += self._get_catalog_filter_conditions(catalog, True, None)
query_args: typing.List[str] = []
if schema_pattern is not None and schema_pattern != '':
filter_clause += ' AND TABLE_SCHEM LIKE ?'
query_args.append(self.__sanitize_str(schema_pattern))
if table_name_pattern is not None and table_name_pattern != '':
filter_clause += ' AND TABLE_NAME LIKE ?'
query_args.append(self.__sanitize_str(table_name_pattern))
if len(types) > 0:
if 'EXTERNAL_SCHEMA_QUERY' == 'LOCAL_SCHEMA_QUERY':
filter_clause += ' AND (false '
orclause: str = ''
for type in types:
if type not in table_type_clauses.keys():
raise InterfaceError('Invalid type: {} provided. types may only contain: {}'.format(type, table_type_clauses.keys()))
clauses: typing.Optional[typing.Dict[str, str]] = table_type_clauses[type]
if clauses is not None:
cluase: str = clauses[use_schemas]
orclause += ' OR ( {cluase} ) '.format(cluase=cluase)
filter_clause += orclause + ') '
elif 'EXTERNAL_SCHEMA_QUERY' == 'NO_SCHEMA_UNIVERSAL_QUERY' or 'EXTERNAL_SCHEMA_QUERY' == 'EXTERNAL_SCHEMA_QUERY':
filter_clause += ' AND TABLE_TYPE IN ( '
length = len(types)
for type in types:
if type not in table_type_clauses.keys():
raise InterfaceError('Invalid type: {} provided. types may only contain: {}'.format(type, table_type_clauses.keys()))
filter_clause += '?'
query_args.append(type)
length -= 1
if length > 0:
filter_clause += ', '
filter_clause += ') '
(filter_clause, filter_args) = (filter_clause, tuple(query_args))
</DeepExtract>
orderby: str = ' ORDER BY TABLE_TYPE,TABLE_SCHEM,TABLE_NAME '
sql += filter_clause + orderby
return (sql, filter_args)
|
def __build_external_schema_tables_query(catalog: typing.Optional[str], schema_pattern: typing.Optional[str], table_name_pattern: typing.Optional[str], types: list) -> typing.Tuple[str, typing.Tuple[str, ...]]:
sql: str = "SELECT * FROM (SELECT CAST(current_database() AS VARCHAR(124)) AS TABLE_CAT, schemaname AS table_schem, tablename AS TABLE_NAME, 'EXTERNAL TABLE' AS TABLE_TYPE, NULL AS REMARKS, '' as TYPE_CAT, '' as TYPE_SCHEM, '' as TYPE_NAME, '' AS SELF_REFERENCING_COL_NAME, '' AS REF_GENERATION FROM svv_external_tables) WHERE true "
filter_clause: str = ''
use_schemas: str = 'SCHEMAS'
filter_clause += self._get_catalog_filter_conditions(catalog, True, None)
query_args: typing.List[str] = []
if schema_pattern is not None and schema_pattern != '':
filter_clause += ' AND TABLE_SCHEM LIKE ?'
query_args.append(self.__sanitize_str(schema_pattern))
if table_name_pattern is not None and table_name_pattern != '':
filter_clause += ' AND TABLE_NAME LIKE ?'
query_args.append(self.__sanitize_str(table_name_pattern))
if len(types) > 0:
if 'EXTERNAL_SCHEMA_QUERY' == 'LOCAL_SCHEMA_QUERY':
filter_clause += ' AND (false '
orclause: str = ''
for type in types:
if type not in table_type_clauses.keys():
raise InterfaceError('Invalid type: {} provided. types may only contain: {}'.format(type, table_type_clauses.keys()))
clauses: typing.Optional[typing.Dict[str, str]] = table_type_clauses[type]
if clauses is not None:
cluase: str = clauses[use_schemas]
orclause += ' OR ( {cluase} ) '.format(cluase=cluase)
filter_clause += orclause + ') '
elif 'EXTERNAL_SCHEMA_QUERY' == 'NO_SCHEMA_UNIVERSAL_QUERY' or 'EXTERNAL_SCHEMA_QUERY' == 'EXTERNAL_SCHEMA_QUERY':
filter_clause += ' AND TABLE_TYPE IN ( '
length = len(types)
for type in types:
if type not in table_type_clauses.keys():
raise InterfaceError('Invalid type: {} provided. types may only contain: {}'.format(type, table_type_clauses.keys()))
filter_clause += '?'
query_args.append(type)
length -= 1
if length > 0:
filter_clause += ', '
filter_clause += ') '
(filter_clause, filter_args) = (filter_clause, tuple(query_args))
orderby: str = ' ORDER BY TABLE_TYPE,TABLE_SCHEM,TABLE_NAME '
sql += filter_clause + orderby
return (sql, filter_args)
|
amazon-redshift-python-driver
|
positive
|
def delete(self):
<DeepExtract>
config = MASResourceConfig(module=self.module, resource=self.main_nitro_class, attribute_values_dict=self.module.params, attributes_list=self.attribute_config[self.main_nitro_class]['attributes_list'], transforms=self.attribute_config[self.main_nitro_class]['transforms'], api_path='nitro/v2/config')
config = config
</DeepExtract>
if self.main_object_exists(config):
self.module_result['changed'] = True
if not self.module.check_mode:
config.delete(delete_id_attributes=self.attribute_config[self.main_nitro_class]['delete_id_attributes'])
|
def delete(self):
config = MASResourceConfig(module=self.module, resource=self.main_nitro_class, attribute_values_dict=self.module.params, attributes_list=self.attribute_config[self.main_nitro_class]['attributes_list'], transforms=self.attribute_config[self.main_nitro_class]['transforms'], api_path='nitro/v2/config')
config = config
if self.main_object_exists(config):
self.module_result['changed'] = True
if not self.module.check_mode:
config.delete(delete_id_attributes=self.attribute_config[self.main_nitro_class]['delete_id_attributes'])
|
citrix-adc-ansible-modules
|
positive
|
@patch('chaosaws.emr.probes.aws_client', autospec=True)
def test_list_cluster_fleet_instances(self, aws_client):
<DeepExtract>
config = os.path.join(data_path, 'list_instances_1.json')
with open(config) as fh:
mocked_response = loads(fh.read())
</DeepExtract>
client = MagicMock()
aws_client.return_value = client
client.list_instances.return_value = mocked_response
response = list_cluster_fleet_instances(self.cluster_id, self.fleet_id)
group = response['Instances']
assert len(group) == 2
client.list_instances.assert_called_with(ClusterId=self.cluster_id, InstanceFleetId=self.fleet_id)
|
@patch('chaosaws.emr.probes.aws_client', autospec=True)
def test_list_cluster_fleet_instances(self, aws_client):
config = os.path.join(data_path, 'list_instances_1.json')
with open(config) as fh:
mocked_response = loads(fh.read())
client = MagicMock()
aws_client.return_value = client
client.list_instances.return_value = mocked_response
response = list_cluster_fleet_instances(self.cluster_id, self.fleet_id)
group = response['Instances']
assert len(group) == 2
client.list_instances.assert_called_with(ClusterId=self.cluster_id, InstanceFleetId=self.fleet_id)
|
chaostoolkit-aws
|
positive
|
def main():
"""Main function"""
<DeepExtract>
parser = argparse.ArgumentParser(description='Train a X-RCNN network')
parser.add_argument('--dataset', dest='dataset', required=True, help='Dataset to use')
parser.add_argument('--cfg', dest='cfg_file', required=True, help='Config file for training (and optionally testing)')
parser.add_argument('--set', dest='set_cfgs', help='Set config keys. Key value sequence seperate by whitespace.e.g. [key] [value] [key] [value]', default=[], nargs='+')
parser.add_argument('--disp_interval', help='Display training info every N iterations', default=20, type=int)
parser.add_argument('--no_cuda', dest='cuda', help='Do not use CUDA device', action='store_false')
parser.add_argument('--bs', dest='batch_size', help='Explicitly specify to overwrite the value comed from cfg_file.', type=int)
parser.add_argument('--nw', dest='num_workers', help='Explicitly specify to overwrite number of workers to load data. Defaults to 4', type=int)
parser.add_argument('--iter_size', help='Update once every iter_size steps, as in Caffe.', default=1, type=int)
parser.add_argument('--o', dest='optimizer', help='Training optimizer.', default=None)
parser.add_argument('--lr', help='Base learning rate.', default=None, type=float)
parser.add_argument('--lr_decay_gamma', help='Learning rate decay rate.', default=None, type=float)
parser.add_argument('--start_step', help='Starting step count for training epoch. 0-indexed.', default=0, type=int)
parser.add_argument('--resume', help='resume to training on a checkpoint', action='store_true')
parser.add_argument('--no_save', help='do not save anything', action='store_true')
parser.add_argument('--load_ckpt', help='checkpoint path to load')
parser.add_argument('--load_detectron', help='path to the detectron weight pickle file')
parser.add_argument('--use_tfboard', help='Use tensorflow tensorboard to log training info', action='store_true')
args = parser.parse_args()
</DeepExtract>
print('Called with args:')
print(args)
args.run_name = misc_utils.get_run_name() + '_step'
output_dir = misc_utils.get_output_dir(args, args.run_name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
log_path = os.path.join(output_dir, 'train.log')
logger = setup_logging_to_file(log_path)
if not torch.cuda.is_available():
sys.exit('Need a CUDA device to run the code.')
if args.cuda or cfg.NUM_GPUS > 0:
cfg.CUDA = True
else:
raise ValueError('Need Cuda device to run !')
if args.dataset == 'coco2017':
cfg.TRAIN.DATASETS = ('coco_2017_train',)
cfg.MODEL.NUM_CLASSES = 81
elif args.dataset == 'keypoints_coco2017':
cfg.TRAIN.DATASETS = ('keypoints_coco_2017_train',)
cfg.MODEL.NUM_CLASSES = 2
elif args.dataset == 'common':
cfg.TRAIN.DATASETS = ('common_train',)
cfg.MODEL.NUM_CLASSES = 14
elif args.dataset == 'kitti':
cfg.TRAIN.DATASETS = ('kitti_train',)
cfg.MODEL.NUM_CLASSES = 9
else:
raise ValueError('Unexpected args.dataset: {}'.format(args.dataset))
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
original_batch_size = cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH
original_ims_per_batch = cfg.TRAIN.IMS_PER_BATCH
original_num_gpus = cfg.NUM_GPUS
if args.batch_size is None:
args.batch_size = original_batch_size
cfg.NUM_GPUS = torch.cuda.device_count()
assert args.batch_size % cfg.NUM_GPUS == 0, 'batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS)
cfg.TRAIN.IMS_PER_BATCH = args.batch_size // cfg.NUM_GPUS
effective_batch_size = args.iter_size * args.batch_size
print('effective_batch_size = batch_size * iter_size = %d * %d' % (args.batch_size, args.iter_size))
print('Adaptive config changes:')
print(' effective_batch_size: %d --> %d' % (original_batch_size, effective_batch_size))
print(' NUM_GPUS: %d --> %d' % (original_num_gpus, cfg.NUM_GPUS))
print(' IMS_PER_BATCH: %d --> %d' % (original_ims_per_batch, cfg.TRAIN.IMS_PER_BATCH))
old_base_lr = cfg.SOLVER.BASE_LR
cfg.SOLVER.BASE_LR *= args.batch_size / original_batch_size
print('Adjust BASE_LR linearly according to batch_size change:\n BASE_LR: {} --> {}'.format(old_base_lr, cfg.SOLVER.BASE_LR))
step_scale = original_batch_size / effective_batch_size
old_solver_steps = cfg.SOLVER.STEPS
old_max_iter = cfg.SOLVER.MAX_ITER
cfg.SOLVER.STEPS = list(map(lambda x: int(x * step_scale + 0.5), cfg.SOLVER.STEPS))
cfg.SOLVER.MAX_ITER = int(cfg.SOLVER.MAX_ITER * step_scale + 0.5)
print('Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n SOLVER.STEPS: {} --> {}\n SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps, cfg.SOLVER.STEPS, old_max_iter, cfg.SOLVER.MAX_ITER))
if cfg.FPN.FPN_ON and cfg.MODEL.FASTER_RCNN:
cfg.FPN.RPN_COLLECT_SCALE = cfg.TRAIN.IMS_PER_BATCH / original_ims_per_batch
print('Scale FPN rpn_proposals collect size directly propotional to the change of IMS_PER_BATCH:\n cfg.FPN.RPN_COLLECT_SCALE: {}'.format(cfg.FPN.RPN_COLLECT_SCALE))
if args.num_workers is not None:
cfg.DATA_LOADER.NUM_THREADS = args.num_workers
print('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS)
if args.optimizer is not None:
cfg.SOLVER.TYPE = args.optimizer
if args.lr is not None:
cfg.SOLVER.BASE_LR = args.lr
if args.lr_decay_gamma is not None:
cfg.SOLVER.GAMMA = args.lr_decay_gamma
assert_and_infer_cfg()
timers = defaultdict(Timer)
timers['roidb'].tic()
(roidb, ratio_list, ratio_index) = combined_roidb_for_training(cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
timers['roidb'].toc()
roidb_size = len(roidb)
logger.info('{:d} roidb entries'.format(roidb_size))
logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)
train_size = roidb_size // args.batch_size * args.batch_size
batchSampler = BatchSampler(sampler=MinibatchSampler(ratio_list, ratio_index), batch_size=args.batch_size, drop_last=True)
dataset = RoiDataLoader(roidb, cfg.MODEL.NUM_CLASSES, training=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_sampler=batchSampler, num_workers=cfg.DATA_LOADER.NUM_THREADS, collate_fn=collate_minibatch)
dataiterator = iter(dataloader)
maskRCNN = Generalized_RCNN()
if cfg.CUDA:
maskRCNN.cuda()
gn_params = []
bias_params = []
bias_param_names = []
nonbias_params = []
nonbias_param_names = []
for (key, value) in dict(maskRCNN.named_parameters()).items():
if value.requires_grad:
if 'gn' in key:
gn_params.append(value)
elif 'bias' in key:
bias_params.append(value)
bias_param_names.append(key)
else:
nonbias_params.append(value)
nonbias_param_names.append(key)
params = [{'params': nonbias_params, 'lr': 0, 'weight_decay': cfg.SOLVER.WEIGHT_DECAY}, {'params': bias_params, 'lr': 0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1), 'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0}, {'params': gn_params, 'lr': 0, 'weight_decay': cfg.SOLVER.WEIGHT_DECAY_GN}]
param_names = [nonbias_param_names, bias_param_names]
if cfg.SOLVER.TYPE == 'SGD':
optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
elif cfg.SOLVER.TYPE == 'Adam':
optimizer = torch.optim.Adam(params)
if args.load_ckpt:
load_name = args.load_ckpt
logging.info('loading checkpoint %s', load_name)
checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
net_utils.load_ckpt(maskRCNN, checkpoint['model'])
if args.resume:
args.start_step = checkpoint['step'] + 1
if 'train_size' in checkpoint:
if checkpoint['train_size'] != train_size:
print('train_size value: %d different from the one in checkpoint: %d' % (train_size, checkpoint['train_size']))
misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])
del checkpoint
torch.cuda.empty_cache()
if args.load_detectron:
logging.info('loading Detectron weights %s', args.load_detectron)
load_detectron_weight(maskRCNN, args.load_detectron)
lr = optimizer.param_groups[0]['lr']
maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'], minibatch=True)
args.cfg_filename = os.path.basename(args.cfg_file)
if not args.no_save:
blob = {'cfg': yaml.dump(cfg), 'args': args}
with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)
if args.use_tfboard:
from tensorboardX import SummaryWriter
tblogger = SummaryWriter(output_dir)
maskRCNN.train()
CHECKPOINT_PERIOD = int(cfg.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS)
decay_steps_ind = None
for i in range(1, len(cfg.SOLVER.STEPS)):
if cfg.SOLVER.STEPS[i] >= args.start_step:
decay_steps_ind = i
break
if decay_steps_ind is None:
decay_steps_ind = len(cfg.SOLVER.STEPS)
training_stats = TrainingStats(args, args.disp_interval, tblogger if args.use_tfboard and (not args.no_save) else None)
try:
logger.info('Training starts !')
step = args.start_step
for step in range(args.start_step, cfg.SOLVER.MAX_ITER):
if step < cfg.SOLVER.WARM_UP_ITERS:
method = cfg.SOLVER.WARM_UP_METHOD
if method == 'constant':
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR
elif method == 'linear':
alpha = step / cfg.SOLVER.WARM_UP_ITERS
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha) + alpha
else:
raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
lr_new = cfg.SOLVER.BASE_LR * warmup_factor
net_utils.update_learning_rate(optimizer, lr, lr_new)
lr = optimizer.param_groups[0]['lr']
assert lr == lr_new
elif step == cfg.SOLVER.WARM_UP_ITERS:
net_utils.update_learning_rate(optimizer, lr, cfg.SOLVER.BASE_LR)
lr = optimizer.param_groups[0]['lr']
assert lr == cfg.SOLVER.BASE_LR
if decay_steps_ind < len(cfg.SOLVER.STEPS) and step == cfg.SOLVER.STEPS[decay_steps_ind]:
logger.info('Decay the learning on step %d', step)
lr_new = lr * cfg.SOLVER.GAMMA
net_utils.update_learning_rate(optimizer, lr, lr_new)
lr = optimizer.param_groups[0]['lr']
assert lr == lr_new
decay_steps_ind += 1
training_stats.IterTic()
optimizer.zero_grad()
for inner_iter in range(args.iter_size):
try:
input_data = next(dataiterator)
except StopIteration:
dataiterator = iter(dataloader)
input_data = next(dataiterator)
for key in input_data:
if key != 'roidb':
input_data[key] = list(map(Variable, input_data[key]))
roidb = list(map(lambda x: blob_utils.deserialize(x)[0], input_data['roidb'][0]))
net_outputs = maskRCNN(**input_data)
training_stats.UpdateIterStats(net_outputs, inner_iter)
loss = net_outputs['total_loss']
loss.backward()
optimizer.step()
training_stats.IterToc()
training_stats.LogIterStats(step, lr, logger)
if (step + 1) % CHECKPOINT_PERIOD == 0:
<DeepExtract>
if args.no_save:
return
ckpt_dir = os.path.join(output_dir, 'ckpt')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
if isinstance(maskRCNN, mynn.DataParallel):
maskRCNN = maskRCNN.module
model_state_dict = maskRCNN.state_dict()
torch.save({'step': step, 'train_size': train_size, 'batch_size': args.batch_size, 'model': maskRCNN.state_dict(), 'optimizer': optimizer.state_dict()}, save_name)
logger.info('save model: %s', save_name)
</DeepExtract>
<DeepExtract>
if args.no_save:
return
ckpt_dir = os.path.join(output_dir, 'ckpt')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
if isinstance(maskRCNN, mynn.DataParallel):
maskRCNN = maskRCNN.module
model_state_dict = maskRCNN.state_dict()
torch.save({'step': step, 'train_size': train_size, 'batch_size': args.batch_size, 'model': maskRCNN.state_dict(), 'optimizer': optimizer.state_dict()}, save_name)
logger.info('save model: %s', save_name)
</DeepExtract>
except (RuntimeError, KeyboardInterrupt):
del dataiterator
logger.info('Save ckpt on exception ...')
<DeepExtract>
if args.no_save:
return
ckpt_dir = os.path.join(output_dir, 'ckpt')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
if isinstance(maskRCNN, mynn.DataParallel):
maskRCNN = maskRCNN.module
model_state_dict = maskRCNN.state_dict()
torch.save({'step': step, 'train_size': train_size, 'batch_size': args.batch_size, 'model': maskRCNN.state_dict(), 'optimizer': optimizer.state_dict()}, save_name)
logger.info('save model: %s', save_name)
</DeepExtract>
logger.info('Save ckpt done.')
stack_trace = traceback.format_exc()
print(stack_trace)
finally:
if args.use_tfboard and (not args.no_save):
tblogger.close()
|
def main():
"""Main function"""
parser = argparse.ArgumentParser(description='Train a X-RCNN network')
parser.add_argument('--dataset', dest='dataset', required=True, help='Dataset to use')
parser.add_argument('--cfg', dest='cfg_file', required=True, help='Config file for training (and optionally testing)')
parser.add_argument('--set', dest='set_cfgs', help='Set config keys. Key value sequence seperate by whitespace.e.g. [key] [value] [key] [value]', default=[], nargs='+')
parser.add_argument('--disp_interval', help='Display training info every N iterations', default=20, type=int)
parser.add_argument('--no_cuda', dest='cuda', help='Do not use CUDA device', action='store_false')
parser.add_argument('--bs', dest='batch_size', help='Explicitly specify to overwrite the value comed from cfg_file.', type=int)
parser.add_argument('--nw', dest='num_workers', help='Explicitly specify to overwrite number of workers to load data. Defaults to 4', type=int)
parser.add_argument('--iter_size', help='Update once every iter_size steps, as in Caffe.', default=1, type=int)
parser.add_argument('--o', dest='optimizer', help='Training optimizer.', default=None)
parser.add_argument('--lr', help='Base learning rate.', default=None, type=float)
parser.add_argument('--lr_decay_gamma', help='Learning rate decay rate.', default=None, type=float)
parser.add_argument('--start_step', help='Starting step count for training epoch. 0-indexed.', default=0, type=int)
parser.add_argument('--resume', help='resume to training on a checkpoint', action='store_true')
parser.add_argument('--no_save', help='do not save anything', action='store_true')
parser.add_argument('--load_ckpt', help='checkpoint path to load')
parser.add_argument('--load_detectron', help='path to the detectron weight pickle file')
parser.add_argument('--use_tfboard', help='Use tensorflow tensorboard to log training info', action='store_true')
args = parser.parse_args()
print('Called with args:')
print(args)
args.run_name = misc_utils.get_run_name() + '_step'
output_dir = misc_utils.get_output_dir(args, args.run_name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
log_path = os.path.join(output_dir, 'train.log')
logger = setup_logging_to_file(log_path)
if not torch.cuda.is_available():
sys.exit('Need a CUDA device to run the code.')
if args.cuda or cfg.NUM_GPUS > 0:
cfg.CUDA = True
else:
raise ValueError('Need Cuda device to run !')
if args.dataset == 'coco2017':
cfg.TRAIN.DATASETS = ('coco_2017_train',)
cfg.MODEL.NUM_CLASSES = 81
elif args.dataset == 'keypoints_coco2017':
cfg.TRAIN.DATASETS = ('keypoints_coco_2017_train',)
cfg.MODEL.NUM_CLASSES = 2
elif args.dataset == 'common':
cfg.TRAIN.DATASETS = ('common_train',)
cfg.MODEL.NUM_CLASSES = 14
elif args.dataset == 'kitti':
cfg.TRAIN.DATASETS = ('kitti_train',)
cfg.MODEL.NUM_CLASSES = 9
else:
raise ValueError('Unexpected args.dataset: {}'.format(args.dataset))
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
original_batch_size = cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH
original_ims_per_batch = cfg.TRAIN.IMS_PER_BATCH
original_num_gpus = cfg.NUM_GPUS
if args.batch_size is None:
args.batch_size = original_batch_size
cfg.NUM_GPUS = torch.cuda.device_count()
assert args.batch_size % cfg.NUM_GPUS == 0, 'batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS)
cfg.TRAIN.IMS_PER_BATCH = args.batch_size // cfg.NUM_GPUS
effective_batch_size = args.iter_size * args.batch_size
print('effective_batch_size = batch_size * iter_size = %d * %d' % (args.batch_size, args.iter_size))
print('Adaptive config changes:')
print(' effective_batch_size: %d --> %d' % (original_batch_size, effective_batch_size))
print(' NUM_GPUS: %d --> %d' % (original_num_gpus, cfg.NUM_GPUS))
print(' IMS_PER_BATCH: %d --> %d' % (original_ims_per_batch, cfg.TRAIN.IMS_PER_BATCH))
old_base_lr = cfg.SOLVER.BASE_LR
cfg.SOLVER.BASE_LR *= args.batch_size / original_batch_size
print('Adjust BASE_LR linearly according to batch_size change:\n BASE_LR: {} --> {}'.format(old_base_lr, cfg.SOLVER.BASE_LR))
step_scale = original_batch_size / effective_batch_size
old_solver_steps = cfg.SOLVER.STEPS
old_max_iter = cfg.SOLVER.MAX_ITER
cfg.SOLVER.STEPS = list(map(lambda x: int(x * step_scale + 0.5), cfg.SOLVER.STEPS))
cfg.SOLVER.MAX_ITER = int(cfg.SOLVER.MAX_ITER * step_scale + 0.5)
print('Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n SOLVER.STEPS: {} --> {}\n SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps, cfg.SOLVER.STEPS, old_max_iter, cfg.SOLVER.MAX_ITER))
if cfg.FPN.FPN_ON and cfg.MODEL.FASTER_RCNN:
cfg.FPN.RPN_COLLECT_SCALE = cfg.TRAIN.IMS_PER_BATCH / original_ims_per_batch
print('Scale FPN rpn_proposals collect size directly propotional to the change of IMS_PER_BATCH:\n cfg.FPN.RPN_COLLECT_SCALE: {}'.format(cfg.FPN.RPN_COLLECT_SCALE))
if args.num_workers is not None:
cfg.DATA_LOADER.NUM_THREADS = args.num_workers
print('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS)
if args.optimizer is not None:
cfg.SOLVER.TYPE = args.optimizer
if args.lr is not None:
cfg.SOLVER.BASE_LR = args.lr
if args.lr_decay_gamma is not None:
cfg.SOLVER.GAMMA = args.lr_decay_gamma
assert_and_infer_cfg()
timers = defaultdict(Timer)
timers['roidb'].tic()
(roidb, ratio_list, ratio_index) = combined_roidb_for_training(cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
timers['roidb'].toc()
roidb_size = len(roidb)
logger.info('{:d} roidb entries'.format(roidb_size))
logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)
train_size = roidb_size // args.batch_size * args.batch_size
batchSampler = BatchSampler(sampler=MinibatchSampler(ratio_list, ratio_index), batch_size=args.batch_size, drop_last=True)
dataset = RoiDataLoader(roidb, cfg.MODEL.NUM_CLASSES, training=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_sampler=batchSampler, num_workers=cfg.DATA_LOADER.NUM_THREADS, collate_fn=collate_minibatch)
dataiterator = iter(dataloader)
maskRCNN = Generalized_RCNN()
if cfg.CUDA:
maskRCNN.cuda()
gn_params = []
bias_params = []
bias_param_names = []
nonbias_params = []
nonbias_param_names = []
for (key, value) in dict(maskRCNN.named_parameters()).items():
if value.requires_grad:
if 'gn' in key:
gn_params.append(value)
elif 'bias' in key:
bias_params.append(value)
bias_param_names.append(key)
else:
nonbias_params.append(value)
nonbias_param_names.append(key)
params = [{'params': nonbias_params, 'lr': 0, 'weight_decay': cfg.SOLVER.WEIGHT_DECAY}, {'params': bias_params, 'lr': 0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1), 'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0}, {'params': gn_params, 'lr': 0, 'weight_decay': cfg.SOLVER.WEIGHT_DECAY_GN}]
param_names = [nonbias_param_names, bias_param_names]
if cfg.SOLVER.TYPE == 'SGD':
optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
elif cfg.SOLVER.TYPE == 'Adam':
optimizer = torch.optim.Adam(params)
if args.load_ckpt:
load_name = args.load_ckpt
logging.info('loading checkpoint %s', load_name)
checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
net_utils.load_ckpt(maskRCNN, checkpoint['model'])
if args.resume:
args.start_step = checkpoint['step'] + 1
if 'train_size' in checkpoint:
if checkpoint['train_size'] != train_size:
print('train_size value: %d different from the one in checkpoint: %d' % (train_size, checkpoint['train_size']))
misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])
del checkpoint
torch.cuda.empty_cache()
if args.load_detectron:
logging.info('loading Detectron weights %s', args.load_detectron)
load_detectron_weight(maskRCNN, args.load_detectron)
lr = optimizer.param_groups[0]['lr']
maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'], minibatch=True)
args.cfg_filename = os.path.basename(args.cfg_file)
if not args.no_save:
blob = {'cfg': yaml.dump(cfg), 'args': args}
with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)
if args.use_tfboard:
from tensorboardX import SummaryWriter
tblogger = SummaryWriter(output_dir)
maskRCNN.train()
CHECKPOINT_PERIOD = int(cfg.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS)
decay_steps_ind = None
for i in range(1, len(cfg.SOLVER.STEPS)):
if cfg.SOLVER.STEPS[i] >= args.start_step:
decay_steps_ind = i
break
if decay_steps_ind is None:
decay_steps_ind = len(cfg.SOLVER.STEPS)
training_stats = TrainingStats(args, args.disp_interval, tblogger if args.use_tfboard and (not args.no_save) else None)
try:
logger.info('Training starts !')
step = args.start_step
for step in range(args.start_step, cfg.SOLVER.MAX_ITER):
if step < cfg.SOLVER.WARM_UP_ITERS:
method = cfg.SOLVER.WARM_UP_METHOD
if method == 'constant':
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR
elif method == 'linear':
alpha = step / cfg.SOLVER.WARM_UP_ITERS
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha) + alpha
else:
raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
lr_new = cfg.SOLVER.BASE_LR * warmup_factor
net_utils.update_learning_rate(optimizer, lr, lr_new)
lr = optimizer.param_groups[0]['lr']
assert lr == lr_new
elif step == cfg.SOLVER.WARM_UP_ITERS:
net_utils.update_learning_rate(optimizer, lr, cfg.SOLVER.BASE_LR)
lr = optimizer.param_groups[0]['lr']
assert lr == cfg.SOLVER.BASE_LR
if decay_steps_ind < len(cfg.SOLVER.STEPS) and step == cfg.SOLVER.STEPS[decay_steps_ind]:
logger.info('Decay the learning on step %d', step)
lr_new = lr * cfg.SOLVER.GAMMA
net_utils.update_learning_rate(optimizer, lr, lr_new)
lr = optimizer.param_groups[0]['lr']
assert lr == lr_new
decay_steps_ind += 1
training_stats.IterTic()
optimizer.zero_grad()
for inner_iter in range(args.iter_size):
try:
input_data = next(dataiterator)
except StopIteration:
dataiterator = iter(dataloader)
input_data = next(dataiterator)
for key in input_data:
if key != 'roidb':
input_data[key] = list(map(Variable, input_data[key]))
roidb = list(map(lambda x: blob_utils.deserialize(x)[0], input_data['roidb'][0]))
net_outputs = maskRCNN(**input_data)
training_stats.UpdateIterStats(net_outputs, inner_iter)
loss = net_outputs['total_loss']
loss.backward()
optimizer.step()
training_stats.IterToc()
training_stats.LogIterStats(step, lr, logger)
if (step + 1) % CHECKPOINT_PERIOD == 0:
if args.no_save:
return
ckpt_dir = os.path.join(output_dir, 'ckpt')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
if isinstance(maskRCNN, mynn.DataParallel):
maskRCNN = maskRCNN.module
model_state_dict = maskRCNN.state_dict()
torch.save({'step': step, 'train_size': train_size, 'batch_size': args.batch_size, 'model': maskRCNN.state_dict(), 'optimizer': optimizer.state_dict()}, save_name)
logger.info('save model: %s', save_name)
if args.no_save:
return
ckpt_dir = os.path.join(output_dir, 'ckpt')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
if isinstance(maskRCNN, mynn.DataParallel):
maskRCNN = maskRCNN.module
model_state_dict = maskRCNN.state_dict()
torch.save({'step': step, 'train_size': train_size, 'batch_size': args.batch_size, 'model': maskRCNN.state_dict(), 'optimizer': optimizer.state_dict()}, save_name)
logger.info('save model: %s', save_name)
except (RuntimeError, KeyboardInterrupt):
del dataiterator
logger.info('Save ckpt on exception ...')
if args.no_save:
return
ckpt_dir = os.path.join(output_dir, 'ckpt')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
if isinstance(maskRCNN, mynn.DataParallel):
maskRCNN = maskRCNN.module
model_state_dict = maskRCNN.state_dict()
torch.save({'step': step, 'train_size': train_size, 'batch_size': args.batch_size, 'model': maskRCNN.state_dict(), 'optimizer': optimizer.state_dict()}, save_name)
logger.info('save model: %s', save_name)
logger.info('Save ckpt done.')
stack_trace = traceback.format_exc()
print(stack_trace)
finally:
if args.use_tfboard and (not args.no_save):
tblogger.close()
|
Amodal-Instance-Segmentation-through-KINS-Dataset
|
positive
|
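# A minimal standalone sketch (not taken from the repository above) of the warmup-plus-step-decay
# learning-rate schedule that the training loop above computes inline. BASE_LR, WARM_UP_ITERS,
# WARM_UP_FACTOR, GAMMA and STEPS are assumed example values.
BASE_LR = 0.01
WARM_UP_ITERS = 500
WARM_UP_FACTOR = 1.0 / 3.0
GAMMA = 0.1
STEPS = [60000, 80000]

def lr_at_step(step):
    """Learning rate the loop above would settle on at a given step ('linear' warmup method)."""
    if step < WARM_UP_ITERS:
        alpha = step / WARM_UP_ITERS
        return BASE_LR * (WARM_UP_FACTOR * (1 - alpha) + alpha)
    # After warmup, the rate is multiplied by GAMMA at every boundary in STEPS that has passed.
    decays = sum(1 for s in STEPS if step >= s)
    return BASE_LR * GAMMA ** decays

print(lr_at_step(0), lr_at_step(WARM_UP_ITERS), lr_at_step(90000))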
def postTopDownCodelets(self):
random = self.ctx.random
slipnet = self.ctx.slipnet
for node in slipnet.slipnodes:
if node.activation != 100.0:
continue
for codeletName in node.codelets:
<DeepExtract>
temperature = self.ctx.temperature
workspace = self.ctx.workspace
if codeletName == 'breaker':
probability = 1.0
if 'replacement' in codeletName:
if workspace.numberOfUnreplacedObjects() > 0:
probability = 1.0
probability = 0.0
if 'rule' in codeletName:
if not workspace.rule:
probability = 1.0
probability = workspace.rule.totalWeakness() / 100.0
if 'correspondence' in codeletName:
probability = workspace.interStringUnhappiness / 100.0
if 'description' in codeletName:
probability = (temperature.value() / 100.0) ** 2
probability = workspace.intraStringUnhappiness / 100.0
</DeepExtract>
<DeepExtract>
random = self.ctx.random
workspace = self.ctx.workspace
if codeletName == 'breaker' or 'description' in codeletName:
howMany = 1
if 'translator' in codeletName:
if not workspace.rule:
howMany = 0
howMany = 1
if 'rule' in codeletName:
howMany = 2
if 'group' in codeletName and (not workspace.numberOfBonds()):
howMany = 0
if 'replacement' in codeletName and workspace.rule:
howMany = 0
number = 0
if 'bond' in codeletName:
number = workspace.numberOfUnrelatedObjects()
if 'group' in codeletName:
number = workspace.numberOfUngroupedObjects()
if 'replacement' in codeletName:
number = workspace.numberOfUnreplacedObjects()
if 'correspondence' in codeletName:
number = workspace.numberOfUncorrespondingObjects()
if number < random.sqrtBlur(2.0):
howMany = 1
if number < random.sqrtBlur(4.0):
howMany = 2
howMany = 3
</DeepExtract>
for _ in range(howMany):
if not random.coinFlip(probability):
continue
<DeepExtract>
                i = int(node.activation * node.conceptualDepth / 100.0) * NUMBER_OF_BINS / 100
                if i >= NUMBER_OF_BINS:
                    i = NUMBER_OF_BINS
                urgency = i + 1
</DeepExtract>
codelet = Codelet(codeletName, urgency, [node], self.codeletsRun)
logging.info('Post top down: %s, with urgency: %d', codelet.name, urgency)
<DeepExtract>
self.codelets += [codelet]
if len(self.codelets) > 100:
oldCodelet = self.chooseOldCodelet()
self.removeCodelet(oldCodelet)
</DeepExtract>
|
def postTopDownCodelets(self):
random = self.ctx.random
slipnet = self.ctx.slipnet
for node in slipnet.slipnodes:
if node.activation != 100.0:
continue
for codeletName in node.codelets:
temperature = self.ctx.temperature
workspace = self.ctx.workspace
if codeletName == 'breaker':
probability = 1.0
if 'replacement' in codeletName:
if workspace.numberOfUnreplacedObjects() > 0:
probability = 1.0
probability = 0.0
if 'rule' in codeletName:
if not workspace.rule:
probability = 1.0
probability = workspace.rule.totalWeakness() / 100.0
if 'correspondence' in codeletName:
probability = workspace.interStringUnhappiness / 100.0
if 'description' in codeletName:
probability = (temperature.value() / 100.0) ** 2
probability = workspace.intraStringUnhappiness / 100.0
random = self.ctx.random
workspace = self.ctx.workspace
if codeletName == 'breaker' or 'description' in codeletName:
howMany = 1
if 'translator' in codeletName:
if not workspace.rule:
howMany = 0
howMany = 1
if 'rule' in codeletName:
howMany = 2
if 'group' in codeletName and (not workspace.numberOfBonds()):
howMany = 0
if 'replacement' in codeletName and workspace.rule:
howMany = 0
number = 0
if 'bond' in codeletName:
number = workspace.numberOfUnrelatedObjects()
if 'group' in codeletName:
number = workspace.numberOfUngroupedObjects()
if 'replacement' in codeletName:
number = workspace.numberOfUnreplacedObjects()
if 'correspondence' in codeletName:
number = workspace.numberOfUncorrespondingObjects()
if number < random.sqrtBlur(2.0):
howMany = 1
if number < random.sqrtBlur(4.0):
howMany = 2
howMany = 3
for _ in range(howMany):
if not random.coinFlip(probability):
continue
                i = int(node.activation * node.conceptualDepth / 100.0) * NUMBER_OF_BINS / 100
                if i >= NUMBER_OF_BINS:
                    i = NUMBER_OF_BINS
                urgency = i + 1
codelet = Codelet(codeletName, urgency, [node], self.codeletsRun)
logging.info('Post top down: %s, with urgency: %d', codelet.name, urgency)
self.codelets += [codelet]
if len(self.codelets) > 100:
oldCodelet = self.chooseOldCodelet()
self.removeCodelet(oldCodelet)
|
copycat
|
positive
|
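# A hypothetical standalone version of the urgency-bin computation inlined in the loop above;
# NUMBER_OF_BINS is an assumed value here, not read from the copycat sources.
NUMBER_OF_BINS = 7

def get_urgency_bin(value):
    """Map a 0-100 urgency value to a 1-based bin index (at most NUMBER_OF_BINS + 1)."""
    i = int(value) * NUMBER_OF_BINS // 100
    if i >= NUMBER_OF_BINS:
        i = NUMBER_OF_BINS
    return i + 1

print(get_urgency_bin(50.0), get_urgency_bin(100.0))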
def close(self):
HTMLParser.HTMLParser.close(self)
try:
nochr = unicode('')
except NameError:
nochr = str('')
<DeepExtract>
if self.p_p == 0:
self.p_p = 1
</DeepExtract>
<DeepExtract>
if self.abbr_data is not None:
self.abbr_data += ''
if not self.quiet:
if self.google_doc:
lstripped_data = ''.lstrip()
if self.drop_white_space and (not (self.pre or self.code)):
'' = lstripped_data
if lstripped_data != '':
self.drop_white_space = 0
if 0 and (not self.pre):
'' = re.sub('\\s+', ' ', '')
if '' and ''[0] == ' ':
self.space = 1
'' = ''[1:]
if not '' and (not 'end'):
return
if self.startpre:
if not ''.startswith('\n'):
'' = '\n' + ''
bq = '>' * self.blockquote
if not ('end' and '' and (''[0] == '>')) and self.blockquote:
bq += ' '
if self.pre:
if not self.list:
bq += ' '
for i in range(len(self.list)):
bq += ' '
'' = ''.replace('\n', '\n' + bq)
if self.startpre:
self.startpre = 0
if self.list:
'' = ''.lstrip('\n')
if self.start:
self.space = 0
self.p_p = 0
self.start = 0
if 'end' == 'end':
self.p_p = 0
self.out('\n')
self.space = 0
if self.p_p:
self.out((self.br_toggle + '\n' + bq) * self.p_p)
self.space = 0
self.br_toggle = ''
if self.space:
if not self.lastWasNL:
self.out(' ')
self.space = 0
if self.a and (self.p_p == 2 and self.links_each_paragraph or 'end' == 'end'):
if 'end' == 'end':
self.out('\n')
newa = []
for link in self.a:
if self.outcount > link['outcount']:
self.out(' [' + str(link['count']) + ']: ' + urlparse.urljoin(self.baseurl, link['href']))
if 'title' in link:
self.out(' (' + link['title'] + ')')
self.out('\n')
else:
newa.append(link)
if self.a != newa:
self.out('\n')
self.a = newa
if self.abbr_list and 'end' == 'end':
for (abbr, definition) in self.abbr_list.items():
self.out(' *[' + abbr + ']: ' + definition + '\n')
self.p_p = 0
self.out('')
self.outcount += 1
</DeepExtract>
outtext = nochr.join(self.outtextlist)
if self.unicode_snob:
try:
nbsp = unichr(name2cp('nbsp'))
except NameError:
nbsp = chr(name2cp('nbsp'))
else:
try:
nbsp = unichr(32)
except NameError:
nbsp = chr(32)
try:
outtext = outtext.replace(unicode(' _place_holder;'), nbsp)
except NameError:
outtext = outtext.replace(' _place_holder;', nbsp)
self.outtextlist = []
return outtext
|
def close(self):
HTMLParser.HTMLParser.close(self)
try:
nochr = unicode('')
except NameError:
nochr = str('')
if self.p_p == 0:
self.p_p = 1
if self.abbr_data is not None:
self.abbr_data += ''
if not self.quiet:
if self.google_doc:
lstripped_data = ''.lstrip()
if self.drop_white_space and (not (self.pre or self.code)):
'' = lstripped_data
if lstripped_data != '':
self.drop_white_space = 0
if 0 and (not self.pre):
'' = re.sub('\\s+', ' ', '')
if '' and ''[0] == ' ':
self.space = 1
'' = ''[1:]
if not '' and (not 'end'):
return
if self.startpre:
if not ''.startswith('\n'):
'' = '\n' + ''
bq = '>' * self.blockquote
if not ('end' and '' and (''[0] == '>')) and self.blockquote:
bq += ' '
if self.pre:
if not self.list:
bq += ' '
for i in range(len(self.list)):
bq += ' '
'' = ''.replace('\n', '\n' + bq)
if self.startpre:
self.startpre = 0
if self.list:
'' = ''.lstrip('\n')
if self.start:
self.space = 0
self.p_p = 0
self.start = 0
if 'end' == 'end':
self.p_p = 0
self.out('\n')
self.space = 0
if self.p_p:
self.out((self.br_toggle + '\n' + bq) * self.p_p)
self.space = 0
self.br_toggle = ''
if self.space:
if not self.lastWasNL:
self.out(' ')
self.space = 0
if self.a and (self.p_p == 2 and self.links_each_paragraph or 'end' == 'end'):
if 'end' == 'end':
self.out('\n')
newa = []
for link in self.a:
if self.outcount > link['outcount']:
self.out(' [' + str(link['count']) + ']: ' + urlparse.urljoin(self.baseurl, link['href']))
if 'title' in link:
self.out(' (' + link['title'] + ')')
self.out('\n')
else:
newa.append(link)
if self.a != newa:
self.out('\n')
self.a = newa
if self.abbr_list and 'end' == 'end':
for (abbr, definition) in self.abbr_list.items():
self.out(' *[' + abbr + ']: ' + definition + '\n')
self.p_p = 0
self.out('')
self.outcount += 1
outtext = nochr.join(self.outtextlist)
if self.unicode_snob:
try:
nbsp = unichr(name2cp('nbsp'))
except NameError:
nbsp = chr(name2cp('nbsp'))
else:
try:
nbsp = unichr(32)
except NameError:
nbsp = chr(32)
try:
outtext = outtext.replace(unicode(' _place_holder;'), nbsp)
except NameError:
outtext = outtext.replace(' _place_holder;', nbsp)
self.outtextlist = []
return outtext
|
alfred_zotquery
|
positive
|
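# A rough, self-contained illustration of the nbsp substitution done at the end of close() above,
# assuming Python 3's html.entities; the code above goes through a name2cp helper and Py2/Py3
# fallbacks instead.
from html.entities import name2codepoint

outtextlist = ['hello', ' _place_holder;', 'world']
nbsp = chr(name2codepoint['nbsp'])  # the unicode_snob branch keeps a real non-breaking space
outtext = ''.join(outtextlist).replace(' _place_holder;', nbsp)
print(repr(outtext))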
def __init__(self, num_classes=10):
super(MobileNet, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
<DeepExtract>
    in_planes = 32
    layers = []
    for x in self.cfg:
        out_planes = x if isinstance(x, int) else x[0]
        stride = 1 if isinstance(x, int) else x[1]
        layers.append(Block(in_planes, out_planes, stride))
        in_planes = out_planes
self.layers = nn.Sequential(*layers)
</DeepExtract>
self.linear = nn.Linear(1024, num_classes)
|
def __init__(self, num_classes=10):
super(MobileNet, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
    in_planes = 32
    layers = []
    for x in self.cfg:
        out_planes = x if isinstance(x, int) else x[0]
        stride = 1 if isinstance(x, int) else x[1]
        layers.append(Block(in_planes, out_planes, stride))
        in_planes = out_planes
self.layers = nn.Sequential(*layers)
self.linear = nn.Linear(1024, num_classes)
|
dnn-quant-ocs
|
positive
|
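# A small dry run of how the cfg list above expands into (in_planes, out_planes, stride)
# arguments; Block is replaced by a plain tuple and the cfg shown here is a shortened example.
cfg = [64, (128, 2), 128, (256, 2)]

in_planes, layers = 32, []
for x in cfg:
    out_planes = x if isinstance(x, int) else x[0]
    stride = 1 if isinstance(x, int) else x[1]
    layers.append((in_planes, out_planes, stride))
    in_planes = out_planes

print(layers)  # [(32, 64, 1), (64, 128, 2), (128, 128, 1), (128, 256, 2)]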
def hash_all_func(data):
if isinstance(data, str):
data = data.encode('ascii')
<DeepExtract>
length = lenpos = len(data)
a = b = c = 3735928559 + length + 0
c += 0
c &= 4294967295
p = 0
while lenpos > 12:
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
a &= 4294967295
b += data[p + 4] + (data[p + 5] << 8) + (data[p + 6] << 16) + (data[p + 7] << 24)
b &= 4294967295
c += data[p + 8] + (data[p + 9] << 8) + (data[p + 10] << 16) + (data[p + 11] << 24)
c &= 4294967295
(a, b, c) = mix(a, b, c)
p += 12
lenpos -= 12
if lenpos == 12:
c += data[p + 8] + (data[p + 9] << 8) + (data[p + 10] << 16) + (data[p + 11] << 24)
b += data[p + 4] + (data[p + 5] << 8) + (data[p + 6] << 16) + (data[p + 7] << 24)
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 11:
c += data[p + 8] + (data[p + 9] << 8) + (data[p + 10] << 16)
b += data[p + 4] + (data[p + 5] << 8) + (data[p + 6] << 16) + (data[p + 7] << 24)
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 10:
c += data[p + 8] + (data[p + 9] << 8)
b += data[p + 4] + (data[p + 5] << 8) + (data[p + 6] << 16) + (data[p + 7] << 24)
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 9:
c += data[p + 8]
b += data[p + 4] + (data[p + 5] << 8) + (data[p + 6] << 16) + (data[p + 7] << 24)
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 8:
b += data[p + 4] + (data[p + 5] << 8) + (data[p + 6] << 16) + (data[p + 7] << 24)
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 7:
b += data[p + 4] + (data[p + 5] << 8) + (data[p + 6] << 16)
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 6:
b += (data[p + 5] << 8) + data[p + 4]
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 5:
b += data[p + 4]
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 4:
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 3:
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16)
if lenpos == 2:
a += data[p + 0] + (data[p + 1] << 8)
if lenpos == 1:
a += data[p + 0]
a &= 4294967295
b &= 4294967295
c &= 4294967295
if lenpos == 0:
(c, b) = (c, b)
(a, b, c) = final(a, b, c)
(c, b) = (c, b)
</DeepExtract>
v = mmh3.hash128(key=data, x64arch=True)
return (c, v >> 16 & 281474976710655, int(np.int64(np.uint64(v & 18446744073709551615))))
|
def hash_all_func(data):
if isinstance(data, str):
data = data.encode('ascii')
length = lenpos = len(data)
a = b = c = 3735928559 + length + 0
c += 0
c &= 4294967295
p = 0
while lenpos > 12:
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
a &= 4294967295
b += data[p + 4] + (data[p + 5] << 8) + (data[p + 6] << 16) + (data[p + 7] << 24)
b &= 4294967295
c += data[p + 8] + (data[p + 9] << 8) + (data[p + 10] << 16) + (data[p + 11] << 24)
c &= 4294967295
(a, b, c) = mix(a, b, c)
p += 12
lenpos -= 12
if lenpos == 12:
c += data[p + 8] + (data[p + 9] << 8) + (data[p + 10] << 16) + (data[p + 11] << 24)
b += data[p + 4] + (data[p + 5] << 8) + (data[p + 6] << 16) + (data[p + 7] << 24)
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 11:
c += data[p + 8] + (data[p + 9] << 8) + (data[p + 10] << 16)
b += data[p + 4] + (data[p + 5] << 8) + (data[p + 6] << 16) + (data[p + 7] << 24)
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 10:
c += data[p + 8] + (data[p + 9] << 8)
b += data[p + 4] + (data[p + 5] << 8) + (data[p + 6] << 16) + (data[p + 7] << 24)
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 9:
c += data[p + 8]
b += data[p + 4] + (data[p + 5] << 8) + (data[p + 6] << 16) + (data[p + 7] << 24)
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 8:
b += data[p + 4] + (data[p + 5] << 8) + (data[p + 6] << 16) + (data[p + 7] << 24)
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 7:
b += data[p + 4] + (data[p + 5] << 8) + (data[p + 6] << 16)
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 6:
b += (data[p + 5] << 8) + data[p + 4]
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 5:
b += data[p + 4]
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 4:
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16) + (data[p + 3] << 24)
if lenpos == 3:
a += data[p + 0] + (data[p + 1] << 8) + (data[p + 2] << 16)
if lenpos == 2:
a += data[p + 0] + (data[p + 1] << 8)
if lenpos == 1:
a += data[p + 0]
a &= 4294967295
b &= 4294967295
c &= 4294967295
if lenpos == 0:
(c, b) = (c, b)
(a, b, c) = final(a, b, c)
(c, b) = (c, b)
v = mmh3.hash128(key=data, x64arch=True)
return (c, v >> 16 & 281474976710655, int(np.int64(np.uint64(v & 18446744073709551615))))
|
deca
|
positive
|
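# Minimal illustration of the 32-bit wrap-around arithmetic used throughout the lookup3-style
# mixing above; the values below are arbitrary.
MASK32 = 0xFFFFFFFF

def add32(x, y):
    """Add two 32-bit values with wrap-around, mirroring the `& 4294967295` lines above."""
    return (x + y) & MASK32

print(hex(add32(0xFFFFFFFF, 1)))     # 0x0
print(hex(add32(0xDEADBEEF, 0x10)))  # 0xdeadbeff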
def dot(self):
<DeepExtract>
result = self.next_state
self.next_state += 1
initial_state = result
</DeepExtract>
<DeepExtract>
result = self.next_state
self.next_state += 1
accepting_state = result
</DeepExtract>
transitions = {}
transitions[initial_state] = {accepting_state: DOT_TRANSITION}
return {'states': set([initial_state, accepting_state]), 'transitions': transitions, 'initial_state': initial_state, 'accepting_states': set([accepting_state])}
|
def dot(self):
result = self.next_state
self.next_state += 1
initial_state = result
result = self.next_state
self.next_state += 1
accepting_state = result
transitions = {}
transitions[initial_state] = {accepting_state: DOT_TRANSITION}
return {'states': set([initial_state, accepting_state]), 'transitions': transitions, 'initial_state': initial_state, 'accepting_states': set([accepting_state])}
|
acsploit
|
positive
|
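# A hand-built example of the NFA fragment shape returned by dot() above: one initial state,
# one accepting state, and a single DOT transition between them. State numbers are arbitrary
# and DOT_TRANSITION is a placeholder, not the constant defined in the acsploit module.
DOT_TRANSITION = '.'

fragment = {
    'states': {0, 1},
    'transitions': {0: {1: DOT_TRANSITION}},
    'initial_state': 0,
    'accepting_states': {1},
}
print(fragment['transitions'][fragment['initial_state']])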
def se_rescale(self, input, internal_neurons, specify_name=True):
if self.data_format == 'NHWC':
pooled_inputs = tf.reduce_mean(input, [1, 2], keep_dims=True)
num_channels = input.get_shape().as_list()[3]
else:
pooled_inputs = tf.reduce_mean(input, [2, 3], keep_dims=True)
num_channels = input.get_shape().as_list()[1]
if specify_name:
up_name = 'seup'
down_name = 'sedown'
up_name_postfix = None
down_name_postfix = None
count_convs = False
else:
up_name = None
down_name = None
up_name_postfix = '_seup'
down_name_postfix = '_sedown'
count_convs = True
<DeepExtract>
if pooled_inputs is None:
pooled_inputs = self.top_layer
if num_channels_in is None:
num_channels_in = self.top_size
if stddev is not None and kernel_initializer is None:
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
if down_name is None:
down_name = 'conv' + str(self.counts['conv'])
if down_name_postfix is not None:
down_name += down_name_postfix
if count_convs:
self.counts['conv'] += 1
with tf.variable_scope(down_name):
strides = [1, 1, 1, 1]
if self.data_format == 'NCHW':
strides = [strides[0], strides[3], strides[1], strides[2]]
if 'VALID' != 'SAME_RESNET':
conv = self._conv2d_impl(pooled_inputs, num_channels_in, internal_neurons, kernel_size=[1, 1], strides=[1, 1], padding='VALID', kernel_initializer=kernel_initializer, specify_padding=specify_padding)
elif 1 == 1 and 1 == 1:
conv = self._conv2d_impl(pooled_inputs, num_channels_in, internal_neurons, kernel_size=[1, 1], strides=[1, 1], padding='SAME', kernel_initializer=kernel_initializer)
else:
rate = 1
kernel_height_effective = 1 + (1 - 1) * (rate - 1)
pad_h_beg = (kernel_height_effective - 1) // 2
pad_h_end = kernel_height_effective - 1 - pad_h_beg
kernel_width_effective = 1 + (1 - 1) * (rate - 1)
pad_w_beg = (kernel_width_effective - 1) // 2
pad_w_end = kernel_width_effective - 1 - pad_w_beg
padding = [[0, 0], [pad_h_beg, pad_h_end], [pad_w_beg, pad_w_end], [0, 0]]
if self.data_format == 'NCHW':
padding = [padding[0], padding[3], padding[1], padding[2]]
pooled_inputs = tf.pad(pooled_inputs, padding)
conv = self._conv2d_impl(pooled_inputs, num_channels_in, internal_neurons, kernel_size=[1, 1], strides=[1, 1], padding='VALID', kernel_initializer=kernel_initializer)
if False is None:
False = self.use_batch_norm
if not False:
if 0.0 is not None:
biases = self.get_variable('biases', [internal_neurons], self.variable_dtype, self.dtype, initializer=tf.constant_initializer(0.0))
biased = tf.reshape(tf.nn.bias_add(conv, biases, data_format=self.data_format), conv.get_shape())
else:
biased = conv
else:
self.top_layer = conv
self.top_size = internal_neurons
biased = self.batch_norm(**self.batch_norm_config)
if self.need_record_internal_outputs:
if len(self.internal_outputs_dict) == 0:
self.internal_outputs_dict['input'] = tf.identity(pooled_inputs)
self.internal_outputs_dict['{}${}'.format(self.num_internal_conv_outputs, down_name)] = conv
self.internal_outputs_dict['{}#{}'.format(self.num_internal_conv_outputs, down_name)] = biased
self.num_internal_conv_outputs += 1
if 'relu' == 'relu':
conv1 = tf.nn.relu(biased)
elif 'relu' == 'linear' or 'relu' is None:
conv1 = biased
elif 'relu' == 'tanh':
conv1 = tf.nn.tanh(biased)
elif 'relu' == 'sigmoid':
conv1 = tf.nn.sigmoid(biased)
else:
raise KeyError("Invalid activation type '%s'" % 'relu')
self.top_layer = conv1
self.top_size = internal_neurons
down_inputs = conv1
</DeepExtract>
<DeepExtract>
if down_inputs is None:
down_inputs = self.top_layer
if num_channels_in is None:
num_channels_in = self.top_size
if stddev is not None and kernel_initializer is None:
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
if up_name is None:
up_name = 'conv' + str(self.counts['conv'])
if up_name_postfix is not None:
up_name += up_name_postfix
if count_convs:
self.counts['conv'] += 1
with tf.variable_scope(up_name):
strides = [1, 1, 1, 1]
if self.data_format == 'NCHW':
strides = [strides[0], strides[3], strides[1], strides[2]]
if 'VALID' != 'SAME_RESNET':
conv = self._conv2d_impl(down_inputs, num_channels_in, num_channels, kernel_size=[1, 1], strides=[1, 1], padding='VALID', kernel_initializer=kernel_initializer, specify_padding=specify_padding)
elif 1 == 1 and 1 == 1:
conv = self._conv2d_impl(down_inputs, num_channels_in, num_channels, kernel_size=[1, 1], strides=[1, 1], padding='SAME', kernel_initializer=kernel_initializer)
else:
rate = 1
kernel_height_effective = 1 + (1 - 1) * (rate - 1)
pad_h_beg = (kernel_height_effective - 1) // 2
pad_h_end = kernel_height_effective - 1 - pad_h_beg
kernel_width_effective = 1 + (1 - 1) * (rate - 1)
pad_w_beg = (kernel_width_effective - 1) // 2
pad_w_end = kernel_width_effective - 1 - pad_w_beg
padding = [[0, 0], [pad_h_beg, pad_h_end], [pad_w_beg, pad_w_end], [0, 0]]
if self.data_format == 'NCHW':
padding = [padding[0], padding[3], padding[1], padding[2]]
down_inputs = tf.pad(down_inputs, padding)
conv = self._conv2d_impl(down_inputs, num_channels_in, num_channels, kernel_size=[1, 1], strides=[1, 1], padding='VALID', kernel_initializer=kernel_initializer)
if False is None:
False = self.use_batch_norm
if not False:
if 0.0 is not None:
biases = self.get_variable('biases', [num_channels], self.variable_dtype, self.dtype, initializer=tf.constant_initializer(0.0))
biased = tf.reshape(tf.nn.bias_add(conv, biases, data_format=self.data_format), conv.get_shape())
else:
biased = conv
else:
self.top_layer = conv
self.top_size = num_channels
biased = self.batch_norm(**self.batch_norm_config)
if self.need_record_internal_outputs:
if len(self.internal_outputs_dict) == 0:
self.internal_outputs_dict['input'] = tf.identity(down_inputs)
self.internal_outputs_dict['{}${}'.format(self.num_internal_conv_outputs, up_name)] = conv
self.internal_outputs_dict['{}#{}'.format(self.num_internal_conv_outputs, up_name)] = biased
self.num_internal_conv_outputs += 1
if None == 'relu':
conv1 = tf.nn.relu(biased)
elif None == 'linear' or None is None:
conv1 = biased
elif None == 'tanh':
conv1 = tf.nn.tanh(biased)
elif None == 'sigmoid':
conv1 = tf.nn.sigmoid(biased)
else:
raise KeyError("Invalid activation type '%s'" % None)
self.top_layer = conv1
self.top_size = num_channels
up_inputs = conv1
</DeepExtract>
prob_outputs = tf.nn.sigmoid(up_inputs)
rescaled = tf.multiply(prob_outputs, input)
return rescaled
|
def se_rescale(self, input, internal_neurons, specify_name=True):
if self.data_format == 'NHWC':
pooled_inputs = tf.reduce_mean(input, [1, 2], keep_dims=True)
num_channels = input.get_shape().as_list()[3]
else:
pooled_inputs = tf.reduce_mean(input, [2, 3], keep_dims=True)
num_channels = input.get_shape().as_list()[1]
if specify_name:
up_name = 'seup'
down_name = 'sedown'
up_name_postfix = None
down_name_postfix = None
count_convs = False
else:
up_name = None
down_name = None
up_name_postfix = '_seup'
down_name_postfix = '_sedown'
count_convs = True
if pooled_inputs is None:
pooled_inputs = self.top_layer
if num_channels_in is None:
num_channels_in = self.top_size
if stddev is not None and kernel_initializer is None:
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
if down_name is None:
down_name = 'conv' + str(self.counts['conv'])
if down_name_postfix is not None:
down_name += down_name_postfix
if count_convs:
self.counts['conv'] += 1
with tf.variable_scope(down_name):
strides = [1, 1, 1, 1]
if self.data_format == 'NCHW':
strides = [strides[0], strides[3], strides[1], strides[2]]
if 'VALID' != 'SAME_RESNET':
conv = self._conv2d_impl(pooled_inputs, num_channels_in, internal_neurons, kernel_size=[1, 1], strides=[1, 1], padding='VALID', kernel_initializer=kernel_initializer, specify_padding=specify_padding)
elif 1 == 1 and 1 == 1:
conv = self._conv2d_impl(pooled_inputs, num_channels_in, internal_neurons, kernel_size=[1, 1], strides=[1, 1], padding='SAME', kernel_initializer=kernel_initializer)
else:
rate = 1
kernel_height_effective = 1 + (1 - 1) * (rate - 1)
pad_h_beg = (kernel_height_effective - 1) // 2
pad_h_end = kernel_height_effective - 1 - pad_h_beg
kernel_width_effective = 1 + (1 - 1) * (rate - 1)
pad_w_beg = (kernel_width_effective - 1) // 2
pad_w_end = kernel_width_effective - 1 - pad_w_beg
padding = [[0, 0], [pad_h_beg, pad_h_end], [pad_w_beg, pad_w_end], [0, 0]]
if self.data_format == 'NCHW':
padding = [padding[0], padding[3], padding[1], padding[2]]
pooled_inputs = tf.pad(pooled_inputs, padding)
conv = self._conv2d_impl(pooled_inputs, num_channels_in, internal_neurons, kernel_size=[1, 1], strides=[1, 1], padding='VALID', kernel_initializer=kernel_initializer)
if False is None:
False = self.use_batch_norm
if not False:
if 0.0 is not None:
biases = self.get_variable('biases', [internal_neurons], self.variable_dtype, self.dtype, initializer=tf.constant_initializer(0.0))
biased = tf.reshape(tf.nn.bias_add(conv, biases, data_format=self.data_format), conv.get_shape())
else:
biased = conv
else:
self.top_layer = conv
self.top_size = internal_neurons
biased = self.batch_norm(**self.batch_norm_config)
if self.need_record_internal_outputs:
if len(self.internal_outputs_dict) == 0:
self.internal_outputs_dict['input'] = tf.identity(pooled_inputs)
self.internal_outputs_dict['{}${}'.format(self.num_internal_conv_outputs, down_name)] = conv
self.internal_outputs_dict['{}#{}'.format(self.num_internal_conv_outputs, down_name)] = biased
self.num_internal_conv_outputs += 1
if 'relu' == 'relu':
conv1 = tf.nn.relu(biased)
elif 'relu' == 'linear' or 'relu' is None:
conv1 = biased
elif 'relu' == 'tanh':
conv1 = tf.nn.tanh(biased)
elif 'relu' == 'sigmoid':
conv1 = tf.nn.sigmoid(biased)
else:
raise KeyError("Invalid activation type '%s'" % 'relu')
self.top_layer = conv1
self.top_size = internal_neurons
down_inputs = conv1
if down_inputs is None:
down_inputs = self.top_layer
if num_channels_in is None:
num_channels_in = self.top_size
if stddev is not None and kernel_initializer is None:
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
if up_name is None:
up_name = 'conv' + str(self.counts['conv'])
if up_name_postfix is not None:
up_name += up_name_postfix
if count_convs:
self.counts['conv'] += 1
with tf.variable_scope(up_name):
strides = [1, 1, 1, 1]
if self.data_format == 'NCHW':
strides = [strides[0], strides[3], strides[1], strides[2]]
if 'VALID' != 'SAME_RESNET':
conv = self._conv2d_impl(down_inputs, num_channels_in, num_channels, kernel_size=[1, 1], strides=[1, 1], padding='VALID', kernel_initializer=kernel_initializer, specify_padding=specify_padding)
elif 1 == 1 and 1 == 1:
conv = self._conv2d_impl(down_inputs, num_channels_in, num_channels, kernel_size=[1, 1], strides=[1, 1], padding='SAME', kernel_initializer=kernel_initializer)
else:
rate = 1
kernel_height_effective = 1 + (1 - 1) * (rate - 1)
pad_h_beg = (kernel_height_effective - 1) // 2
pad_h_end = kernel_height_effective - 1 - pad_h_beg
kernel_width_effective = 1 + (1 - 1) * (rate - 1)
pad_w_beg = (kernel_width_effective - 1) // 2
pad_w_end = kernel_width_effective - 1 - pad_w_beg
padding = [[0, 0], [pad_h_beg, pad_h_end], [pad_w_beg, pad_w_end], [0, 0]]
if self.data_format == 'NCHW':
padding = [padding[0], padding[3], padding[1], padding[2]]
down_inputs = tf.pad(down_inputs, padding)
conv = self._conv2d_impl(down_inputs, num_channels_in, num_channels, kernel_size=[1, 1], strides=[1, 1], padding='VALID', kernel_initializer=kernel_initializer)
if False is None:
False = self.use_batch_norm
if not False:
if 0.0 is not None:
biases = self.get_variable('biases', [num_channels], self.variable_dtype, self.dtype, initializer=tf.constant_initializer(0.0))
biased = tf.reshape(tf.nn.bias_add(conv, biases, data_format=self.data_format), conv.get_shape())
else:
biased = conv
else:
self.top_layer = conv
self.top_size = num_channels
biased = self.batch_norm(**self.batch_norm_config)
if self.need_record_internal_outputs:
if len(self.internal_outputs_dict) == 0:
self.internal_outputs_dict['input'] = tf.identity(down_inputs)
self.internal_outputs_dict['{}${}'.format(self.num_internal_conv_outputs, up_name)] = conv
self.internal_outputs_dict['{}#{}'.format(self.num_internal_conv_outputs, up_name)] = biased
self.num_internal_conv_outputs += 1
if None == 'relu':
conv1 = tf.nn.relu(biased)
elif None == 'linear' or None is None:
conv1 = biased
elif None == 'tanh':
conv1 = tf.nn.tanh(biased)
elif None == 'sigmoid':
conv1 = tf.nn.sigmoid(biased)
else:
raise KeyError("Invalid activation type '%s'" % None)
self.top_layer = conv1
self.top_size = num_channels
up_inputs = conv1
prob_outputs = tf.nn.sigmoid(up_inputs)
rescaled = tf.multiply(prob_outputs, input)
return rescaled
|
AOFP
|
positive
|
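# A numpy sketch of the squeeze-and-excitation idea implemented above: global average pooling,
# a bottleneck transform (relu), an expansion transform (sigmoid), then channel-wise rescaling.
# Shapes are NHWC and the weights are random placeholders, not values from the model above.
import numpy as np

def se_rescale(x, internal_neurons, rng=np.random.default_rng(0)):
    n, h, w, c = x.shape
    pooled = x.mean(axis=(1, 2))                   # (n, c) squeeze
    w_down = rng.standard_normal((c, internal_neurons))
    w_up = rng.standard_normal((internal_neurons, c))
    down = np.maximum(pooled @ w_down, 0.0)        # relu
    up = 1.0 / (1.0 + np.exp(-(down @ w_up)))      # sigmoid
    return x * up.reshape(n, 1, 1, c)              # excitation: rescale each channel

out = se_rescale(np.ones((2, 4, 4, 8)), internal_neurons=2)
print(out.shape)  # (2, 4, 4, 8)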
def _extract_argument2s(self, doc_parsed_result, doc_connectives, syntax_tree_cache=None):
<DeepExtract>
if syntax_tree_cache is None:
syntax_tree_cache = dict()
doc_arg2_feats = list()
for (conn_idx, connective) in enumerate(doc_connectives):
(sent_idx, conn_indices) = (connective['sent_idx'], connective['indices'])
sent_parsed_result = doc_parsed_result[sent_idx]
sent_len = len(sent_parsed_result['tokens'])
if sent_idx in syntax_tree_cache:
syntax_tree = syntax_tree_cache[sent_idx]
else:
syntax_tree = syntax_tree_cache[sent_idx] = SyntaxTree(sent_parsed_result['parse'])
arg2_clauses = self._get_argument2_clauses(sent_parsed_result, connective, syntax_tree)
if len(arg2_clauses) == 0:
continue
conn = ' '.join([sent_parsed_result['tokens'][idx] for idx in conn_indices])
conn_lower = conn.lower()
conn_category = self.conn_category_mapping[conn_lower]
cpos = '_'.join([sent_parsed_result['pos_tags'][idx] for idx in conn_indices])
try:
conn_node = syntax_tree.get_self_category_node_by_token_indices(conn_indices)
except BaseException as e:
print(sent_parsed_result)
continue
parent_node = conn_node.up
if parent_node:
parent_category = parent_node.name
children = parent_node.get_children()
(left_node, right_node) = (None, None)
for (child_idx, child) in enumerate(children):
if conn_node == child:
if child_idx > 0:
left_node = children[child_idx - 1]
if child_idx < len(children) - 1:
right_node = children[child_idx + 1]
else:
left_node = None
right_node = None
conn_ctx = list()
conn_ctx.append(conn_node.name)
conn_ctx.append(parent_node.name if parent_node else 'NULL')
conn_ctx.append(left_node.name if left_node else 'NULL')
conn_ctx.append(right_node.name if right_node else 'NULL')
conn_ctx = '-'.join(conn_ctx)
for (clause_idx, clause) in enumerate(arg2_clauses):
clause_first = sent_parsed_result['tokens'][clause[0]]
clause_last = sent_parsed_result['tokens'][clause[-1]]
if clause[0] == 0:
prev = 'NONE'
else:
(prev_sent_idx, prev_idx) = get_prev_token_index(doc_parsed_result, sent_idx, clause[0], skip_tokens=CLAUSE_SEPARATOR_SET)
if prev_sent_idx == sent_idx:
if prev_idx + 1 == clause[0]:
prev = sent_parsed_result['tokens'][prev_idx]
else:
prev = ' '.join([sent_parsed_result['tokens'][idx] for idx in range(prev_idx + 1, clause[0])])
elif clause[0] - 1 >= 0:
prev = ' '.join([sent_parsed_result['tokens'][idx] for idx in range(0, clause[0])])
else:
prev = 'NONE'
if clause[-1] == len(sent_parsed_result['tokens']) - 1:
next = 'NONE'
else:
(next_sent_idx, next_idx) = get_next_token_index(doc_parsed_result, sent_idx, clause[-1], skip_tokens=CLAUSE_SEPARATOR_SET)
if next_sent_idx == sent_idx:
if next_idx - 1 == clause[-1]:
next = sent_parsed_result['tokens'][next_idx]
else:
next = ' '.join([sent_parsed_result['tokens'][idx] for idx in range(clause[-1] + 1, next_idx)])
elif clause[-1] + 1 < len(sent_parsed_result['tokens']):
next = ' '.join([sent_parsed_result['tokens'][idx] for idx in range(clause[-1] + 1, len(sent_parsed_result['tokens']))])
else:
next = 'NONE'
try:
clause_first_node = syntax_tree.get_leaf_node_by_token_index(clause[0]).up
if clause_idx > 0:
prev_last_node = syntax_tree.get_leaf_node_by_token_index(arg2_clauses[clause_idx - 1][-1]).up
else:
prev_last_node = None
except:
clause_first_node = None
prev_last_node = None
clause_production_rules = list()
if syntax_tree.tree:
for node in syntax_tree.get_subtree_by_token_indices(clause).tree.traverse():
if not node.is_leaf():
clause_production_rules.append(node.name + '-->' + ' '.join([child.name for child in node.get_children()]))
try:
conn_to_root_paths = list()
cparent_to_root_paths = list()
for idx in conn_indices:
node = syntax_tree.get_leaf_node_by_token_index(idx)
path = syntax_tree.get_node_path_to_root(node)
conn_to_root_paths.append(path)
parent_node = node.up
path = syntax_tree.get_node_path_to_root(parent_node)
cparent_to_root_paths.append(path)
cparent_to_root_path_node_names = chain.from_iterable([path.split('-->') for path in cparent_to_root_paths])
conn_to_root_path = '&'.join(conn_to_root_paths)
compressed_cparent_to_root_path = '&'.join([get_compressed_path(path) for path in cparent_to_root_paths])
except:
cparent_to_root_path_node_names = ['NONE_TREE']
conn_to_root_path = 'NONE_TREE'
compressed_cparent_to_root_path = 'NONE_TREE'
try:
if prev_last_node:
clause_first_prev_last_parse_path = syntax_tree.get_node_to_node_path(clause_first_node, prev_last_node)
else:
clause_first_prev_last_parse_path = 'NONE'
except:
clause_first_prev_last_parse_path = 'NONE_TREE'
arg2_feats = list()
arg2_feats.append(Feature.get_feature_by_feat_list(self.clause_production_rule_dict2, clause_production_rules))
arg2_feats.append(Feature.get_feature_by_feat(self.clause_first_dict2, clause_first))
arg2_feats.append(Feature.get_feature_by_feat(self.clause_first_prev_last_parse_path_dict2, clause_first_prev_last_parse_path))
arg2_feats.append(Feature.get_feature_by_feat(self.next_dict2, next))
arg2_feats.append(Feature.get_feature_by_feat(self.conn_to_root_path_dict2, conn_to_root_path))
arg2_feats.append(Feature.get_feature_by_feat(self.conn_dict2, conn))
arg2_feats.append(Feature.get_feature_by_feat(self.prev_dict2, prev))
arg2_feats.append(Feature.get_feature_by_feat(self.clause_last_next_dict2, clause_last + '|' + next))
arg2_feats.append(Feature.get_feature_by_feat(self.conn_lower_dict2, conn_lower))
arg2_feats.append(Feature.get_feature_by_feat(self.conn_conn_ctx_dict2, conn + '|' + conn_ctx))
arg2_feats.append(Feature.get_feature_by_feat(self.compressed_cparent_to_root_path_dict2, compressed_cparent_to_root_path))
arg2_feats.append(Feature.get_feature_by_feat(self.cpos_dict2, cpos))
arg2_feats.append(Feature.get_feature_by_feat_list(self.cparent_to_root_path_node_name_dict2, cparent_to_root_path_node_names))
arg2_feats.append(Feature.get_feature_by_feat(self.conn_category_dict, conn_category))
arg2_feats = Feature.merge_features(arg2_feats, '%d|%d|%s' % (sent_idx, conn_idx, ','.join([str(idx) for idx in clause])))
doc_arg2_feats.append(arg2_feats)
doc_arg2_feats = doc_arg2_feats
</DeepExtract>
<DeepExtract>
if len(doc_arg2_feats) == 0:
doc_arg2_labels = list()
names = [x.name for x in doc_arg2_feats]
feats = sparse.vstack(list(map(lambda x: x.to_csr(), doc_arg2_feats)))
pred = self.ps_arg2_model.predict(feats)
doc_arg2_labels = list(zip(names, pred))
</DeepExtract>
doc_conn_arg2s = [list() for _ in range(len(doc_connectives))]
for (feats_name, label) in doc_arg2_labels:
(sent_idx, conn_idx, arg2_indices) = feats_name.split('|')
conn_idx = int(conn_idx)
arg2_indices = [int(idx) for idx in arg2_indices.split(',')]
doc_conn_arg2s[conn_idx].append((arg2_indices, label))
for (connective, conn_arg2s) in zip(doc_connectives, doc_conn_arg2s):
if len(conn_arg2s) == 0:
continue
(sent_idx, conn_indices) = (connective['sent_idx'], connective['indices'])
sent_parsed_result = doc_parsed_result[sent_idx]
sent_len = len(sent_parsed_result['tokens'])
implicit_arg2 = strip_punctuations(sent_parsed_result, list(range(0, conn_indices[0]))) + strip_punctuations(sent_parsed_result, list(range(conn_indices[-1] + 1, sent_len)))
for (arg2_indices, label) in conn_arg2s:
if label == 0:
parts = [list(), list()]
p_idx = 0
implicit_arg2_len = len(implicit_arg2)
arg2_len = len(arg2_indices)
for t_idx in implicit_arg2:
a_idx = bisect.bisect_left(arg2_indices, t_idx)
if a_idx < arg2_len and arg2_indices[a_idx] == t_idx:
p_idx = 1
else:
parts[p_idx].append(t_idx)
implicit_arg2 = strip_punctuations(sent_parsed_result, parts[0]) + strip_punctuations(sent_parsed_result, parts[1])
if len(implicit_arg2) > 0:
connective['arg2'] = {'sent_idx': sent_idx, 'indices': implicit_arg2}
else:
connective['arg2'] = {'sent_idx': sent_idx, 'indices': conn_arg2s[0][0]}
return doc_connectives
|
def _extract_argument2s(self, doc_parsed_result, doc_connectives, syntax_tree_cache=None):
if syntax_tree_cache is None:
syntax_tree_cache = dict()
doc_arg2_feats = list()
for (conn_idx, connective) in enumerate(doc_connectives):
(sent_idx, conn_indices) = (connective['sent_idx'], connective['indices'])
sent_parsed_result = doc_parsed_result[sent_idx]
sent_len = len(sent_parsed_result['tokens'])
if sent_idx in syntax_tree_cache:
syntax_tree = syntax_tree_cache[sent_idx]
else:
syntax_tree = syntax_tree_cache[sent_idx] = SyntaxTree(sent_parsed_result['parse'])
arg2_clauses = self._get_argument2_clauses(sent_parsed_result, connective, syntax_tree)
if len(arg2_clauses) == 0:
continue
conn = ' '.join([sent_parsed_result['tokens'][idx] for idx in conn_indices])
conn_lower = conn.lower()
conn_category = self.conn_category_mapping[conn_lower]
cpos = '_'.join([sent_parsed_result['pos_tags'][idx] for idx in conn_indices])
try:
conn_node = syntax_tree.get_self_category_node_by_token_indices(conn_indices)
except BaseException as e:
print(sent_parsed_result)
continue
parent_node = conn_node.up
if parent_node:
parent_category = parent_node.name
children = parent_node.get_children()
(left_node, right_node) = (None, None)
for (child_idx, child) in enumerate(children):
if conn_node == child:
if child_idx > 0:
left_node = children[child_idx - 1]
if child_idx < len(children) - 1:
right_node = children[child_idx + 1]
else:
left_node = None
right_node = None
conn_ctx = list()
conn_ctx.append(conn_node.name)
conn_ctx.append(parent_node.name if parent_node else 'NULL')
conn_ctx.append(left_node.name if left_node else 'NULL')
conn_ctx.append(right_node.name if right_node else 'NULL')
conn_ctx = '-'.join(conn_ctx)
for (clause_idx, clause) in enumerate(arg2_clauses):
clause_first = sent_parsed_result['tokens'][clause[0]]
clause_last = sent_parsed_result['tokens'][clause[-1]]
if clause[0] == 0:
prev = 'NONE'
else:
(prev_sent_idx, prev_idx) = get_prev_token_index(doc_parsed_result, sent_idx, clause[0], skip_tokens=CLAUSE_SEPARATOR_SET)
if prev_sent_idx == sent_idx:
if prev_idx + 1 == clause[0]:
prev = sent_parsed_result['tokens'][prev_idx]
else:
prev = ' '.join([sent_parsed_result['tokens'][idx] for idx in range(prev_idx + 1, clause[0])])
elif clause[0] - 1 >= 0:
prev = ' '.join([sent_parsed_result['tokens'][idx] for idx in range(0, clause[0])])
else:
prev = 'NONE'
if clause[-1] == len(sent_parsed_result['tokens']) - 1:
next = 'NONE'
else:
(next_sent_idx, next_idx) = get_next_token_index(doc_parsed_result, sent_idx, clause[-1], skip_tokens=CLAUSE_SEPARATOR_SET)
if next_sent_idx == sent_idx:
if next_idx - 1 == clause[-1]:
next = sent_parsed_result['tokens'][next_idx]
else:
next = ' '.join([sent_parsed_result['tokens'][idx] for idx in range(clause[-1] + 1, next_idx)])
elif clause[-1] + 1 < len(sent_parsed_result['tokens']):
next = ' '.join([sent_parsed_result['tokens'][idx] for idx in range(clause[-1] + 1, len(sent_parsed_result['tokens']))])
else:
next = 'NONE'
try:
clause_first_node = syntax_tree.get_leaf_node_by_token_index(clause[0]).up
if clause_idx > 0:
prev_last_node = syntax_tree.get_leaf_node_by_token_index(arg2_clauses[clause_idx - 1][-1]).up
else:
prev_last_node = None
except:
clause_first_node = None
prev_last_node = None
clause_production_rules = list()
if syntax_tree.tree:
for node in syntax_tree.get_subtree_by_token_indices(clause).tree.traverse():
if not node.is_leaf():
clause_production_rules.append(node.name + '-->' + ' '.join([child.name for child in node.get_children()]))
try:
conn_to_root_paths = list()
cparent_to_root_paths = list()
for idx in conn_indices:
node = syntax_tree.get_leaf_node_by_token_index(idx)
path = syntax_tree.get_node_path_to_root(node)
conn_to_root_paths.append(path)
parent_node = node.up
path = syntax_tree.get_node_path_to_root(parent_node)
cparent_to_root_paths.append(path)
cparent_to_root_path_node_names = chain.from_iterable([path.split('-->') for path in cparent_to_root_paths])
conn_to_root_path = '&'.join(conn_to_root_paths)
compressed_cparent_to_root_path = '&'.join([get_compressed_path(path) for path in cparent_to_root_paths])
except:
cparent_to_root_path_node_names = ['NONE_TREE']
conn_to_root_path = 'NONE_TREE'
compressed_cparent_to_root_path = 'NONE_TREE'
try:
if prev_last_node:
clause_first_prev_last_parse_path = syntax_tree.get_node_to_node_path(clause_first_node, prev_last_node)
else:
clause_first_prev_last_parse_path = 'NONE'
except:
clause_first_prev_last_parse_path = 'NONE_TREE'
arg2_feats = list()
arg2_feats.append(Feature.get_feature_by_feat_list(self.clause_production_rule_dict2, clause_production_rules))
arg2_feats.append(Feature.get_feature_by_feat(self.clause_first_dict2, clause_first))
arg2_feats.append(Feature.get_feature_by_feat(self.clause_first_prev_last_parse_path_dict2, clause_first_prev_last_parse_path))
arg2_feats.append(Feature.get_feature_by_feat(self.next_dict2, next))
arg2_feats.append(Feature.get_feature_by_feat(self.conn_to_root_path_dict2, conn_to_root_path))
arg2_feats.append(Feature.get_feature_by_feat(self.conn_dict2, conn))
arg2_feats.append(Feature.get_feature_by_feat(self.prev_dict2, prev))
arg2_feats.append(Feature.get_feature_by_feat(self.clause_last_next_dict2, clause_last + '|' + next))
arg2_feats.append(Feature.get_feature_by_feat(self.conn_lower_dict2, conn_lower))
arg2_feats.append(Feature.get_feature_by_feat(self.conn_conn_ctx_dict2, conn + '|' + conn_ctx))
arg2_feats.append(Feature.get_feature_by_feat(self.compressed_cparent_to_root_path_dict2, compressed_cparent_to_root_path))
arg2_feats.append(Feature.get_feature_by_feat(self.cpos_dict2, cpos))
arg2_feats.append(Feature.get_feature_by_feat_list(self.cparent_to_root_path_node_name_dict2, cparent_to_root_path_node_names))
arg2_feats.append(Feature.get_feature_by_feat(self.conn_category_dict, conn_category))
arg2_feats = Feature.merge_features(arg2_feats, '%d|%d|%s' % (sent_idx, conn_idx, ','.join([str(idx) for idx in clause])))
doc_arg2_feats.append(arg2_feats)
doc_arg2_feats = doc_arg2_feats
if len(doc_arg2_feats) == 0:
doc_arg2_labels = list()
names = [x.name for x in doc_arg2_feats]
feats = sparse.vstack(list(map(lambda x: x.to_csr(), doc_arg2_feats)))
pred = self.ps_arg2_model.predict(feats)
doc_arg2_labels = list(zip(names, pred))
doc_conn_arg2s = [list() for _ in range(len(doc_connectives))]
for (feats_name, label) in doc_arg2_labels:
(sent_idx, conn_idx, arg2_indices) = feats_name.split('|')
conn_idx = int(conn_idx)
arg2_indices = [int(idx) for idx in arg2_indices.split(',')]
doc_conn_arg2s[conn_idx].append((arg2_indices, label))
for (connective, conn_arg2s) in zip(doc_connectives, doc_conn_arg2s):
if len(conn_arg2s) == 0:
continue
(sent_idx, conn_indices) = (connective['sent_idx'], connective['indices'])
sent_parsed_result = doc_parsed_result[sent_idx]
sent_len = len(sent_parsed_result['tokens'])
implicit_arg2 = strip_punctuations(sent_parsed_result, list(range(0, conn_indices[0]))) + strip_punctuations(sent_parsed_result, list(range(conn_indices[-1] + 1, sent_len)))
for (arg2_indices, label) in conn_arg2s:
if label == 0:
parts = [list(), list()]
p_idx = 0
implicit_arg2_len = len(implicit_arg2)
arg2_len = len(arg2_indices)
for t_idx in implicit_arg2:
a_idx = bisect.bisect_left(arg2_indices, t_idx)
if a_idx < arg2_len and arg2_indices[a_idx] == t_idx:
p_idx = 1
else:
parts[p_idx].append(t_idx)
implicit_arg2 = strip_punctuations(sent_parsed_result, parts[0]) + strip_punctuations(sent_parsed_result, parts[1])
if len(implicit_arg2) > 0:
connective['arg2'] = {'sent_idx': sent_idx, 'indices': implicit_arg2}
else:
connective['arg2'] = {'sent_idx': sent_idx, 'indices': conn_arg2s[0][0]}
return doc_connectives
|
ASER
|
positive
|
def register_noncomposite_task_or_subtask(task_name: str, shots: int, vocab: SeqIOVocabulary, max_examples: Optional[int], subtask_name: Optional[str]=None, strip_inputs: bool=True, strip_targets: bool=True, add_inputs_eos: bool=False, add_targets_eos: bool=False, min_validation_examples: int=_GLOBAL_MIN_VALIDATION_EXAMPLES, format_fn: json_task.Formatter=json_task.default_format_fn) -> Tuple[List[str], Dict[str, int]]:
"""Register subtask, or task with no subtasks.
Args:
task_name: bigbench task to add
shots: number of shots
vocab: model vocabulary
max_examples: max number of examples per subtasks or per task if the
task has no subtasks
subtask_name: if a subtask, subtask name otherwise None
strip_inputs: strip input when preprocessing
strip_targets: strip targets when preprocessing
add_inputs_eos: add eos to input when preprocessing
add_targets_eos: add eos to targets when preprocessing
min_validation_examples: min examples to put into validation split
format_fn: a callable that formats each example.
Returns:
seqio_task_names: names of each underlying seqio_task registered
rates: dictionary encoding rate of each seqio_task
"""
formatted_task_name = utils.format_name_for_seqio(task_name)
(task_path, json_util) = bb_json_paths.get_task_path(task_name)
if subtask_name is None:
formatted_subtask_name = None
examples = bb_json_paths.get_num_examples(task_name)
task_types = bb_json_paths.get_task_types(task_name)
else:
formatted_subtask_name = utils.format_name_for_seqio(subtask_name.split(':')[-1])
examples = bb_json_paths.get_num_examples(subtask_name)
task_types = bb_json_paths.get_task_types(subtask_name)
<DeepExtract>
max_examples = min(max_examples, examples) if max_examples else max_examples
</DeepExtract>
rate = max_examples or examples
seqio_task_names = []
rates = {}
for task_type in task_types:
<DeepExtract>
seqio_task_name = get_seqio_name(formatted_task_name, task_type, vocab, shots, formatted_subtask_name, max_examples)
if seqio_task_name in seqio.TaskRegistry.names():
seqio.TaskRegistry.remove(seqio_task_name)
additional_metrics = additional_metrics or []
seqio.TaskRegistry.add(seqio_task_name, source=seqio.FunctionDataSource(bb.get_dataset_fn(task_name=formatted_task_name, task_path=task_path, subtask_name=formatted_subtask_name, num_shots=shots, bigbench_task_type=task_type, max_examples=max_examples, json_util=json_util, min_validation_examples=min_validation_examples, format_fn=format_fn), splits=['all', 'train', 'validation']), preprocessors=bb.get_preprocessors(strip_inputs=strip_inputs, strip_targets=strip_targets), output_features=bb.get_output_features(vocab=vocab.vocabulary, add_inputs_eos=add_inputs_eos, add_targets_eos=add_targets_eos), postprocess_fn=bb.get_postprocess_fn(task_name=formatted_task_name, task_path=task_path, subtask_name=formatted_subtask_name, bigbench_task_type=task_type, json_util=json_util, format_fn=format_fn), metric_fns=[bb.get_metric_fn(task_name=formatted_task_name, task_path=task_path, subtask_name=formatted_subtask_name, bigbench_task_type=task_type, json_util=json_util, format_fn=format_fn)] + additional_metrics)
seqio_task_name = seqio_task_name
</DeepExtract>
seqio_task_names.append(seqio_task_name)
rates[seqio_task_name] = rate
if len(task_types) > 1:
<DeepExtract>
task_type_prefix = bb.BigBenchTaskType.to_str(bb.BigBenchTaskType.MIX)
name = f'bigbench:{formatted_task_name}.{task_type_prefix}.{vocab.name}_vocab.{shots}_shot'
if max_examples:
name = f'{name}.{max_examples}_examples'
else:
name = f'{name}.all_examples'
if formatted_subtask_name:
name = f'{name}.{formatted_subtask_name}'
seqio_mixture_name = utils.format_name_for_seqio(name)
</DeepExtract>
if seqio_mixture_name in seqio.MixtureRegistry.names():
seqio.MixtureRegistry.remove(seqio_mixture_name)
seqio.MixtureRegistry.add(name=seqio_mixture_name, tasks=seqio_task_names, default_rate=1.0)
return (seqio_task_names, rates)
|
def register_noncomposite_task_or_subtask(task_name: str, shots: int, vocab: SeqIOVocabulary, max_examples: Optional[int], subtask_name: Optional[str]=None, strip_inputs: bool=True, strip_targets: bool=True, add_inputs_eos: bool=False, add_targets_eos: bool=False, min_validation_examples: int=_GLOBAL_MIN_VALIDATION_EXAMPLES, format_fn: json_task.Formatter=json_task.default_format_fn) -> Tuple[List[str], Dict[str, int]]:
"""Register subtask, or task with no subtasks.
Args:
task_name: bigbench task to add
shots: number of shots
vocab: model vocabulary
max_examples: max number of examples per subtasks or per task if the
task has no subtasks
subtask_name: if a subtask, subtask name otherwise None
strip_inputs: strip input when preprocessing
strip_targets: strip targets when preprocessing
add_inputs_eos: add eos to input when preprocessing
add_targets_eos: add eos to targets when preprocessing
min_validation_examples: min examples to put into validation split
format_fn: a callable that formats each example.
Returns:
seqio_task_names: names of each underlying seqio_task registered
rates: dictionary encoding rate of each seqio_task
"""
formatted_task_name = utils.format_name_for_seqio(task_name)
(task_path, json_util) = bb_json_paths.get_task_path(task_name)
if subtask_name is None:
formatted_subtask_name = None
examples = bb_json_paths.get_num_examples(task_name)
task_types = bb_json_paths.get_task_types(task_name)
else:
formatted_subtask_name = utils.format_name_for_seqio(subtask_name.split(':')[-1])
examples = bb_json_paths.get_num_examples(subtask_name)
task_types = bb_json_paths.get_task_types(subtask_name)
max_examples = min(max_examples, examples) if max_examples else max_examples
rate = max_examples or examples
seqio_task_names = []
rates = {}
for task_type in task_types:
seqio_task_name = get_seqio_name(formatted_task_name, task_type, vocab, shots, formatted_subtask_name, max_examples)
if seqio_task_name in seqio.TaskRegistry.names():
seqio.TaskRegistry.remove(seqio_task_name)
additional_metrics = additional_metrics or []
seqio.TaskRegistry.add(seqio_task_name, source=seqio.FunctionDataSource(bb.get_dataset_fn(task_name=formatted_task_name, task_path=task_path, subtask_name=formatted_subtask_name, num_shots=shots, bigbench_task_type=task_type, max_examples=max_examples, json_util=json_util, min_validation_examples=min_validation_examples, format_fn=format_fn), splits=['all', 'train', 'validation']), preprocessors=bb.get_preprocessors(strip_inputs=strip_inputs, strip_targets=strip_targets), output_features=bb.get_output_features(vocab=vocab.vocabulary, add_inputs_eos=add_inputs_eos, add_targets_eos=add_targets_eos), postprocess_fn=bb.get_postprocess_fn(task_name=formatted_task_name, task_path=task_path, subtask_name=formatted_subtask_name, bigbench_task_type=task_type, json_util=json_util, format_fn=format_fn), metric_fns=[bb.get_metric_fn(task_name=formatted_task_name, task_path=task_path, subtask_name=formatted_subtask_name, bigbench_task_type=task_type, json_util=json_util, format_fn=format_fn)] + additional_metrics)
seqio_task_name = seqio_task_name
seqio_task_names.append(seqio_task_name)
rates[seqio_task_name] = rate
if len(task_types) > 1:
task_type_prefix = bb.BigBenchTaskType.to_str(bb.BigBenchTaskType.MIX)
name = f'bigbench:{formatted_task_name}.{task_type_prefix}.{vocab.name}_vocab.{shots}_shot'
if max_examples:
name = f'{name}.{max_examples}_examples'
else:
name = f'{name}.all_examples'
if formatted_subtask_name:
name = f'{name}.{formatted_subtask_name}'
seqio_mixture_name = utils.format_name_for_seqio(name)
if seqio_mixture_name in seqio.MixtureRegistry.names():
seqio.MixtureRegistry.remove(seqio_mixture_name)
seqio.MixtureRegistry.add(name=seqio_mixture_name, tasks=seqio_task_names, default_rate=1.0)
return (seqio_task_names, rates)
|
BIG-bench
|
positive
|
@contextmanager
def allure_plugin_context():
"""Separates an allure integration under test from currently active
allure integration (if any).
"""
<DeepExtract>
name_plugin_tuples = allure_commons.plugin_manager.list_name_plugin()
for (name, plugin) in name_plugin_tuples:
allure_commons.plugin_manager.unregister(plugin=plugin, name=name)
outer_context_plugins = name_plugin_tuples
</DeepExtract>
yield
<DeepExtract>
name_plugin_tuples = allure_commons.plugin_manager.list_name_plugin()
for (name, plugin) in name_plugin_tuples:
allure_commons.plugin_manager.unregister(plugin=plugin, name=name)
return name_plugin_tuples
</DeepExtract>
<DeepExtract>
for (name, plugin) in outer_context_plugins:
allure_commons.plugin_manager.register(plugin, name)
</DeepExtract>
|
@contextmanager
def allure_plugin_context():
"""Separates an allure integration under test from currently active
allure integration (if any).
"""
name_plugin_tuples = allure_commons.plugin_manager.list_name_plugin()
for (name, plugin) in name_plugin_tuples:
allure_commons.plugin_manager.unregister(plugin=plugin, name=name)
outer_context_plugins = name_plugin_tuples
yield
name_plugin_tuples = allure_commons.plugin_manager.list_name_plugin()
for (name, plugin) in name_plugin_tuples:
allure_commons.plugin_manager.unregister(plugin=plugin, name=name)
return name_plugin_tuples
for (name, plugin) in outer_context_plugins:
allure_commons.plugin_manager.register(plugin, name)
|
allure-python
|
positive
|
def create_app(environment='dev'):
context = importlib.import_module('depc.context')
conf_cls = getattr(context, '{}Config'.format(environment.capitalize()))
conf_file = str(Path(os.getenv('DEPC_HOME', str(Path(__file__).resolve().parents[1]))) / 'depc.{}.yml'.format(environment))
<DeepExtract>
if isinstance(__name__, str):
__name__ = importlib.import_module(__name__)
results = {}
for (loader, name, is_pkg) in pkgutil.walk_packages(__name__.__path__):
if not name.startswith('_') and (not name.startswith('tests')):
full_name = __name__.__name__ + '.' + name
if any((x in __name__.__name__ for x in ('models', 'sources', 'tasks', 'apiv1'))) or any((x in name for x in ('models', 'sources', 'tasks', 'apiv1'))):
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_submodules(full_name, ('models', 'sources', 'tasks', 'apiv1')))
return results
</DeepExtract>
from depc import admin
app = ExtendedFlask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
app.config.from_object(conf_cls)
<DeepExtract>
if not os.path.isfile(conf_file):
print('Missing configuration file')
data_file = {}
if verbose:
print('Using configuration file: %s' % conf_file)
with codecs.open(conf_file, 'r', 'utf8') as file_handler:
conf = yaml.load(file_handler)
if conf is None:
conf = {}
if verbose:
print(json.dumps(conf, sort_keys=True, indent=4, separators=(',', ': ')))
data_file = conf
</DeepExtract>
app.config.update(data_file)
conf_cls.init_app(app)
from depc.apiv1 import api as apiv1_blueprint
app.register_blueprint(apiv1_blueprint, url_prefix='/v1')
return app
|
def create_app(environment='dev'):
context = importlib.import_module('depc.context')
conf_cls = getattr(context, '{}Config'.format(environment.capitalize()))
conf_file = str(Path(os.getenv('DEPC_HOME', str(Path(__file__).resolve().parents[1]))) / 'depc.{}.yml'.format(environment))
if isinstance(__name__, str):
__name__ = importlib.import_module(__name__)
results = {}
for (loader, name, is_pkg) in pkgutil.walk_packages(__name__.__path__):
if not name.startswith('_') and (not name.startswith('tests')):
full_name = __name__.__name__ + '.' + name
if any((x in __name__.__name__ for x in ('models', 'sources', 'tasks', 'apiv1'))) or any((x in name for x in ('models', 'sources', 'tasks', 'apiv1'))):
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_submodules(full_name, ('models', 'sources', 'tasks', 'apiv1')))
return results
from depc import admin
app = ExtendedFlask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
app.config.from_object(conf_cls)
if not os.path.isfile(conf_file):
print('Missing configuration file')
data_file = {}
if verbose:
print('Using configuration file: %s' % conf_file)
with codecs.open(conf_file, 'r', 'utf8') as file_handler:
conf = yaml.load(file_handler)
if conf is None:
conf = {}
if verbose:
print(json.dumps(conf, sort_keys=True, indent=4, separators=(',', ': ')))
data_file = conf
app.config.update(data_file)
conf_cls.init_app(app)
from depc.apiv1 import api as apiv1_blueprint
app.register_blueprint(apiv1_blueprint, url_prefix='/v1')
return app
|
depc
|
positive
|
def test_bad_encoded_param(self, client):
<DeepExtract>
(code, mesg) = client.ehlo(domain)
assert code == 250
return mesg
</DeepExtract>
client.send(b'MAIL FROM: <anne\xff@example.com>\r\n')
assert client.getreply() == S.S500_STRICT_ASCII
|
def test_bad_encoded_param(self, client):
(code, mesg) = client.ehlo(domain)
assert code == 250
return mesg
client.send(b'MAIL FROM: <anne\xff@example.com>\r\n')
assert client.getreply() == S.S500_STRICT_ASCII
|
aiosmtpd
|
positive
|
def tforminv(trans, uv):
"""
Function:
----------
apply the inverse of affine transform 'trans' to uv
Parameters:
----------
@trans: 3x3 np.array
transform matrix
@uv: Kx2 np.array
each row is a pair of coordinates (x, y)
Returns:
----------
@xy: Kx2 np.array
each row is a pair of inverse-transformed coordinates (x, y)
"""
Tinv = inv(trans)
<DeepExtract>
uv = np.hstack((uv, np.ones((uv.shape[0], 1))))
xy = np.dot(uv, Tinv)
xy = xy[:, 0:-1]
xy = xy
</DeepExtract>
return xy
|
def tforminv(trans, uv):
"""
Function:
----------
apply the inverse of affine transform 'trans' to uv
Parameters:
----------
@trans: 3x3 np.array
transform matrix
@uv: Kx2 np.array
each row is a pair of coordinates (x, y)
Returns:
----------
@xy: Kx2 np.array
each row is a pair of inverse-transformed coordinates (x, y)
"""
Tinv = inv(trans)
uv = np.hstack((uv, np.ones((uv.shape[0], 1))))
xy = np.dot(uv, Tinv)
xy = xy[:, 0:-1]
xy = xy
return xy
|
Cross-Resolution-Face-Recognition
|
positive
|
def progbar(iterable, desc=None, leave=1, **kwargs):
if not disable_user_interaction:
<DeepExtract>
TS_old = termios.tcgetattr(sys.stdin)
TS_new = termios.tcgetattr(sys.stdin)
TS_new[3] = TS_new[3] & ~(termios.ECHO | termios.ICANON)
set_term_settings(TS_new)
TS_old = TS_old
</DeepExtract>
try:
for i in orig_progbar(iterable, pdesc(desc), leave, **kwargs):
yield i
except GeneratorExit:
pass
if not disable_user_interaction:
<DeepExtract>
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, TS_old)
</DeepExtract>
|
def progbar(iterable, desc=None, leave=1, **kwargs):
if not disable_user_interaction:
TS_old = termios.tcgetattr(sys.stdin)
TS_new = termios.tcgetattr(sys.stdin)
TS_new[3] = TS_new[3] & ~(termios.ECHO | termios.ICANON)
set_term_settings(TS_new)
TS_old = TS_old
try:
for i in orig_progbar(iterable, pdesc(desc), leave, **kwargs):
yield i
except GeneratorExit:
pass
if not disable_user_interaction:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, TS_old)
|
DAPPER
|
positive
|
def get_child_watcher(self):
"""Get the child watcher.
If not yet set, a SafeChildWatcher object is automatically created.
"""
if self._watcher is None:
<DeepExtract>
with asyncio.events._lock:
if self._watcher is None:
self._watcher = SafeChildWatcher()
if isinstance(threading.current_thread(), threading._MainThread):
self._watcher.attach_loop(self._local._loop)
</DeepExtract>
return self._watcher
|
def get_child_watcher(self):
"""Get the child watcher.
If not yet set, a SafeChildWatcher object is automatically created.
"""
if self._watcher is None:
with asyncio.events._lock:
if self._watcher is None:
self._watcher = SafeChildWatcher()
if isinstance(threading.current_thread(), threading._MainThread):
self._watcher.attach_loop(self._local._loop)
return self._watcher
|
aiozmq
|
positive
|
def main(argv=()):
"""
The Aegean source finding program.
"""
parser = argparse.ArgumentParser(prog='aegean', prefix_chars='-')
parser.add_argument('image', nargs='?', default=None)
group1 = parser.add_argument_group('Configuration Options')
group1.add_argument('--find', dest='find', action='store_true', default=False, help='Source finding mode. [default: true, unless --save or --measure are selected]')
group1.add_argument('--hdu', dest='hdu_index', type=int, default=0, help='HDU index (0-based) for cubes with multiple images in extensions. [default: 0]')
group1.add_argument('--beam', dest='beam', type=float, nargs=3, default=None, help='The beam parameters to be used is "--beam major minor pa" all in degrees. [default: read from fits header].')
group1.add_argument('--slice', dest='slice', type=int, default=0, help='If the input data is a cube, then this slice will determine the array index of the image which will be processed by aegean')
group1.add_argument('--progress', default=False, action='store_true', help='Provide a progress bar as islands are being fit. [default: False]')
group2 = parser.add_argument_group('Input Options')
group2.add_argument('--forcerms', dest='rms', type=float, default=None, help='Assume a single image noise of rms. [default: None]')
group2.add_argument('--forcebkg', dest='bkg', type=float, default=None, help='Assume a single image background of bkg. [default: None]')
group1.add_argument('--cores', dest='cores', type=int, default=None, help='Number of CPU cores to use when calculating background and rms images [default: all cores]')
group2.add_argument('--noise', dest='noiseimg', default=None, type=str, help='A .fits file that represents the image noise (rms), created from Aegean with --save or BANE. [default: none]')
group2.add_argument('--background', dest='backgroundimg', default=None, type=str, help='A .fits file that represents the background level, created from Aegean with --save or BANE. [default: none]')
group2.add_argument('--psf', dest='imgpsf', default=None, type=str, help='A .fits file that represents the local PSF. ')
group2.add_argument('--autoload', dest='autoload', action='store_true', default=False, help="Automatically look for background, noise, region, and psf files using the input filename as a hint. [default: don't do this]")
group3 = parser.add_argument_group('Output Options')
group3.add_argument('--out', dest='outfile', default=None, type=str, help='Destination of Aegean catalog output. [default: No output]')
group3.add_argument('--table', dest='tables', default=None, type=str, help='Additional table outputs, format inferred from extension. [default: none]')
group3.add_argument('--tformats', dest='table_formats', action='store_true', default=False, help='Show a list of table formats supported in this install, and their extensions')
group3.add_argument('--blankout', dest='blank', action='store_true', default=False, help='Create a blanked output image. [Only works if cores=1].')
group3.add_argument('--colprefix', dest='column_prefix', default=None, type=str, help='Prepend each column name with "prefix_". [Default = prepend nothing]')
group4 = parser.add_argument_group('Source finding/fitting configuration options')
group4.add_argument('--maxsummits', dest='max_summits', type=float, default=None, help='If more than *maxsummits* summits are detected in an island, no fitting is done, only estimation. [default: no limit]')
group4.add_argument('--seedclip', dest='innerclip', type=float, default=5, help='The clipping value (in sigmas) for seeding islands. [default: 5]')
group4.add_argument('--floodclip', dest='outerclip', type=float, default=4, help='The clipping value (in sigmas) for growing islands. [default: 4]')
group4.add_argument('--island', dest='doislandflux', action='store_true', default=False, help='Also calculate the island flux in addition to the individual components. [default: false]')
group4.add_argument('--nopositive', dest='nopositive', action='store_true', default=False, help="Don't report sources with positive fluxes. [default: false]")
group4.add_argument('--negative', dest='negative', action='store_true', default=False, help='Report sources with negative fluxes. [default: false]')
group4.add_argument('--region', dest='region', type=str, default=None, help='Use this regions file to restrict source finding in this image.\nUse MIMAS region (.mim) files.')
group4.add_argument('--nocov', dest='docov', action='store_false', default=True, help="Don't use the covariance of the data in the fitting process. [Default = False]")
group5 = parser.add_argument_group('Priorized Fitting config options', 'in addition to the above source fitting options')
group5.add_argument('--priorized', dest='priorized', default=0, type=int, help='Enable priorized fitting level n=[1,2,3]. 1=fit flux, 2=fit flux/position, 3=fit flux/position/shape. See the GitHub wiki for more details.')
group5.add_argument('--ratio', dest='ratio', default=None, type=float, help='The ratio of synthesized beam sizes (image psf / input catalog psf). For use with priorized.')
group5.add_argument('--noregroup', dest='regroup', default=True, action='store_false', help='Do not regroup islands before priorized fitting')
group5.add_argument('--input', dest='input', type=str, default=None, help='If --priorized is used, this gives the filename for a catalog of locations at which fluxes will be measured.')
group5.add_argument('--catpsf', dest='catpsf', type=str, default=None, help='A psf map corresponding to the input catalog. This will allow for the correct resizing of sources when the catalog and image psfs differ')
group5.add_argument('--regroup-eps', dest='regroup_eps', default=None, type=float, help='The size in arcminutes that is used to regroup nearby components into a single set of components that will be solved for simultaneously')
group6 = parser.add_argument_group('Extra options')
group6.add_argument('--save', dest='save', action='store_true', default=False, help='Enable the saving of the background and noise images. Sets --find to false. [default: false]')
group6.add_argument('--outbase', dest='outbase', type=str, default=None, help='If --save is True, then this specifies the base name of the background and noise images. [default: inferred from input image]')
group6.add_argument('--debug', dest='debug', action='store_true', default=False, help='Enable debug mode. [default: false]')
group6.add_argument('--versions', dest='file_versions', action='store_true', default=False, help='Show the file versions of relevant modules. [default: false]')
group6.add_argument('--cite', dest='cite', action='store_true', default=False, help='Show citation information.')
options = parser.parse_args(args=argv)
invocation_string = ' '.join(argv)
logging.basicConfig(format='%(module)s:%(levelname)s %(message)s')
log = logging.getLogger('Aegean')
logging_level = logging.DEBUG if options.debug else logging.INFO
log.setLevel(logging_level)
log.info('This is Aegean {0}-({1})'.format(__version__, __date__))
log.debug('Run as:\n{0}'.format(invocation_string))
if options.cite:
print(__citation__)
return 0
import AegeanTools
from AegeanTools.source_finder import SourceFinder
sf = SourceFinder(log=log)
if options.table_formats:
show_formats()
return 0
if options.file_versions:
log.info('AegeanTools {0} from {1}'.format(AegeanTools.__version__, AegeanTools.__file__))
log.info('Numpy {0} from {1} '.format(np.__version__, np.__file__))
log.info('Scipy {0} from {1}'.format(scipy.__version__, scipy.__file__))
log.info('AstroPy {0} from {1}'.format(astropy.__version__, astropy.__file__))
log.info('LMFit {0} from {1}'.format(lmfit.__version__, lmfit.__file__))
return 0
if options.image is None:
parser.print_help()
return 0
filename = options.image
if not os.path.exists(filename):
log.error('{0} not found'.format(filename))
return 1
<DeepExtract>
header = astropy.io.fits.getheader(filename)
if not 'SIN' in header['CTYPE1']:
if options.imgpsf is None:
projection = header['CTYPE1'].split('-')[-1]
log.warning('For projection {0} you should consider supplying a psf via --psf'.format(projection))
return
</DeepExtract>
np.seterr(invalid='ignore', divide='ignore')
if options.nopositive and (not options.negative):
log.warning('Requested no positive sources, but no negative sources. Nothing to find.')
return 0
if (options.save or options.priorized) and (not options.find):
options.find = False
else:
options.find = True
if options.debug:
log.info('Setting cores=1 for debugging')
options.cores = 1
if options.cores is None:
options.cores = multiprocessing.cpu_count()
log.info('Found {0} cores'.format(options.cores))
log.info('Using {0} cores'.format(options.cores))
hdu_index = options.hdu_index
if hdu_index > 0:
log.info('Using hdu index {0}'.format(hdu_index))
if options.beam is not None:
beam = options.beam
options.beam = Beam(beam[0], beam[1], beam[2])
log.info('Using user supplied beam parameters')
log.info('Beam is {0} deg x {1} deg with pa {2}'.format(options.beam.a, options.beam.b, options.beam.pa))
basename = os.path.splitext(filename)[0]
if options.autoload:
files = get_aux_files(filename)
if files['bkg'] and (not options.backgroundimg):
options.backgroundimg = files['bkg']
log.info('Found background {0}'.format(options.backgroundimg))
if files['rms'] and (not options.noiseimg):
options.noiseimg = files['rms']
log.info('Found noise {0}'.format(options.noiseimg))
if files['mask'] and (not options.region):
options.region = files['mask']
log.info('Found region {0}'.format(options.region))
if files['psf'] and (not options.imgpsf):
options.imgpsf = files['psf']
log.info('Found psf {0}'.format(options.imgpsf))
if options.backgroundimg and (not os.path.exists(options.backgroundimg)):
log.error('{0} not found'.format(options.backgroundimg))
return 1
if options.noiseimg and (not os.path.exists(options.noiseimg)):
log.error('{0} not found'.format(options.noiseimg))
return 1
if options.imgpsf and (not os.path.exists(options.imgpsf)):
log.error('{0} not found'.format(options.imgpsf))
return 1
if options.catpsf and (not os.path.exists(options.catpsf)):
log.error('{0} not found'.format(options.catpsf))
return 1
if options.region is not None:
if not os.path.exists(options.region):
log.error('Region file {0} not found'.format(options.region))
return 1
if options.save:
sf.save_background_files(filename, hdu_index=hdu_index, cores=options.cores, beam=options.beam, outbase=options.outbase, bkgin=options.backgroundimg, rmsin=options.noiseimg, rms=options.rms, bkg=options.bkg, cube_index=options.slice)
return 0
if options.tables is not None:
if not check_table_formats(options.tables):
log.critical('One or more output table formats are not supported: Exiting')
return 1
if options.outfile == 'stdout':
options.outfile = sys.stdout
elif options.outfile is not None:
options.outfile = open(options.outfile, 'w')
sources = []
if options.find:
log.info('Finding sources.')
found = sf.find_sources_in_image(filename, outfile=options.outfile, hdu_index=options.hdu_index, rms=options.rms, bkg=options.bkg, max_summits=options.max_summits, innerclip=options.innerclip, outerclip=options.outerclip, cores=options.cores, rmsin=options.noiseimg, bkgin=options.backgroundimg, beam=options.beam, doislandflux=options.doislandflux, nonegative=not options.negative, nopositive=options.nopositive, mask=options.region, imgpsf=options.imgpsf, blank=options.blank, docov=options.docov, cube_index=options.slice, progress=options.progress)
if options.blank:
outname = basename + '_blank.fits'
sf.save_image(outname)
if len(found) == 0:
log.info('No sources found in image')
if options.priorized > 0:
if options.ratio is not None:
if options.ratio <= 0:
log.error('ratio must be positive definite')
return 1
if options.ratio < 1:
log.error('ratio <1 is not advised. Have fun!')
if options.input is None:
log.error('Must specify input catalog when --priorized is selected')
return 1
if not os.path.exists(options.input):
log.error('{0} not found'.format(options.input))
return 1
log.info('Priorized fitting of sources in input catalog.')
log.info('Stage = {0}'.format(options.priorized))
if options.doislandflux:
log.warning('--island requested but not yet supported for priorized fitting')
sf.priorized_fit_islands(filename, catalogue=options.input, hdu_index=options.hdu_index, rms=options.rms, bkg=options.bkg, outfile=options.outfile, bkgin=options.backgroundimg, rmsin=options.noiseimg, beam=options.beam, imgpsf=options.imgpsf, catpsf=options.catpsf, stage=options.priorized, ratio=options.ratio, outerclip=options.outerclip, cores=options.cores, doregroup=options.regroup, docov=options.docov, cube_index=options.slice, progress=options.progress, regroup_eps=options.regroup_eps)
sources = sf.sources
log.info('found {0} sources total'.format(len(sources)))
if len(sources) > 0 and options.tables:
meta = {'PROGRAM': 'Aegean', 'PROGVER': '{0}-({1})'.format(__version__, __date__), 'FITSFILE': filename, 'RUN-AS': invocation_string}
for t in options.tables.split(','):
save_catalog(t, sources, prefix=options.column_prefix, meta=meta)
return 0
|
def main(argv=()):
"""
The Aegean source finding program.
"""
parser = argparse.ArgumentParser(prog='aegean', prefix_chars='-')
parser.add_argument('image', nargs='?', default=None)
group1 = parser.add_argument_group('Configuration Options')
group1.add_argument('--find', dest='find', action='store_true', default=False, help='Source finding mode. [default: true, unless --save or --measure are selected]')
group1.add_argument('--hdu', dest='hdu_index', type=int, default=0, help='HDU index (0-based) for cubes with multiple images in extensions. [default: 0]')
group1.add_argument('--beam', dest='beam', type=float, nargs=3, default=None, help='The beam parameters to be used is "--beam major minor pa" all in degrees. [default: read from fits header].')
group1.add_argument('--slice', dest='slice', type=int, default=0, help='If the input data is a cube, then this slice will determine the array index of the image which will be processed by aegean')
group1.add_argument('--progress', default=False, action='store_true', help='Provide a progress bar as islands are being fit. [default: False]')
group2 = parser.add_argument_group('Input Options')
group2.add_argument('--forcerms', dest='rms', type=float, default=None, help='Assume a single image noise of rms. [default: None]')
group2.add_argument('--forcebkg', dest='bkg', type=float, default=None, help='Assume a single image background of bkg. [default: None]')
group1.add_argument('--cores', dest='cores', type=int, default=None, help='Number of CPU cores to use when calculating background and rms images [default: all cores]')
group2.add_argument('--noise', dest='noiseimg', default=None, type=str, help='A .fits file that represents the image noise (rms), created from Aegean with --save or BANE. [default: none]')
group2.add_argument('--background', dest='backgroundimg', default=None, type=str, help='A .fits file that represents the background level, created from Aegean with --save or BANE. [default: none]')
group2.add_argument('--psf', dest='imgpsf', default=None, type=str, help='A .fits file that represents the local PSF. ')
group2.add_argument('--autoload', dest='autoload', action='store_true', default=False, help="Automatically look for background, noise, region, and psf files using the input filename as a hint. [default: don't do this]")
group3 = parser.add_argument_group('Output Options')
group3.add_argument('--out', dest='outfile', default=None, type=str, help='Destination of Aegean catalog output. [default: No output]')
group3.add_argument('--table', dest='tables', default=None, type=str, help='Additional table outputs, format inferred from extension. [default: none]')
group3.add_argument('--tformats', dest='table_formats', action='store_true', default=False, help='Show a list of table formats supported in this install, and their extensions')
group3.add_argument('--blankout', dest='blank', action='store_true', default=False, help='Create a blanked output image. [Only works if cores=1].')
group3.add_argument('--colprefix', dest='column_prefix', default=None, type=str, help='Prepend each column name with "prefix_". [Default = prepend nothing]')
group4 = parser.add_argument_group('Source finding/fitting configuration options')
group4.add_argument('--maxsummits', dest='max_summits', type=float, default=None, help='If more than *maxsummits* summits are detected in an island, no fitting is done, only estimation. [default: no limit]')
group4.add_argument('--seedclip', dest='innerclip', type=float, default=5, help='The clipping value (in sigmas) for seeding islands. [default: 5]')
group4.add_argument('--floodclip', dest='outerclip', type=float, default=4, help='The clipping value (in sigmas) for growing islands. [default: 4]')
group4.add_argument('--island', dest='doislandflux', action='store_true', default=False, help='Also calculate the island flux in addition to the individual components. [default: false]')
group4.add_argument('--nopositive', dest='nopositive', action='store_true', default=False, help="Don't report sources with positive fluxes. [default: false]")
group4.add_argument('--negative', dest='negative', action='store_true', default=False, help='Report sources with negative fluxes. [default: false]')
group4.add_argument('--region', dest='region', type=str, default=None, help='Use this regions file to restrict source finding in this image.\nUse MIMAS region (.mim) files.')
group4.add_argument('--nocov', dest='docov', action='store_false', default=True, help="Don't use the covariance of the data in the fitting process. [Default = False]")
group5 = parser.add_argument_group('Priorized Fitting config options', 'in addition to the above source fitting options')
group5.add_argument('--priorized', dest='priorized', default=0, type=int, help='Enable priorized fitting level n=[1,2,3]. 1=fit flux, 2=fit flux/position, 3=fit flux/position/shape. See the GitHub wiki for more details.')
group5.add_argument('--ratio', dest='ratio', default=None, type=float, help='The ratio of synthesized beam sizes (image psf / input catalog psf). For use with priorized.')
group5.add_argument('--noregroup', dest='regroup', default=True, action='store_false', help='Do not regroup islands before priorized fitting')
group5.add_argument('--input', dest='input', type=str, default=None, help='If --priorized is used, this gives the filename for a catalog of locations at which fluxes will be measured.')
group5.add_argument('--catpsf', dest='catpsf', type=str, default=None, help='A psf map corresponding to the input catalog. This will allow for the correct resizing of sources when the catalog and image psfs differ')
group5.add_argument('--regroup-eps', dest='regroup_eps', default=None, type=float, help='The size in arcminutes that is used to regroup nearby components into a single set of components that will be solved for simultaneously')
group6 = parser.add_argument_group('Extra options')
group6.add_argument('--save', dest='save', action='store_true', default=False, help='Enable the saving of the background and noise images. Sets --find to false. [default: false]')
group6.add_argument('--outbase', dest='outbase', type=str, default=None, help='If --save is True, then this specifies the base name of the background and noise images. [default: inferred from input image]')
group6.add_argument('--debug', dest='debug', action='store_true', default=False, help='Enable debug mode. [default: false]')
group6.add_argument('--versions', dest='file_versions', action='store_true', default=False, help='Show the file versions of relevant modules. [default: false]')
group6.add_argument('--cite', dest='cite', action='store_true', default=False, help='Show citation information.')
options = parser.parse_args(args=argv)
invocation_string = ' '.join(argv)
logging.basicConfig(format='%(module)s:%(levelname)s %(message)s')
log = logging.getLogger('Aegean')
logging_level = logging.DEBUG if options.debug else logging.INFO
log.setLevel(logging_level)
log.info('This is Aegean {0}-({1})'.format(__version__, __date__))
log.debug('Run as:\n{0}'.format(invocation_string))
if options.cite:
print(__citation__)
return 0
import AegeanTools
from AegeanTools.source_finder import SourceFinder
sf = SourceFinder(log=log)
if options.table_formats:
show_formats()
return 0
if options.file_versions:
log.info('AegeanTools {0} from {1}'.format(AegeanTools.__version__, AegeanTools.__file__))
log.info('Numpy {0} from {1} '.format(np.__version__, np.__file__))
log.info('Scipy {0} from {1}'.format(scipy.__version__, scipy.__file__))
log.info('AstroPy {0} from {1}'.format(astropy.__version__, astropy.__file__))
log.info('LMFit {0} from {1}'.format(lmfit.__version__, lmfit.__file__))
return 0
if options.image is None:
parser.print_help()
return 0
filename = options.image
if not os.path.exists(filename):
log.error('{0} not found'.format(filename))
return 1
header = astropy.io.fits.getheader(filename)
if not 'SIN' in header['CTYPE1']:
if options.imgpsf is None:
projection = header['CTYPE1'].split('-')[-1]
log.warning('For projection {0} you should consider supplying a psf via --psf'.format(projection))
return
np.seterr(invalid='ignore', divide='ignore')
if options.nopositive and (not options.negative):
log.warning('Requested no positive sources, but no negative sources. Nothing to find.')
return 0
if (options.save or options.priorized) and (not options.find):
options.find = False
else:
options.find = True
if options.debug:
log.info('Setting cores=1 for debugging')
options.cores = 1
if options.cores is None:
options.cores = multiprocessing.cpu_count()
log.info('Found {0} cores'.format(options.cores))
log.info('Using {0} cores'.format(options.cores))
hdu_index = options.hdu_index
if hdu_index > 0:
log.info('Using hdu index {0}'.format(hdu_index))
if options.beam is not None:
beam = options.beam
options.beam = Beam(beam[0], beam[1], beam[2])
log.info('Using user supplied beam parameters')
log.info('Beam is {0} deg x {1} deg with pa {2}'.format(options.beam.a, options.beam.b, options.beam.pa))
basename = os.path.splitext(filename)[0]
if options.autoload:
files = get_aux_files(filename)
if files['bkg'] and (not options.backgroundimg):
options.backgroundimg = files['bkg']
log.info('Found background {0}'.format(options.backgroundimg))
if files['rms'] and (not options.noiseimg):
options.noiseimg = files['rms']
log.info('Found noise {0}'.format(options.noiseimg))
if files['mask'] and (not options.region):
options.region = files['mask']
log.info('Found region {0}'.format(options.region))
if files['psf'] and (not options.imgpsf):
options.imgpsf = files['psf']
log.info('Found psf {0}'.format(options.imgpsf))
if options.backgroundimg and (not os.path.exists(options.backgroundimg)):
log.error('{0} not found'.format(options.backgroundimg))
return 1
if options.noiseimg and (not os.path.exists(options.noiseimg)):
log.error('{0} not found'.format(options.noiseimg))
return 1
if options.imgpsf and (not os.path.exists(options.imgpsf)):
log.error('{0} not found'.format(options.imgpsf))
return 1
if options.catpsf and (not os.path.exists(options.catpsf)):
log.error('{0} not found'.format(options.catpsf))
return 1
if options.region is not None:
if not os.path.exists(options.region):
log.error('Region file {0} not found'.format(options.region))
return 1
if options.save:
sf.save_background_files(filename, hdu_index=hdu_index, cores=options.cores, beam=options.beam, outbase=options.outbase, bkgin=options.backgroundimg, rmsin=options.noiseimg, rms=options.rms, bkg=options.bkg, cube_index=options.slice)
return 0
if options.tables is not None:
if not check_table_formats(options.tables):
log.critical('One or more output table formats are not supported: Exiting')
return 1
if options.outfile == 'stdout':
options.outfile = sys.stdout
elif options.outfile is not None:
options.outfile = open(options.outfile, 'w')
sources = []
if options.find:
log.info('Finding sources.')
found = sf.find_sources_in_image(filename, outfile=options.outfile, hdu_index=options.hdu_index, rms=options.rms, bkg=options.bkg, max_summits=options.max_summits, innerclip=options.innerclip, outerclip=options.outerclip, cores=options.cores, rmsin=options.noiseimg, bkgin=options.backgroundimg, beam=options.beam, doislandflux=options.doislandflux, nonegative=not options.negative, nopositive=options.nopositive, mask=options.region, imgpsf=options.imgpsf, blank=options.blank, docov=options.docov, cube_index=options.slice, progress=options.progress)
if options.blank:
outname = basename + '_blank.fits'
sf.save_image(outname)
if len(found) == 0:
log.info('No sources found in image')
if options.priorized > 0:
if options.ratio is not None:
if options.ratio <= 0:
log.error('ratio must be positive definite')
return 1
if options.ratio < 1:
log.error('ratio <1 is not advised. Have fun!')
if options.input is None:
log.error('Must specify input catalog when --priorized is selected')
return 1
if not os.path.exists(options.input):
log.error('{0} not found'.format(options.input))
return 1
log.info('Priorized fitting of sources in input catalog.')
log.info('Stage = {0}'.format(options.priorized))
if options.doislandflux:
log.warning('--island requested but not yet supported for priorized fitting')
sf.priorized_fit_islands(filename, catalogue=options.input, hdu_index=options.hdu_index, rms=options.rms, bkg=options.bkg, outfile=options.outfile, bkgin=options.backgroundimg, rmsin=options.noiseimg, beam=options.beam, imgpsf=options.imgpsf, catpsf=options.catpsf, stage=options.priorized, ratio=options.ratio, outerclip=options.outerclip, cores=options.cores, doregroup=options.regroup, docov=options.docov, cube_index=options.slice, progress=options.progress, regroup_eps=options.regroup_eps)
sources = sf.sources
log.info('found {0} sources total'.format(len(sources)))
if len(sources) > 0 and options.tables:
meta = {'PROGRAM': 'Aegean', 'PROGVER': '{0}-({1})'.format(__version__, __date__), 'FITSFILE': filename, 'RUN-AS': invocation_string}
for t in options.tables.split(','):
save_catalog(t, sources, prefix=options.column_prefix, meta=meta)
return 0
|
Aegean
|
positive
|
def forward_simulate(self, input_values, k, rng=np.random.RandomState(), mpi_comm=None):
"""Adds the sampled values of both parent distributions.
Parameters
----------
input_values: list
List of input values
k: integer
The number of samples that should be sampled
rng: random number generator
The random number generator to be used.
mpi_comm: MPI communicator object
Defines the MPI communicator object for MPI parallelization. The default value is None,
meaning the forward simulation is not MPI-parallelized.
Returns
-------
list:
The first entry is True, it is always possible to sample, given two parent values. The second entry is the
difference of the parents values.
"""
return_value = []
sample_value = []
<DeepExtract>
model_samples = {}
visited_state = [False] * self.get_input_dimension()
for i in range(0, self.get_input_dimension()):
visited_state[i] = self.get_input_connector().get_model(i).visited
for i in range(0, self.get_input_dimension()):
self.get_input_connector().get_model(i).visited = False
for i in range(0, self.get_input_dimension()):
model = self.get_input_connector().get_model(i)
if not model.visited:
model_has_valid_parameters = model._check_input(model.get_input_values())
if model_has_valid_parameters:
model_samples[model] = model.forward_simulate(model.get_input_values(), k, rng=rng)
model.visited = True
else:
raise ValueError('Model %s has invalid input parameters.' % parent.name)
for i in range(0, self.get_input_dimension()):
self.get_input_connector().get_model(i).visited = visited_state[i]
model_samples = model_samples
</DeepExtract>
for i in range(k):
parameter_values = [0 for i in range(self.get_input_dimension())]
for j in range(0, self.get_input_dimension()):
model = self.get_input_connector().get_model(j)
parameter_values[j] = model_samples[model][i]
sample_value = []
for j in range(self.get_output_dimension()):
sample_value.append(parameter_values[j] - parameter_values[j + self.get_output_dimension()])
if len(sample_value) == 1:
sample_value = sample_value[0]
return_value.append(sample_value)
return return_value
|
def forward_simulate(self, input_values, k, rng=np.random.RandomState(), mpi_comm=None):
"""Adds the sampled values of both parent distributions.
Parameters
----------
input_values: list
List of input values
k: integer
The number of samples that should be sampled
rng: random number generator
The random number generator to be used.
mpi_comm: MPI communicator object
Defines the MPI communicator object for MPI parallelization. The default value is None,
meaning the forward simulation is not MPI-parallelized.
Returns
-------
list:
The first entry is True, it is always possible to sample, given two parent values. The second entry is the
difference of the parents' values.
"""
return_value = []
sample_value = []
model_samples = {}
visited_state = [False] * self.get_input_dimension()
for i in range(0, self.get_input_dimension()):
visited_state[i] = self.get_input_connector().get_model(i).visited
for i in range(0, self.get_input_dimension()):
self.get_input_connector().get_model(i).visited = False
for i in range(0, self.get_input_dimension()):
model = self.get_input_connector().get_model(i)
if not model.visited:
model_has_valid_parameters = model._check_input(model.get_input_values())
if model_has_valid_parameters:
model_samples[model] = model.forward_simulate(model.get_input_values(), k, rng=rng)
model.visited = True
else:
raise ValueError('Model %s has invalid input parameters.' % parent.name)
for i in range(0, self.get_input_dimension()):
self.get_input_connector().get_model(i).visited = visited_state[i]
model_samples = model_samples
for i in range(k):
parameter_values = [0 for i in range(self.get_input_dimension())]
for j in range(0, self.get_input_dimension()):
model = self.get_input_connector().get_model(j)
parameter_values[j] = model_samples[model][i]
sample_value = []
for j in range(self.get_output_dimension()):
sample_value.append(parameter_values[j] - parameter_values[j + self.get_output_dimension()])
if len(sample_value) == 1:
sample_value = sample_value[0]
return_value.append(sample_value)
return return_value
|
abcpy
|
positive
|
def forward_fact(self, e1, r, e2, kg):
def dist_mult_fact(E1, R, E2):
return torch.sum(E1 * R * E2, dim=1, keepdim=True)
E1_real = kg.get_entity_embeddings(e1)
R_real = kg.get_relation_embeddings(r)
E2_real = kg.get_entity_embeddings(e2)
E1_img = kg.get_entity_img_embeddings(e1)
R_img = kg.get_relation_img_embeddings(r)
E2_img = kg.get_entity_img_embeddings(e2)
<DeepExtract>
rrr = torch.sum(R_real * E1_real * E2_real, dim=1, keepdim=True)
</DeepExtract>
<DeepExtract>
rii = torch.sum(R_real * E1_img * E2_img, dim=1, keepdim=True)
</DeepExtract>
<DeepExtract>
iri = torch.sum(R_img * E1_real * E2_img, dim=1, keepdim=True)
</DeepExtract>
<DeepExtract>
iir = torch.sum(R_img * E1_img * E2_real, dim=1, keepdim=True)
</DeepExtract>
S = rrr + rii + iri - iir
S = F.sigmoid(S)
return S
|
def forward_fact(self, e1, r, e2, kg):
def dist_mult_fact(E1, R, E2):
return torch.sum(E1 * R * E2, dim=1, keepdim=True)
E1_real = kg.get_entity_embeddings(e1)
R_real = kg.get_relation_embeddings(r)
E2_real = kg.get_entity_embeddings(e2)
E1_img = kg.get_entity_img_embeddings(e1)
R_img = kg.get_relation_img_embeddings(r)
E2_img = kg.get_entity_img_embeddings(e2)
rrr = torch.sum(R_real * E1_real * E2_real, dim=1, keepdim=True)
rii = torch.sum(R_real * E1_img * E2_img, dim=1, keepdim=True)
iri = torch.sum(R_img * E1_real * E2_img, dim=1, keepdim=True)
iir = torch.sum(R_img * E1_img * E2_real, dim=1, keepdim=True)
S = rrr + rii + iri - iir
S = F.sigmoid(S)
return S
|
CPL
|
positive
|
@patch('decorators.s3')
def test_it_loads_nested_state(mock_s3):
@s3_state_store(load_keys=['Dict', 'List'], should_offload=False)
def my_func(event, *_):
return event
mock_s3.Object().get.side_effect = [{'Body': BytesIO(b'{"test": "data"}')}, {'Body': BytesIO(b'["data"]')}]
<DeepExtract>
res = {'Data': {'Dict': 's3://bucket/state/a', 'List': 's3://bucket/state/b'}}
</DeepExtract>
assert {'Data': {'Dict': {'test': 'data'}, 'List': ['data']}} == res
assert ('bucket', 'state/a') == mock_s3.Object.call_args_list[1][0]
assert ('bucket', 'state/b') == mock_s3.Object.call_args_list[2][0]
|
@patch('decorators.s3')
def test_it_loads_nested_state(mock_s3):
@s3_state_store(load_keys=['Dict', 'List'], should_offload=False)
def my_func(event, *_):
return event
mock_s3.Object().get.side_effect = [{'Body': BytesIO(b'{"test": "data"}')}, {'Body': BytesIO(b'["data"]')}]
res = {'Data': {'Dict': 's3://bucket/state/a', 'List': 's3://bucket/state/b'}}
assert {'Data': {'Dict': {'test': 'data'}, 'List': ['data']}} == res
assert ('bucket', 'state/a') == mock_s3.Object.call_args_list[1][0]
assert ('bucket', 'state/b') == mock_s3.Object.call_args_list[2][0]
|
amazon-s3-find-and-forget
|
positive
|
def run_sequentially(self, grid=None, rerun_best_setting=False, **kwargs):
if grid is None:
<DeepExtract>
curv_grid = self.tune_curv.get_grid()
damping_grid = self.tune_damping.get_grid()
grid = self._check_unique_and_combine(curv_grid, damping_grid)
</DeepExtract>
super().run_sequentially(grid, rerun_best_setting=rerun_best_setting, **kwargs)
|
def run_sequentially(self, grid=None, rerun_best_setting=False, **kwargs):
if grid is None:
curv_grid = self.tune_curv.get_grid()
damping_grid = self.tune_damping.get_grid()
grid = self._check_unique_and_combine(curv_grid, damping_grid)
super().run_sequentially(grid, rerun_best_setting=rerun_best_setting, **kwargs)
|
backpack
|
positive
|
def main(autodist):
d = autodist
fashion_mnist = tf.keras.datasets.fashion_mnist
((train_images, train_labels), (test_images, test_labels)) = fashion_mnist.load_data()
train_images = train_images[:512, :, :, None]
test_images = test_images[:512, :, :, None]
train_labels = train_labels[:512]
test_labels = test_labels[:512]
print(train_images.shape, train_labels.shape)
train_images = train_images / np.float32(255)
test_images = test_images / np.float32(255)
BUFFER_SIZE = len(train_images)
BATCH_SIZE = 32
EPOCHS = 1
train_steps_per_epoch = 8
with tf.Graph().as_default(), d.scope():
train_dataset = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
train_iterator = tf.compat.v1.data.make_one_shot_iterator(train_dataset).get_next()
model = tf.keras.Sequential([tf.keras.layers.Conv2D(32, 3, activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(10, activation='softmax')])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.SGD()
def train_step(inputs):
(x, y) = inputs
with tf.GradientTape() as tape:
y_hat = model(x, training=True)
loss = loss_fn(y, y_hat)
all_vars = []
for v in model.trainable_variables:
all_vars.append(v)
grads = optimizer.get_gradients(loss, all_vars)
update = optimizer.apply_gradients(zip(grads, all_vars))
return (loss, update)
<DeepExtract>
(x, y) = train_iterator
with tf.GradientTape() as tape:
y_hat = model(x, training=True)
loss = loss_fn(y, y_hat)
all_vars = []
for v in model.trainable_variables:
all_vars.append(v)
grads = optimizer.get_gradients(loss, all_vars)
update = optimizer.apply_gradients(zip(grads, all_vars))
fetches = (loss, update)
</DeepExtract>
sess = autodist.create_distributed_session()
for epoch in range(EPOCHS):
for _ in range(train_steps_per_epoch):
(loss, _) = sess.run(fetches)
print(f'train_loss: {loss}')
|
def main(autodist):
d = autodist
fashion_mnist = tf.keras.datasets.fashion_mnist
((train_images, train_labels), (test_images, test_labels)) = fashion_mnist.load_data()
train_images = train_images[:512, :, :, None]
test_images = test_images[:512, :, :, None]
train_labels = train_labels[:512]
test_labels = test_labels[:512]
print(train_images.shape, train_labels.shape)
train_images = train_images / np.float32(255)
test_images = test_images / np.float32(255)
BUFFER_SIZE = len(train_images)
BATCH_SIZE = 32
EPOCHS = 1
train_steps_per_epoch = 8
with tf.Graph().as_default(), d.scope():
train_dataset = tf.data.Dataset.from_tensor_slices((train_images, train_labels)).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
train_iterator = tf.compat.v1.data.make_one_shot_iterator(train_dataset).get_next()
model = tf.keras.Sequential([tf.keras.layers.Conv2D(32, 3, activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(10, activation='softmax')])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.SGD()
def train_step(inputs):
(x, y) = inputs
with tf.GradientTape() as tape:
y_hat = model(x, training=True)
loss = loss_fn(y, y_hat)
all_vars = []
for v in model.trainable_variables:
all_vars.append(v)
grads = optimizer.get_gradients(loss, all_vars)
update = optimizer.apply_gradients(zip(grads, all_vars))
return (loss, update)
(x, y) = train_iterator
with tf.GradientTape() as tape:
y_hat = model(x, training=True)
loss = loss_fn(y, y_hat)
all_vars = []
for v in model.trainable_variables:
all_vars.append(v)
grads = optimizer.get_gradients(loss, all_vars)
update = optimizer.apply_gradients(zip(grads, all_vars))
fetches = (loss, update)
sess = autodist.create_distributed_session()
for epoch in range(EPOCHS):
for _ in range(train_steps_per_epoch):
(loss, _) = sess.run(fetches)
print(f'train_loss: {loss}')
|
autodist
|
positive
|
def __init__(self):
seed = 1337
self.args = arguments.get_args_iko()
self.rl_test = False
self.start_time = time.time()
self.actions = DalEnv.Actions
self.action_space = spaces.Discrete(len(self.actions))
if self.args.use_gpu > 0 and torch.cuda.is_available():
self.device = torch.device('cuda')
torch.set_default_tensor_type(torch.cuda.FloatTensor)
else:
self.device = torch.device('cpu')
torch.set_default_tensor_type(torch.FloatTensor)
self.init_fig = False
self.n_maze_grids = None
self.grid_rows = self.args.map_size * self.args.sub_resolution
self.grid_cols = self.args.map_size * self.args.sub_resolution
self.grid_dirs = self.args.n_headings
self.collision_radius = self.args.collision_radius
num_dirs = 1
num_classes = self.args.n_lm_grids ** 2 * num_dirs
final_num_classes = num_classes
if self.args.n_pre_classes is not None:
num_classes = self.args.n_pre_classes
else:
num_classes = final_num_classes
(self.map_rows, self.map_cols) = (224, 224)
if self.args.pm_net == 'none':
self.perceptual_model = None
elif self.args.pm_net == 'densenet121':
self.perceptual_model = densenet121(pretrained=self.args.use_pretrained, drop_rate=self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'densenet169':
self.perceptual_model = densenet169(pretrained=self.args.use_pretrained, drop_rate=self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'densenet201':
self.perceptual_model = densenet201(pretrained=self.args.use_pretrained, drop_rate=self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'densenet161':
self.perceptual_model = densenet161(pretrained=self.args.use_pretrained, drop_rate=self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'resnet18s':
self.perceptual_model = resnet18s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'resnet34s':
self.perceptual_model = resnet34s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'resnet50s':
self.perceptual_model = resnet50s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'resnet101s':
self.perceptual_model = resnet101s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'resnet152s':
self.perceptual_model = resnet152s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'resnet18':
self.perceptual_model = resnet18(num_classes=num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == 'resnet34':
self.perceptual_model = resnet34(num_classes=num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == 'resnet50':
self.perceptual_model = resnet50(num_classes=num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == 'resnet101':
self.perceptual_model = resnet101(num_classes=num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == 'resnet152':
self.perceptual_model = resnet152(num_classes=num_classes)
num_ftrs = self.perceptual_model.fc.in_features
else:
raise Exception('pm-net required: resnet or densenet')
self.intri_model = intrinsic_model(self.grid_rows)
self.max_scan_range = 3.5
self.min_scan_range = 0.1
self.manhattans = []
self.manhattan = 0
self.rewards = []
self.reward = 0
self.done = 0
self.step_count = 0
self.step_max = self.args.num[2]
self.map_2d = None
self.xlim = (-3.0, 3.0)
self.ylim = (-3.0, 3.0)
if self.args.thickness == 0.0:
self.radius = 0.5 * (self.xlim[1] - self.xlim[0]) / self.args.map_size / 2 * 0.9
else:
self.radius = (self.xlim[1] - self.xlim[0]) / self.args.map_size / 2 * self.args.thickness
self.longest = float(self.grid_dirs / 2 + self.grid_rows - 1 + self.grid_cols - 1)
self.cell_size = (self.xlim[1] - self.xlim[0]) / self.grid_rows
self.heading_resol = 2 * np.pi / self.grid_dirs
self.fwd_step = self.cell_size * self.args.fwd_step
self.collision = False
self.sigma_xy = self.cell_size * 0.1
self.sigma_theta = self.heading_resol * 0.1
self.scans_over_map = np.zeros((self.grid_rows, self.grid_cols, 360))
self.scans_over_map_high = np.zeros((self.map_rows, self.map_cols, 360))
self.scan_2d = np.zeros((self.map_rows, self.map_cols))
self.scan_2d_low = np.zeros((self.grid_rows, self.grid_cols))
self.map_2d = np.zeros((self.map_rows, self.map_cols))
self.map_design = np.zeros((self.grid_rows, self.grid_cols), dtype='float')
self.map_design_tensor = torch.zeros((1, self.grid_rows, self.grid_cols), device=torch.device(self.device))
self.data_cnt = 0
self.bel_ent = np.log(1.0 / (self.grid_dirs * self.grid_rows * self.grid_cols))
self.likelihood = torch.ones((self.grid_dirs, self.grid_rows, self.grid_cols), device=torch.device(self.device))
self.likelihood = self.likelihood / self.likelihood.sum()
self.gt_likelihood_high = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.gt_likelihood_high = self.gt_likelihood_high / self.gt_likelihood_high.sum()
self.gt_likelihood_unnormalized = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.gt_likelihood_unnormalized_high = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.belief = torch.ones((self.grid_dirs, self.grid_rows, self.grid_cols), device=torch.device(self.device))
self.belief = self.belief / self.belief.sum()
self.loss_policy = 0
self.loss_value = 0
self.turtle_loc = np.zeros((self.grid_rows, self.grid_cols))
self.current_pose = Pose2d(0, 0, 0)
self.goal_pose = Pose2d(0, 0, 0)
self.last_pose = Pose2d(0, 0, 0)
self.perturbed_goal_pose = Pose2d(0, 0, 0)
self.start_pose = Pose2d(0, 0, 0)
self.true_grid = Grid(head=0, row=0, col=0)
self.bel_grid = Grid(head=0, row=0, col=0)
self.reward_block_penalty = 0
self.reward_bel_gt = 0
self.reward_bel_gt_nonlog = 0
self.reward_infogain = 0
self.reward_bel_ent = 0
self.reward_hit = 0
self.reward_dist = 0
self.reward_inv_dist = 0
self.actions_space = list(('turn_left', 'turn_right', 'go_fwd', 'hold'))
self.action_name = 'none'
self.current_state = 'new_env_pose'
self.state = np.zeros((6, self.grid_rows, self.grid_cols), dtype=np.float32)
self.observation_space = spaces.Box(low=0, high=1, shape=self.state.shape, dtype='float32')
<DeepExtract>
(self.np_random, _) = seeding.np_random(seed)
return [seed]
</DeepExtract>
<DeepExtract>
self.clear_objects()
self.set_walls()
self.place_turtle()
self.get_lidar()
self.get_scan_2d()
self.step_count = 0
self.gt_likelihood_high = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.gt_likelihood_high = self.gt_likelihood_high / self.gt_likelihood_high.sum()
self.gt_likelihood_unnormalized = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.gt_likelihood_unnormalized_high = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.rewards = []
self.manhattans = []
self.reward = 0
self.explored_space = np.zeros((self.grid_dirs, self.grid_rows, self.grid_cols), dtype='float')
self.bel_list = []
self.state[0, :, :] = self.map_design
ding = self.belief.detach().cpu().numpy()
self.state[1:5, :, :] = ding
self.state[5, :, :] = self.scan_2d_low
return self.state
</DeepExtract>
|
def __init__(self):
seed = 1337
self.args = arguments.get_args_iko()
self.rl_test = False
self.start_time = time.time()
self.actions = DalEnv.Actions
self.action_space = spaces.Discrete(len(self.actions))
if self.args.use_gpu > 0 and torch.cuda.is_available():
self.device = torch.device('cuda')
torch.set_default_tensor_type(torch.cuda.FloatTensor)
else:
self.device = torch.device('cpu')
torch.set_default_tensor_type(torch.FloatTensor)
self.init_fig = False
self.n_maze_grids = None
self.grid_rows = self.args.map_size * self.args.sub_resolution
self.grid_cols = self.args.map_size * self.args.sub_resolution
self.grid_dirs = self.args.n_headings
self.collision_radius = self.args.collision_radius
num_dirs = 1
num_classes = self.args.n_lm_grids ** 2 * num_dirs
final_num_classes = num_classes
if self.args.n_pre_classes is not None:
num_classes = self.args.n_pre_classes
else:
num_classes = final_num_classes
(self.map_rows, self.map_cols) = (224, 224)
if self.args.pm_net == 'none':
self.perceptual_model = None
elif self.args.pm_net == 'densenet121':
self.perceptual_model = densenet121(pretrained=self.args.use_pretrained, drop_rate=self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'densenet169':
self.perceptual_model = densenet169(pretrained=self.args.use_pretrained, drop_rate=self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'densenet201':
self.perceptual_model = densenet201(pretrained=self.args.use_pretrained, drop_rate=self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'densenet161':
self.perceptual_model = densenet161(pretrained=self.args.use_pretrained, drop_rate=self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'resnet18s':
self.perceptual_model = resnet18s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'resnet34s':
self.perceptual_model = resnet34s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'resnet50s':
self.perceptual_model = resnet50s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'resnet101s':
self.perceptual_model = resnet101s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'resnet152s':
self.perceptual_model = resnet152s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == 'resnet18':
self.perceptual_model = resnet18(num_classes=num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == 'resnet34':
self.perceptual_model = resnet34(num_classes=num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == 'resnet50':
self.perceptual_model = resnet50(num_classes=num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == 'resnet101':
self.perceptual_model = resnet101(num_classes=num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == 'resnet152':
self.perceptual_model = resnet152(num_classes=num_classes)
num_ftrs = self.perceptual_model.fc.in_features
else:
raise Exception('pm-net required: resnet or densenet')
self.intri_model = intrinsic_model(self.grid_rows)
self.max_scan_range = 3.5
self.min_scan_range = 0.1
self.manhattans = []
self.manhattan = 0
self.rewards = []
self.reward = 0
self.done = 0
self.step_count = 0
self.step_max = self.args.num[2]
self.map_2d = None
self.xlim = (-3.0, 3.0)
self.ylim = (-3.0, 3.0)
if self.args.thickness == 0.0:
self.radius = 0.5 * (self.xlim[1] - self.xlim[0]) / self.args.map_size / 2 * 0.9
else:
self.radius = (self.xlim[1] - self.xlim[0]) / self.args.map_size / 2 * self.args.thickness
self.longest = float(self.grid_dirs / 2 + self.grid_rows - 1 + self.grid_cols - 1)
self.cell_size = (self.xlim[1] - self.xlim[0]) / self.grid_rows
self.heading_resol = 2 * np.pi / self.grid_dirs
self.fwd_step = self.cell_size * self.args.fwd_step
self.collision = False
self.sigma_xy = self.cell_size * 0.1
self.sigma_theta = self.heading_resol * 0.1
self.scans_over_map = np.zeros((self.grid_rows, self.grid_cols, 360))
self.scans_over_map_high = np.zeros((self.map_rows, self.map_cols, 360))
self.scan_2d = np.zeros((self.map_rows, self.map_cols))
self.scan_2d_low = np.zeros((self.grid_rows, self.grid_cols))
self.map_2d = np.zeros((self.map_rows, self.map_cols))
self.map_design = np.zeros((self.grid_rows, self.grid_cols), dtype='float')
self.map_design_tensor = torch.zeros((1, self.grid_rows, self.grid_cols), device=torch.device(self.device))
self.data_cnt = 0
self.bel_ent = np.log(1.0 / (self.grid_dirs * self.grid_rows * self.grid_cols))
self.likelihood = torch.ones((self.grid_dirs, self.grid_rows, self.grid_cols), device=torch.device(self.device))
self.likelihood = self.likelihood / self.likelihood.sum()
self.gt_likelihood_high = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.gt_likelihood_high = self.gt_likelihood_high / self.gt_likelihood_high.sum()
self.gt_likelihood_unnormalized = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.gt_likelihood_unnormalized_high = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.belief = torch.ones((self.grid_dirs, self.grid_rows, self.grid_cols), device=torch.device(self.device))
self.belief = self.belief / self.belief.sum()
self.loss_policy = 0
self.loss_value = 0
self.turtle_loc = np.zeros((self.grid_rows, self.grid_cols))
self.current_pose = Pose2d(0, 0, 0)
self.goal_pose = Pose2d(0, 0, 0)
self.last_pose = Pose2d(0, 0, 0)
self.perturbed_goal_pose = Pose2d(0, 0, 0)
self.start_pose = Pose2d(0, 0, 0)
self.true_grid = Grid(head=0, row=0, col=0)
self.bel_grid = Grid(head=0, row=0, col=0)
self.reward_block_penalty = 0
self.reward_bel_gt = 0
self.reward_bel_gt_nonlog = 0
self.reward_infogain = 0
self.reward_bel_ent = 0
self.reward_hit = 0
self.reward_dist = 0
self.reward_inv_dist = 0
self.actions_space = list(('turn_left', 'turn_right', 'go_fwd', 'hold'))
self.action_name = 'none'
self.current_state = 'new_env_pose'
self.state = np.zeros((6, self.grid_rows, self.grid_cols), dtype=np.float32)
self.observation_space = spaces.Box(low=0, high=1, shape=self.state.shape, dtype='float32')
(self.np_random, _) = seeding.np_random(seed)
return [seed]
self.clear_objects()
self.set_walls()
self.place_turtle()
self.get_lidar()
self.get_scan_2d()
self.step_count = 0
self.gt_likelihood_high = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.gt_likelihood_high = self.gt_likelihood_high / self.gt_likelihood_high.sum()
self.gt_likelihood_unnormalized = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.gt_likelihood_unnormalized_high = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.rewards = []
self.manhattans = []
self.reward = 0
self.explored_space = np.zeros((self.grid_dirs, self.grid_rows, self.grid_cols), dtype='float')
self.bel_list = []
self.state[0, :, :] = self.map_design
ding = self.belief.detach().cpu().numpy()
self.state[1:5, :, :] = ding
self.state[5, :, :] = self.scan_2d_low
return self.state
|
dal
|
positive
|
def on_good_fix(self, core, gps_data, distance, bearing):
self.gps_data = gps_data
self.gps_last_good_fix = gps_data
self.gps_has_fix = True
self.gps_target_distance = distance
self.gps_target_bearing = bearing
if self.map.dragging:
return
if self.follow_position and (not self.map.set_center_lazy(self.gps_data.position)) or not self.follow_position:
<DeepExtract>
pass
</DeepExtract>
self.map.refresh()
|
def on_good_fix(self, core, gps_data, distance, bearing):
self.gps_data = gps_data
self.gps_last_good_fix = gps_data
self.gps_has_fix = True
self.gps_target_distance = distance
self.gps_target_bearing = bearing
if self.map.dragging:
return
if self.follow_position and (not self.map.set_center_lazy(self.gps_data.position)) or not self.follow_position:
pass
self.map.refresh()
|
advancedcaching
|
positive
|
def pad_subword_embeddings_batch(self, batch: List[Tuple[Tuple[List, List], List]], teacher_forcing: bool=False) -> Union[Tuple[Tuple[torch.Tensor, List, List], torch.Tensor], Tuple[Tuple[torch.Tensor, List, List, torch.Tensor], torch.Tensor]]:
"""
Method to pad a batch of subword embeddings sequences and their targets to the length of the longest one.
Args:
batch (list[Tuple[Tuple[list, list], list]]): a list of tuples containing the two following elements:
- a tuple where the first element is a list of words represented as subword embeddings and the
second element is a list of the number of subword embeddings that each word is decomposed into.
- a list of targets.
teacher_forcing (bool): if True, the padded target vectors are returned twice,
once with the sequences and their lengths, and once on their own. This enables
the use of teacher forcing during the training of sequence to sequence models.
Return:
A tuple of two elements:
- A tuple (``x``, ``y`` , ``z``). The element ``x`` is a :class:`~torch.Tensor` of
padded subword vectors,``y`` is a list of padded decomposition lengths,
and ``z`` is a list of the original lengths of the sequences
before padding. If teacher_forcing is True, a fourth element is added which
corresponds to a :class:`~torch.Tensor` of the padded targets. For details
on the padding of sequences, check out :meth:`~DataPadder.pad_subword_embeddings_sequences` below.
The returned sequences are sorted in decreasing order.
- a :class:`~torch.Tensor` containing the padded targets.
"""
<DeepExtract>
sorted_batch = sorted(batch, key=lambda x: len(x[0][1]), reverse=True)
(sequence_batch, target_batch) = zip(*sorted_batch)
(sequences_tuples, target_vectors) = (sequence_batch, target_batch)
</DeepExtract>
<DeepExtract>
(sequences_vectors, decomp_len, lengths) = zip(*[(torch.tensor(np.array(vectors)), word_decomposition_len, len(vectors)) for (vectors, word_decomposition_len) in sequences_tuples])
padded_sequences_vectors = self._pad_tensors(sequences_vectors)
longest_sequence_length = max(lengths)
for decomposition_length in decomp_len:
if len(decomposition_length) < longest_sequence_length:
decomposition_length.extend([1] * (longest_sequence_length - len(decomposition_length)))
(padded_sequences, decomposition_lengths, sequence_lengths) = (padded_sequences_vectors, list(decomp_len), list(lengths))
</DeepExtract>
<DeepExtract>
target_vectors = map(torch.tensor, target_vectors)
padded_target_vectors = self._pad_tensors(target_vectors)
</DeepExtract>
if teacher_forcing:
return ((padded_sequences, decomposition_lengths, sequence_lengths, padded_target_vectors), padded_target_vectors)
return ((padded_sequences, decomposition_lengths, sequence_lengths), padded_target_vectors)
|
def pad_subword_embeddings_batch(self, batch: List[Tuple[Tuple[List, List], List]], teacher_forcing: bool=False) -> Union[Tuple[Tuple[torch.Tensor, List, List], torch.Tensor], Tuple[Tuple[torch.Tensor, List, List, torch.Tensor], torch.Tensor]]:
"""
Method to pad a batch of subword embeddings sequences and their targets to the length of the longest one.
Args:
batch (list[Tuple[Tuple[list, list], list]]): a list of tuples containing the two following elements:
- a tuple where the first element is a list of words represented as subword embeddings and the
second element is a list of the number of subword embeddings that each word is decomposed into.
- a list of targets.
teacher_forcing (bool): if True, the padded target vectors are returned twice,
once with the sequences and their lengths, and once on their own. This enables
the use of teacher forcing during the training of sequence to sequence models.
Return:
A tuple of two elements:
- A tuple (``x``, ``y`` , ``z``). The element ``x`` is a :class:`~torch.Tensor` of
padded subword vectors,``y`` is a list of padded decomposition lengths,
and ``z`` is a list of the original lengths of the sequences
before padding. If teacher_forcing is True, a fourth element is added which
corresponds to a :class:`~torch.Tensor` of the padded targets. For details
on the padding of sequences, check out :meth:`~DataPadder.pad_subword_embeddings_sequences` below.
The returned sequences are sorted in decreasing order.
- a :class:`~torch.Tensor` containing the padded targets.
"""
sorted_batch = sorted(batch, key=lambda x: len(x[0][1]), reverse=True)
(sequence_batch, target_batch) = zip(*sorted_batch)
(sequences_tuples, target_vectors) = (sequence_batch, target_batch)
(sequences_vectors, decomp_len, lengths) = zip(*[(torch.tensor(np.array(vectors)), word_decomposition_len, len(vectors)) for (vectors, word_decomposition_len) in sequences_tuples])
padded_sequences_vectors = self._pad_tensors(sequences_vectors)
longest_sequence_length = max(lengths)
for decomposition_length in decomp_len:
if len(decomposition_length) < longest_sequence_length:
decomposition_length.extend([1] * (longest_sequence_length - len(decomposition_length)))
(padded_sequences, decomposition_lengths, sequence_lengths) = (padded_sequences_vectors, list(decomp_len), list(lengths))
target_vectors = map(torch.tensor, target_vectors)
padded_target_vectors = self._pad_tensors(target_vectors)
if teacher_forcing:
return ((padded_sequences, decomposition_lengths, sequence_lengths, padded_target_vectors), padded_target_vectors)
return ((padded_sequences, decomposition_lengths, sequence_lengths), padded_target_vectors)
|
deepparse
|
positive
|
def f_log_seq_info(self):
""" After m_data_length has been created, create seq_info
"""
for file_name in self.m_file_list:
if file_name not in self.m_data_length:
nii_warn.f_eprint('Exclude %s from dataset' % file_name)
continue
length_remain = self.m_data_length[file_name]
start_pos = 0
seg_idx = 0
if self.m_truncate_seq is not None:
while length_remain > 0:
info_idx = len(self.m_seq_info)
seg_length = min(self.m_truncate_seq, length_remain)
seq_info = nii_seqinfo.SeqInfo(seg_length, file_name, seg_idx, start_pos, info_idx)
if self.m_min_seq_len is None or seg_length >= self.m_min_seq_len:
self.m_seq_info.append(seq_info)
seg_idx += 1
start_pos += seg_length
length_remain -= seg_length
else:
info_idx = len(self.m_seq_info)
seq_info = nii_seqinfo.SeqInfo(length_remain, file_name, seg_idx, start_pos, info_idx)
if self.m_min_seq_len is None or length_remain >= self.m_min_seq_len:
self.m_seq_info.append(seq_info)
<DeepExtract>
self.m_data_total_length = sum([x.seq_length() for x in self.m_seq_info])
</DeepExtract>
return
|
def f_log_seq_info(self):
""" After m_data_length has been created, create seq_info
"""
for file_name in self.m_file_list:
if file_name not in self.m_data_length:
nii_warn.f_eprint('Exclude %s from dataset' % file_name)
continue
length_remain = self.m_data_length[file_name]
start_pos = 0
seg_idx = 0
if self.m_truncate_seq is not None:
while length_remain > 0:
info_idx = len(self.m_seq_info)
seg_length = min(self.m_truncate_seq, length_remain)
seq_info = nii_seqinfo.SeqInfo(seg_length, file_name, seg_idx, start_pos, info_idx)
if self.m_min_seq_len is None or seg_length >= self.m_min_seq_len:
self.m_seq_info.append(seq_info)
seg_idx += 1
start_pos += seg_length
length_remain -= seg_length
else:
info_idx = len(self.m_seq_info)
seq_info = nii_seqinfo.SeqInfo(length_remain, file_name, seg_idx, start_pos, info_idx)
if self.m_min_seq_len is None or length_remain >= self.m_min_seq_len:
self.m_seq_info.append(seq_info)
self.m_data_total_length = sum([x.seq_length() for x in self.m_seq_info])
return
|
2021
|
positive
|
def on_context_menu(web_view, menu):
"""Populate context menu, given the context/configuration."""
window = web_view.window()
try:
atts_button = web_view.editor.widget.findChild(gui.Button)
except AttributeError:
atts_button = None
say_text = config['presets'] and strip(web_view.selectedText())
submenu = aqt.qt.QMenu('Awesome&TTS', menu)
submenu.setIcon(gui.ICON)
needs_separator = False
if atts_button:
submenu.addAction('Add MP3 to the Note', lambda : atts_button.click() if atts_button.isEnabled() else aqt.utils.showWarning('Select the note field to which you want to add an MP3.', window))
needs_separator = True
if say_text:
say_display = say_text if len(say_text) < 25 else say_text[0:20].rstrip(' .') + '...'
if config['presets']:
if needs_separator:
submenu.addSeparator()
else:
needs_separator = True
def preset_glue(xxx_todo_changeme):
"""Closure for callback handler to access `preset`."""
(name, preset) = xxx_todo_changeme
submenu.addAction('Say "%s" w/ %s' % (say_display, name), lambda : say_text_preset_handler(say_text, preset, window))
for item in sorted(config['presets'].items(), key=lambda item: item[0].lower()):
<DeepExtract>
(name, preset) = item
submenu.addAction('Say "%s" w/ %s' % (say_display, name), lambda : say_text_preset_handler(say_text, preset, window))
</DeepExtract>
if config['groups']:
if needs_separator:
submenu.addSeparator()
else:
needs_separator = True
def group_glue(xxx_todo_changeme1):
"""Closure for callback handler to access `group`."""
(name, group) = xxx_todo_changeme1
submenu.addAction('Say "%s" w/ %s' % (say_display, name), lambda : say_text_group_handler(say_text, group, window))
for item in sorted(config['groups'].items(), key=lambda item: item[0].lower()):
<DeepExtract>
(name, group) = item
submenu.addAction('Say "%s" w/ %s' % (say_display, name), lambda : say_text_group_handler(say_text, group, window))
</DeepExtract>
menu.addMenu(submenu)
|
def on_context_menu(web_view, menu):
"""Populate context menu, given the context/configuration."""
window = web_view.window()
try:
atts_button = web_view.editor.widget.findChild(gui.Button)
except AttributeError:
atts_button = None
say_text = config['presets'] and strip(web_view.selectedText())
submenu = aqt.qt.QMenu('Awesome&TTS', menu)
submenu.setIcon(gui.ICON)
needs_separator = False
if atts_button:
submenu.addAction('Add MP3 to the Note', lambda : atts_button.click() if atts_button.isEnabled() else aqt.utils.showWarning('Select the note field to which you want to add an MP3.', window))
needs_separator = True
if say_text:
say_display = say_text if len(say_text) < 25 else say_text[0:20].rstrip(' .') + '...'
if config['presets']:
if needs_separator:
submenu.addSeparator()
else:
needs_separator = True
def preset_glue(xxx_todo_changeme):
"""Closure for callback handler to access `preset`."""
(name, preset) = xxx_todo_changeme
submenu.addAction('Say "%s" w/ %s' % (say_display, name), lambda : say_text_preset_handler(say_text, preset, window))
for item in sorted(config['presets'].items(), key=lambda item: item[0].lower()):
(name, preset) = item
submenu.addAction('Say "%s" w/ %s' % (say_display, name), lambda : say_text_preset_handler(say_text, preset, window))
if config['groups']:
if needs_separator:
submenu.addSeparator()
else:
needs_separator = True
def group_glue(xxx_todo_changeme1):
"""Closure for callback handler to access `group`."""
(name, group) = xxx_todo_changeme1
submenu.addAction('Say "%s" w/ %s' % (say_display, name), lambda : say_text_group_handler(say_text, group, window))
for item in sorted(config['groups'].items(), key=lambda item: item[0].lower()):
(name, group) = item
submenu.addAction('Say "%s" w/ %s' % (say_display, name), lambda : say_text_group_handler(say_text, group, window))
menu.addMenu(submenu)
|
awesometts-anki-addon
|
positive
|
def get_source_power(self):
"""Get the source power.
In general, FDFD_TE.source_power should be used to access the source
power instead. The source power is computed following every forward
solve, so calling this function explicitly is generally unnecessary.
Notes
-----
1. The source power is computed using the interpolated fields.
Returns
-------
float
Electromagnetic power generated by source.
"""
w_pml_l = self._w_pml_left
w_pml_r = self._w_pml_right
w_pml_t = self._w_pml_top
w_pml_b = self._w_pml_bottom
M = self._M
N = self._N
dx = self._dx
dy = self._dy
<DeepExtract>
bc = self._bc
if RANK != 0:
Ez = MathDummy()
if domain is not None:
j = domain.j
k = domain.k
if 'Ez' == 'Ez':
if domain is not None:
Ez = self.Ez[j, k]
else:
Ez = np.copy(self.Ez)
elif 'Ez' == 'Hx':
Hx = np.pad(self.Hx, 1, 'constant', constant_values=0)
if bc[1] == 'E':
Hx[0, :] = -1 * Hx[1, :]
elif bc[1] == 'H':
Hx[0, :] = Hx[1, :]
elif bc[1] == 'P':
Hx[0, :] = Hx[-2, :]
Hx[-1, :] = Hx[1, :]
Hx0 = np.copy(Hx)
Hx0[1:, :] += Hx[0:-1, :]
Hx0 = Hx0[1:-1, 1:-1]
if domain is not None:
Ez = Hx0[j, k] / 2.0
else:
Ez = Hx0 / 2.0
elif 'Ez' == 'Hy':
Hy = np.pad(self.Hy, 1, 'constant', constant_values=0)
if bc[0] == 'E':
Hy[:, 0] = -1 * Hy[:, 1]
elif bc[0] == 'H':
Hy[:, 0] = Hy[:, 1]
elif bc[0] == 'P':
Hy[:, 0] = Hy[:, -2]
Hy[:, -1] = Hy[:, 1]
Hy0 = np.copy(Hy)
Hy0[:, 0:-1] += Hy[:, 1:]
Hy0 = Hy0[1:-1, 1:-1]
if domain is not None:
Ez = Hy0[j, k] / 2.0
else:
Ez = Hy0 / 2.0
else:
raise ValueError('Unrecongnized field componenet "%s". The allowedfield components are Ez, Hx, Hy.' % 'Ez')
</DeepExtract>
<DeepExtract>
bc = self._bc
if RANK != 0:
Hx = MathDummy()
if domain is not None:
j = domain.j
k = domain.k
if 'Hx' == 'Ez':
if domain is not None:
Hx = self.Ez[j, k]
else:
Hx = np.copy(self.Ez)
elif 'Hx' == 'Hx':
Hx = np.pad(self.Hx, 1, 'constant', constant_values=0)
if bc[1] == 'E':
Hx[0, :] = -1 * Hx[1, :]
elif bc[1] == 'H':
Hx[0, :] = Hx[1, :]
elif bc[1] == 'P':
Hx[0, :] = Hx[-2, :]
Hx[-1, :] = Hx[1, :]
Hx0 = np.copy(Hx)
Hx0[1:, :] += Hx[0:-1, :]
Hx0 = Hx0[1:-1, 1:-1]
if domain is not None:
Hx = Hx0[j, k] / 2.0
else:
Hx = Hx0 / 2.0
elif 'Hx' == 'Hy':
Hy = np.pad(self.Hy, 1, 'constant', constant_values=0)
if bc[0] == 'E':
Hy[:, 0] = -1 * Hy[:, 1]
elif bc[0] == 'H':
Hy[:, 0] = Hy[:, 1]
elif bc[0] == 'P':
Hy[:, 0] = Hy[:, -2]
Hy[:, -1] = Hy[:, 1]
Hy0 = np.copy(Hy)
Hy0[:, 0:-1] += Hy[:, 1:]
Hy0 = Hy0[1:-1, 1:-1]
if domain is not None:
Hx = Hy0[j, k] / 2.0
else:
Hx = Hy0 / 2.0
else:
raise ValueError('Unrecongnized field componenet "%s". The allowedfield components are Ez, Hx, Hy.' % 'Hx')
</DeepExtract>
<DeepExtract>
bc = self._bc
if RANK != 0:
Hy = MathDummy()
if domain is not None:
j = domain.j
k = domain.k
if 'Hy' == 'Ez':
if domain is not None:
Hy = self.Ez[j, k]
else:
Hy = np.copy(self.Ez)
elif 'Hy' == 'Hx':
Hx = np.pad(self.Hx, 1, 'constant', constant_values=0)
if bc[1] == 'E':
Hx[0, :] = -1 * Hx[1, :]
elif bc[1] == 'H':
Hx[0, :] = Hx[1, :]
elif bc[1] == 'P':
Hx[0, :] = Hx[-2, :]
Hx[-1, :] = Hx[1, :]
Hx0 = np.copy(Hx)
Hx0[1:, :] += Hx[0:-1, :]
Hx0 = Hx0[1:-1, 1:-1]
if domain is not None:
Hy = Hx0[j, k] / 2.0
else:
Hy = Hx0 / 2.0
elif 'Hy' == 'Hy':
Hy = np.pad(self.Hy, 1, 'constant', constant_values=0)
if bc[0] == 'E':
Hy[:, 0] = -1 * Hy[:, 1]
elif bc[0] == 'H':
Hy[:, 0] = Hy[:, 1]
elif bc[0] == 'P':
Hy[:, 0] = Hy[:, -2]
Hy[:, -1] = Hy[:, 1]
Hy0 = np.copy(Hy)
Hy0[:, 0:-1] += Hy[:, 1:]
Hy0 = Hy0[1:-1, 1:-1]
if domain is not None:
Hy = Hy0[j, k] / 2.0
else:
Hy = Hy0 / 2.0
else:
raise ValueError('Unrecongnized field componenet "%s". The allowedfield components are Ez, Hx, Hy.' % 'Hy')
</DeepExtract>
if RANK != 0:
return MathDummy()
Sx = -0.5 * (Ez * np.conj(Hy)).real
Sy = 0.5 * (Ez * np.conj(Hx)).real
S1 = -Sy[w_pml_b, w_pml_l:N - 1 - w_pml_r]
S2 = Sy[M - 1 - w_pml_t, w_pml_l:N - 1 - w_pml_r]
S3 = -Sx[w_pml_b:M - 1 - w_pml_t, w_pml_l]
S4 = Sx[w_pml_b:M - 1 - w_pml_t, N - 1 - w_pml_r]
P_S = np.sum(S1 + S2) * dx + np.sum(S3 + S4) * dy
x_all = np.arange(w_pml_l, N - w_pml_r)
y_all = np.arange(w_pml_b, M - w_pml_t)
y_all = y_all.reshape(y_all.shape[0], 1).astype(np.int)
if not self.real_materials:
eps = self._eps.get_values(0, N, 0, M)
mu = self._mu.get_values(0, N, 0, M)
else:
eps = np.zeros(Ez.shape, dtype=np.complex128)
mu = np.zeros(Ez.shape, dtype=np.complex128)
Ez2 = Ez[y_all, x_all] * np.conj(Ez[y_all, x_all])
Hx2 = Hx[y_all, x_all] * np.conj(Hx[y_all, x_all])
Hy2 = Hy[y_all, x_all] * np.conj(Hy[y_all, x_all])
P_loss = 0.25 * dx * dy * np.sum(eps[y_all, x_all].imag * Ez2 + mu[y_all, x_all].imag * (Hx2 + Hy2))
return P_S + P_loss.real
|
def get_source_power(self):
"""Get the source power.
In general, FDFD_TE.source_power should be used to access the source
power instead. The source power is computed following every forward
solve, so calling this function explicitly is generally unnecessary.
Notes
-----
1. The source power is computed using the interpolated fields.
Returns
-------
float
Electromagnetic power generated by source.
"""
w_pml_l = self._w_pml_left
w_pml_r = self._w_pml_right
w_pml_t = self._w_pml_top
w_pml_b = self._w_pml_bottom
M = self._M
N = self._N
dx = self._dx
dy = self._dy
bc = self._bc
if RANK != 0:
Ez = MathDummy()
if domain is not None:
j = domain.j
k = domain.k
if 'Ez' == 'Ez':
if domain is not None:
Ez = self.Ez[j, k]
else:
Ez = np.copy(self.Ez)
elif 'Ez' == 'Hx':
Hx = np.pad(self.Hx, 1, 'constant', constant_values=0)
if bc[1] == 'E':
Hx[0, :] = -1 * Hx[1, :]
elif bc[1] == 'H':
Hx[0, :] = Hx[1, :]
elif bc[1] == 'P':
Hx[0, :] = Hx[-2, :]
Hx[-1, :] = Hx[1, :]
Hx0 = np.copy(Hx)
Hx0[1:, :] += Hx[0:-1, :]
Hx0 = Hx0[1:-1, 1:-1]
if domain is not None:
Ez = Hx0[j, k] / 2.0
else:
Ez = Hx0 / 2.0
elif 'Ez' == 'Hy':
Hy = np.pad(self.Hy, 1, 'constant', constant_values=0)
if bc[0] == 'E':
Hy[:, 0] = -1 * Hy[:, 1]
elif bc[0] == 'H':
Hy[:, 0] = Hy[:, 1]
elif bc[0] == 'P':
Hy[:, 0] = Hy[:, -2]
Hy[:, -1] = Hy[:, 1]
Hy0 = np.copy(Hy)
Hy0[:, 0:-1] += Hy[:, 1:]
Hy0 = Hy0[1:-1, 1:-1]
if domain is not None:
Ez = Hy0[j, k] / 2.0
else:
Ez = Hy0 / 2.0
else:
raise ValueError('Unrecongnized field componenet "%s". The allowedfield components are Ez, Hx, Hy.' % 'Ez')
bc = self._bc
if RANK != 0:
Hx = MathDummy()
if domain is not None:
j = domain.j
k = domain.k
if 'Hx' == 'Ez':
if domain is not None:
Hx = self.Ez[j, k]
else:
Hx = np.copy(self.Ez)
elif 'Hx' == 'Hx':
Hx = np.pad(self.Hx, 1, 'constant', constant_values=0)
if bc[1] == 'E':
Hx[0, :] = -1 * Hx[1, :]
elif bc[1] == 'H':
Hx[0, :] = Hx[1, :]
elif bc[1] == 'P':
Hx[0, :] = Hx[-2, :]
Hx[-1, :] = Hx[1, :]
Hx0 = np.copy(Hx)
Hx0[1:, :] += Hx[0:-1, :]
Hx0 = Hx0[1:-1, 1:-1]
if domain is not None:
Hx = Hx0[j, k] / 2.0
else:
Hx = Hx0 / 2.0
elif 'Hx' == 'Hy':
Hy = np.pad(self.Hy, 1, 'constant', constant_values=0)
if bc[0] == 'E':
Hy[:, 0] = -1 * Hy[:, 1]
elif bc[0] == 'H':
Hy[:, 0] = Hy[:, 1]
elif bc[0] == 'P':
Hy[:, 0] = Hy[:, -2]
Hy[:, -1] = Hy[:, 1]
Hy0 = np.copy(Hy)
Hy0[:, 0:-1] += Hy[:, 1:]
Hy0 = Hy0[1:-1, 1:-1]
if domain is not None:
Hx = Hy0[j, k] / 2.0
else:
Hx = Hy0 / 2.0
else:
raise ValueError('Unrecongnized field componenet "%s". The allowedfield components are Ez, Hx, Hy.' % 'Hx')
bc = self._bc
if RANK != 0:
Hy = MathDummy()
if domain is not None:
j = domain.j
k = domain.k
if 'Hy' == 'Ez':
if domain is not None:
Hy = self.Ez[j, k]
else:
Hy = np.copy(self.Ez)
elif 'Hy' == 'Hx':
Hx = np.pad(self.Hx, 1, 'constant', constant_values=0)
if bc[1] == 'E':
Hx[0, :] = -1 * Hx[1, :]
elif bc[1] == 'H':
Hx[0, :] = Hx[1, :]
elif bc[1] == 'P':
Hx[0, :] = Hx[-2, :]
Hx[-1, :] = Hx[1, :]
Hx0 = np.copy(Hx)
Hx0[1:, :] += Hx[0:-1, :]
Hx0 = Hx0[1:-1, 1:-1]
if domain is not None:
Hy = Hx0[j, k] / 2.0
else:
Hy = Hx0 / 2.0
elif 'Hy' == 'Hy':
Hy = np.pad(self.Hy, 1, 'constant', constant_values=0)
if bc[0] == 'E':
Hy[:, 0] = -1 * Hy[:, 1]
elif bc[0] == 'H':
Hy[:, 0] = Hy[:, 1]
elif bc[0] == 'P':
Hy[:, 0] = Hy[:, -2]
Hy[:, -1] = Hy[:, 1]
Hy0 = np.copy(Hy)
Hy0[:, 0:-1] += Hy[:, 1:]
Hy0 = Hy0[1:-1, 1:-1]
if domain is not None:
Hy = Hy0[j, k] / 2.0
else:
Hy = Hy0 / 2.0
else:
raise ValueError('Unrecongnized field componenet "%s". The allowedfield components are Ez, Hx, Hy.' % 'Hy')
if RANK != 0:
return MathDummy()
Sx = -0.5 * (Ez * np.conj(Hy)).real
Sy = 0.5 * (Ez * np.conj(Hx)).real
S1 = -Sy[w_pml_b, w_pml_l:N - 1 - w_pml_r]
S2 = Sy[M - 1 - w_pml_t, w_pml_l:N - 1 - w_pml_r]
S3 = -Sx[w_pml_b:M - 1 - w_pml_t, w_pml_l]
S4 = Sx[w_pml_b:M - 1 - w_pml_t, N - 1 - w_pml_r]
P_S = np.sum(S1 + S2) * dx + np.sum(S3 + S4) * dy
x_all = np.arange(w_pml_l, N - w_pml_r)
y_all = np.arange(w_pml_b, M - w_pml_t)
y_all = y_all.reshape(y_all.shape[0], 1).astype(np.int)
if not self.real_materials:
eps = self._eps.get_values(0, N, 0, M)
mu = self._mu.get_values(0, N, 0, M)
else:
eps = np.zeros(Ez.shape, dtype=np.complex128)
mu = np.zeros(Ez.shape, dtype=np.complex128)
Ez2 = Ez[y_all, x_all] * np.conj(Ez[y_all, x_all])
Hx2 = Hx[y_all, x_all] * np.conj(Hx[y_all, x_all])
Hy2 = Hy[y_all, x_all] * np.conj(Hy[y_all, x_all])
P_loss = 0.25 * dx * dy * np.sum(eps[y_all, x_all].imag * Ez2 + mu[y_all, x_all].imag * (Hx2 + Hy2))
return P_S + P_loss.real
|
emopt
|
positive
|
def test_abor(self):
<DeepExtract>
self.client_connect()
self.client.login(user='nobody', passwd='nobody')
</DeepExtract>
self.assertEqual(self.client.sendcmd('abor'), '225 No transfer to abort.')
|
def test_abor(self):
self.client_connect()
self.client.login(user='nobody', passwd='nobody')
self.assertEqual(self.client.sendcmd('abor'), '225 No transfer to abort.')
|
conpot
|
positive
|
def _replace_mapped_refs(refs: List):
for (i, ref) in enumerate(refs):
<DeepExtract>
if ref in self._geography_mapping:
refs[i] = self._geography_mapping[ref]
if ref in self._sectors_mapping:
refs[i] = self._sectors_mapping[ref]
refs[i] = ref
</DeepExtract>
return refs
|
def _replace_mapped_refs(refs: List):
for (i, ref) in enumerate(refs):
if ref in self._geography_mapping:
refs[i] = self._geography_mapping[ref]
if ref in self._sectors_mapping:
refs[i] = self._sectors_mapping[ref]
refs[i] = ref
return refs
|
connectors
|
positive
|
def info_container(image, container):
<DeepExtract>
docker = DOCKER
cmd = '{docker} inspect {name}'
(out, err, rc) = output3(cmd.format(**locals()))
if rc:
logg.info('%s : %s', cmd, err)
logg.debug('no address for %s', container)
addr = None
values = json.loads(out)
if not values or 'NetworkSettings' not in values[0]:
logg.critical(' docker inspect %s => %s ', container, values)
addr = values[0]['NetworkSettings']['IPAddress']
assert isinstance(addr, basestring)
logg.debug('address %s for %s', addr, container)
addr = addr
</DeepExtract>
return addr
|
def info_container(image, container):
docker = DOCKER
cmd = '{docker} inspect {name}'
(out, err, rc) = output3(cmd.format(**locals()))
if rc:
logg.info('%s : %s', cmd, err)
logg.debug('no address for %s', container)
addr = None
values = json.loads(out)
if not values or 'NetworkSettings' not in values[0]:
logg.critical(' docker inspect %s => %s ', container, values)
addr = values[0]['NetworkSettings']['IPAddress']
assert isinstance(addr, basestring)
logg.debug('address %s for %s', addr, container)
addr = addr
return addr
|
docker-systemctl-images
|
positive
|
def __init__(self, session):
printl('', self, 'S')
Screen.__init__(self, session)
HelpableScreen.__init__(self)
DPH_PlexScreen.__init__(self)
self.guiElements = getGuiElements()
self.cfglist = []
ConfigListScreen.__init__(self, self.cfglist, session, on_change=self._changed)
self._hasChanged = False
self['btn_greenText'] = Label()
self['btn_green'] = Pixmap()
self['help'] = StaticText()
self['setupActions'] = ActionMap(['SetupActions', 'ColorActions', 'DPS_Settings'], {'green': self.keySave, 'red': self.keyCancel, 'cancel': self.keyCancel, 'ok': self.ok, 'left': self.keyLeft, 'right': self.keyRight, 'bouquet_up': self.keyBouquetUp, 'bouquet_down': self.keyBouquetDown}, -2)
<DeepExtract>
printl('', self, 'S')
separator = ''.ljust(240, '_')
self.cfglist = []
self.cfglist.append(getConfigListEntry(_('General Settings ') + separator, config.plugins.dreamplex.about, _(' ')))
self.cfglist.append(getConfigListEntry(_('> Boxname'), config.plugins.dreamplex.boxName, _('Enter the name of your box, e.g. Livingroom.')))
self.cfglist.append(getConfigListEntry(_('> Used Skin'), config.plugins.dreamplex.skin, _('If you change the skin you have to restart at least the GUI!')))
self.cfglist.append(getConfigListEntry(_('> Show Plugin in Main Menu'), config.plugins.dreamplex.showInMainMenu, _('Use this to start the plugin direct in the main menu.')))
self.cfglist.append(getConfigListEntry(_('> Use Cache for Sections'), config.plugins.dreamplex.useCache, _('Save plex server answers in cache to speed up a bit.')))
self.cfglist.append(getConfigListEntry(_('> Use Picture Cache'), config.plugins.dreamplex.usePicCache, _('Use this only if you do have enough space on your hdd drive or flash.')))
self.cfglist.append(getConfigListEntry(_('> Show Player Poster on external LCD'), config.plugins.dreamplex.lcd4linux, _('e.g. lcd4linux')))
if config.plugins.dreamplex.showUpdateFunction.value:
self.cfglist.append(getConfigListEntry(_('> Check for updates on startup'), config.plugins.dreamplex.checkForUpdateOnStartup, _('If activated on each start we will check if there is a new version depending on your update type.')))
self.cfglist.append(getConfigListEntry(_('> Updatetype'), config.plugins.dreamplex.updateType, _('Use Beta only if you really want to help with testing')))
self.cfglist.append(getConfigListEntry(_('Userinterface Settings ') + separator, config.plugins.dreamplex.about, _(' ')))
self.cfglist.append(getConfigListEntry(_('> Summerize Servers'), config.plugins.dreamplex.summerizeServers, _('Summerize servers in an additional menu step. (myPlex only)')))
self.cfglist.append(getConfigListEntry(_('> Summerize Sections'), config.plugins.dreamplex.summerizeSections, _('Summerize sections in an additional menu step.')))
self.cfglist.append(getConfigListEntry(_('> Show Filter for Section'), config.plugins.dreamplex.showFilter, _('Show additional filter in an additional menu step e.g. OnDeck')))
self.cfglist.append(getConfigListEntry(_('> Show Seen/Unseen count in TvShows'), config.plugins.dreamplex.showUnSeenCounts, _('Calculate and show them for tv shows.')))
self.cfglist.append(getConfigListEntry(_('> Start with Filtermode'), config.plugins.dreamplex.startWithFilterMode, _('Start with filtermode in any media view.')))
self.cfglist.append(getConfigListEntry(_('> Exit function in Player'), config.plugins.dreamplex.exitFunction, _('Specifiy what the exit button in the player should do.')))
self.cfglist.append(getConfigListEntry(_('> Show Backdrops as Videos'), config.plugins.dreamplex.useBackdropVideos, _('Use this if you have m1v videos as backdrops')))
self.cfglist.append(getConfigListEntry(_('> Stop Live TV on startup'), config.plugins.dreamplex.stopLiveTvOnStartup, _("Stop live TV. Enables 'play themes', 'use backdrop videos'")))
if config.plugins.dreamplex.stopLiveTvOnStartup.value:
if config.plugins.dreamplex.useBackdropVideos.value:
config.plugins.dreamplex.playTheme.value = False
else:
self.cfglist.append(getConfigListEntry(_('>> Play Themes in TV Shows'), config.plugins.dreamplex.playTheme, _('Plays tv show themes automatically.')))
else:
config.plugins.dreamplex.playTheme.value = False
if config.plugins.dreamplex.useBackdropVideos.value:
config.plugins.dreamplex.fastScroll.value = False
config.plugins.dreamplex.liveTvInViews.value = False
else:
self.cfglist.append(getConfigListEntry(_('> Use fastScroll as default'), config.plugins.dreamplex.fastScroll, _('No update for addiontal informations in media views to speed up.')))
if not config.plugins.dreamplex.stopLiveTvOnStartup.value:
self.cfglist.append(getConfigListEntry(_('> Show liveTv in Views instead of backdrops'), config.plugins.dreamplex.liveTvInViews, _('Show live tv while you are navigating through your libs.')))
self.cfglist.append(getConfigListEntry(_('> Show additional data for myPlex sections'), config.plugins.dreamplex.showDetailsInList, _('If server summerize is off you can here add additional information for better overview.')))
if config.plugins.dreamplex.showDetailsInList.value:
self.cfglist.append(getConfigListEntry(_('> Detail type for additional data'), config.plugins.dreamplex.showDetailsInListDetailType, _('Specifiy the type of additional data.')))
self.cfglist.append(getConfigListEntry(_('Path Settings ') + separator, config.plugins.dreamplex.about, _(' ')))
self.cfglist.append(getConfigListEntry(_('> Default View for Movies'), config.plugins.dreamplex.defaultMovieView, _('Specify what view type should start automatically.')))
self.cfglist.append(getConfigListEntry(_('> Default View for Shows'), config.plugins.dreamplex.defaultShowView, _('Specify what view type should start automatically.')))
self.cfglist.append(getConfigListEntry(_('> Default View for Music'), config.plugins.dreamplex.defaultMusicView, _('Specify what view type should start automatically.')))
self.cfglist.append(getConfigListEntry(_('Path Settings ') + separator, config.plugins.dreamplex.about, _(' ')))
self.mediafolderpath = getConfigListEntry(_('> Media Folder Path'), config.plugins.dreamplex.mediafolderpath, _('/hdd/dreamplex/medias'))
self.cfglist.append(self.mediafolderpath)
self.configfolderpath = getConfigListEntry(_('> Config Folder Path'), config.plugins.dreamplex.configfolderpath, _('/hdd/dreamplex/config'))
self.cfglist.append(self.configfolderpath)
self.cachefolderpath = getConfigListEntry(_('> Cache Folder Path'), config.plugins.dreamplex.cachefolderpath, _('/hdd/dreamplex/cache'))
self.cfglist.append(self.cachefolderpath)
self.playerTempPath = getConfigListEntry(_('> Player Temp Path'), config.plugins.dreamplex.playerTempPath, _('/tmp'))
self.cfglist.append(self.playerTempPath)
self.logfolderpath = getConfigListEntry(_('> Log Folder Path'), config.plugins.dreamplex.logfolderpath, _('/tmp'))
self.cfglist.append(self.logfolderpath)
self.cfglist.append(getConfigListEntry(_('Remote Settings ') + separator, config.plugins.dreamplex.about, _(' ')))
self.cfglist.append(getConfigListEntry(_('> Activate Remote Player'), config.plugins.dreamplex.remoteAgent, _('Activate to be able to use with any app with remote function for Plex.')))
if config.plugins.dreamplex.remoteAgent.value:
self.cfglist.append(getConfigListEntry(_('> Remote Player Port'), config.plugins.dreamplex.remotePort, _('Change the port to your needs.')))
self.cfglist.append(getConfigListEntry(_('Misc Settings ') + separator, config.plugins.dreamplex.about, _(' ')))
self.cfglist.append(getConfigListEntry(_('> Debug Mode'), config.plugins.dreamplex.debugMode, _('Enable only if needed. Slows down rapidly.')))
if config.plugins.dreamplex.debugMode.value:
self.cfglist.append(getConfigListEntry(_('> Write debugfile'), config.plugins.dreamplex.writeDebugFile, _('Without this option we just print to console.')))
self['config'].list = self.cfglist
self['config'].l.setList(self.cfglist)
printl('', self, 'C')
</DeepExtract>
self['config'].onSelectionChanged.append(self.updateHelp)
self.onLayoutFinish.append(self.finishLayout)
printl('', self, 'C')
|
def __init__(self, session):
printl('', self, 'S')
Screen.__init__(self, session)
HelpableScreen.__init__(self)
DPH_PlexScreen.__init__(self)
self.guiElements = getGuiElements()
self.cfglist = []
ConfigListScreen.__init__(self, self.cfglist, session, on_change=self._changed)
self._hasChanged = False
self['btn_greenText'] = Label()
self['btn_green'] = Pixmap()
self['help'] = StaticText()
self['setupActions'] = ActionMap(['SetupActions', 'ColorActions', 'DPS_Settings'], {'green': self.keySave, 'red': self.keyCancel, 'cancel': self.keyCancel, 'ok': self.ok, 'left': self.keyLeft, 'right': self.keyRight, 'bouquet_up': self.keyBouquetUp, 'bouquet_down': self.keyBouquetDown}, -2)
printl('', self, 'S')
separator = ''.ljust(240, '_')
self.cfglist = []
self.cfglist.append(getConfigListEntry(_('General Settings ') + separator, config.plugins.dreamplex.about, _(' ')))
self.cfglist.append(getConfigListEntry(_('> Boxname'), config.plugins.dreamplex.boxName, _('Enter the name of your box, e.g. Livingroom.')))
self.cfglist.append(getConfigListEntry(_('> Used Skin'), config.plugins.dreamplex.skin, _('If you change the skin you have to restart at least the GUI!')))
self.cfglist.append(getConfigListEntry(_('> Show Plugin in Main Menu'), config.plugins.dreamplex.showInMainMenu, _('Use this to start the plugin direct in the main menu.')))
self.cfglist.append(getConfigListEntry(_('> Use Cache for Sections'), config.plugins.dreamplex.useCache, _('Save plex server answers in cache to speed up a bit.')))
self.cfglist.append(getConfigListEntry(_('> Use Picture Cache'), config.plugins.dreamplex.usePicCache, _('Use this only if you do have enough space on your hdd drive or flash.')))
self.cfglist.append(getConfigListEntry(_('> Show Player Poster on external LCD'), config.plugins.dreamplex.lcd4linux, _('e.g. lcd4linux')))
if config.plugins.dreamplex.showUpdateFunction.value:
self.cfglist.append(getConfigListEntry(_('> Check for updates on startup'), config.plugins.dreamplex.checkForUpdateOnStartup, _('If activated on each start we will check if there is a new version depending on your update type.')))
self.cfglist.append(getConfigListEntry(_('> Updatetype'), config.plugins.dreamplex.updateType, _('Use Beta only if you really want to help with testing')))
self.cfglist.append(getConfigListEntry(_('Userinterface Settings ') + separator, config.plugins.dreamplex.about, _(' ')))
self.cfglist.append(getConfigListEntry(_('> Summerize Servers'), config.plugins.dreamplex.summerizeServers, _('Summerize servers in an additional menu step. (myPlex only)')))
self.cfglist.append(getConfigListEntry(_('> Summerize Sections'), config.plugins.dreamplex.summerizeSections, _('Summerize sections in an additional menu step.')))
self.cfglist.append(getConfigListEntry(_('> Show Filter for Section'), config.plugins.dreamplex.showFilter, _('Show additional filter in an additional menu step e.g. OnDeck')))
self.cfglist.append(getConfigListEntry(_('> Show Seen/Unseen count in TvShows'), config.plugins.dreamplex.showUnSeenCounts, _('Calculate and show them for tv shows.')))
self.cfglist.append(getConfigListEntry(_('> Start with Filtermode'), config.plugins.dreamplex.startWithFilterMode, _('Start with filtermode in any media view.')))
self.cfglist.append(getConfigListEntry(_('> Exit function in Player'), config.plugins.dreamplex.exitFunction, _('Specifiy what the exit button in the player should do.')))
self.cfglist.append(getConfigListEntry(_('> Show Backdrops as Videos'), config.plugins.dreamplex.useBackdropVideos, _('Use this if you have m1v videos as backdrops')))
self.cfglist.append(getConfigListEntry(_('> Stop Live TV on startup'), config.plugins.dreamplex.stopLiveTvOnStartup, _("Stop live TV. Enables 'play themes', 'use backdrop videos'")))
if config.plugins.dreamplex.stopLiveTvOnStartup.value:
if config.plugins.dreamplex.useBackdropVideos.value:
config.plugins.dreamplex.playTheme.value = False
else:
self.cfglist.append(getConfigListEntry(_('>> Play Themes in TV Shows'), config.plugins.dreamplex.playTheme, _('Plays tv show themes automatically.')))
else:
config.plugins.dreamplex.playTheme.value = False
if config.plugins.dreamplex.useBackdropVideos.value:
config.plugins.dreamplex.fastScroll.value = False
config.plugins.dreamplex.liveTvInViews.value = False
else:
self.cfglist.append(getConfigListEntry(_('> Use fastScroll as default'), config.plugins.dreamplex.fastScroll, _('No update for addiontal informations in media views to speed up.')))
if not config.plugins.dreamplex.stopLiveTvOnStartup.value:
self.cfglist.append(getConfigListEntry(_('> Show liveTv in Views instead of backdrops'), config.plugins.dreamplex.liveTvInViews, _('Show live tv while you are navigating through your libs.')))
self.cfglist.append(getConfigListEntry(_('> Show additional data for myPlex sections'), config.plugins.dreamplex.showDetailsInList, _('If server summerize is off you can here add additional information for better overview.')))
if config.plugins.dreamplex.showDetailsInList.value:
self.cfglist.append(getConfigListEntry(_('> Detail type for additional data'), config.plugins.dreamplex.showDetailsInListDetailType, _('Specifiy the type of additional data.')))
self.cfglist.append(getConfigListEntry(_('Path Settings ') + separator, config.plugins.dreamplex.about, _(' ')))
self.cfglist.append(getConfigListEntry(_('> Default View for Movies'), config.plugins.dreamplex.defaultMovieView, _('Specify what view type should start automatically.')))
self.cfglist.append(getConfigListEntry(_('> Default View for Shows'), config.plugins.dreamplex.defaultShowView, _('Specify what view type should start automatically.')))
self.cfglist.append(getConfigListEntry(_('> Default View for Music'), config.plugins.dreamplex.defaultMusicView, _('Specify what view type should start automatically.')))
self.cfglist.append(getConfigListEntry(_('Path Settings ') + separator, config.plugins.dreamplex.about, _(' ')))
self.mediafolderpath = getConfigListEntry(_('> Media Folder Path'), config.plugins.dreamplex.mediafolderpath, _('/hdd/dreamplex/medias'))
self.cfglist.append(self.mediafolderpath)
self.configfolderpath = getConfigListEntry(_('> Config Folder Path'), config.plugins.dreamplex.configfolderpath, _('/hdd/dreamplex/config'))
self.cfglist.append(self.configfolderpath)
self.cachefolderpath = getConfigListEntry(_('> Cache Folder Path'), config.plugins.dreamplex.cachefolderpath, _('/hdd/dreamplex/cache'))
self.cfglist.append(self.cachefolderpath)
self.playerTempPath = getConfigListEntry(_('> Player Temp Path'), config.plugins.dreamplex.playerTempPath, _('/tmp'))
self.cfglist.append(self.playerTempPath)
self.logfolderpath = getConfigListEntry(_('> Log Folder Path'), config.plugins.dreamplex.logfolderpath, _('/tmp'))
self.cfglist.append(self.logfolderpath)
self.cfglist.append(getConfigListEntry(_('Remote Settings ') + separator, config.plugins.dreamplex.about, _(' ')))
self.cfglist.append(getConfigListEntry(_('> Activate Remote Player'), config.plugins.dreamplex.remoteAgent, _('Activate to be able to use with any app with remote function for Plex.')))
if config.plugins.dreamplex.remoteAgent.value:
self.cfglist.append(getConfigListEntry(_('> Remote Player Port'), config.plugins.dreamplex.remotePort, _('Change the port to your needs.')))
self.cfglist.append(getConfigListEntry(_('Misc Settings ') + separator, config.plugins.dreamplex.about, _(' ')))
self.cfglist.append(getConfigListEntry(_('> Debug Mode'), config.plugins.dreamplex.debugMode, _('Enable only if needed. Slows down rapidly.')))
if config.plugins.dreamplex.debugMode.value:
self.cfglist.append(getConfigListEntry(_('> Write debugfile'), config.plugins.dreamplex.writeDebugFile, _('Without this option we just print to console.')))
self['config'].list = self.cfglist
self['config'].l.setList(self.cfglist)
printl('', self, 'C')
self['config'].onSelectionChanged.append(self.updateHelp)
self.onLayoutFinish.append(self.finishLayout)
printl('', self, 'C')
|
DreamPlex
|
positive
|
def PlotPredictionsByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
hfs = []
for (_, group) in groups:
<DeepExtract>
complete = group[group.evrmarry == 1].agemarry
ongoing = group[group.evrmarry == 0].age
hf = EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
(hf, sf) = (hf, sf)
</DeepExtract>
hfs.append(hf)
thinkplot.PrePlot(len(hfs))
for (i, hf) in enumerate(hfs):
if i > 0:
hf.Extend(hfs[i - 1])
sf = hf.MakeSurvival()
thinkplot.Plot(sf, **options)
|
def PlotPredictionsByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
hfs = []
for (_, group) in groups:
complete = group[group.evrmarry == 1].agemarry
ongoing = group[group.evrmarry == 0].age
hf = EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
(hf, sf) = (hf, sf)
hfs.append(hf)
thinkplot.PrePlot(len(hfs))
for (i, hf) in enumerate(hfs):
if i > 0:
hf.Extend(hfs[i - 1])
sf = hf.MakeSurvival()
thinkplot.Plot(sf, **options)
|
bayesianGameofThrones
|
positive
|
def test_error_formatter_is_called_with_debug_disabled(schema):
error_formatter = Mock(return_value=True)
app = GraphQL(schema, debug=False, error_formatter=error_formatter)
<DeepExtract>
client = TestClient(app)
client.post('/', json={'query': '{ error }'})
</DeepExtract>
error_formatter.assert_called_once_with(ANY, False)
|
def test_error_formatter_is_called_with_debug_disabled(schema):
error_formatter = Mock(return_value=True)
app = GraphQL(schema, debug=False, error_formatter=error_formatter)
client = TestClient(app)
client.post('/', json={'query': '{ error }'})
error_formatter.assert_called_once_with(ANY, False)
|
ariadne
|
positive
|
def __call__(self, read, info: ModificationInfo):
<DeepExtract>
matches = []
if self.action == 'lowercase':
read.sequence = read.sequence.upper()
trimmed_read = read
for _ in range(self.times):
match = self.adapters.match_to(trimmed_read.sequence)
if match is None:
break
matches.append(match)
trimmed_read = match.trimmed(trimmed_read)
if not matches:
(trimmed_read, matches) = (trimmed_read, [])
if self.action == 'trim':
pass
elif self.action == 'retain':
trimmed_read = self.trim_but_retain_adapter(read, matches)
elif self.action == 'mask':
trimmed_read = self.masked_read(read, matches)
elif self.action == 'lowercase':
trimmed_read = self.lowercased_read(read, matches)
assert len(trimmed_read.sequence) == len(read)
elif self.action is None:
trimmed_read = read[:]
(trimmed_read, matches) = (trimmed_read, matches)
</DeepExtract>
if matches:
self.with_adapters += 1
for match in matches:
self.adapter_statistics[match.adapter].add_match(match)
info.matches.extend(matches)
return trimmed_read
|
def __call__(self, read, info: ModificationInfo):
matches = []
if self.action == 'lowercase':
read.sequence = read.sequence.upper()
trimmed_read = read
for _ in range(self.times):
match = self.adapters.match_to(trimmed_read.sequence)
if match is None:
break
matches.append(match)
trimmed_read = match.trimmed(trimmed_read)
if not matches:
(trimmed_read, matches) = (trimmed_read, [])
if self.action == 'trim':
pass
elif self.action == 'retain':
trimmed_read = self.trim_but_retain_adapter(read, matches)
elif self.action == 'mask':
trimmed_read = self.masked_read(read, matches)
elif self.action == 'lowercase':
trimmed_read = self.lowercased_read(read, matches)
assert len(trimmed_read.sequence) == len(read)
elif self.action is None:
trimmed_read = read[:]
(trimmed_read, matches) = (trimmed_read, matches)
if matches:
self.with_adapters += 1
for match in matches:
self.adapter_statistics[match.adapter].add_match(match)
info.matches.extend(matches)
return trimmed_read
|
cutadapt
|
positive
|
def post(self, *args, **kwargs):
widget = self.object
widget.pk = None
widget.save(created=False)
for dimension in self.model.objects.get(id=self.kwargs['id']).dimensions:
dimension.pk = None
dimension.widget_id = widget.id
dimension.save()
messages.success(self.request, _('Widget was successfully cloned.'))
<DeepExtract>
if self.request.META.get('HTTP_REFERER') != self.request.build_absolute_uri():
success_url = self.request.META.get('HTTP_REFERER')
try:
success_url = self.object.parent.get_absolute_url()
except:
pass
else:
success_url = success_url
success_url = super(WidgetActionMixin, self).get_success_url()
</DeepExtract>
response = HttpResponseRedirect(success_url)
response['X-Horizon-Location'] = success_url
return response
|
def post(self, *args, **kwargs):
widget = self.object
widget.pk = None
widget.save(created=False)
for dimension in self.model.objects.get(id=self.kwargs['id']).dimensions:
dimension.pk = None
dimension.widget_id = widget.id
dimension.save()
messages.success(self.request, _('Widget was successfully cloned.'))
if self.request.META.get('HTTP_REFERER') != self.request.build_absolute_uri():
success_url = self.request.META.get('HTTP_REFERER')
try:
success_url = self.object.parent.get_absolute_url()
except:
pass
else:
success_url = success_url
success_url = super(WidgetActionMixin, self).get_success_url()
response = HttpResponseRedirect(success_url)
response['X-Horizon-Location'] = success_url
return response
|
django-leonardo
|
positive
|
@patch('azure_functions_worker.extension.logger.info')
def test_info_discover_extension_list_func_ext(self, info_mock: Mock):
sdk = get_sdk_from_sys_path()
<DeepExtract>
class NewFuncExtension(sdk.FuncExtensionBase):
def __init__(self):
self._trigger_name = self._mock_func_name
self._post_function_load_executed = False
self._pre_invocation_executed = False
self._post_invocation_executed = False
self._pre_invocation_executed_fargs = {}
self._post_invocation_executed_fargs = {}
self._post_invocation_executed_fret = None
def post_function_load(self, function_name, function_directory, *args, **kwargs):
self._post_function_load_executed = True
def pre_invocation(self, logger, context, fargs, *args, **kwargs):
self._pre_invocation_executed = True
self._pre_invocation_executed_fargs = fargs
def post_invocation(self, logger, context, fargs, fret, *args, **kwargs):
self._post_invocation_executed = True
self._post_invocation_executed_fargs = fargs
self._post_invocation_executed_fret = fret
FuncExtClass = NewFuncExtension
</DeepExtract>
FuncExtClass()
self._instance._info_discover_extension_list(self._mock_func_name, sdk)
info_mock.assert_called_once_with('Python Worker Extension Manager is loading %s, current registered extensions: %s', 'HttpTrigger', '{"FuncExtension": {"HttpTrigger": ["NewFuncExtension"]}}')
|
@patch('azure_functions_worker.extension.logger.info')
def test_info_discover_extension_list_func_ext(self, info_mock: Mock):
sdk = get_sdk_from_sys_path()
class NewFuncExtension(sdk.FuncExtensionBase):
def __init__(self):
self._trigger_name = self._mock_func_name
self._post_function_load_executed = False
self._pre_invocation_executed = False
self._post_invocation_executed = False
self._pre_invocation_executed_fargs = {}
self._post_invocation_executed_fargs = {}
self._post_invocation_executed_fret = None
def post_function_load(self, function_name, function_directory, *args, **kwargs):
self._post_function_load_executed = True
def pre_invocation(self, logger, context, fargs, *args, **kwargs):
self._pre_invocation_executed = True
self._pre_invocation_executed_fargs = fargs
def post_invocation(self, logger, context, fargs, fret, *args, **kwargs):
self._post_invocation_executed = True
self._post_invocation_executed_fargs = fargs
self._post_invocation_executed_fret = fret
FuncExtClass = NewFuncExtension
FuncExtClass()
self._instance._info_discover_extension_list(self._mock_func_name, sdk)
info_mock.assert_called_once_with('Python Worker Extension Manager is loading %s, current registered extensions: %s', 'HttpTrigger', '{"FuncExtension": {"HttpTrigger": ["NewFuncExtension"]}}')
|
azure-functions-python-worker
|
positive
|
@property
def roidb(self):
if self._roidb is not None:
return self._roidb
<DeepExtract>
self._roidb = self._roidb_handler
</DeepExtract>
return self._roidb
|
@property
def roidb(self):
if self._roidb is not None:
return self._roidb
self._roidb = self._roidb_handler
return self._roidb
|
CenterFace.pytorch
|
positive
|