| before (string, lengths 0-955k) | after (string, lengths 0-877k) | repo (string, lengths 1-74) | type (1 class: positive) |
|---|---|---|---|
def _RabinMillerTest(number, rounds):
"""Probabilistic algorithm to identify primality of given number."""
if number < 3 or number & 1 == 0:
return number == 2
(s, d) = (0, number - 1)
while d & 1 == 0:
(s, d) = (s + 1, d >> 1)
for _ in xrange(min(rounds, number - 2)):
<DeepExtract>
if 2 >= number - 1:
raise ValueError('upper_limit should be greater than lower_limit')
width = number - 1 - 2
range_bit_length = 0
while width != 0:
range_bit_length += 1
width >>= 1
result = 2 + GetRandomNumber(range_bit_length)
while result >= number - 1:
result = 2 + GetRandomNumber(range_bit_length)
a = result
</DeepExtract>
x = pow(a, d, number)
if x != 1 and x + 1 != number:
for _ in xrange(1, s):
x = pow(x, 2, number)
if x == 1:
return False
elif x == number - 1:
a = 0
break
if a:
return False
return True
|
def _RabinMillerTest(number, rounds):
"""Probabilistic algorithm to identify primality of given number."""
if number < 3 or number & 1 == 0:
return number == 2
(s, d) = (0, number - 1)
while d & 1 == 0:
(s, d) = (s + 1, d >> 1)
for _ in xrange(min(rounds, number - 2)):
if 2 >= number - 1:
raise ValueError('upper_limit should be greater than lower_limit')
width = number - 1 - 2
range_bit_length = 0
while width != 0:
range_bit_length += 1
width >>= 1
result = 2 + GetRandomNumber(range_bit_length)
while result >= number - 1:
result = 2 + GetRandomNumber(range_bit_length)
a = result
x = pow(a, d, number)
if x != 1 and x + 1 != number:
for _ in xrange(1, s):
x = pow(x, 2, number)
if x == 1:
return False
elif x == number - 1:
a = 0
break
if a:
return False
return True
|
encrypted-bigquery-client
|
positive
|
def parse_address_list(self, s):
result = set()
for i in s.split(','):
if '-' in i:
<DeepExtract>
if not '-' in i:
raise CmdSyntaxError('Expected two dash-separated addresses')
(start, finish) = (self.parse_address(i) for i in i.split('-', 1))
(start, finish) = (start, finish)
</DeepExtract>
result |= {IPv4Address(a) for a in range(start, finish + 1)}
else:
result.add(self.parse_address(i))
return result
|
def parse_address_list(self, s):
result = set()
for i in s.split(','):
if '-' in i:
if not '-' in i:
raise CmdSyntaxError('Expected two dash-separated addresses')
(start, finish) = (self.parse_address(i) for i in i.split('-', 1))
(start, finish) = (start, finish)
result |= {IPv4Address(a) for a in range(start, finish + 1)}
else:
result.add(self.parse_address(i))
return result
|
compoundpi
|
positive
|
def _non_dist_train(model, dataset, cfg, validate=False):
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
data_loaders = [build_dataloader(ds, cfg.data.imgs_per_gpu, cfg.data.workers_per_gpu, cfg.gpus, dist=False) for ds in dataset]
model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
<DeepExtract>
if hasattr(model, 'module'):
model = model.module
cfg.optimizer = cfg.optimizer.copy()
paramwise_options = cfg.optimizer.pop('paramwise_options', None)
if paramwise_options is None:
optimizer = obj_from_dict(cfg.optimizer, torch.optim, dict(params=filter(lambda p: p.requires_grad, model.parameters())))
else:
assert isinstance(paramwise_options, dict)
base_lr = cfg.optimizer['lr']
base_wd = cfg.optimizer.get('weight_decay', None)
if 'bias_decay_mult' in paramwise_options or 'norm_decay_mult' in paramwise_options:
assert base_wd is not None
bias_lr_mult = paramwise_options.get('bias_lr_mult', 1.0)
bias_decay_mult = paramwise_options.get('bias_decay_mult', 1.0)
norm_decay_mult = paramwise_options.get('norm_decay_mult', 1.0)
params = []
for (name, param) in model.named_parameters():
param_group = {'params': [param]}
if not param.requires_grad:
params.append(param_group)
continue
if re.search('(bn|gn)(\\d+)?.(weight|bias)', name):
if base_wd is not None:
param_group['weight_decay'] = base_wd * norm_decay_mult
elif name.endswith('.bias'):
param_group['lr'] = base_lr * bias_lr_mult
if base_wd is not None:
param_group['weight_decay'] = base_wd * bias_decay_mult
params.append(param_group)
optimizer_cls = getattr(torch.optim, cfg.optimizer.pop('type'))
optimizer = optimizer_cls(params, **cfg.optimizer)
</DeepExtract>
runner = Runner(model, batch_processor, optimizer, cfg.work_dir, cfg.log_level)
import logging
runner.logger.setLevel(logging.INFO)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=False)
else:
optimizer_config = cfg.optimizer_config
runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config)
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
|
def _non_dist_train(model, dataset, cfg, validate=False):
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
data_loaders = [build_dataloader(ds, cfg.data.imgs_per_gpu, cfg.data.workers_per_gpu, cfg.gpus, dist=False) for ds in dataset]
model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
if hasattr(model, 'module'):
model = model.module
cfg.optimizer = cfg.optimizer.copy()
paramwise_options = cfg.optimizer.pop('paramwise_options', None)
if paramwise_options is None:
optimizer = obj_from_dict(cfg.optimizer, torch.optim, dict(params=filter(lambda p: p.requires_grad, model.parameters())))
else:
assert isinstance(paramwise_options, dict)
base_lr = cfg.optimizer['lr']
base_wd = cfg.optimizer.get('weight_decay', None)
if 'bias_decay_mult' in paramwise_options or 'norm_decay_mult' in paramwise_options:
assert base_wd is not None
bias_lr_mult = paramwise_options.get('bias_lr_mult', 1.0)
bias_decay_mult = paramwise_options.get('bias_decay_mult', 1.0)
norm_decay_mult = paramwise_options.get('norm_decay_mult', 1.0)
params = []
for (name, param) in model.named_parameters():
param_group = {'params': [param]}
if not param.requires_grad:
params.append(param_group)
continue
if re.search('(bn|gn)(\\d+)?.(weight|bias)', name):
if base_wd is not None:
param_group['weight_decay'] = base_wd * norm_decay_mult
elif name.endswith('.bias'):
param_group['lr'] = base_lr * bias_lr_mult
if base_wd is not None:
param_group['weight_decay'] = base_wd * bias_decay_mult
params.append(param_group)
optimizer_cls = getattr(torch.optim, cfg.optimizer.pop('type'))
optimizer = optimizer_cls(params, **cfg.optimizer)
runner = Runner(model, batch_processor, optimizer, cfg.work_dir, cfg.log_level)
import logging
runner.logger.setLevel(logging.INFO)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=False)
else:
optimizer_config = cfg.optimizer_config
runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config)
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
|
BalancedGroupSoftmax
|
positive
|
def post_delete_handler(self):
"""
When the model has been deleted, decrement the actual tag
"""
<DeepExtract>
check_value = self.field.attname in self.instance.__dict__
if check_value:
try:
value = self.descriptor.descriptor.__get__(self.instance)
except (self.tag_model.DoesNotExist, self.instance.DoesNotExist):
self.changed = True
if self.tag_cache:
self.tag_cache.pk = None
old_tag = self.tag_cache
self.flush_cache()
old_tag = value
old_tag = None
</DeepExtract>
if not old_tag:
return
try:
old_tag.decrement()
except type(old_tag).DoesNotExist:
pass
<DeepExtract>
self.descriptor.descriptor.__set__(self.instance, None)
self.flush_cache()
</DeepExtract>
if not self.changed:
self.tag_name = old_tag.name
self.tag_cache = None
self.changed = True
|
def post_delete_handler(self):
"""
When the model has been deleted, decrement the actual tag
"""
check_value = self.field.attname in self.instance.__dict__
if check_value:
try:
value = self.descriptor.descriptor.__get__(self.instance)
except (self.tag_model.DoesNotExist, self.instance.DoesNotExist):
self.changed = True
if self.tag_cache:
self.tag_cache.pk = None
old_tag = self.tag_cache
self.flush_cache()
old_tag = value
old_tag = None
if not old_tag:
return
try:
old_tag.decrement()
except type(old_tag).DoesNotExist:
pass
self.descriptor.descriptor.__set__(self.instance, None)
self.flush_cache()
if not self.changed:
self.tag_name = old_tag.name
self.tag_cache = None
self.changed = True
|
django-tagulous
|
positive
|
def MenuItem(self, path):
"""Return the menu item specifed by path
Path can be a string in the form "MenuItem->MenuItem->MenuItem..."
where each MenuItem is the text of an item at that level of the menu.
E.g. ::
File->Export->ExportAsPNG
spaces are not important so you could also have written... ::
File -> Export -> Export As PNG
"""
if self.appdata is not None:
menu_appdata = self.appdata['MenuItems']
else:
menu_appdata = None
<DeepExtract>
menu_hwnd = self._menu_handle()
if menu_hwnd:
menu = Menu(self, menu_hwnd)
menu = None
</DeepExtract>
if menu:
return self.Menu().GetMenuPath(path, appdata=menu_appdata)[-1]
raise RuntimeError('There is no menu.')
|
def MenuItem(self, path):
"""Return the menu item specifed by path
Path can be a string in the form "MenuItem->MenuItem->MenuItem..."
where each MenuItem is the text of an item at that level of the menu.
E.g. ::
File->Export->ExportAsPNG
spaces are not important so you could also have written... ::
File -> Export -> Export As PNG
"""
if self.appdata is not None:
menu_appdata = self.appdata['MenuItems']
else:
menu_appdata = None
menu_hwnd = self._menu_handle()
if menu_hwnd:
menu = Menu(self, menu_hwnd)
menu = None
if menu:
return self.Menu().GetMenuPath(path, appdata=menu_appdata)[-1]
raise RuntimeError('There is no menu.')
|
BrowserRefresh-Sublime
|
positive
|
def test_users_groups_after_removal(self):
<DeepExtract>
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
self.user3.groups.add(self.group3)
assign_perm('change_contenttype', self.group1, self.obj1)
assign_perm('change_contenttype', self.group2, self.obj1)
assign_perm('delete_contenttype', self.group3, self.obj2)
result = get_users_with_perms(self.obj1).values_list('pk', flat=True)
self.assertEqual(set(result), {u.pk for u in (self.user1, self.user2)})
</DeepExtract>
remove_perm('change_contenttype', self.group1, self.obj1)
result = get_users_with_perms(self.obj1).values_list('pk', flat=True)
self.assertEqual(set(result), {self.user2.pk})
|
def test_users_groups_after_removal(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
self.user3.groups.add(self.group3)
assign_perm('change_contenttype', self.group1, self.obj1)
assign_perm('change_contenttype', self.group2, self.obj1)
assign_perm('delete_contenttype', self.group3, self.obj2)
result = get_users_with_perms(self.obj1).values_list('pk', flat=True)
self.assertEqual(set(result), {u.pk for u in (self.user1, self.user2)})
remove_perm('change_contenttype', self.group1, self.obj1)
result = get_users_with_perms(self.obj1).values_list('pk', flat=True)
self.assertEqual(set(result), {self.user2.pk})
|
django-guardian
|
positive
|
def getOrganizationNetworks(apiKey, organizationId, query=None):
url = '/organizations/' + str(organizationId) + '/networks'
<DeepExtract>
if p_retry > API_MAX_RETRIES:
if FLAG_REQUEST_VERBOSE:
print('ERROR: Reached max retries')
(success, errors, headers, response) = (False, None, None, None)
bearerString = 'Bearer ' + str(apiKey)
headers = {'Authorization': bearerString}
if not p_additionalHeaders is None:
headers.update(p_additionalHeaders)
query = ''
if not query is None:
qArrayFix = {}
for item in query:
if isinstance(query[item], list):
qArrayFix['%s[]' % item] = query[item]
else:
qArrayFix[item] = query[item]
query = '?' + urlencode(qArrayFix, True)
url = API_BASE_URL + url + query
verb = 'get'.upper()
session = NoRebuildAuthSession()
verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}}
try:
if FLAG_REQUEST_VERBOSE:
print(verb, url)
if verb in verbs:
if verbs[verb]['hasBody'] and (not p_requestBody is None):
r = verbs[verb]['function'](url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
(success, errors, headers, response) = (False, None, None, None)
except:
(success, errors, headers, response) = (False, None, None, None)
if FLAG_REQUEST_VERBOSE:
print(r.status_code)
success = r.status_code in range(200, 299)
errors = None
responseHeaders = None
responseBody = None
if r.status_code == API_STATUS_RATE_LIMIT:
retryInterval = API_RETRY_DEFAULT_WAIT
if 'Retry-After' in r.headers:
retryInterval = r.headers['Retry-After']
if 'retry-after' in r.headers:
retryInterval = r.headers['retry-after']
if FLAG_REQUEST_VERBOSE:
print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval))
time.sleep(int(retryInterval))
(success, errors, responseHeaders, responseBody) = merakiRequest(apiKey, 'get', url, p_additionalHeaders, query, p_requestBody, FLAG_REQUEST_VERBOSE, p_retry + 1)
(success, errors, headers, response) = (success, errors, responseHeaders, responseBody)
try:
rjson = r.json()
except:
rjson = None
if not rjson is None:
if 'errors' in rjson:
errors = rjson['errors']
if FLAG_REQUEST_VERBOSE:
print(errors)
else:
responseBody = rjson
if 'Link' in r.headers:
parsedLinks = utils.parse_header_links(r.headers['Link'])
for link in parsedLinks:
if link['rel'] == 'next':
if FLAG_REQUEST_VERBOSE:
print('Next page:', link['url'])
splitLink = link['url'].split('/api/v1')
(success, errors, responseHeaders, nextBody) = merakiRequest(apiKey, 'get', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=FLAG_REQUEST_VERBOSE)
if success:
if not responseBody is None:
responseBody = responseBody + nextBody
else:
responseBody = None
(success, errors, headers, response) = (success, errors, responseHeaders, responseBody)
</DeepExtract>
return (success, errors, response)
|
def getOrganizationNetworks(apiKey, organizationId, query=None):
url = '/organizations/' + str(organizationId) + '/networks'
if p_retry > API_MAX_RETRIES:
if FLAG_REQUEST_VERBOSE:
print('ERROR: Reached max retries')
(success, errors, headers, response) = (False, None, None, None)
bearerString = 'Bearer ' + str(apiKey)
headers = {'Authorization': bearerString}
if not p_additionalHeaders is None:
headers.update(p_additionalHeaders)
query = ''
if not query is None:
qArrayFix = {}
for item in query:
if isinstance(query[item], list):
qArrayFix['%s[]' % item] = query[item]
else:
qArrayFix[item] = query[item]
query = '?' + urlencode(qArrayFix, True)
url = API_BASE_URL + url + query
verb = 'get'.upper()
session = NoRebuildAuthSession()
verbs = {'DELETE': {'function': session.delete, 'hasBody': False}, 'GET': {'function': session.get, 'hasBody': False}, 'POST': {'function': session.post, 'hasBody': True}, 'PUT': {'function': session.put, 'hasBody': True}}
try:
if FLAG_REQUEST_VERBOSE:
print(verb, url)
if verb in verbs:
if verbs[verb]['hasBody'] and (not p_requestBody is None):
r = verbs[verb]['function'](url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
r = verbs[verb]['function'](url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
(success, errors, headers, response) = (False, None, None, None)
except:
(success, errors, headers, response) = (False, None, None, None)
if FLAG_REQUEST_VERBOSE:
print(r.status_code)
success = r.status_code in range(200, 299)
errors = None
responseHeaders = None
responseBody = None
if r.status_code == API_STATUS_RATE_LIMIT:
retryInterval = API_RETRY_DEFAULT_WAIT
if 'Retry-After' in r.headers:
retryInterval = r.headers['Retry-After']
if 'retry-after' in r.headers:
retryInterval = r.headers['retry-after']
if FLAG_REQUEST_VERBOSE:
print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, retryInterval))
time.sleep(int(retryInterval))
(success, errors, responseHeaders, responseBody) = merakiRequest(apiKey, 'get', url, p_additionalHeaders, query, p_requestBody, FLAG_REQUEST_VERBOSE, p_retry + 1)
(success, errors, headers, response) = (success, errors, responseHeaders, responseBody)
try:
rjson = r.json()
except:
rjson = None
if not rjson is None:
if 'errors' in rjson:
errors = rjson['errors']
if FLAG_REQUEST_VERBOSE:
print(errors)
else:
responseBody = rjson
if 'Link' in r.headers:
parsedLinks = utils.parse_header_links(r.headers['Link'])
for link in parsedLinks:
if link['rel'] == 'next':
if FLAG_REQUEST_VERBOSE:
print('Next page:', link['url'])
splitLink = link['url'].split('/api/v1')
(success, errors, responseHeaders, nextBody) = merakiRequest(apiKey, 'get', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=FLAG_REQUEST_VERBOSE)
if success:
if not responseBody is None:
responseBody = responseBody + nextBody
else:
responseBody = None
(success, errors, headers, response) = (success, errors, responseHeaders, responseBody)
return (success, errors, response)
|
automation-scripts
|
positive
|
def accuracy(inpt, output, target, batch_size, nprint):
"""Calculate output accuracy given target."""
assert nprint < batch_size + 1
def task_print(inp, output, target):
stop_bound = 0
print_len = 0
while print_len < len(target) and target[print_len] > stop_bound:
print_len += 1
<DeepExtract>
if log_filename:
try:
with gfile.GFile(log_filename, mode='a') as f:
f.write(' i: ' + ' '.join([str(i - 1) for i in inp if i > 0]) + ('\n' if newline else ''))
except:
sys.stdout.write('Error appending to %s\n' % log_filename)
sys.stdout.write(' i: ' + ' '.join([str(i - 1) for i in inp if i > 0]) + ('\n' if newline else ''))
sys.stdout.flush()
</DeepExtract>
<DeepExtract>
if log_filename:
try:
with gfile.GFile(log_filename, mode='a') as f:
f.write(' o: ' + ' '.join([str(output[l] - 1) for l in xrange(print_len)]) + ('\n' if newline else ''))
except:
sys.stdout.write('Error appending to %s\n' % log_filename)
sys.stdout.write(' o: ' + ' '.join([str(output[l] - 1) for l in xrange(print_len)]) + ('\n' if newline else ''))
sys.stdout.flush()
</DeepExtract>
<DeepExtract>
if log_filename:
try:
with gfile.GFile(log_filename, mode='a') as f:
f.write(' t: ' + ' '.join([str(target[l] - 1) for l in xrange(print_len)]) + ('\n' if newline else ''))
except:
sys.stdout.write('Error appending to %s\n' % log_filename)
sys.stdout.write(' t: ' + ' '.join([str(target[l] - 1) for l in xrange(print_len)]) + ('\n' if newline else ''))
sys.stdout.flush()
</DeepExtract>
decoded_target = target
<DeepExtract>
decoded_output = [np.argmax(o, axis=1) for o in output]
</DeepExtract>
total = 0
errors = 0
seq = [0 for b in xrange(batch_size)]
for l in xrange(len(decoded_output)):
for b in xrange(batch_size):
if decoded_target[l][b] > 0:
total += 1
if decoded_output[l][b] != decoded_target[l][b]:
seq[b] = 1
errors += 1
e = 0
for _ in xrange(min(nprint, sum(seq))):
while seq[e] == 0:
e += 1
<DeepExtract>
stop_bound = 0
print_len = 0
while print_len < len([decoded_target[l][e] for l in xrange(len(decoded_target))]) and [decoded_target[l][e] for l in xrange(len(decoded_target))][print_len] > stop_bound:
print_len += 1
print_out(' i: ' + ' '.join([str(i - 1) for i in [inpt[l][e] for l in xrange(len(inpt))] if i > 0]))
print_out(' o: ' + ' '.join([str([decoded_output[l][e] for l in xrange(len(decoded_target))][l] - 1) for l in xrange(print_len)]))
print_out(' t: ' + ' '.join([str([decoded_target[l][e] for l in xrange(len(decoded_target))][l] - 1) for l in xrange(print_len)]))
</DeepExtract>
e += 1
for b in xrange(nprint - errors):
<DeepExtract>
stop_bound = 0
print_len = 0
while print_len < len([decoded_target[l][b] for l in xrange(len(decoded_target))]) and [decoded_target[l][b] for l in xrange(len(decoded_target))][print_len] > stop_bound:
print_len += 1
print_out(' i: ' + ' '.join([str(i - 1) for i in [inpt[l][b] for l in xrange(len(inpt))] if i > 0]))
print_out(' o: ' + ' '.join([str([decoded_output[l][b] for l in xrange(len(decoded_target))][l] - 1) for l in xrange(print_len)]))
print_out(' t: ' + ' '.join([str([decoded_target[l][b] for l in xrange(len(decoded_target))][l] - 1) for l in xrange(print_len)]))
</DeepExtract>
return (errors, total, sum(seq))
|
def accuracy(inpt, output, target, batch_size, nprint):
"""Calculate output accuracy given target."""
assert nprint < batch_size + 1
def task_print(inp, output, target):
stop_bound = 0
print_len = 0
while print_len < len(target) and target[print_len] > stop_bound:
print_len += 1
if log_filename:
try:
with gfile.GFile(log_filename, mode='a') as f:
f.write(' i: ' + ' '.join([str(i - 1) for i in inp if i > 0]) + ('\n' if newline else ''))
except:
sys.stdout.write('Error appending to %s\n' % log_filename)
sys.stdout.write(' i: ' + ' '.join([str(i - 1) for i in inp if i > 0]) + ('\n' if newline else ''))
sys.stdout.flush()
if log_filename:
try:
with gfile.GFile(log_filename, mode='a') as f:
f.write(' o: ' + ' '.join([str(output[l] - 1) for l in xrange(print_len)]) + ('\n' if newline else ''))
except:
sys.stdout.write('Error appending to %s\n' % log_filename)
sys.stdout.write(' o: ' + ' '.join([str(output[l] - 1) for l in xrange(print_len)]) + ('\n' if newline else ''))
sys.stdout.flush()
if log_filename:
try:
with gfile.GFile(log_filename, mode='a') as f:
f.write(' t: ' + ' '.join([str(target[l] - 1) for l in xrange(print_len)]) + ('\n' if newline else ''))
except:
sys.stdout.write('Error appending to %s\n' % log_filename)
sys.stdout.write(' t: ' + ' '.join([str(target[l] - 1) for l in xrange(print_len)]) + ('\n' if newline else ''))
sys.stdout.flush()
decoded_target = target
decoded_output = [np.argmax(o, axis=1) for o in output]
total = 0
errors = 0
seq = [0 for b in xrange(batch_size)]
for l in xrange(len(decoded_output)):
for b in xrange(batch_size):
if decoded_target[l][b] > 0:
total += 1
if decoded_output[l][b] != decoded_target[l][b]:
seq[b] = 1
errors += 1
e = 0
for _ in xrange(min(nprint, sum(seq))):
while seq[e] == 0:
e += 1
stop_bound = 0
print_len = 0
while print_len < len([decoded_target[l][e] for l in xrange(len(decoded_target))]) and [decoded_target[l][e] for l in xrange(len(decoded_target))][print_len] > stop_bound:
print_len += 1
print_out(' i: ' + ' '.join([str(i - 1) for i in [inpt[l][e] for l in xrange(len(inpt))] if i > 0]))
print_out(' o: ' + ' '.join([str([decoded_output[l][e] for l in xrange(len(decoded_target))][l] - 1) for l in xrange(print_len)]))
print_out(' t: ' + ' '.join([str([decoded_target[l][e] for l in xrange(len(decoded_target))][l] - 1) for l in xrange(print_len)]))
e += 1
for b in xrange(nprint - errors):
stop_bound = 0
print_len = 0
while print_len < len([decoded_target[l][b] for l in xrange(len(decoded_target))]) and [decoded_target[l][b] for l in xrange(len(decoded_target))][print_len] > stop_bound:
print_len += 1
print_out(' i: ' + ' '.join([str(i - 1) for i in [inpt[l][b] for l in xrange(len(inpt))] if i > 0]))
print_out(' o: ' + ' '.join([str([decoded_output[l][b] for l in xrange(len(decoded_target))][l] - 1) for l in xrange(print_len)]))
print_out(' t: ' + ' '.join([str([decoded_target[l][b] for l in xrange(len(decoded_target))][l] - 1) for l in xrange(print_len)]))
return (errors, total, sum(seq))
|
AI_Reader
|
positive
|
def get_personal_words(self, info: dict):
"""
Find all personal words in a given node (info dict)
Return all the spans that are classified as personal words.
<defaultdict>: {'default': [list of personal (span, details, words)], 'possessive': [list of personal (span, details, words)]}
"""
<DeepExtract>
dependency = info['dependencies']
skeleton_dep = info['skeleton_dependencies']
(subj, objs) = self._get_subj_obj_from_dep(skeleton_dep)
(compounds, modifiers, cases) = self._get_data_from_dep(dependency)
if subj is None:
processed_info = None
returns = defaultdict(list)
(span, details, words) = self._restore_span(subj, info['words'], compounds, modifiers)
returns['subj'].append((span, details, words))
for obj in objs:
(span, details, words) = self._restore_span(obj, info['words'], compounds, modifiers)
returns['objs'].append((span, details, words))
if 'nmod:poss' in modifiers:
for nmod_poss_list in modifiers['nmod:poss'].values():
for poss in nmod_poss_list:
(span, details, words) = self._restore_span(poss, info['words'], compounds, modifiers, cases)
returns['possessive'].append((span, details, words))
for nmod in modifiers:
if nmod != 'nmod:poss':
for nmod_list in modifiers[nmod].values():
for modifier in nmod_list:
(span, details, words) = self._restore_span(modifier, info['words'], compounds, modifiers)
returns['nmod'].append((span, details, words))
processed_info = returns
</DeepExtract>
if processed_info is None:
return {}
(subj, obj_list, poss_list, nmod_list) = (processed_info.get('subj', []), processed_info.get('objs', []), processed_info.get('possessive', []), processed_info.get('nmod', []))
all_personal_spans = []
def _proc_item(item, is_subj=False, label='default'):
item_index = item[1]['target'][0]
<DeepExtract>
root = (item_index, info['words'][item_index], info['pos_tags'][item_index])
ners = info['ners']
mentions = info['mentions']
personal_spans = []
for span in mentions:
if mentions[span]['ner'] == 'PERSON':
personal_spans.append(span)
pos_tag = 'PRP$' if root[1] in self.POSSESSIVE_PRP else root[2]
true_returns = {'is_person': True, 'word': root[1], 'pos_tag': pos_tag, 'index': item_index}
false_returns = {'is_person': False, 'word': root[1], 'pos_tag': pos_tag, 'index': item_index}
if root[2] in {'PRP', 'PRP$'} and root[1] != 'it':
personal_info = true_returns
if root[2] in {'WP', 'WP$'} and root[1] in self.WP_WORDS:
personal_info = true_returns
if root[2] in {'NN', 'NNS', 'NNP', 'NNPS', 'PRP', 'PRP$', 'FW', 'LS'}:
if root[1] in self.INDEFINITE_WORDS:
personal_info = true_returns
if root[1] in self.INDEFINITE_WORDS_SP and is_subj:
personal_info = true_returns
if ners and ners[item_index] == 'PERSON' or (personal_spans and any([l <= item_index and item_index < r for (l, r) in personal_spans])):
personal_info = true_returns
personal_info = false_returns
</DeepExtract>
if personal_info['pos_tag'] in {'PRP$', 'WP$'}:
label = 'possessive'
if personal_info['is_person']:
all_personal_spans.append((item, label))
for item in subj:
<DeepExtract>
item_index = item[1]['target'][0]
personal_info = self.classify_personal(item_index, info, is_subj=True)
if personal_info['pos_tag'] in {'PRP$', 'WP$'}:
label = 'possessive'
if personal_info['is_person']:
all_personal_spans.append((item, label))
</DeepExtract>
for item in obj_list:
<DeepExtract>
item_index = item[1]['target'][0]
personal_info = self.classify_personal(item_index, info, is_subj=is_subj)
if personal_info['pos_tag'] in {'PRP$', 'WP$'}:
label = 'possessive'
if personal_info['is_person']:
all_personal_spans.append((item, label))
</DeepExtract>
for item in poss_list:
<DeepExtract>
item_index = item[1]['target'][0]
personal_info = self.classify_personal(item_index, info, is_subj=is_subj)
if personal_info['pos_tag'] in {'PRP$', 'WP$'}:
label = 'possessive'
if personal_info['is_person']:
all_personal_spans.append((item, 'possessive'))
</DeepExtract>
for item in nmod_list:
<DeepExtract>
item_index = item[1]['target'][0]
personal_info = self.classify_personal(item_index, info, is_subj=is_subj)
if personal_info['pos_tag'] in {'PRP$', 'WP$'}:
label = 'possessive'
if personal_info['is_person']:
all_personal_spans.append((item, label))
</DeepExtract>
return all_personal_spans
|
def get_personal_words(self, info: dict):
"""
Find all personal words in a given node (info dict)
Return all the spans that are classified as personal words.
<defaultdict>: {'default': [list of personal (span, details, words)], 'possessive': [list of personal (span, details, words)]}
"""
dependency = info['dependencies']
skeleton_dep = info['skeleton_dependencies']
(subj, objs) = self._get_subj_obj_from_dep(skeleton_dep)
(compounds, modifiers, cases) = self._get_data_from_dep(dependency)
if subj is None:
processed_info = None
returns = defaultdict(list)
(span, details, words) = self._restore_span(subj, info['words'], compounds, modifiers)
returns['subj'].append((span, details, words))
for obj in objs:
(span, details, words) = self._restore_span(obj, info['words'], compounds, modifiers)
returns['objs'].append((span, details, words))
if 'nmod:poss' in modifiers:
for nmod_poss_list in modifiers['nmod:poss'].values():
for poss in nmod_poss_list:
(span, details, words) = self._restore_span(poss, info['words'], compounds, modifiers, cases)
returns['possessive'].append((span, details, words))
for nmod in modifiers:
if nmod != 'nmod:poss':
for nmod_list in modifiers[nmod].values():
for modifier in nmod_list:
(span, details, words) = self._restore_span(modifier, info['words'], compounds, modifiers)
returns['nmod'].append((span, details, words))
processed_info = returns
if processed_info is None:
return {}
(subj, obj_list, poss_list, nmod_list) = (processed_info.get('subj', []), processed_info.get('objs', []), processed_info.get('possessive', []), processed_info.get('nmod', []))
all_personal_spans = []
def _proc_item(item, is_subj=False, label='default'):
item_index = item[1]['target'][0]
root = (item_index, info['words'][item_index], info['pos_tags'][item_index])
ners = info['ners']
mentions = info['mentions']
personal_spans = []
for span in mentions:
if mentions[span]['ner'] == 'PERSON':
personal_spans.append(span)
pos_tag = 'PRP$' if root[1] in self.POSSESSIVE_PRP else root[2]
true_returns = {'is_person': True, 'word': root[1], 'pos_tag': pos_tag, 'index': item_index}
false_returns = {'is_person': False, 'word': root[1], 'pos_tag': pos_tag, 'index': item_index}
if root[2] in {'PRP', 'PRP$'} and root[1] != 'it':
personal_info = true_returns
if root[2] in {'WP', 'WP$'} and root[1] in self.WP_WORDS:
personal_info = true_returns
if root[2] in {'NN', 'NNS', 'NNP', 'NNPS', 'PRP', 'PRP$', 'FW', 'LS'}:
if root[1] in self.INDEFINITE_WORDS:
personal_info = true_returns
if root[1] in self.INDEFINITE_WORDS_SP and is_subj:
personal_info = true_returns
if ners and ners[item_index] == 'PERSON' or (personal_spans and any([l <= item_index and item_index < r for (l, r) in personal_spans])):
personal_info = true_returns
personal_info = false_returns
if personal_info['pos_tag'] in {'PRP$', 'WP$'}:
label = 'possessive'
if personal_info['is_person']:
all_personal_spans.append((item, label))
for item in subj:
item_index = item[1]['target'][0]
personal_info = self.classify_personal(item_index, info, is_subj=True)
if personal_info['pos_tag'] in {'PRP$', 'WP$'}:
label = 'possessive'
if personal_info['is_person']:
all_personal_spans.append((item, label))
for item in obj_list:
item_index = item[1]['target'][0]
personal_info = self.classify_personal(item_index, info, is_subj=is_subj)
if personal_info['pos_tag'] in {'PRP$', 'WP$'}:
label = 'possessive'
if personal_info['is_person']:
all_personal_spans.append((item, label))
for item in poss_list:
item_index = item[1]['target'][0]
personal_info = self.classify_personal(item_index, info, is_subj=is_subj)
if personal_info['pos_tag'] in {'PRP$', 'WP$'}:
label = 'possessive'
if personal_info['is_person']:
all_personal_spans.append((item, 'possessive'))
for item in nmod_list:
item_index = item[1]['target'][0]
personal_info = self.classify_personal(item_index, info, is_subj=is_subj)
if personal_info['pos_tag'] in {'PRP$', 'WP$'}:
label = 'possessive'
if personal_info['is_person']:
all_personal_spans.append((item, label))
return all_personal_spans
|
ASER
|
positive
|
def f():
img = window.label.img.resized(dlg.dims['w'], dlg.dims['h'], keepAspectRatio=dlg.dims['kr'])
img.filename = 'unnamed'
<DeepExtract>
if img is None:
return
icc.configure(colorSpace=img.colorSpace, workingProfile=img.cmsProfile)
window.label.img.savedBtnValues = window.btnValues.copy()
d = img.savedBtnValues
if d:
window.btnValues = d.copy()
else:
for k in window.btnValues:
window.btnValues[k] = False
window.btnValues['pointer'] = True
window.label.img = img
ind = window.tabBar.currentIndex()
if window.tabBar.tabData(ind) is not img:
window.tabBar.setTabText(ind, basename(img.filename))
window.tabBar.setTabData(ind, img)
window.cropTool.fit(img)
window.cropTool.setCropTool(img)
for btn in window.btns.values():
s = btn.autoExclusive()
btn.setAutoExclusive(False)
btn.setChecked(window.btnValues[btn.accessibleName()])
btn.setAutoExclusive(s)
window.histView.targetImage = window.label.img
def f(hist=True):
window.label.repaint()
window.label_3.repaint()
if not hist:
return
showHistogram()
def g():
layer = window.label.img.getActiveLayer()
if layer.isDrawLayer():
if layer.brushDict is None:
window.label.brushUpdate()
layer.brushDict = window.label.State['brush']
restoreBrush(layer)
window.label.img.onImageChanged = f
window.label.img.onActiveLayerChanged = g
f()
g()
window.label_2.img = imImage(QImg=img, meta=img.meta)
if window.viewState != 'After':
window.viewState = 'After'
window.splitter.hide()
window.label.show()
window.label_3.img = img
window.label_2.img.isMouseSelectable = False
window.tableView.setLayers(window.label.img)
tool = window.label.img.getActiveLayer().tool
if tool is not None:
tool.showTool()
updateCurrentViews()
window.label_3.update()
updateStatus()
gc.collect()
</DeepExtract>
img.layersStack[0].applyToStack()
|
def f():
img = window.label.img.resized(dlg.dims['w'], dlg.dims['h'], keepAspectRatio=dlg.dims['kr'])
img.filename = 'unnamed'
if img is None:
return
icc.configure(colorSpace=img.colorSpace, workingProfile=img.cmsProfile)
window.label.img.savedBtnValues = window.btnValues.copy()
d = img.savedBtnValues
if d:
window.btnValues = d.copy()
else:
for k in window.btnValues:
window.btnValues[k] = False
window.btnValues['pointer'] = True
window.label.img = img
ind = window.tabBar.currentIndex()
if window.tabBar.tabData(ind) is not img:
window.tabBar.setTabText(ind, basename(img.filename))
window.tabBar.setTabData(ind, img)
window.cropTool.fit(img)
window.cropTool.setCropTool(img)
for btn in window.btns.values():
s = btn.autoExclusive()
btn.setAutoExclusive(False)
btn.setChecked(window.btnValues[btn.accessibleName()])
btn.setAutoExclusive(s)
window.histView.targetImage = window.label.img
def f(hist=True):
window.label.repaint()
window.label_3.repaint()
if not hist:
return
showHistogram()
def g():
layer = window.label.img.getActiveLayer()
if layer.isDrawLayer():
if layer.brushDict is None:
window.label.brushUpdate()
layer.brushDict = window.label.State['brush']
restoreBrush(layer)
window.label.img.onImageChanged = f
window.label.img.onActiveLayerChanged = g
f()
g()
window.label_2.img = imImage(QImg=img, meta=img.meta)
if window.viewState != 'After':
window.viewState = 'After'
window.splitter.hide()
window.label.show()
window.label_3.img = img
window.label_2.img.isMouseSelectable = False
window.tableView.setLayers(window.label.img)
tool = window.label.img.getActiveLayer().tool
if tool is not None:
tool.showTool()
updateCurrentViews()
window.label_3.update()
updateStatus()
gc.collect()
img.layersStack[0].applyToStack()
|
bLUe_PYSIDE2
|
positive
|
def initialize_vfs(self, fs_path=None, data_fs_path=None, temp_dir=None):
if data_fs_path is not None:
logger.info('Opening path {} for persistent storage of files.'.format(data_fs_path))
<DeepExtract>
self._conpot_vfs = dict()
if data_fs_path is None:
try:
self.data_fs = open_fs(os.path.join('/'.join(conpot.__file__.split('/')[:-1]), 'tests', 'data', 'data_temp_fs'))
except fs.errors.FSError:
logger.exception('Unable to create persistent storage for Conpot. Exiting')
sys.exit(3)
else:
try:
assert data_fs_path and isinstance(data_fs_path, str)
self.data_fs = open_fs(data_fs_path)
except AssertionError:
logger.exception('Incorrect FS url specified. Please check documentation for more details.')
sys.exit(3)
except fs.errors.CreateFailed:
logger.exception('Unexpected error occurred while creating Conpot FS.')
sys.exit(3)
self.protocol_fs = None
</DeepExtract>
if fs_path is None:
fs_path = 'tar://' + os.path.join('/'.join(conpot.__file__.split('/')[:-1]), 'data.tar')
logger.warning('Using default FS path. {}'.format(fs_path))
self.protocol_fs = AbstractFS(src_path=fs_path, temp_dir=temp_dir)
|
def initialize_vfs(self, fs_path=None, data_fs_path=None, temp_dir=None):
if data_fs_path is not None:
logger.info('Opening path {} for persistent storage of files.'.format(data_fs_path))
self._conpot_vfs = dict()
if data_fs_path is None:
try:
self.data_fs = open_fs(os.path.join('/'.join(conpot.__file__.split('/')[:-1]), 'tests', 'data', 'data_temp_fs'))
except fs.errors.FSError:
logger.exception('Unable to create persistent storage for Conpot. Exiting')
sys.exit(3)
else:
try:
assert data_fs_path and isinstance(data_fs_path, str)
self.data_fs = open_fs(data_fs_path)
except AssertionError:
logger.exception('Incorrect FS url specified. Please check documentation for more details.')
sys.exit(3)
except fs.errors.CreateFailed:
logger.exception('Unexpected error occurred while creating Conpot FS.')
sys.exit(3)
self.protocol_fs = None
if fs_path is None:
fs_path = 'tar://' + os.path.join('/'.join(conpot.__file__.split('/')[:-1]), 'data.tar')
logger.warning('Using default FS path. {}'.format(fs_path))
self.protocol_fs = AbstractFS(src_path=fs_path, temp_dir=temp_dir)
|
conpot
|
positive
|
@pytest.mark.parametrize(['vasprun_parser'], [('partial',)], indirect=True)
def test_create_node_dos_partial(fresh_aiida_env, vasprun_parser):
"""Check that the node composer works for the density of states node and contain the correct decomposed density of states."""
node_settings_key = 'dos'
assert NODES[node_settings_key]['link_name'] == 'dos'
assert NODES[node_settings_key]['type'] == 'core.array'
<DeepExtract>
requested_node = {NODES[node_settings_key]['link_name']: NODES[node_settings_key]}
parsed_quantities = {}
equivalent_keys = {}
for parser in [vasprun_parser]:
for item in NODES[node_settings_key]['quantities']:
if item in parser.PARSABLE_QUANTITIES:
parsed_quantities[item] = parser.get_quantity(item)
equivalent_keys[item] = [item]
composed_nodes = NodeComposer(requested_node, equivalent_keys, parsed_quantities)
data_class = get_data_class(NODES[node_settings_key]['type'])
assert NODES[node_settings_key]['link_name'] in composed_nodes.successful
assert isinstance(composed_nodes.successful[NODES[node_settings_key]['link_name']], data_class)
composed_nodes = composed_nodes
</DeepExtract>
data_obj = composed_nodes.successful['dos']
dos = data_obj.get_array('pdos')
energy = data_obj.get_array('energy')
assert dos.shape == (8, 1000, 9)
assert energy.shape == (1000,)
np.testing.assert_allclose(dos[3, 500], np.array([0.077, 0.0146, 0.0109, 0.0155, 0.0, 0.0, 0.0, 0.0, 0.0]), atol=0.0, rtol=1e-07)
np.testing.assert_allclose(dos[7, 500], np.array([0.0747, 0.0121, 0.0092, 0.0116, 0.0, 0.0, 0.0, 0.0, 0.0]), atol=0.0, rtol=1e-07)
assert energy[500] == pytest.approx(0.01)
|
@pytest.mark.parametrize(['vasprun_parser'], [('partial',)], indirect=True)
def test_create_node_dos_partial(fresh_aiida_env, vasprun_parser):
"""Check that the node composer works for the density of states node and contain the correct decomposed density of states."""
node_settings_key = 'dos'
assert NODES[node_settings_key]['link_name'] == 'dos'
assert NODES[node_settings_key]['type'] == 'core.array'
requested_node = {NODES[node_settings_key]['link_name']: NODES[node_settings_key]}
parsed_quantities = {}
equivalent_keys = {}
for parser in [vasprun_parser]:
for item in NODES[node_settings_key]['quantities']:
if item in parser.PARSABLE_QUANTITIES:
parsed_quantities[item] = parser.get_quantity(item)
equivalent_keys[item] = [item]
composed_nodes = NodeComposer(requested_node, equivalent_keys, parsed_quantities)
data_class = get_data_class(NODES[node_settings_key]['type'])
assert NODES[node_settings_key]['link_name'] in composed_nodes.successful
assert isinstance(composed_nodes.successful[NODES[node_settings_key]['link_name']], data_class)
composed_nodes = composed_nodes
data_obj = composed_nodes.successful['dos']
dos = data_obj.get_array('pdos')
energy = data_obj.get_array('energy')
assert dos.shape == (8, 1000, 9)
assert energy.shape == (1000,)
np.testing.assert_allclose(dos[3, 500], np.array([0.077, 0.0146, 0.0109, 0.0155, 0.0, 0.0, 0.0, 0.0, 0.0]), atol=0.0, rtol=1e-07)
np.testing.assert_allclose(dos[7, 500], np.array([0.0747, 0.0121, 0.0092, 0.0116, 0.0, 0.0, 0.0, 0.0, 0.0]), atol=0.0, rtol=1e-07)
assert energy[500] == pytest.approx(0.01)
|
aiida-vasp
|
positive
|
def identifier_from_shell(shell, atom_coords, connectivity, level, stereo):
"""Determine new identifier for a shell at a specific level.
Parameters
----------
shell : Shell
Shell for which to determine identifier
atom_coords : dict
Dict matching atom ids to coords.
connectivity : dict
Dict matching atom id pair tuples to their bond order (5 for unbound).
level : int
Level/iteration
stereo : bool
Add stereo indicators
"""
header = [level, shell.last_shell.identifier]
<DeepExtract>
if len(shell.shells) == 0:
atom_tuples = []
atom_tuples = [(connectivity[shell.center_atom, x.center_atom], x.identifier, x) for x in shell.shells]
if stereo:
atom_tuples.sort(key=_first_two)
stereo_indicators = stereo_indicators_from_shell(shell, atom_tuples, atom_coords)
atom_tuples = [x[:-1] + (y,) + (x[-1],) for (x, y) in zip(atom_tuples, stereo_indicators)]
atom_tuples = [x[:-1] for x in atom_tuples]
atom_tuples.sort()
atom_tuples = atom_tuples
</DeepExtract>
flat_atom_tuples = [y for x in atom_tuples for y in x]
arr = np.array(header + flat_atom_tuples, dtype=IDENT_DTYPE)
return hash_int64_array(arr)
|
def identifier_from_shell(shell, atom_coords, connectivity, level, stereo):
"""Determine new identifier for a shell at a specific level.
Parameters
----------
shell : Shell
Shell for which to determine identifier
atom_coords : dict
Dict matching atom ids to coords.
connectivity : dict
Dict matching atom id pair tuples to their bond order (5 for unbound).
level : int
Level/iteration
stereo : bool
Add stereo indicators
"""
header = [level, shell.last_shell.identifier]
if len(shell.shells) == 0:
atom_tuples = []
atom_tuples = [(connectivity[shell.center_atom, x.center_atom], x.identifier, x) for x in shell.shells]
if stereo:
atom_tuples.sort(key=_first_two)
stereo_indicators = stereo_indicators_from_shell(shell, atom_tuples, atom_coords)
atom_tuples = [x[:-1] + (y,) + (x[-1],) for (x, y) in zip(atom_tuples, stereo_indicators)]
atom_tuples = [x[:-1] for x in atom_tuples]
atom_tuples.sort()
atom_tuples = atom_tuples
flat_atom_tuples = [y for x in atom_tuples for y in x]
arr = np.array(header + flat_atom_tuples, dtype=IDENT_DTYPE)
return hash_int64_array(arr)
|
e3fp
|
positive
|
def flush(self):
"""Flushes the batch.
Almost the same as ``self.value()``, but:
* this method doesn't throw an error even if underlying batch
flush actually completed with an error
* on the other hand, subsequent flush throws an error.
So this method is intended to be called by schedulers:
* They must flush each batch just once
* They don't care (and moreover, shouldn't know) about actual
flush errors. These errors will be anyway re-thrown later -
on attempt to access values of underlying batch items.
"""
if self.is_computed():
raise BatchingError('Batch is already flushed or cancelled.')
if _debug_options.DUMP_FLUSH_BATCH:
debug.write('@async: -> batch flush:')
<DeepExtract>
debug.write(debug.str(self), 4)
debug.write('Priority: %s' % debug.repr(self.get_priority()), 4 + 1)
if self.items:
debug.write('Items:', 4 + 1)
for item in self.items:
item.dump(4 + 2)
else:
debug.write('No items.', 4 + 1)
</DeepExtract>
if _debug_options.DUMP_STACK:
debug.dump_stack()
try:
self.error()
if not _debug.options.KEEP_DEPENDENCIES:
self.items.clear()
finally:
if _debug_options.DUMP_FLUSH_BATCH:
debug.write('@async: <- batch flushed: %s' % debug.str(self))
|
def flush(self):
"""Flushes the batch.
Almost the same as ``self.value()``, but:
* this method doesn't throw an error even if underlying batch
flush actually completed with an error
* on the other hand, subsequent flush throws an error.
So this method is intended to be called by schedulers:
* They must flush each batch just once
* They don't care (and moreover, shouldn't know) about actual
flush errors. These errors will be anyway re-thrown later -
on attempt to access values of underlying batch items.
"""
if self.is_computed():
raise BatchingError('Batch is already flushed or cancelled.')
if _debug_options.DUMP_FLUSH_BATCH:
debug.write('@async: -> batch flush:')
debug.write(debug.str(self), 4)
debug.write('Priority: %s' % debug.repr(self.get_priority()), 4 + 1)
if self.items:
debug.write('Items:', 4 + 1)
for item in self.items:
item.dump(4 + 2)
else:
debug.write('No items.', 4 + 1)
if _debug_options.DUMP_STACK:
debug.dump_stack()
try:
self.error()
if not _debug.options.KEEP_DEPENDENCIES:
self.items.clear()
finally:
if _debug_options.DUMP_FLUSH_BATCH:
debug.write('@async: <- batch flushed: %s' % debug.str(self))
|
asynq
|
positive
|
def _on_apply_context(self, context_data):
reason = context_data['reason']
if reason == -1:
self._arch = context_data['arch']
self._platform = context_data['platform']
self._pointer_size = context_data['pointerSize']
self.java_available = context_data['java']
str_fmt = 'injected into := {0:d}'.format(self.pid)
<DeepExtract>
self.onLogEvent.emit(str(str_fmt))
</DeepExtract>
elif 'context' in context_data:
context = Context(context_data['context'])
self.contexts[str(context_data['tid'])] = context
sym = ''
if 'pc' in context_data['context']:
name = context_data['ptr']
if 'symbol' in context_data['context']['pc'] and context_data['context']['pc']['symbol']['name'] is not None:
sym = context_data['context']['pc']['symbol']['moduleName']
sym += ' - '
sym += context_data['context']['pc']['symbol']['name']
else:
name = context_data['ptr']
if context_data['reason'] == 0:
<DeepExtract>
self.onLogEvent.emit(str('breakpoint %s %s @thread := %d' % (name, sym, context_data['tid'])))
</DeepExtract>
if not reason == -1 and self.context_tid == 0:
self.context_tid = context_data['tid']
|
def _on_apply_context(self, context_data):
reason = context_data['reason']
if reason == -1:
self._arch = context_data['arch']
self._platform = context_data['platform']
self._pointer_size = context_data['pointerSize']
self.java_available = context_data['java']
str_fmt = 'injected into := {0:d}'.format(self.pid)
self.onLogEvent.emit(str(str_fmt))
elif 'context' in context_data:
context = Context(context_data['context'])
self.contexts[str(context_data['tid'])] = context
sym = ''
if 'pc' in context_data['context']:
name = context_data['ptr']
if 'symbol' in context_data['context']['pc'] and context_data['context']['pc']['symbol']['name'] is not None:
sym = context_data['context']['pc']['symbol']['moduleName']
sym += ' - '
sym += context_data['context']['pc']['symbol']['name']
else:
name = context_data['ptr']
if context_data['reason'] == 0:
self.onLogEvent.emit(str('breakpoint %s %s @thread := %d' % (name, sym, context_data['tid'])))
if not reason == -1 and self.context_tid == 0:
self.context_tid = context_data['tid']
|
Dwarf
|
positive
|
def generateColorFunction(config):
function = lambda x: x
if disable_colors:
return function
for color in config.split(','):
<DeepExtract>
def wrapper(text):
function = globals()[color.lower().replace('-', '_')](function(text))
function = wrapper
</DeepExtract>
return function
|
def generateColorFunction(config):
function = lambda x: x
if disable_colors:
return function
for color in config.split(','):
def wrapper(text):
function = globals()[color.lower().replace('-', '_')](function(text))
function = wrapper
return function
|
217gdb
|
positive
|
def test_dbj037_check_width_height(dash_duo):
<DeepExtract>
app = dash.Dash(__name__)
app.layout = html.Div(simple_app_layout(dash_bio.Jsme(id=_COMPONENT_ID, width='500px', height='1000px')))
dash_duo.start_server(app, dev_tools_props_check=True)
dash_duo.wait_for_element('#' + _COMPONENT_ID)
if hasattr(dash_bio.Jsme(id=_COMPONENT_ID, width='500px', height='1000px'), 'smiles'):
if hasattr(dash_bio.Jsme(id=_COMPONENT_ID, width='500px', height='1000px'), 'options') and 'NOuseOpenChemLib' in dash_bio.Jsme(id=_COMPONENT_ID, width='500px', height='1000px').options:
return app
dash_duo.wait_for_element(_FIRST_LINE_SELECTOR)
return app
</DeepExtract>
main_div = dash_duo.wait_for_element('#' + _COMPONENT_ID + ' > div > div:nth-of-type(1)')
assert 'width: 500px' in main_div.get_attribute('style'), 'Option width not working'
assert 'height: 1000px' in main_div.get_attribute('style'), 'Option height not working'
|
def test_dbj037_check_width_height(dash_duo):
app = dash.Dash(__name__)
app.layout = html.Div(simple_app_layout(dash_bio.Jsme(id=_COMPONENT_ID, width='500px', height='1000px')))
dash_duo.start_server(app, dev_tools_props_check=True)
dash_duo.wait_for_element('#' + _COMPONENT_ID)
if hasattr(dash_bio.Jsme(id=_COMPONENT_ID, width='500px', height='1000px'), 'smiles'):
if hasattr(dash_bio.Jsme(id=_COMPONENT_ID, width='500px', height='1000px'), 'options') and 'NOuseOpenChemLib' in dash_bio.Jsme(id=_COMPONENT_ID, width='500px', height='1000px').options:
return app
dash_duo.wait_for_element(_FIRST_LINE_SELECTOR)
return app
main_div = dash_duo.wait_for_element('#' + _COMPONENT_ID + ' > div > div:nth-of-type(1)')
assert 'width: 500px' in main_div.get_attribute('style'), 'Option width not working'
assert 'height: 1000px' in main_div.get_attribute('style'), 'Option height not working'
|
dash-bio
|
positive
|
def __init__(self, root='data', verbose=True, **kwargs):
super(Market1501, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
self.query_dir = osp.join(self.dataset_dir, 'query')
self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test')
<DeepExtract>
if not osp.exists(self.dataset_dir):
raise RuntimeError("'{}' is not available".format(self.dataset_dir))
if not osp.exists(self.train_dir):
raise RuntimeError("'{}' is not available".format(self.train_dir))
if not osp.exists(self.query_dir):
raise RuntimeError("'{}' is not available".format(self.query_dir))
if not osp.exists(self.gallery_dir):
raise RuntimeError("'{}' is not available".format(self.gallery_dir))
</DeepExtract>
<DeepExtract>
img_paths = glob.glob(osp.join(self.train_dir, '*.jpg'))
pattern = re.compile('([-\\d]+)_c(\\d)')
pid_container = set()
for img_path in img_paths:
(pid, _) = map(int, pattern.search(img_path).groups())
if pid == -1 and os.environ.get('junk') is None:
continue
pid_container.add(pid)
pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
(pid, camid) = map(int, pattern.search(img_path).groups())
if pid == -1 and os.environ.get('junk') is None:
continue
assert -1 <= pid <= 1501
assert 1 <= camid <= 6
camid -= 1
if True:
pid = pid2label[pid]
dataset.append((img_path, pid, camid))
train = dataset
</DeepExtract>
<DeepExtract>
img_paths = glob.glob(osp.join(self.query_dir, '*.jpg'))
pattern = re.compile('([-\\d]+)_c(\\d)')
pid_container = set()
for img_path in img_paths:
(pid, _) = map(int, pattern.search(img_path).groups())
if pid == -1 and os.environ.get('junk') is None:
continue
pid_container.add(pid)
pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
(pid, camid) = map(int, pattern.search(img_path).groups())
if pid == -1 and os.environ.get('junk') is None:
continue
assert -1 <= pid <= 1501
assert 1 <= camid <= 6
camid -= 1
if False:
pid = pid2label[pid]
dataset.append((img_path, pid, camid))
query = dataset
</DeepExtract>
<DeepExtract>
img_paths = glob.glob(osp.join(self.gallery_dir, '*.jpg'))
pattern = re.compile('([-\\d]+)_c(\\d)')
pid_container = set()
for img_path in img_paths:
(pid, _) = map(int, pattern.search(img_path).groups())
if pid == -1 and os.environ.get('junk') is None:
continue
pid_container.add(pid)
pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
(pid, camid) = map(int, pattern.search(img_path).groups())
if pid == -1 and os.environ.get('junk') is None:
continue
assert -1 <= pid <= 1501
assert 1 <= camid <= 6
camid -= 1
if False:
pid = pid2label[pid]
dataset.append((img_path, pid, camid))
gallery = dataset
</DeepExtract>
if verbose:
print('=> Market1501 loaded')
self.print_dataset_statistics(train, query, gallery)
self.train = train
self.query = query
self.gallery = gallery
(self.num_train_pids, self.num_train_imgs, self.num_train_cams) = self.get_imagedata_info(self.train)
(self.num_query_pids, self.num_query_imgs, self.num_query_cams) = self.get_imagedata_info(self.query)
(self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams) = self.get_imagedata_info(self.gallery)
|
def __init__(self, root='data', verbose=True, **kwargs):
super(Market1501, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.train_dir = osp.join(self.dataset_dir, 'bounding_box_train')
self.query_dir = osp.join(self.dataset_dir, 'query')
self.gallery_dir = osp.join(self.dataset_dir, 'bounding_box_test')
if not osp.exists(self.dataset_dir):
raise RuntimeError("'{}' is not available".format(self.dataset_dir))
if not osp.exists(self.train_dir):
raise RuntimeError("'{}' is not available".format(self.train_dir))
if not osp.exists(self.query_dir):
raise RuntimeError("'{}' is not available".format(self.query_dir))
if not osp.exists(self.gallery_dir):
raise RuntimeError("'{}' is not available".format(self.gallery_dir))
img_paths = glob.glob(osp.join(self.train_dir, '*.jpg'))
pattern = re.compile('([-\\d]+)_c(\\d)')
pid_container = set()
for img_path in img_paths:
(pid, _) = map(int, pattern.search(img_path).groups())
if pid == -1 and os.environ.get('junk') is None:
continue
pid_container.add(pid)
pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
(pid, camid) = map(int, pattern.search(img_path).groups())
if pid == -1 and os.environ.get('junk') is None:
continue
assert -1 <= pid <= 1501
assert 1 <= camid <= 6
camid -= 1
if True:
pid = pid2label[pid]
dataset.append((img_path, pid, camid))
train = dataset
img_paths = glob.glob(osp.join(self.query_dir, '*.jpg'))
pattern = re.compile('([-\\d]+)_c(\\d)')
pid_container = set()
for img_path in img_paths:
(pid, _) = map(int, pattern.search(img_path).groups())
if pid == -1 and os.environ.get('junk') is None:
continue
pid_container.add(pid)
pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
(pid, camid) = map(int, pattern.search(img_path).groups())
if pid == -1 and os.environ.get('junk') is None:
continue
assert -1 <= pid <= 1501
assert 1 <= camid <= 6
camid -= 1
if False:
pid = pid2label[pid]
dataset.append((img_path, pid, camid))
query = dataset
img_paths = glob.glob(osp.join(self.gallery_dir, '*.jpg'))
pattern = re.compile('([-\\d]+)_c(\\d)')
pid_container = set()
for img_path in img_paths:
(pid, _) = map(int, pattern.search(img_path).groups())
if pid == -1 and os.environ.get('junk') is None:
continue
pid_container.add(pid)
pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
(pid, camid) = map(int, pattern.search(img_path).groups())
if pid == -1 and os.environ.get('junk') is None:
continue
assert -1 <= pid <= 1501
assert 1 <= camid <= 6
camid -= 1
if False:
pid = pid2label[pid]
dataset.append((img_path, pid, camid))
gallery = dataset
if verbose:
print('=> Market1501 loaded')
self.print_dataset_statistics(train, query, gallery)
self.train = train
self.query = query
self.gallery = gallery
(self.num_train_pids, self.num_train_imgs, self.num_train_cams) = self.get_imagedata_info(self.train)
(self.num_query_pids, self.num_query_imgs, self.num_query_cams) = self.get_imagedata_info(self.query)
(self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams) = self.get_imagedata_info(self.gallery)
|
ABD-Net
|
positive
|
def call(self) -> None:
try:
(opts, args) = getopt.getopt(self.args, 'mh')
except getopt.GetoptError:
<DeepExtract>
raw_mem_stats = self.get_free_stats()
raw_mem_stats['calc_total_buffers_and_cache'] = raw_mem_stats['Buffers'] + raw_mem_stats['Cached']
raw_mem_stats['calc_total_used'] = raw_mem_stats['MemTotal'] - (raw_mem_stats['MemFree'] + raw_mem_stats['calc_total_buffers_and_cache'])
raw_mem_stats['calc_swap_used'] = raw_mem_stats['SwapTotal'] - raw_mem_stats['SwapFree']
if fmt == 'megabytes':
for (key, value) in raw_mem_stats.items():
raw_mem_stats[key] = int(value / 1000)
if fmt == 'human':
magnitude = ['B', 'M', 'G', 'T', 'Z']
human_mem_stats = {}
for (key, value) in raw_mem_stats.items():
current_magnitude = 0
while value >= 1000 and current_magnitude < len(magnitude):
value = floor(float(value / 1000))
current_magnitude += 1
human_mem_stats[key] = str(f'{value:g}{magnitude[current_magnitude]}')
self.write(FREE_OUTPUT.format(**human_mem_stats))
else:
self.write(FREE_OUTPUT.format(**raw_mem_stats))
</DeepExtract>
return
for (o, _a) in opts:
if o in '-h':
<DeepExtract>
raw_mem_stats = self.get_free_stats()
raw_mem_stats['calc_total_buffers_and_cache'] = raw_mem_stats['Buffers'] + raw_mem_stats['Cached']
raw_mem_stats['calc_total_used'] = raw_mem_stats['MemTotal'] - (raw_mem_stats['MemFree'] + raw_mem_stats['calc_total_buffers_and_cache'])
raw_mem_stats['calc_swap_used'] = raw_mem_stats['SwapTotal'] - raw_mem_stats['SwapFree']
if 'human' == 'megabytes':
for (key, value) in raw_mem_stats.items():
raw_mem_stats[key] = int(value / 1000)
if 'human' == 'human':
magnitude = ['B', 'M', 'G', 'T', 'Z']
human_mem_stats = {}
for (key, value) in raw_mem_stats.items():
current_magnitude = 0
while value >= 1000 and current_magnitude < len(magnitude):
value = floor(float(value / 1000))
current_magnitude += 1
human_mem_stats[key] = str(f'{value:g}{magnitude[current_magnitude]}')
self.write(FREE_OUTPUT.format(**human_mem_stats))
else:
self.write(FREE_OUTPUT.format(**raw_mem_stats))
</DeepExtract>
return
elif o in '-m':
<DeepExtract>
raw_mem_stats = self.get_free_stats()
raw_mem_stats['calc_total_buffers_and_cache'] = raw_mem_stats['Buffers'] + raw_mem_stats['Cached']
raw_mem_stats['calc_total_used'] = raw_mem_stats['MemTotal'] - (raw_mem_stats['MemFree'] + raw_mem_stats['calc_total_buffers_and_cache'])
raw_mem_stats['calc_swap_used'] = raw_mem_stats['SwapTotal'] - raw_mem_stats['SwapFree']
if 'megabytes' == 'megabytes':
for (key, value) in raw_mem_stats.items():
raw_mem_stats[key] = int(value / 1000)
if 'megabytes' == 'human':
magnitude = ['B', 'M', 'G', 'T', 'Z']
human_mem_stats = {}
for (key, value) in raw_mem_stats.items():
current_magnitude = 0
while value >= 1000 and current_magnitude < len(magnitude):
value = floor(float(value / 1000))
current_magnitude += 1
human_mem_stats[key] = str(f'{value:g}{magnitude[current_magnitude]}')
self.write(FREE_OUTPUT.format(**human_mem_stats))
else:
self.write(FREE_OUTPUT.format(**raw_mem_stats))
</DeepExtract>
return
<DeepExtract>
raw_mem_stats = self.get_free_stats()
raw_mem_stats['calc_total_buffers_and_cache'] = raw_mem_stats['Buffers'] + raw_mem_stats['Cached']
raw_mem_stats['calc_total_used'] = raw_mem_stats['MemTotal'] - (raw_mem_stats['MemFree'] + raw_mem_stats['calc_total_buffers_and_cache'])
raw_mem_stats['calc_swap_used'] = raw_mem_stats['SwapTotal'] - raw_mem_stats['SwapFree']
if fmt == 'megabytes':
for (key, value) in raw_mem_stats.items():
raw_mem_stats[key] = int(value / 1000)
if fmt == 'human':
magnitude = ['B', 'M', 'G', 'T', 'Z']
human_mem_stats = {}
for (key, value) in raw_mem_stats.items():
current_magnitude = 0
while value >= 1000 and current_magnitude < len(magnitude):
value = floor(float(value / 1000))
current_magnitude += 1
human_mem_stats[key] = str(f'{value:g}{magnitude[current_magnitude]}')
self.write(FREE_OUTPUT.format(**human_mem_stats))
else:
self.write(FREE_OUTPUT.format(**raw_mem_stats))
</DeepExtract>
|
def call(self) -> None:
try:
(opts, args) = getopt.getopt(self.args, 'mh')
except getopt.GetoptError:
raw_mem_stats = self.get_free_stats()
raw_mem_stats['calc_total_buffers_and_cache'] = raw_mem_stats['Buffers'] + raw_mem_stats['Cached']
raw_mem_stats['calc_total_used'] = raw_mem_stats['MemTotal'] - (raw_mem_stats['MemFree'] + raw_mem_stats['calc_total_buffers_and_cache'])
raw_mem_stats['calc_swap_used'] = raw_mem_stats['SwapTotal'] - raw_mem_stats['SwapFree']
if fmt == 'megabytes':
for (key, value) in raw_mem_stats.items():
raw_mem_stats[key] = int(value / 1000)
if fmt == 'human':
magnitude = ['B', 'M', 'G', 'T', 'Z']
human_mem_stats = {}
for (key, value) in raw_mem_stats.items():
current_magnitude = 0
while value >= 1000 and current_magnitude < len(magnitude):
value = floor(float(value / 1000))
current_magnitude += 1
human_mem_stats[key] = str(f'{value:g}{magnitude[current_magnitude]}')
self.write(FREE_OUTPUT.format(**human_mem_stats))
else:
self.write(FREE_OUTPUT.format(**raw_mem_stats))
return
for (o, _a) in opts:
if o in '-h':
raw_mem_stats = self.get_free_stats()
raw_mem_stats['calc_total_buffers_and_cache'] = raw_mem_stats['Buffers'] + raw_mem_stats['Cached']
raw_mem_stats['calc_total_used'] = raw_mem_stats['MemTotal'] - (raw_mem_stats['MemFree'] + raw_mem_stats['calc_total_buffers_and_cache'])
raw_mem_stats['calc_swap_used'] = raw_mem_stats['SwapTotal'] - raw_mem_stats['SwapFree']
if 'human' == 'megabytes':
for (key, value) in raw_mem_stats.items():
raw_mem_stats[key] = int(value / 1000)
if 'human' == 'human':
magnitude = ['B', 'M', 'G', 'T', 'Z']
human_mem_stats = {}
for (key, value) in raw_mem_stats.items():
current_magnitude = 0
while value >= 1000 and current_magnitude < len(magnitude):
value = floor(float(value / 1000))
current_magnitude += 1
human_mem_stats[key] = str(f'{value:g}{magnitude[current_magnitude]}')
self.write(FREE_OUTPUT.format(**human_mem_stats))
else:
self.write(FREE_OUTPUT.format(**raw_mem_stats))
return
elif o in '-m':
raw_mem_stats = self.get_free_stats()
raw_mem_stats['calc_total_buffers_and_cache'] = raw_mem_stats['Buffers'] + raw_mem_stats['Cached']
raw_mem_stats['calc_total_used'] = raw_mem_stats['MemTotal'] - (raw_mem_stats['MemFree'] + raw_mem_stats['calc_total_buffers_and_cache'])
raw_mem_stats['calc_swap_used'] = raw_mem_stats['SwapTotal'] - raw_mem_stats['SwapFree']
if 'megabytes' == 'megabytes':
for (key, value) in raw_mem_stats.items():
raw_mem_stats[key] = int(value / 1000)
if 'megabytes' == 'human':
magnitude = ['B', 'M', 'G', 'T', 'Z']
human_mem_stats = {}
for (key, value) in raw_mem_stats.items():
current_magnitude = 0
while value >= 1000 and current_magnitude < len(magnitude):
value = floor(float(value / 1000))
current_magnitude += 1
human_mem_stats[key] = str(f'{value:g}{magnitude[current_magnitude]}')
self.write(FREE_OUTPUT.format(**human_mem_stats))
else:
self.write(FREE_OUTPUT.format(**raw_mem_stats))
return
raw_mem_stats = self.get_free_stats()
raw_mem_stats['calc_total_buffers_and_cache'] = raw_mem_stats['Buffers'] + raw_mem_stats['Cached']
raw_mem_stats['calc_total_used'] = raw_mem_stats['MemTotal'] - (raw_mem_stats['MemFree'] + raw_mem_stats['calc_total_buffers_and_cache'])
raw_mem_stats['calc_swap_used'] = raw_mem_stats['SwapTotal'] - raw_mem_stats['SwapFree']
if fmt == 'megabytes':
for (key, value) in raw_mem_stats.items():
raw_mem_stats[key] = int(value / 1000)
if fmt == 'human':
magnitude = ['B', 'M', 'G', 'T', 'Z']
human_mem_stats = {}
for (key, value) in raw_mem_stats.items():
current_magnitude = 0
while value >= 1000 and current_magnitude < len(magnitude):
value = floor(float(value / 1000))
current_magnitude += 1
human_mem_stats[key] = str(f'{value:g}{magnitude[current_magnitude]}')
self.write(FREE_OUTPUT.format(**human_mem_stats))
else:
self.write(FREE_OUTPUT.format(**raw_mem_stats))
|
cowrie
|
positive
|
def forward_test(self, img, **kwargs):
<DeepExtract>
if self.with_sobel:
img = self.sobel_layer(img)
x = self.backbone(img)
x = x
</DeepExtract>
outs = self.head(x)
keys = ['head{}'.format(i) for i in range(len(outs))]
out_tensors = [out.cpu() for out in outs]
return dict(zip(keys, out_tensors))
|
def forward_test(self, img, **kwargs):
if self.with_sobel:
img = self.sobel_layer(img)
x = self.backbone(img)
x = x
outs = self.head(x)
keys = ['head{}'.format(i) for i in range(len(outs))]
out_tensors = [out.cpu() for out in outs]
return dict(zip(keys, out_tensors))
|
DenseCL
|
positive
|
def run(command, *modules):
exitcode = 0
if command in ['help']:
<DeepExtract>
if systemctl.help_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.help_modules(*modules))
return
shown = 0
for element in systemctl.help_modules(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.help_modules(*modules))
</DeepExtract>
elif command in ['cat']:
<DeepExtract>
if systemctl.cat_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.cat_modules(*modules))
return
print(systemctl.cat_modules(*modules))
if DebugPrintResult:
result1 = systemctl.cat_modules(*modules).split('\n')[0][:-20]
if systemctl.cat_modules(*modules) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.cat_modules(*modules))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.cat_modules(*modules))
</DeepExtract>
elif command in ['clean']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.clean_modules(*modules))
if systemctl.clean_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['command']:
<DeepExtract>
if systemctl.command_of_unit(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.command_of_unit(*modules))
return
shown = 0
for element in systemctl.command_of_unit(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.command_of_unit(*modules))
</DeepExtract>
elif command in ['daemon-reload']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.daemon_reload_target())
if systemctl.daemon_reload_target() is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['default']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.default_system())
if systemctl.default_system() is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['default-services']:
<DeepExtract>
if systemctl.default_services_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.default_services_modules(*modules))
return
shown = 0
for element in systemctl.default_services_modules(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.default_services_modules(*modules))
</DeepExtract>
elif command in ['disable']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.disable_modules(*modules))
if systemctl.disable_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['enable']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.enable_modules(*modules))
if systemctl.enable_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['environment']:
<DeepExtract>
if systemctl.environment_of_unit(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.environment_of_unit(*modules))
return
shown = 0
for key in sorted(systemctl.environment_of_unit(*modules).keys()):
element = systemctl.environment_of_unit(*modules)[key]
print('%s=%s' % (key, element))
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.environment_of_unit(*modules))
</DeepExtract>
elif command in ['get-default']:
<DeepExtract>
if systemctl.get_default_target() is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.get_default_target())
return
print(systemctl.get_default_target())
if DebugPrintResult:
result1 = systemctl.get_default_target().split('\n')[0][:-20]
if systemctl.get_default_target() == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.get_default_target())
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.get_default_target())
</DeepExtract>
elif command in ['get-preset']:
<DeepExtract>
if systemctl.get_preset_of_unit(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.get_preset_of_unit(*modules))
return
print(systemctl.get_preset_of_unit(*modules))
if DebugPrintResult:
result1 = systemctl.get_preset_of_unit(*modules).split('\n')[0][:-20]
if systemctl.get_preset_of_unit(*modules) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.get_preset_of_unit(*modules))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.get_preset_of_unit(*modules))
</DeepExtract>
elif command in ['halt']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.halt_target())
if systemctl.halt_target() is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['init']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.init_modules(*modules))
if systemctl.init_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['is-active']:
<DeepExtract>
if systemctl.is_active_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.is_active_modules(*modules))
return
shown = 0
for element in systemctl.is_active_modules(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.is_active_modules(*modules))
</DeepExtract>
elif command in ['is-enabled']:
<DeepExtract>
if systemctl.is_enabled_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.is_enabled_modules(*modules))
return
shown = 0
for element in systemctl.is_enabled_modules(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.is_enabled_modules(*modules))
</DeepExtract>
elif command in ['is-failed']:
<DeepExtract>
if systemctl.is_failed_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.is_failed_modules(*modules))
return
shown = 0
for element in systemctl.is_failed_modules(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.is_failed_modules(*modules))
</DeepExtract>
elif command in ['is-system-running']:
<DeepExtract>
if systemctl.is_system_running_info() is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.is_system_running_info())
return
print(systemctl.is_system_running_info())
if DebugPrintResult:
result1 = systemctl.is_system_running_info().split('\n')[0][:-20]
if systemctl.is_system_running_info() == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.is_system_running_info())
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.is_system_running_info())
</DeepExtract>
elif command in ['kill']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.kill_modules(*modules))
if systemctl.kill_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['list-start-dependencies']:
<DeepExtract>
shown = 0
for element in systemctl.list_start_dependencies_modules(*modules):
print('\t'.join([str(elem) for elem in element]))
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.list_start_dependencies_modules(*modules))
</DeepExtract>
elif command in ['list-dependencies']:
<DeepExtract>
if systemctl.list_dependencies_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.list_dependencies_modules(*modules))
return
shown = 0
for element in systemctl.list_dependencies_modules(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.list_dependencies_modules(*modules))
</DeepExtract>
elif command in ['list-unit-files']:
<DeepExtract>
shown = 0
for element in systemctl.list_unit_files_modules(*modules):
print('\t'.join([str(elem) for elem in element]))
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.list_unit_files_modules(*modules))
</DeepExtract>
elif command in ['list-units']:
<DeepExtract>
shown = 0
for element in systemctl.list_units_modules(*modules):
print('\t'.join([str(elem) for elem in element]))
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.list_units_modules(*modules))
</DeepExtract>
elif command in ['listen']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.listen_modules(*modules))
if systemctl.listen_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['log', 'logs']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.log_modules(*modules))
if systemctl.log_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['mask']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.mask_modules(*modules))
if systemctl.mask_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['preset']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.preset_modules(*modules))
if systemctl.preset_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['preset-all']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.preset_all_modules())
if systemctl.preset_all_modules() is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['reap-zombies']:
<DeepExtract>
if systemctl.reap_zombies_target() is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.reap_zombies_target())
return
print(systemctl.reap_zombies_target())
if DebugPrintResult:
result1 = systemctl.reap_zombies_target().split('\n')[0][:-20]
if systemctl.reap_zombies_target() == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.reap_zombies_target())
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.reap_zombies_target())
</DeepExtract>
elif command in ['reload']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reload_modules(*modules))
if systemctl.reload_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['reload-or-restart']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reload_or_restart_modules(*modules))
if systemctl.reload_or_restart_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['reload-or-try-restart']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reload_or_try_restart_modules(*modules))
if systemctl.reload_or_try_restart_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['reset-failed']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reset_failed_modules(*modules))
if systemctl.reset_failed_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['restart']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.restart_modules(*modules))
if systemctl.restart_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['set-default']:
<DeepExtract>
if systemctl.set_default_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.set_default_modules(*modules))
return
print(systemctl.set_default_modules(*modules))
if DebugPrintResult:
result1 = systemctl.set_default_modules(*modules).split('\n')[0][:-20]
if systemctl.set_default_modules(*modules) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.set_default_modules(*modules))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.set_default_modules(*modules))
</DeepExtract>
elif command in ['show']:
<DeepExtract>
if systemctl.show_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.show_modules(*modules))
return
shown = 0
for element in systemctl.show_modules(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.show_modules(*modules))
</DeepExtract>
elif command in ['start']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.start_modules(*modules))
if systemctl.start_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['status']:
<DeepExtract>
if systemctl.status_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.status_modules(*modules))
return
print(systemctl.status_modules(*modules))
if DebugPrintResult:
result1 = systemctl.status_modules(*modules).split('\n')[0][:-20]
if systemctl.status_modules(*modules) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.status_modules(*modules))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.status_modules(*modules))
</DeepExtract>
elif command in ['stop']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.stop_modules(*modules))
if systemctl.stop_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['try-restart']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.try_restart_modules(*modules))
if systemctl.try_restart_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['unmask']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.unmask_modules(*modules))
if systemctl.unmask_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['version']:
<DeepExtract>
if systemctl.version_info() is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.version_info())
return
shown = 0
for element in systemctl.version_info():
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.version_info())
</DeepExtract>
elif command in ['__cat_unit']:
<DeepExtract>
if systemctl.cat_unit(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.cat_unit(*modules))
return
print(systemctl.cat_unit(*modules))
if DebugPrintResult:
result1 = systemctl.cat_unit(*modules).split('\n')[0][:-20]
if systemctl.cat_unit(*modules) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.cat_unit(*modules))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.cat_unit(*modules))
</DeepExtract>
elif command in ['__get_active_unit']:
<DeepExtract>
if systemctl.get_active_unit(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.get_active_unit(*modules))
return
print(systemctl.get_active_unit(*modules))
if DebugPrintResult:
result1 = systemctl.get_active_unit(*modules).split('\n')[0][:-20]
if systemctl.get_active_unit(*modules) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.get_active_unit(*modules))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.get_active_unit(*modules))
</DeepExtract>
elif command in ['__get_description']:
<DeepExtract>
if systemctl.get_description(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.get_description(*modules))
return
print(systemctl.get_description(*modules))
if DebugPrintResult:
result1 = systemctl.get_description(*modules).split('\n')[0][:-20]
if systemctl.get_description(*modules) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.get_description(*modules))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.get_description(*modules))
</DeepExtract>
elif command in ['__get_status_file']:
<DeepExtract>
if systemctl.get_status_file(modules[0]) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.get_status_file(modules[0]))
return
print(systemctl.get_status_file(modules[0]))
if DebugPrintResult:
result1 = systemctl.get_status_file(modules[0]).split('\n')[0][:-20]
if systemctl.get_status_file(modules[0]) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.get_status_file(modules[0]))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.get_status_file(modules[0]))
</DeepExtract>
elif command in ['__get_status_pid_file', '__get_pid_file']:
<DeepExtract>
if systemctl.get_status_pid_file(modules[0]) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.get_status_pid_file(modules[0]))
return
print(systemctl.get_status_pid_file(modules[0]))
if DebugPrintResult:
result1 = systemctl.get_status_pid_file(modules[0]).split('\n')[0][:-20]
if systemctl.get_status_pid_file(modules[0]) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.get_status_pid_file(modules[0]))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.get_status_pid_file(modules[0]))
</DeepExtract>
elif command in ['__disable_unit']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.disable_unit(*modules))
if systemctl.disable_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__enable_unit']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.enable_unit(*modules))
if systemctl.enable_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__is_enabled']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.is_enabled(*modules))
if systemctl.is_enabled(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__killall']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.killall(*modules))
if systemctl.killall(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__kill_unit']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.kill_unit(*modules))
if systemctl.kill_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__load_preset_files']:
<DeepExtract>
if systemctl.load_preset_files(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.load_preset_files(*modules))
return
shown = 0
for element in systemctl.load_preset_files(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.load_preset_files(*modules))
</DeepExtract>
elif command in ['__mask_unit']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.mask_unit(*modules))
if systemctl.mask_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__read_env_file']:
<DeepExtract>
shown = 0
for element in list(systemctl.read_env_file(*modules)):
print('\t'.join([str(elem) for elem in element]))
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', list(systemctl.read_env_file(*modules)))
</DeepExtract>
elif command in ['__reload_unit']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reload_unit(*modules))
if systemctl.reload_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__reload_or_restart_unit']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reload_or_restart_unit(*modules))
if systemctl.reload_or_restart_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__reload_or_try_restart_unit']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reload_or_try_restart_unit(*modules))
if systemctl.reload_or_try_restart_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__reset_failed_unit']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reset_failed_unit(*modules))
if systemctl.reset_failed_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__restart_unit']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.restart_unit(*modules))
if systemctl.restart_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__start_unit']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.start_unit(*modules))
if systemctl.start_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__stop_unit']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.stop_unit(*modules))
if systemctl.stop_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__try_restart_unit']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.try_restart_unit(*modules))
if systemctl.try_restart_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__test_start_unit']:
systemctl.test_start_unit(*modules)
elif command in ['__unmask_unit']:
<DeepExtract>
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.unmask_unit(*modules))
if systemctl.unmask_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
</DeepExtract>
elif command in ['__show_unit_items']:
<DeepExtract>
shown = 0
for element in list(systemctl.show_unit_items(*modules)):
print('\t'.join([str(elem) for elem in element]))
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', list(systemctl.show_unit_items(*modules)))
</DeepExtract>
else:
logg.error('Unknown operation %s', command)
return EXIT_FAILURE
exitcode |= systemctl.error
return exitcode
|
def run(command, *modules):
exitcode = 0
if command in ['help']:
if systemctl.help_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.help_modules(*modules))
return
shown = 0
for element in systemctl.help_modules(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.help_modules(*modules))
elif command in ['cat']:
if systemctl.cat_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.cat_modules(*modules))
return
print(systemctl.cat_modules(*modules))
if DebugPrintResult:
result1 = systemctl.cat_modules(*modules).split('\n')[0][:-20]
if systemctl.cat_modules(*modules) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.cat_modules(*modules))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.cat_modules(*modules))
elif command in ['clean']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.clean_modules(*modules))
if systemctl.clean_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['command']:
if systemctl.command_of_unit(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.command_of_unit(*modules))
return
shown = 0
for element in systemctl.command_of_unit(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.command_of_unit(*modules))
elif command in ['daemon-reload']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.daemon_reload_target())
if systemctl.daemon_reload_target() is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['default']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.default_system())
if systemctl.default_system() is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['default-services']:
if systemctl.default_services_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.default_services_modules(*modules))
return
shown = 0
for element in systemctl.default_services_modules(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.default_services_modules(*modules))
elif command in ['disable']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.disable_modules(*modules))
if systemctl.disable_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['enable']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.enable_modules(*modules))
if systemctl.enable_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['environment']:
if systemctl.environment_of_unit(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.environment_of_unit(*modules))
return
shown = 0
for key in sorted(systemctl.environment_of_unit(*modules).keys()):
element = systemctl.environment_of_unit(*modules)[key]
print('%s=%s' % (key, element))
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.environment_of_unit(*modules))
elif command in ['get-default']:
if systemctl.get_default_target() is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.get_default_target())
return
print(systemctl.get_default_target())
if DebugPrintResult:
result1 = systemctl.get_default_target().split('\n')[0][:-20]
if systemctl.get_default_target() == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.get_default_target())
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.get_default_target())
elif command in ['get-preset']:
if systemctl.get_preset_of_unit(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.get_preset_of_unit(*modules))
return
print(systemctl.get_preset_of_unit(*modules))
if DebugPrintResult:
result1 = systemctl.get_preset_of_unit(*modules).split('\n')[0][:-20]
if systemctl.get_preset_of_unit(*modules) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.get_preset_of_unit(*modules))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.get_preset_of_unit(*modules))
elif command in ['halt']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.halt_target())
if systemctl.halt_target() is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['init']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.init_modules(*modules))
if systemctl.init_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['is-active']:
if systemctl.is_active_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.is_active_modules(*modules))
return
shown = 0
for element in systemctl.is_active_modules(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.is_active_modules(*modules))
elif command in ['is-enabled']:
if systemctl.is_enabled_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.is_enabled_modules(*modules))
return
shown = 0
for element in systemctl.is_enabled_modules(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.is_enabled_modules(*modules))
elif command in ['is-failed']:
if systemctl.is_failed_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.is_failed_modules(*modules))
return
shown = 0
for element in systemctl.is_failed_modules(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.is_failed_modules(*modules))
elif command in ['is-system-running']:
if systemctl.is_system_running_info() is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.is_system_running_info())
return
print(systemctl.is_system_running_info())
if DebugPrintResult:
result1 = systemctl.is_system_running_info().split('\n')[0][:-20]
if systemctl.is_system_running_info() == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.is_system_running_info())
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.is_system_running_info())
elif command in ['kill']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.kill_modules(*modules))
if systemctl.kill_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['list-start-dependencies']:
shown = 0
for element in systemctl.list_start_dependencies_modules(*modules):
print('\t'.join([str(elem) for elem in element]))
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.list_start_dependencies_modules(*modules))
elif command in ['list-dependencies']:
if systemctl.list_dependencies_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.list_dependencies_modules(*modules))
return
shown = 0
for element in systemctl.list_dependencies_modules(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.list_dependencies_modules(*modules))
elif command in ['list-unit-files']:
shown = 0
for element in systemctl.list_unit_files_modules(*modules):
print('\t'.join([str(elem) for elem in element]))
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.list_unit_files_modules(*modules))
elif command in ['list-units']:
shown = 0
for element in systemctl.list_units_modules(*modules):
print('\t'.join([str(elem) for elem in element]))
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.list_units_modules(*modules))
elif command in ['listen']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.listen_modules(*modules))
if systemctl.listen_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['log', 'logs']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.log_modules(*modules))
if systemctl.log_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['mask']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.mask_modules(*modules))
if systemctl.mask_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['preset']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.preset_modules(*modules))
if systemctl.preset_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['preset-all']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.preset_all_modules())
if systemctl.preset_all_modules() is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['reap-zombies']:
if systemctl.reap_zombies_target() is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.reap_zombies_target())
return
print(systemctl.reap_zombies_target())
if DebugPrintResult:
result1 = systemctl.reap_zombies_target().split('\n')[0][:-20]
if systemctl.reap_zombies_target() == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.reap_zombies_target())
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.reap_zombies_target())
elif command in ['reload']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reload_modules(*modules))
if systemctl.reload_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['reload-or-restart']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reload_or_restart_modules(*modules))
if systemctl.reload_or_restart_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['reload-or-try-restart']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reload_or_try_restart_modules(*modules))
if systemctl.reload_or_try_restart_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['reset-failed']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reset_failed_modules(*modules))
if systemctl.reset_failed_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['restart']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.restart_modules(*modules))
if systemctl.restart_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['set-default']:
if systemctl.set_default_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.set_default_modules(*modules))
return
print(systemctl.set_default_modules(*modules))
if DebugPrintResult:
result1 = systemctl.set_default_modules(*modules).split('\n')[0][:-20]
if systemctl.set_default_modules(*modules) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.set_default_modules(*modules))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.set_default_modules(*modules))
elif command in ['show']:
if systemctl.show_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.show_modules(*modules))
return
shown = 0
for element in systemctl.show_modules(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.show_modules(*modules))
elif command in ['start']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.start_modules(*modules))
if systemctl.start_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['status']:
if systemctl.status_modules(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.status_modules(*modules))
return
print(systemctl.status_modules(*modules))
if DebugPrintResult:
result1 = systemctl.status_modules(*modules).split('\n')[0][:-20]
if systemctl.status_modules(*modules) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.status_modules(*modules))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.status_modules(*modules))
elif command in ['stop']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.stop_modules(*modules))
if systemctl.stop_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['try-restart']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.try_restart_modules(*modules))
if systemctl.try_restart_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['unmask']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.unmask_modules(*modules))
if systemctl.unmask_modules(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['version']:
if systemctl.version_info() is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.version_info())
return
shown = 0
for element in systemctl.version_info():
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.version_info())
elif command in ['__cat_unit']:
if systemctl.cat_unit(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.cat_unit(*modules))
return
print(systemctl.cat_unit(*modules))
if DebugPrintResult:
result1 = systemctl.cat_unit(*modules).split('\n')[0][:-20]
if systemctl.cat_unit(*modules) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.cat_unit(*modules))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.cat_unit(*modules))
elif command in ['__get_active_unit']:
if systemctl.get_active_unit(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.get_active_unit(*modules))
return
print(systemctl.get_active_unit(*modules))
if DebugPrintResult:
result1 = systemctl.get_active_unit(*modules).split('\n')[0][:-20]
if systemctl.get_active_unit(*modules) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.get_active_unit(*modules))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.get_active_unit(*modules))
elif command in ['__get_description']:
if systemctl.get_description(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.get_description(*modules))
return
print(systemctl.get_description(*modules))
if DebugPrintResult:
result1 = systemctl.get_description(*modules).split('\n')[0][:-20]
if systemctl.get_description(*modules) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.get_description(*modules))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.get_description(*modules))
elif command in ['__get_status_file']:
if systemctl.get_status_file(modules[0]) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.get_status_file(modules[0]))
return
print(systemctl.get_status_file(modules[0]))
if DebugPrintResult:
result1 = systemctl.get_status_file(modules[0]).split('\n')[0][:-20]
if systemctl.get_status_file(modules[0]) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.get_status_file(modules[0]))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.get_status_file(modules[0]))
elif command in ['__get_status_pid_file', '__get_pid_file']:
if systemctl.get_status_pid_file(modules[0]) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.get_status_pid_file(modules[0]))
return
print(systemctl.get_status_pid_file(modules[0]))
if DebugPrintResult:
result1 = systemctl.get_status_pid_file(modules[0]).split('\n')[0][:-20]
if systemctl.get_status_pid_file(modules[0]) == result1:
logg.log(HINT, "EXEC END '%s'", systemctl.get_status_pid_file(modules[0]))
else:
logg.log(HINT, "EXEC END '%s...'", result1)
logg.debug(" END '%s'", systemctl.get_status_pid_file(modules[0]))
elif command in ['__disable_unit']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.disable_unit(*modules))
if systemctl.disable_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__enable_unit']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.enable_unit(*modules))
if systemctl.enable_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__is_enabled']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.is_enabled(*modules))
if systemctl.is_enabled(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__killall']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.killall(*modules))
if systemctl.killall(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__kill_unit']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.kill_unit(*modules))
if systemctl.kill_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__load_preset_files']:
if systemctl.load_preset_files(*modules) is None:
if DebugPrintResult:
logg.debug(' END %s', systemctl.load_preset_files(*modules))
return
shown = 0
for element in systemctl.load_preset_files(*modules):
print(element)
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', systemctl.load_preset_files(*modules))
elif command in ['__mask_unit']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.mask_unit(*modules))
if systemctl.mask_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__read_env_file']:
shown = 0
for element in list(systemctl.read_env_file(*modules)):
print('\t'.join([str(elem) for elem in element]))
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', list(systemctl.read_env_file(*modules)))
elif command in ['__reload_unit']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reload_unit(*modules))
if systemctl.reload_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__reload_or_restart_unit']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reload_or_restart_unit(*modules))
if systemctl.reload_or_restart_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__reload_or_try_restart_unit']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reload_or_try_restart_unit(*modules))
if systemctl.reload_or_try_restart_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__reset_failed_unit']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.reset_failed_unit(*modules))
if systemctl.reset_failed_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__restart_unit']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.restart_unit(*modules))
if systemctl.restart_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__start_unit']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.start_unit(*modules))
if systemctl.start_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__stop_unit']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.stop_unit(*modules))
if systemctl.stop_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__try_restart_unit']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.try_restart_unit(*modules))
if systemctl.try_restart_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__test_start_unit']:
systemctl.test_start_unit(*modules)
elif command in ['__unmask_unit']:
if DebugPrintResult:
logg.log(HINT, 'EXEC END %s', systemctl.unmask_unit(*modules))
if systemctl.unmask_unit(*modules) is False:
exitcode = NOT_OK
exitcode = 0
elif command in ['__show_unit_items']:
shown = 0
for element in list(systemctl.show_unit_items(*modules)):
print('\t'.join([str(elem) for elem in element]))
shown += 1
if DebugPrintResult:
logg.log(HINT, 'EXEC END %i items', shown)
logg.debug(' END %s', list(systemctl.show_unit_items(*modules)))
else:
logg.error('Unknown operation %s', command)
return EXIT_FAILURE
exitcode |= systemctl.error
return exitcode
|
docker-systemctl-images
|
positive
|
def llh(self, U: Tuple[Tensor, ...], X: Tensor) -> Tensor:
<DeepExtract>
assert len(U) == 2
(U0, U1) = U
Xhat = tf.matmul(tf.transpose(U0), U1)
residuals = tf.reshape(X - Xhat, (-1,))
indices = tf.cast(tf.where(tf.reshape(self.testMask, (-1,))), dtype=tf.int32)
testResiduals = tf.gather_nd(residuals, indices)
r = testResiduals
</DeepExtract>
llh = tf.reduce_sum(self.noiseDistribution.llh(r))
return llh
|
def llh(self, U: Tuple[Tensor, ...], X: Tensor) -> Tensor:
assert len(U) == 2
(U0, U1) = U
Xhat = tf.matmul(tf.transpose(U0), U1)
residuals = tf.reshape(X - Xhat, (-1,))
indices = tf.cast(tf.where(tf.reshape(self.testMask, (-1,))), dtype=tf.int32)
testResiduals = tf.gather_nd(residuals, indices)
r = testResiduals
llh = tf.reduce_sum(self.noiseDistribution.llh(r))
return llh
|
decompose
|
positive
|
def normalize_gtl(self):
gt = self.gt_likelihood
self.gt_likelihood_unnormalized = np.copy(self.gt_likelihood)
if self.args.gtl_output == 'softmax':
<DeepExtract>
e = np.exp(np.array(gt) / self.args.temperature)
dist = e / np.sum(e)
gt = dist
</DeepExtract>
elif self.args.gtl_output == 'softermax':
<DeepExtract>
gt = np.array(gt)
gt = gt - gt.min() + np.exp(1)
e = np.log(gt)
dist = e / np.sum(e)
gt = dist
</DeepExtract>
elif self.args.gtl_output == 'linear':
gt = np.clip(gt, 1e-05, 1.0)
gt = gt / gt.sum()
self.gt_likelihood = gt
|
def normalize_gtl(self):
gt = self.gt_likelihood
self.gt_likelihood_unnormalized = np.copy(self.gt_likelihood)
if self.args.gtl_output == 'softmax':
e = np.exp(np.array(gt) / self.args.temperature)
dist = e / np.sum(e)
gt = dist
elif self.args.gtl_output == 'softermax':
gt = np.array(gt)
gt = gt - gt.min() + np.exp(1)
e = np.log(gt)
dist = e / np.sum(e)
gt = dist
elif self.args.gtl_output == 'linear':
gt = np.clip(gt, 1e-05, 1.0)
gt = gt / gt.sum()
self.gt_likelihood = gt
|
dal
|
positive
|
def stop_instance(self, node):
"""
Destroy a VM.
:param Node node: A `Node`:class: instance
"""
<DeepExtract>
with self.__lock:
if self._resource_client is None:
log.debug('Making Azure `ServicePrincipalcredentials` object with tenant=%r, client_id=%r, secret=%r ...', self.tenant_id, self.client_id, '<redacted>' if self.secret else None)
credentials = ServicePrincipalCredentials(tenant=self.tenant_id, client_id=self.client_id, secret=self.secret)
log.debug('Initializing Azure `ComputeManagementclient` ...')
self._compute_client = ComputeManagementClient(credentials, self.subscription_id)
log.debug('Initializing Azure `NetworkManagementclient` ...')
self._network_client = NetworkManagementClient(credentials, self.subscription_id)
log.debug('Initializing Azure `ResourceManagementclient` ...')
self._resource_client = ResourceManagementClient(credentials, self.subscription_id)
log.info('Azure API clients initialized.')
</DeepExtract>
(cluster_name, node_name) = node.instance_id
<DeepExtract>
with self.__lock:
if not self._inventory:
for obj in self._resource_client.resources.list_by_resource_group(cluster_name):
self._inventory[obj.name] = obj.id
</DeepExtract>
for (name, api_version) in [(node_name, '2018-06-01'), (node_name + '-nic', '2018-10-01'), (node_name + '-public-ip', '2018-10-01'), (node_name + '-disk', '2018-09-30'), (self._make_storage_account_name(cluster_name, node_name), '2018-07-01')]:
rsc_id = self._inventory[name]
log.debug('Deleting resource %s (`%s`) ...', name, rsc_id)
oper = self._resource_client.resources.delete_by_id(rsc_id, api_version)
oper.wait()
del self._inventory[name]
self._vm_details.pop(node_name, None)
with self.__lock:
if len(self._inventory) == 2:
log.debug('Cleaning up leftover resource group ...')
oper = self._resource_client.resource_groups.delete(cluster_name)
oper.wait()
self._inventory = {}
|
def stop_instance(self, node):
"""
Destroy a VM.
:param Node node: A `Node`:class: instance
"""
with self.__lock:
if self._resource_client is None:
log.debug('Making Azure `ServicePrincipalcredentials` object with tenant=%r, client_id=%r, secret=%r ...', self.tenant_id, self.client_id, '<redacted>' if self.secret else None)
credentials = ServicePrincipalCredentials(tenant=self.tenant_id, client_id=self.client_id, secret=self.secret)
log.debug('Initializing Azure `ComputeManagementclient` ...')
self._compute_client = ComputeManagementClient(credentials, self.subscription_id)
log.debug('Initializing Azure `NetworkManagementclient` ...')
self._network_client = NetworkManagementClient(credentials, self.subscription_id)
log.debug('Initializing Azure `ResourceManagementclient` ...')
self._resource_client = ResourceManagementClient(credentials, self.subscription_id)
log.info('Azure API clients initialized.')
(cluster_name, node_name) = node.instance_id
with self.__lock:
if not self._inventory:
for obj in self._resource_client.resources.list_by_resource_group(cluster_name):
self._inventory[obj.name] = obj.id
for (name, api_version) in [(node_name, '2018-06-01'), (node_name + '-nic', '2018-10-01'), (node_name + '-public-ip', '2018-10-01'), (node_name + '-disk', '2018-09-30'), (self._make_storage_account_name(cluster_name, node_name), '2018-07-01')]:
rsc_id = self._inventory[name]
log.debug('Deleting resource %s (`%s`) ...', name, rsc_id)
oper = self._resource_client.resources.delete_by_id(rsc_id, api_version)
oper.wait()
del self._inventory[name]
self._vm_details.pop(node_name, None)
with self.__lock:
if len(self._inventory) == 2:
log.debug('Cleaning up leftover resource group ...')
oper = self._resource_client.resource_groups.delete(cluster_name)
oper.wait()
self._inventory = {}
|
elasticluster
|
positive
|
def sharpen(pix, width, intensity):
A = pix[..., 3]
<DeepExtract>
assert pix.dtype == np.float32
img = cl_builder.new_image_from_ndarray(pix)
out = cl_builder.new_image(img.width, img.height)
gaussian_repeat_cl(img, out, width)
gas = img.to_numpy()
</DeepExtract>
pix += (pix - gas) * intensity
pix[..., 3] = A
return pix
|
def sharpen(pix, width, intensity):
A = pix[..., 3]
assert pix.dtype == np.float32
img = cl_builder.new_image_from_ndarray(pix)
out = cl_builder.new_image(img.width, img.height)
gaussian_repeat_cl(img, out, width)
gas = img.to_numpy()
pix += (pix - gas) * intensity
pix[..., 3] = A
return pix
|
blender-texture-tools
|
positive
|
def orderCollection(cur_col_name):
col_names = ['DAZ_ROOT', 'DAZ_FIG_\\d{1,2}', 'DAZ_ENV_\\d{1,2}', 'DAZ_HIDE', 'DAZ_PUB']
last_col = None
rtn = None
for (i, col_name) in enumerate(col_names):
if i == 1 or i == 2:
rep = re.compile(col_name)
if not rep.search(cur_col_name):
continue
col_name = cur_col_name
if (col_name in bpy.data.collections) == False:
bpy.data.collections.new(name=col_name)
col = bpy.data.collections.get(col_name)
if i == 0:
if col_name not in bpy.context.scene.collection.children.keys():
bpy.context.scene.collection.children.link(col)
else:
if i > 1:
last_col = bpy.data.collections.get(col_names[0])
if last_col is not None:
<DeepExtract>
global coll_parents
if len(coll_parents) == 0:
for coll in traverse_tree(bpy.context.scene.collection):
for c in coll.children.keys():
coll_parents.setdefault(c, coll.name)
print('coll_parents.length()=', len(coll_parents))
prtname = coll_parents.get(col_name)
</DeepExtract>
if prtname is not None and prtname != last_col.name:
bpy.data.collections.get(prtname).children.unlink(col)
if col_name not in last_col.children.keys():
last_col.children.link(col)
if i == 3:
col.hide_render = True
col.hide_viewport = True
last_col = col
if col_name == cur_col_name:
rtn = col
if rtn is None:
return last_col
else:
return rtn
|
def orderCollection(cur_col_name):
col_names = ['DAZ_ROOT', 'DAZ_FIG_\\d{1,2}', 'DAZ_ENV_\\d{1,2}', 'DAZ_HIDE', 'DAZ_PUB']
last_col = None
rtn = None
for (i, col_name) in enumerate(col_names):
if i == 1 or i == 2:
rep = re.compile(col_name)
if not rep.search(cur_col_name):
continue
col_name = cur_col_name
if (col_name in bpy.data.collections) == False:
bpy.data.collections.new(name=col_name)
col = bpy.data.collections.get(col_name)
if i == 0:
if col_name not in bpy.context.scene.collection.children.keys():
bpy.context.scene.collection.children.link(col)
else:
if i > 1:
last_col = bpy.data.collections.get(col_names[0])
if last_col is not None:
global coll_parents
if len(coll_parents) == 0:
for coll in traverse_tree(bpy.context.scene.collection):
for c in coll.children.keys():
coll_parents.setdefault(c, coll.name)
print('coll_parents.length()=', len(coll_parents))
prtname = coll_parents.get(col_name)
if prtname is not None and prtname != last_col.name:
bpy.data.collections.get(prtname).children.unlink(col)
if col_name not in last_col.children.keys():
last_col.children.link(col)
if i == 3:
col.hide_render = True
col.hide_viewport = True
last_col = col
if col_name == cur_col_name:
rtn = col
if rtn is None:
return last_col
else:
return rtn
|
DazToBlender
|
positive
|
def test_createkey(self):
environment = Environment()
key = AESCipher.create_key(16)
environment['CIPHER_KEY'] = key
AESCipher(environment)
key = AESCipher.create_key(24)
environment['CIPHER_KEY'] = key
AESCipher(environment)
key = AESCipher.create_key(32)
environment['CIPHER_KEY'] = key
AESCipher(environment)
with self.assertRaises(ValueError):
AESCipher.create_key(12)
with self.assertRaises(ValueError):
AESCipher.create_key(20)
with self.assertRaises(ValueError):
AESCipher.create_key(28)
with self.assertRaises(ValueError):
AESCipher.create_key(36)
def create_key(num_bytes):
return b64encode(AESCipher.random_bytes(num_bytes)).decode('utf8')
<DeepExtract>
key = b64encode(AESCipher.random_bytes(12)).decode('utf8')
</DeepExtract>
environment['CIPHER_KEY'] = key
with self.assertRaises(ValueError):
AESCipher(environment)
<DeepExtract>
key = b64encode(AESCipher.random_bytes(20)).decode('utf8')
</DeepExtract>
environment['CIPHER_KEY'] = key
with self.assertRaises(ValueError):
AESCipher(environment)
<DeepExtract>
key = b64encode(AESCipher.random_bytes(28)).decode('utf8')
</DeepExtract>
environment['CIPHER_KEY'] = key
with self.assertRaises(ValueError):
AESCipher(environment)
<DeepExtract>
key = b64encode(AESCipher.random_bytes(36)).decode('utf8')
</DeepExtract>
environment['CIPHER_KEY'] = key
with self.assertRaises(ValueError):
AESCipher(environment)
|
def test_createkey(self):
environment = Environment()
key = AESCipher.create_key(16)
environment['CIPHER_KEY'] = key
AESCipher(environment)
key = AESCipher.create_key(24)
environment['CIPHER_KEY'] = key
AESCipher(environment)
key = AESCipher.create_key(32)
environment['CIPHER_KEY'] = key
AESCipher(environment)
with self.assertRaises(ValueError):
AESCipher.create_key(12)
with self.assertRaises(ValueError):
AESCipher.create_key(20)
with self.assertRaises(ValueError):
AESCipher.create_key(28)
with self.assertRaises(ValueError):
AESCipher.create_key(36)
def create_key(num_bytes):
return b64encode(AESCipher.random_bytes(num_bytes)).decode('utf8')
key = b64encode(AESCipher.random_bytes(12)).decode('utf8')
environment['CIPHER_KEY'] = key
with self.assertRaises(ValueError):
AESCipher(environment)
key = b64encode(AESCipher.random_bytes(20)).decode('utf8')
environment['CIPHER_KEY'] = key
with self.assertRaises(ValueError):
AESCipher(environment)
key = b64encode(AESCipher.random_bytes(28)).decode('utf8')
environment['CIPHER_KEY'] = key
with self.assertRaises(ValueError):
AESCipher(environment)
key = b64encode(AESCipher.random_bytes(36)).decode('utf8')
environment['CIPHER_KEY'] = key
with self.assertRaises(ValueError):
AESCipher(environment)
|
eventsourcing
|
positive
|
def create_lr_dfa(self):
all_status = {}
all_object_set = {}
self.DFA = DFA(set())
def create_get_lr_dfa_node(set_id):
if set_id in all_status:
return all_status[set_id]
return LRDFANode(set_id=set_id)
def expand_production(self, cur_production, ex_object_set):
ex_object_set.add(cur_production)
right = cur_production[2]
point_index = cur_production[3]
tail_set = cur_production[4]
if point_index < len(right) and right[point_index] in self.noterminate:
for pro_right in self.productions_dict[right[point_index]]:
new_tail_set = set()
flag = True
for i in range(point_index + 1, len(right)):
cur_first_set = self.first_set[right[i]]
if '$' in cur_first_set:
new_tail_set = tuple(set(new_tail_set) | cur_first_set - set('$'))
else:
flag = False
new_tail_set = tuple(set(new_tail_set) | cur_first_set)
break
if flag:
new_tail_set = tuple(set(new_tail_set) | set(tail_set))
ex_new_production = (pro_right[1], right[point_index], pro_right[0], 0, new_tail_set)
if ex_new_production not in ex_object_set:
ex_object_set |= expand_production(self, ex_new_production, ex_object_set)
new_ex_object_set = {}
for eos in ex_object_set:
pro_key = (eos[0], eos[1], eos[2], eos[3])
if tuple(pro_key) not in new_ex_object_set:
new_ex_object_set[tuple(pro_key)] = set()
new_ex_object_set[pro_key] |= set(eos[4])
ex_object_set = set()
for key in new_ex_object_set:
production = (key[0], key[1], key[2], key[3], tuple(new_ex_object_set[key]))
ex_object_set.add(tuple(production))
return ex_object_set
set_id = 0
<DeepExtract>
if set_id in all_status:
new_node = all_status[set_id]
new_node = LRDFANode(set_id=set_id)
</DeepExtract>
<DeepExtract>
(0, 'S', ('start',), 0, '#').add(self)
right = self[2]
point_index = self[3]
tail_set = self[4]
if point_index < len(right) and right[point_index] in self.noterminate:
for pro_right in self.productions_dict[right[point_index]]:
new_tail_set = set()
flag = True
for i in range(point_index + 1, len(right)):
cur_first_set = self.first_set[right[i]]
if '$' in cur_first_set:
new_tail_set = tuple(set(new_tail_set) | cur_first_set - set('$'))
else:
flag = False
new_tail_set = tuple(set(new_tail_set) | cur_first_set)
break
if flag:
new_tail_set = tuple(set(new_tail_set) | set(tail_set))
ex_new_production = (pro_right[1], right[point_index], pro_right[0], 0, new_tail_set)
if ex_new_production not in (0, 'S', ('start',), 0, '#'):
(0, 'S', ('start',), 0, '#') |= expand_production(self, ex_new_production, (0, 'S', ('start',), 0, '#'))
new_ex_object_set = {}
for eos in (0, 'S', ('start',), 0, '#'):
pro_key = (eos[0], eos[1], eos[2], eos[3])
if tuple(pro_key) not in new_ex_object_set:
new_ex_object_set[tuple(pro_key)] = set()
new_ex_object_set[pro_key] |= set(eos[4])
(0, 'S', ('start',), 0, '#') = set()
for key in new_ex_object_set:
production = (key[0], key[1], key[2], key[3], tuple(new_ex_object_set[key]))
(0, 'S', ('start',), 0, '#').add(tuple(production))
object_set = (0, 'S', ('start',), 0, '#')
</DeepExtract>
new_node.add_object_set_by_set(object_set)
all_object_set[tuple(object_set)] = set_id
all_status[set_id] = new_node
object_set_queue = list()
object_set_queue.append(new_node)
while object_set_queue:
top_object_node = object_set_queue.pop(0)
old_set = top_object_node.object_set
old_set_id = top_object_node.set_id
for cur_production in old_set:
pro_id = cur_production[0]
left = cur_production[1]
right = cur_production[2]
point_index = cur_production[3]
tail_set = cur_production[4]
if point_index >= len(right) or '$' in right:
if old_set_id not in self.lr_analyze_table:
self.lr_analyze_table[old_set_id] = {}
for tail in tail_set:
if tail in self.lr_analyze_table[old_set_id]:
print('the grammar is not a LR(1) grammar!!!')
return
self.lr_analyze_table[old_set_id][tail] = ('r', pro_id)
else:
tar_set_id = 0
new_production = (pro_id, left, right, point_index + 1, tail_set)
<DeepExtract>
new_production.add(self)
right = self[2]
point_index = self[3]
tail_set = self[4]
if point_index < len(right) and right[point_index] in self.noterminate:
for pro_right in self.productions_dict[right[point_index]]:
new_tail_set = set()
flag = True
for i in range(point_index + 1, len(right)):
cur_first_set = self.first_set[right[i]]
if '$' in cur_first_set:
new_tail_set = tuple(set(new_tail_set) | cur_first_set - set('$'))
else:
flag = False
new_tail_set = tuple(set(new_tail_set) | cur_first_set)
break
if flag:
new_tail_set = tuple(set(new_tail_set) | set(tail_set))
ex_new_production = (pro_right[1], right[point_index], pro_right[0], 0, new_tail_set)
if ex_new_production not in new_production:
new_production |= expand_production(self, ex_new_production, new_production)
new_ex_object_set = {}
for eos in new_production:
pro_key = (eos[0], eos[1], eos[2], eos[3])
if tuple(pro_key) not in new_ex_object_set:
new_ex_object_set[tuple(pro_key)] = set()
new_ex_object_set[pro_key] |= set(eos[4])
new_production = set()
for key in new_ex_object_set:
production = (key[0], key[1], key[2], key[3], tuple(new_ex_object_set[key]))
new_production.add(tuple(production))
new_object_set = new_production
</DeepExtract>
if tuple(new_object_set) in all_object_set.keys():
tar_set_id = all_object_set[tuple(new_object_set)]
else:
set_id += 1
tar_set_id = set_id
all_object_set[tuple(new_object_set)] = set_id
<DeepExtract>
if tar_set_id in all_status:
new_node = all_status[tar_set_id]
new_node = LRDFANode(set_id=tar_set_id)
</DeepExtract>
new_node.add_object_set_by_set(new_object_set)
all_status[tar_set_id] = new_node
object_set_queue.append(new_node)
if old_set_id not in self.lr_analyze_table:
self.lr_analyze_table[old_set_id] = {}
if right[point_index] in self.terminate:
self.lr_analyze_table[old_set_id][right[point_index]] = ('s', tar_set_id)
else:
self.lr_analyze_table[old_set_id][right[point_index]] = ('g', tar_set_id)
self.DFA.status = all_status
|
def create_lr_dfa(self):
all_status = {}
all_object_set = {}
self.DFA = DFA(set())
def create_get_lr_dfa_node(set_id):
if set_id in all_status:
return all_status[set_id]
return LRDFANode(set_id=set_id)
def expand_production(self, cur_production, ex_object_set):
ex_object_set.add(cur_production)
right = cur_production[2]
point_index = cur_production[3]
tail_set = cur_production[4]
if point_index < len(right) and right[point_index] in self.noterminate:
for pro_right in self.productions_dict[right[point_index]]:
new_tail_set = set()
flag = True
for i in range(point_index + 1, len(right)):
cur_first_set = self.first_set[right[i]]
if '$' in cur_first_set:
new_tail_set = tuple(set(new_tail_set) | cur_first_set - set('$'))
else:
flag = False
new_tail_set = tuple(set(new_tail_set) | cur_first_set)
break
if flag:
new_tail_set = tuple(set(new_tail_set) | set(tail_set))
ex_new_production = (pro_right[1], right[point_index], pro_right[0], 0, new_tail_set)
if ex_new_production not in ex_object_set:
ex_object_set |= expand_production(self, ex_new_production, ex_object_set)
new_ex_object_set = {}
for eos in ex_object_set:
pro_key = (eos[0], eos[1], eos[2], eos[3])
if tuple(pro_key) not in new_ex_object_set:
new_ex_object_set[tuple(pro_key)] = set()
new_ex_object_set[pro_key] |= set(eos[4])
ex_object_set = set()
for key in new_ex_object_set:
production = (key[0], key[1], key[2], key[3], tuple(new_ex_object_set[key]))
ex_object_set.add(tuple(production))
return ex_object_set
set_id = 0
if set_id in all_status:
new_node = all_status[set_id]
new_node = LRDFANode(set_id=set_id)
(0, 'S', ('start',), 0, '#').add(self)
right = self[2]
point_index = self[3]
tail_set = self[4]
if point_index < len(right) and right[point_index] in self.noterminate:
for pro_right in self.productions_dict[right[point_index]]:
new_tail_set = set()
flag = True
for i in range(point_index + 1, len(right)):
cur_first_set = self.first_set[right[i]]
if '$' in cur_first_set:
new_tail_set = tuple(set(new_tail_set) | cur_first_set - set('$'))
else:
flag = False
new_tail_set = tuple(set(new_tail_set) | cur_first_set)
break
if flag:
new_tail_set = tuple(set(new_tail_set) | set(tail_set))
ex_new_production = (pro_right[1], right[point_index], pro_right[0], 0, new_tail_set)
if ex_new_production not in (0, 'S', ('start',), 0, '#'):
(0, 'S', ('start',), 0, '#') |= expand_production(self, ex_new_production, (0, 'S', ('start',), 0, '#'))
new_ex_object_set = {}
for eos in (0, 'S', ('start',), 0, '#'):
pro_key = (eos[0], eos[1], eos[2], eos[3])
if tuple(pro_key) not in new_ex_object_set:
new_ex_object_set[tuple(pro_key)] = set()
new_ex_object_set[pro_key] |= set(eos[4])
(0, 'S', ('start',), 0, '#') = set()
for key in new_ex_object_set:
production = (key[0], key[1], key[2], key[3], tuple(new_ex_object_set[key]))
(0, 'S', ('start',), 0, '#').add(tuple(production))
object_set = (0, 'S', ('start',), 0, '#')
new_node.add_object_set_by_set(object_set)
all_object_set[tuple(object_set)] = set_id
all_status[set_id] = new_node
object_set_queue = list()
object_set_queue.append(new_node)
while object_set_queue:
top_object_node = object_set_queue.pop(0)
old_set = top_object_node.object_set
old_set_id = top_object_node.set_id
for cur_production in old_set:
pro_id = cur_production[0]
left = cur_production[1]
right = cur_production[2]
point_index = cur_production[3]
tail_set = cur_production[4]
if point_index >= len(right) or '$' in right:
if old_set_id not in self.lr_analyze_table:
self.lr_analyze_table[old_set_id] = {}
for tail in tail_set:
if tail in self.lr_analyze_table[old_set_id]:
print('the grammar is not a LR(1) grammar!!!')
return
self.lr_analyze_table[old_set_id][tail] = ('r', pro_id)
else:
tar_set_id = 0
new_production = (pro_id, left, right, point_index + 1, tail_set)
new_production.add(self)
right = self[2]
point_index = self[3]
tail_set = self[4]
if point_index < len(right) and right[point_index] in self.noterminate:
for pro_right in self.productions_dict[right[point_index]]:
new_tail_set = set()
flag = True
for i in range(point_index + 1, len(right)):
cur_first_set = self.first_set[right[i]]
if '$' in cur_first_set:
new_tail_set = tuple(set(new_tail_set) | cur_first_set - set('$'))
else:
flag = False
new_tail_set = tuple(set(new_tail_set) | cur_first_set)
break
if flag:
new_tail_set = tuple(set(new_tail_set) | set(tail_set))
ex_new_production = (pro_right[1], right[point_index], pro_right[0], 0, new_tail_set)
if ex_new_production not in new_production:
new_production |= expand_production(self, ex_new_production, new_production)
new_ex_object_set = {}
for eos in new_production:
pro_key = (eos[0], eos[1], eos[2], eos[3])
if tuple(pro_key) not in new_ex_object_set:
new_ex_object_set[tuple(pro_key)] = set()
new_ex_object_set[pro_key] |= set(eos[4])
new_production = set()
for key in new_ex_object_set:
production = (key[0], key[1], key[2], key[3], tuple(new_ex_object_set[key]))
new_production.add(tuple(production))
new_object_set = new_production
if tuple(new_object_set) in all_object_set.keys():
tar_set_id = all_object_set[tuple(new_object_set)]
else:
set_id += 1
tar_set_id = set_id
all_object_set[tuple(new_object_set)] = set_id
if tar_set_id in all_status:
new_node = all_status[tar_set_id]
new_node = LRDFANode(set_id=tar_set_id)
new_node.add_object_set_by_set(new_object_set)
all_status[tar_set_id] = new_node
object_set_queue.append(new_node)
if old_set_id not in self.lr_analyze_table:
self.lr_analyze_table[old_set_id] = {}
if right[point_index] in self.terminate:
self.lr_analyze_table[old_set_id][right[point_index]] = ('s', tar_set_id)
else:
self.lr_analyze_table[old_set_id][right[point_index]] = ('g', tar_set_id)
self.DFA.status = all_status
|
C--Compiler
|
positive
|
def visualize_graph_pairs_from_json(json_file_path):
python_data = read_json_from_file(json_file_path)
num_imgs = len(python_data)
track_id_dict = {}
for track_id in range(100):
track_id_dict[track_id] = []
max_track_id = -1
for img_id in range(num_imgs):
image_id = python_data[img_id]['image']['id']
candidates = python_data[img_id]['candidates']
image_path = os.path.join(python_data[img_id]['image']['folder'], python_data[img_id]['image']['name'])
num_candidates = len(candidates)
for candidate_id in range(num_candidates):
candidate = candidates[candidate_id]
track_id = candidate['track_id']
keypoints = candidate['pose_keypoints_2d']
bbox = candidate['det_bbox']
if track_id > max_track_id:
max_track_id = track_id
candidate_dict = {'track_id': track_id, 'img_id': image_id, 'img_path': image_path, 'bbox': bbox, 'keypoints': keypoints}
track_id_dict[track_id].append(candidate_dict)
for img_id in range(num_imgs):
for track_id_A in range(max_track_id):
for track_id_B in range(max_track_id):
if track_id_A == track_id_B:
continue
<DeepExtract>
index_list = []
for (i, dic) in enumerate(track_id_dict[track_id_A]):
if dic['img_id'] == img_id:
index_list.append(i)
candidate_A_index_list = index_list
</DeepExtract>
<DeepExtract>
index_list = []
for (i, dic) in enumerate(track_id_dict[track_id_B]):
if dic['img_id'] == img_id:
index_list.append(i)
candidate_B_index_list = index_list
</DeepExtract>
if candidate_A_index_list == []:
continue
if candidate_B_index_list == []:
continue
index_A = candidate_A_index_list[0]
index_B = candidate_B_index_list[0]
candidate_dict_A = track_id_dict[track_id_A][index_A]
candidate_dict_B = track_id_dict[track_id_B][index_B]
keypoints_A = candidate_dict_A['keypoints']
keypoints_B = candidate_dict_B['keypoints']
bbox_A = candidate_dict_A['bbox']
bbox_B = candidate_dict_B['bbox']
if validate_bbox(bbox_A) is False:
continue
if validate_bbox(bbox_B) is False:
continue
<DeepExtract>
num_elements = len(keypoints_A)
num_keypoints = num_elements / 3
assert num_keypoints == 15
(x0, y0, w, h) = bbox_A
flag_pass_check = True
graph = 15 * [(0, 0)]
for id in range(15):
x = keypoints_A[3 * id] - x0
y = keypoints_A[3 * id + 1] - y0
score = keypoints_A[3 * id + 2]
graph[id] = (int(x), int(y))
(graph_A, flag_pass_check) = (graph, flag_pass_check)
</DeepExtract>
if flag_pass_check is False:
continue
<DeepExtract>
num_elements = len(keypoints_B)
num_keypoints = num_elements / 3
assert num_keypoints == 15
(x0, y0, w, h) = bbox_B
flag_pass_check = True
graph = 15 * [(0, 0)]
for id in range(15):
x = keypoints_B[3 * id] - x0
y = keypoints_B[3 * id + 1] - y0
score = keypoints_B[3 * id + 2]
graph[id] = (int(x), int(y))
(graph_B, flag_pass_check) = (graph, flag_pass_check)
</DeepExtract>
if flag_pass_check is False:
continue
<DeepExtract>
img_path_root = '/export/guanghan/Data_2018/posetrack_data/'
img_path_A = os.path.join(img_path_root, candidate_dict_A['img_path'])
img_path_B = os.path.join(img_path_root, candidate_dict_B['img_path'])
sample_graph_pair = (graph_A, graph_B)
(data_A, data_B) = graph_pair_to_data(sample_graph_pair)
(flag_match, dist) = pose_matching(data_A, data_B)
match_str = 'Match' if flag_match else 'Not_Match'
if img_path_A == img_path_B:
img = cv2.imread(img_path_A)
print(img.shape)
pose_keypoints_2d = candidate_dict_A['keypoints']
joints = reshape_keypoints_into_joints(pose_keypoints_2d)
img = show_poses_from_python_data(img, joints, joint_pairs, joint_names)
pose_keypoints_2d = candidate_dict_B['keypoints']
joints = reshape_keypoints_into_joints(pose_keypoints_2d)
img = show_poses_from_python_data(img, joints, joint_pairs, joint_names)
bbox = candidate_dict_A['bbox']
track_id_A = candidate_dict_A['track_id']
font = cv2.FONT_HERSHEY_SIMPLEX
color = find_color_scalar('red')
cv2.putText(img, 'ID:{}, {}, dist:{:.2f}'.format(track_id_A, match_str, dist), (int(bbox[0]), int(bbox[1])), font, fontScale=1, color=color, thickness=2, lineType=cv2.LINE_AA)
color = find_color_scalar('blue')
cv2.putText(img, 'Frame #: {}'.format(candidate_dict_A['img_id']), (30, 30), font, fontScale=1, color=color, thickness=2, lineType=cv2.LINE_AA)
bbox = candidate_dict_B['bbox']
track_id_B = candidate_dict_B['track_id']
font = cv2.FONT_HERSHEY_SIMPLEX
color = find_color_scalar('red')
cv2.putText(img, 'ID:{}, {}, dist:{:.2f}'.format(track_id_B, match_str, dist), (int(bbox[0]), int(bbox[1])), font, fontScale=1, color=color, thickness=2, lineType=cv2.LINE_AA)
color = find_color_scalar('blue')
(img, flag_match) = (img, flag_match)
</DeepExtract>
match_str = 'Match' if flag_match else 'Not_Match'
img_name = match_str + '_frame_' + str(candidate_dict_A['img_id']) + '_' + str(track_id_A) + '_' + str(track_id_B) + '.jpg'
img_path = os.path.join('/export/guanghan/temp2/', img_name)
cv2.imwrite(img_path, img)
return
|
def visualize_graph_pairs_from_json(json_file_path):
python_data = read_json_from_file(json_file_path)
num_imgs = len(python_data)
track_id_dict = {}
for track_id in range(100):
track_id_dict[track_id] = []
max_track_id = -1
for img_id in range(num_imgs):
image_id = python_data[img_id]['image']['id']
candidates = python_data[img_id]['candidates']
image_path = os.path.join(python_data[img_id]['image']['folder'], python_data[img_id]['image']['name'])
num_candidates = len(candidates)
for candidate_id in range(num_candidates):
candidate = candidates[candidate_id]
track_id = candidate['track_id']
keypoints = candidate['pose_keypoints_2d']
bbox = candidate['det_bbox']
if track_id > max_track_id:
max_track_id = track_id
candidate_dict = {'track_id': track_id, 'img_id': image_id, 'img_path': image_path, 'bbox': bbox, 'keypoints': keypoints}
track_id_dict[track_id].append(candidate_dict)
for img_id in range(num_imgs):
for track_id_A in range(max_track_id):
for track_id_B in range(max_track_id):
if track_id_A == track_id_B:
continue
index_list = []
for (i, dic) in enumerate(track_id_dict[track_id_A]):
if dic['img_id'] == img_id:
index_list.append(i)
candidate_A_index_list = index_list
index_list = []
for (i, dic) in enumerate(track_id_dict[track_id_B]):
if dic['img_id'] == img_id:
index_list.append(i)
candidate_B_index_list = index_list
if candidate_A_index_list == []:
continue
if candidate_B_index_list == []:
continue
index_A = candidate_A_index_list[0]
index_B = candidate_B_index_list[0]
candidate_dict_A = track_id_dict[track_id_A][index_A]
candidate_dict_B = track_id_dict[track_id_B][index_B]
keypoints_A = candidate_dict_A['keypoints']
keypoints_B = candidate_dict_B['keypoints']
bbox_A = candidate_dict_A['bbox']
bbox_B = candidate_dict_B['bbox']
if validate_bbox(bbox_A) is False:
continue
if validate_bbox(bbox_B) is False:
continue
num_elements = len(keypoints_A)
num_keypoints = num_elements / 3
assert num_keypoints == 15
(x0, y0, w, h) = bbox_A
flag_pass_check = True
graph = 15 * [(0, 0)]
for id in range(15):
x = keypoints_A[3 * id] - x0
y = keypoints_A[3 * id + 1] - y0
score = keypoints_A[3 * id + 2]
graph[id] = (int(x), int(y))
(graph_A, flag_pass_check) = (graph, flag_pass_check)
if flag_pass_check is False:
continue
num_elements = len(keypoints_B)
num_keypoints = num_elements / 3
assert num_keypoints == 15
(x0, y0, w, h) = bbox_B
flag_pass_check = True
graph = 15 * [(0, 0)]
for id in range(15):
x = keypoints_B[3 * id] - x0
y = keypoints_B[3 * id + 1] - y0
score = keypoints_B[3 * id + 2]
graph[id] = (int(x), int(y))
(graph_B, flag_pass_check) = (graph, flag_pass_check)
if flag_pass_check is False:
continue
img_path_root = '/export/guanghan/Data_2018/posetrack_data/'
img_path_A = os.path.join(img_path_root, candidate_dict_A['img_path'])
img_path_B = os.path.join(img_path_root, candidate_dict_B['img_path'])
sample_graph_pair = (graph_A, graph_B)
(data_A, data_B) = graph_pair_to_data(sample_graph_pair)
(flag_match, dist) = pose_matching(data_A, data_B)
match_str = 'Match' if flag_match else 'Not_Match'
if img_path_A == img_path_B:
img = cv2.imread(img_path_A)
print(img.shape)
pose_keypoints_2d = candidate_dict_A['keypoints']
joints = reshape_keypoints_into_joints(pose_keypoints_2d)
img = show_poses_from_python_data(img, joints, joint_pairs, joint_names)
pose_keypoints_2d = candidate_dict_B['keypoints']
joints = reshape_keypoints_into_joints(pose_keypoints_2d)
img = show_poses_from_python_data(img, joints, joint_pairs, joint_names)
bbox = candidate_dict_A['bbox']
track_id_A = candidate_dict_A['track_id']
font = cv2.FONT_HERSHEY_SIMPLEX
color = find_color_scalar('red')
cv2.putText(img, 'ID:{}, {}, dist:{:.2f}'.format(track_id_A, match_str, dist), (int(bbox[0]), int(bbox[1])), font, fontScale=1, color=color, thickness=2, lineType=cv2.LINE_AA)
color = find_color_scalar('blue')
cv2.putText(img, 'Frame #: {}'.format(candidate_dict_A['img_id']), (30, 30), font, fontScale=1, color=color, thickness=2, lineType=cv2.LINE_AA)
bbox = candidate_dict_B['bbox']
track_id_B = candidate_dict_B['track_id']
font = cv2.FONT_HERSHEY_SIMPLEX
color = find_color_scalar('red')
cv2.putText(img, 'ID:{}, {}, dist:{:.2f}'.format(track_id_B, match_str, dist), (int(bbox[0]), int(bbox[1])), font, fontScale=1, color=color, thickness=2, lineType=cv2.LINE_AA)
color = find_color_scalar('blue')
(img, flag_match) = (img, flag_match)
match_str = 'Match' if flag_match else 'Not_Match'
img_name = match_str + '_frame_' + str(candidate_dict_A['img_id']) + '_' + str(track_id_A) + '_' + str(track_id_B) + '.jpg'
img_path = os.path.join('/export/guanghan/temp2/', img_name)
cv2.imwrite(img_path, img)
return
|
cvToolkit
|
positive
|
def _set_extensions(self):
"""
Sets common named extensions to private attributes and creates a list
of critical extensions
"""
<DeepExtract>
if not isinstance(value, str_cls):
raise TypeError(unwrap('\n %s value must be a unicode string, not %s\n ', type_name(self), type_name(value)))
if value.startswith('.'):
encoded_value = b'.' + value[1:].encode(self._encoding)
else:
encoded_value = value.encode(self._encoding)
self._unicode = value
self.contents = encoded_value
self._header = None
if self._trailer != b'':
self._trailer = b''
</DeepExtract>
for extension in self['tbs_certificate']['extensions']:
name = extension['extn_id'].native
attribute_name = '_%s_value' % name
if hasattr(self, attribute_name):
setattr(self, attribute_name, extension['extn_value'].parsed)
if extension['critical'].native:
self._critical_extensions.add(name)
self._processed_extensions = True
|
def _set_extensions(self):
"""
Sets common named extensions to private attributes and creates a list
of critical extensions
"""
if not isinstance(value, str_cls):
raise TypeError(unwrap('\n %s value must be a unicode string, not %s\n ', type_name(self), type_name(value)))
if value.startswith('.'):
encoded_value = b'.' + value[1:].encode(self._encoding)
else:
encoded_value = value.encode(self._encoding)
self._unicode = value
self.contents = encoded_value
self._header = None
if self._trailer != b'':
self._trailer = b''
for extension in self['tbs_certificate']['extensions']:
name = extension['extn_id'].native
attribute_name = '_%s_value' % name
if hasattr(self, attribute_name):
setattr(self, attribute_name, extension['extn_value'].parsed)
if extension['critical'].native:
self._critical_extensions.add(name)
self._processed_extensions = True
|
asn1crypto
|
positive
|
def shades(in_col, number=10):
"""
Returns input color as well as shades of that color (darker colors).
number specifies how many new ones to return.
"""
assert is_rgb(in_col) or is_hex(in_col), 'Error, the input must be a hex color string or an RGB tuple.'
assert type(number) is int, 'Error, the input number must be an integer.'
assert 2 <= number and number <= 1000, 'Error, the input number must be between 2 and 1000'
<DeepExtract>
if type(in_col) is not str:
hex_color = False
regular_expression = re.compile('^\t\t\t\t\t\t\t#match beginning of string\n\t\t\t\t\t\t\t\t\t\t[#]{1} \t\t\t\t\t\t#exactly one hash\n\t\t\t\t\t\t\t\t\t\t[0-9a-fA-F]{6}\t\t\t\t#exactly six of the hex symbols 0 to 9, a to f (big or small)\n\t\t\t\t\t\t\t\t\t\t$\t\t\t\t\t\t\t#match end of string\n\t\t\t\t\t\t\t\t\t\t', re.VERBOSE)
if regular_expression.match(in_col) == None:
hex_color = False
else:
hex_color = True
</DeepExtract>
if hex_color:
<DeepExtract>
assert is_hex(in_col) is True, 'Error, %s is not a valid hex color.' % in_col
in_col = in_col.lstrip('#')
in_col = tuple([int(in_col[s:s + 2], 16) for s in range(0, len(in_col), 2)])
</DeepExtract>
(r, g, b) = in_col
(r, g, b) = [x / 255.0 for x in [r, g, b]]
(hue, lightness, saturation) = colorsys.rgb_to_hls(r, g, b)
step = lightness / float(number)
lightness_list = [lightness - step * s for s in range(1, number + 1)]
colors = [in_col]
colors.extend([[int(round(x * 255)) for x in colorsys.hls_to_rgb(hue, l, saturation)] for l in lightness_list])
if hex_color:
colors = [rgb_to_hex(tuple(s)) for s in colors]
return colors
|
def shades(in_col, number=10):
"""
Returns input color as well as shades of that color (darker colors).
number specifies how many new ones to return.
"""
assert is_rgb(in_col) or is_hex(in_col), 'Error, the input must be a hex color string or an RGB tuple.'
assert type(number) is int, 'Error, the input number must be an integer.'
assert 2 <= number and number <= 1000, 'Error, the input number must be between 2 and 1000'
if type(in_col) is not str:
hex_color = False
regular_expression = re.compile('^\t\t\t\t\t\t\t#match beginning of string\n\t\t\t\t\t\t\t\t\t\t[#]{1} \t\t\t\t\t\t#exactly one hash\n\t\t\t\t\t\t\t\t\t\t[0-9a-fA-F]{6}\t\t\t\t#exactly six of the hex symbols 0 to 9, a to f (big or small)\n\t\t\t\t\t\t\t\t\t\t$\t\t\t\t\t\t\t#match end of string\n\t\t\t\t\t\t\t\t\t\t', re.VERBOSE)
if regular_expression.match(in_col) == None:
hex_color = False
else:
hex_color = True
if hex_color:
assert is_hex(in_col) is True, 'Error, %s is not a valid hex color.' % in_col
in_col = in_col.lstrip('#')
in_col = tuple([int(in_col[s:s + 2], 16) for s in range(0, len(in_col), 2)])
(r, g, b) = in_col
(r, g, b) = [x / 255.0 for x in [r, g, b]]
(hue, lightness, saturation) = colorsys.rgb_to_hls(r, g, b)
step = lightness / float(number)
lightness_list = [lightness - step * s for s in range(1, number + 1)]
colors = [in_col]
colors.extend([[int(round(x * 255)) for x in colorsys.hls_to_rgb(hue, l, saturation)] for l in lightness_list])
if hex_color:
colors = [rgb_to_hex(tuple(s)) for s in colors]
return colors
|
DNApy
|
positive
|
def download_waymo(dst, delete_zips=True):
for split in split_list:
<DeepExtract>
remote_folder = f'waymo_open_dataset_v_1_0_0_individual_files/{split}'
</DeepExtract>
local_folder = download_gcloud(remote_folder, dst, note)
for zip_file in filter(lambda x: x.endswith('.tar'), os.listdir(local_folder)):
unzip(os.path.join(local_folder, zip_file), delete_zips=delete_zips)
print(f'Waymo dataset has been downloaded to {dst}')
|
def download_waymo(dst, delete_zips=True):
for split in split_list:
remote_folder = f'waymo_open_dataset_v_1_0_0_individual_files/{split}'
local_folder = download_gcloud(remote_folder, dst, note)
for zip_file in filter(lambda x: x.endswith('.tar'), os.listdir(local_folder)):
unzip(os.path.join(local_folder, zip_file), delete_zips=delete_zips)
print(f'Waymo dataset has been downloaded to {dst}')
|
3D_adapt_auto_driving
|
positive
|
def __init__(self, name, subcon, setflags=0, clearflags=0):
Construct.__init__(self, name, subcon.conflags)
self.subcon = subcon
<DeepExtract>
self.conflags |= setflags
</DeepExtract>
<DeepExtract>
self.conflags &= ~clearflags
</DeepExtract>
|
def __init__(self, name, subcon, setflags=0, clearflags=0):
Construct.__init__(self, name, subcon.conflags)
self.subcon = subcon
self.conflags |= setflags
self.conflags &= ~clearflags
|
ARMV8_Simulator
|
positive
|
def __mul__(self, other):
<DeepExtract>
if type(other) in [float, int, torch.Tensor, np.array, np.ndarray]:
other = Constant(other, self.cuda)
other = other
</DeepExtract>
return Fn('*', lambda a, b: a * b, self, other)
|
def __mul__(self, other):
if type(other) in [float, int, torch.Tensor, np.array, np.ndarray]:
other = Constant(other, self.cuda)
other = other
return Fn('*', lambda a, b: a * b, self, other)
|
dl2
|
positive
|
def _subset_table(hdf5_biom, json_table_str, axis, ids):
if axis not in ['sample', 'observation']:
raise ValueError("Invalid axis '%s'. Must be either 'sample' or 'observation'." % axis)
if hdf5_biom is None and json_table_str is None:
raise ValueError('Must specify an input table')
elif hdf5_biom is not None and json_table_str is not None:
raise ValueError('Can only specify one input table')
if json_table_str is not None:
(idxs, new_axis_md) = get_axis_indices(json_table_str, ids, axis)
new_data = direct_slice_data(json_table_str, idxs, axis)
def subset_generator():
yield '{'
yield direct_parse_key(json_table_str, 'id')
yield ','
yield direct_parse_key(json_table_str, 'format')
yield ','
yield direct_parse_key(json_table_str, 'format_url')
yield ','
yield direct_parse_key(json_table_str, 'type')
yield ','
yield direct_parse_key(json_table_str, 'generated_by')
yield ','
yield direct_parse_key(json_table_str, 'date')
yield ','
yield direct_parse_key(json_table_str, 'matrix_type')
yield ','
yield direct_parse_key(json_table_str, 'matrix_element_type')
yield ','
yield new_data
yield ','
yield new_axis_md
yield ','
if axis == 'observation':
yield direct_parse_key(json_table_str, 'columns')
else:
yield direct_parse_key(json_table_str, 'rows')
yield '}'
format_ = 'json'
<DeepExtract>
yield '{'
yield direct_parse_key(json_table_str, 'id')
yield ','
yield direct_parse_key(json_table_str, 'format')
yield ','
yield direct_parse_key(json_table_str, 'format_url')
yield ','
yield direct_parse_key(json_table_str, 'type')
yield ','
yield direct_parse_key(json_table_str, 'generated_by')
yield ','
yield direct_parse_key(json_table_str, 'date')
yield ','
yield direct_parse_key(json_table_str, 'matrix_type')
yield ','
yield direct_parse_key(json_table_str, 'matrix_element_type')
yield ','
yield new_data
yield ','
yield new_axis_md
yield ','
if axis == 'observation':
yield direct_parse_key(json_table_str, 'columns')
else:
yield direct_parse_key(json_table_str, 'rows')
yield '}'
</DeepExtract>
else:
with biom_open(hdf5_biom) as f:
table = Table.from_hdf5(f, ids=ids, axis=axis)
format_ = 'hdf5'
return (table, format_)
|
def _subset_table(hdf5_biom, json_table_str, axis, ids):
if axis not in ['sample', 'observation']:
raise ValueError("Invalid axis '%s'. Must be either 'sample' or 'observation'." % axis)
if hdf5_biom is None and json_table_str is None:
raise ValueError('Must specify an input table')
elif hdf5_biom is not None and json_table_str is not None:
raise ValueError('Can only specify one input table')
if json_table_str is not None:
(idxs, new_axis_md) = get_axis_indices(json_table_str, ids, axis)
new_data = direct_slice_data(json_table_str, idxs, axis)
def subset_generator():
yield '{'
yield direct_parse_key(json_table_str, 'id')
yield ','
yield direct_parse_key(json_table_str, 'format')
yield ','
yield direct_parse_key(json_table_str, 'format_url')
yield ','
yield direct_parse_key(json_table_str, 'type')
yield ','
yield direct_parse_key(json_table_str, 'generated_by')
yield ','
yield direct_parse_key(json_table_str, 'date')
yield ','
yield direct_parse_key(json_table_str, 'matrix_type')
yield ','
yield direct_parse_key(json_table_str, 'matrix_element_type')
yield ','
yield new_data
yield ','
yield new_axis_md
yield ','
if axis == 'observation':
yield direct_parse_key(json_table_str, 'columns')
else:
yield direct_parse_key(json_table_str, 'rows')
yield '}'
format_ = 'json'
yield '{'
yield direct_parse_key(json_table_str, 'id')
yield ','
yield direct_parse_key(json_table_str, 'format')
yield ','
yield direct_parse_key(json_table_str, 'format_url')
yield ','
yield direct_parse_key(json_table_str, 'type')
yield ','
yield direct_parse_key(json_table_str, 'generated_by')
yield ','
yield direct_parse_key(json_table_str, 'date')
yield ','
yield direct_parse_key(json_table_str, 'matrix_type')
yield ','
yield direct_parse_key(json_table_str, 'matrix_element_type')
yield ','
yield new_data
yield ','
yield new_axis_md
yield ','
if axis == 'observation':
yield direct_parse_key(json_table_str, 'columns')
else:
yield direct_parse_key(json_table_str, 'rows')
yield '}'
else:
with biom_open(hdf5_biom) as f:
table = Table.from_hdf5(f, ids=ids, axis=axis)
format_ = 'hdf5'
return (table, format_)
|
biom-format
|
positive
|
def forward(self, x):
assert len(x) == 1
x = x[0]
if self.with_avg_pool:
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc0(x)
if self.sync_bn:
<DeepExtract>
assert x.dim() == 2
if self.expand_for_syncbn:
x = self.bn0(x.unsqueeze(-1).unsqueeze(-1)).squeeze(-1).squeeze(-1)
else:
x = self.bn0(x)
x = x
</DeepExtract>
else:
x = self.bn0(x)
x = self.relu(x)
x = self.drop(x)
x = self.fc1(x)
x = self.relu(x)
return [x]
|
def forward(self, x):
assert len(x) == 1
x = x[0]
if self.with_avg_pool:
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc0(x)
if self.sync_bn:
assert x.dim() == 2
if self.expand_for_syncbn:
x = self.bn0(x.unsqueeze(-1).unsqueeze(-1)).squeeze(-1).squeeze(-1)
else:
x = self.bn0(x)
x = x
else:
x = self.bn0(x)
x = self.relu(x)
x = self.drop(x)
x = self.fc1(x)
x = self.relu(x)
return [x]
|
DenseCL
|
positive
|
def nvmlSystemGetNVMLVersion():
c_version = create_string_buffer(NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE)
<DeepExtract>
global nvmlLib
if 'nvmlSystemGetNVMLVersion' in _nvmlGetFunctionPointer_cache:
fn = _nvmlGetFunctionPointer_cache['nvmlSystemGetNVMLVersion']
libLoadLock.acquire()
try:
if nvmlLib == None:
raise NVMLError(NVML_ERROR_UNINITIALIZED)
try:
_nvmlGetFunctionPointer_cache['nvmlSystemGetNVMLVersion'] = getattr(nvmlLib, 'nvmlSystemGetNVMLVersion')
fn = _nvmlGetFunctionPointer_cache['nvmlSystemGetNVMLVersion']
except AttributeError:
raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND)
finally:
libLoadLock.release()
</DeepExtract>
ret = fn(c_version, c_uint(NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE))
<DeepExtract>
if ret != NVML_SUCCESS:
raise NVMLError(ret)
return ret
</DeepExtract>
return c_version.value
|
def nvmlSystemGetNVMLVersion():
c_version = create_string_buffer(NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE)
global nvmlLib
if 'nvmlSystemGetNVMLVersion' in _nvmlGetFunctionPointer_cache:
fn = _nvmlGetFunctionPointer_cache['nvmlSystemGetNVMLVersion']
libLoadLock.acquire()
try:
if nvmlLib == None:
raise NVMLError(NVML_ERROR_UNINITIALIZED)
try:
_nvmlGetFunctionPointer_cache['nvmlSystemGetNVMLVersion'] = getattr(nvmlLib, 'nvmlSystemGetNVMLVersion')
fn = _nvmlGetFunctionPointer_cache['nvmlSystemGetNVMLVersion']
except AttributeError:
raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND)
finally:
libLoadLock.release()
ret = fn(c_version, c_uint(NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE))
if ret != NVML_SUCCESS:
raise NVMLError(ret)
return ret
return c_version.value
|
DeepFaceLab_Linux
|
positive
|
def anchor_target_single(flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, target_means, target_stds, cfg, label_channels=1, sampling=True, unmap_outputs=True):
<DeepExtract>
(img_h, img_w) = img_meta['img_shape'][:2][:2]
if cfg.allowed_border >= 0:
inside_flags = valid_flags & (flat_anchors[:, 0] >= -cfg.allowed_border).type(torch.uint8) & (flat_anchors[:, 1] >= -cfg.allowed_border).type(torch.uint8) & (flat_anchors[:, 2] < img_w + cfg.allowed_border).type(torch.uint8) & (flat_anchors[:, 3] < img_h + cfg.allowed_border).type(torch.uint8)
else:
inside_flags = valid_flags
inside_flags = inside_flags
</DeepExtract>
if not inside_flags.any():
return (None,) * 6
anchors = flat_anchors[inside_flags.type(torch.bool), :]
if sampling:
(assign_result, sampling_result) = assign_and_sample(anchors, gt_bboxes, gt_bboxes_ignore, None, cfg)
else:
bbox_assigner = build_assigner(cfg.assigner)
assign_result = bbox_assigner.assign(anchors, gt_bboxes, gt_bboxes_ignore, gt_labels)
bbox_sampler = PseudoSampler()
sampling_result = bbox_sampler.sample(assign_result, anchors, gt_bboxes)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
pos_bbox_targets = bbox2delta(sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes, target_means, target_stds)
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
if gt_labels is None:
labels[pos_inds] = 1
else:
labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
if cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = cfg.pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
<DeepExtract>
if labels.dim() == 1:
ret = labels.new_full((num_total_anchors,), fill)
ret[inside_flags.type(torch.bool)] = labels
else:
new_size = (num_total_anchors,) + labels.size()[1:]
ret = labels.new_full(new_size, fill)
ret[inside_flags.type(torch.bool), :] = labels
labels = ret
</DeepExtract>
<DeepExtract>
if label_weights.dim() == 1:
ret = label_weights.new_full((num_total_anchors,), fill)
ret[inside_flags.type(torch.bool)] = label_weights
else:
new_size = (num_total_anchors,) + label_weights.size()[1:]
ret = label_weights.new_full(new_size, fill)
ret[inside_flags.type(torch.bool), :] = label_weights
label_weights = ret
</DeepExtract>
<DeepExtract>
if bbox_targets.dim() == 1:
ret = bbox_targets.new_full((num_total_anchors,), fill)
ret[inside_flags.type(torch.bool)] = bbox_targets
else:
new_size = (num_total_anchors,) + bbox_targets.size()[1:]
ret = bbox_targets.new_full(new_size, fill)
ret[inside_flags.type(torch.bool), :] = bbox_targets
bbox_targets = ret
</DeepExtract>
<DeepExtract>
if bbox_weights.dim() == 1:
ret = bbox_weights.new_full((num_total_anchors,), fill)
ret[inside_flags.type(torch.bool)] = bbox_weights
else:
new_size = (num_total_anchors,) + bbox_weights.size()[1:]
ret = bbox_weights.new_full(new_size, fill)
ret[inside_flags.type(torch.bool), :] = bbox_weights
bbox_weights = ret
</DeepExtract>
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds)
|
def anchor_target_single(flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, target_means, target_stds, cfg, label_channels=1, sampling=True, unmap_outputs=True):
(img_h, img_w) = img_meta['img_shape'][:2][:2]
if cfg.allowed_border >= 0:
inside_flags = valid_flags & (flat_anchors[:, 0] >= -cfg.allowed_border).type(torch.uint8) & (flat_anchors[:, 1] >= -cfg.allowed_border).type(torch.uint8) & (flat_anchors[:, 2] < img_w + cfg.allowed_border).type(torch.uint8) & (flat_anchors[:, 3] < img_h + cfg.allowed_border).type(torch.uint8)
else:
inside_flags = valid_flags
inside_flags = inside_flags
if not inside_flags.any():
return (None,) * 6
anchors = flat_anchors[inside_flags.type(torch.bool), :]
if sampling:
(assign_result, sampling_result) = assign_and_sample(anchors, gt_bboxes, gt_bboxes_ignore, None, cfg)
else:
bbox_assigner = build_assigner(cfg.assigner)
assign_result = bbox_assigner.assign(anchors, gt_bboxes, gt_bboxes_ignore, gt_labels)
bbox_sampler = PseudoSampler()
sampling_result = bbox_sampler.sample(assign_result, anchors, gt_bboxes)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
pos_bbox_targets = bbox2delta(sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes, target_means, target_stds)
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
if gt_labels is None:
labels[pos_inds] = 1
else:
labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
if cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = cfg.pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
if labels.dim() == 1:
ret = labels.new_full((num_total_anchors,), fill)
ret[inside_flags.type(torch.bool)] = labels
else:
new_size = (num_total_anchors,) + labels.size()[1:]
ret = labels.new_full(new_size, fill)
ret[inside_flags.type(torch.bool), :] = labels
labels = ret
if label_weights.dim() == 1:
ret = label_weights.new_full((num_total_anchors,), fill)
ret[inside_flags.type(torch.bool)] = label_weights
else:
new_size = (num_total_anchors,) + label_weights.size()[1:]
ret = label_weights.new_full(new_size, fill)
ret[inside_flags.type(torch.bool), :] = label_weights
label_weights = ret
if bbox_targets.dim() == 1:
ret = bbox_targets.new_full((num_total_anchors,), fill)
ret[inside_flags.type(torch.bool)] = bbox_targets
else:
new_size = (num_total_anchors,) + bbox_targets.size()[1:]
ret = bbox_targets.new_full(new_size, fill)
ret[inside_flags.type(torch.bool), :] = bbox_targets
bbox_targets = ret
if bbox_weights.dim() == 1:
ret = bbox_weights.new_full((num_total_anchors,), fill)
ret[inside_flags.type(torch.bool)] = bbox_weights
else:
new_size = (num_total_anchors,) + bbox_weights.size()[1:]
ret = bbox_weights.new_full(new_size, fill)
ret[inside_flags.type(torch.bool), :] = bbox_weights
bbox_weights = ret
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds)
|
D2Det
|
positive
|
def write_status_from(conf, **status):
""" if a status_file is known then path is created and the
give status is written as the only content. """
<DeepExtract>
if default is None:
default = self.default_status_file(conf)
if conf is None:
status_file = default
status_file = conf.get('Service', 'StatusFile', default)
status_file = self.expand_special(status_file, conf)
</DeepExtract>
if not status_file:
logg.debug('status %s but no status_file', conf.name())
return False
dirpath = os.path.dirname(os.path.abspath(status_file))
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
if conf.status is None:
<DeepExtract>
status_file = self.status_file_from(conf)
status = {}
if hasattr(defaults, 'keys'):
for key in defaults.keys():
status[key] = defaults[key]
elif isinstance(defaults, string_types):
status['ActiveState'] = defaults
if not status_file:
logg.debug('no status file. returning %s', status)
conf.status = status
if not os.path.isfile(status_file):
logg.debug('no status file: %s\n returning %s', status_file, status)
conf.status = status
if self.truncate_old(status_file):
logg.debug('old status file: %s\n returning %s', status_file, status)
conf.status = status
try:
logg.debug('reading %s', status_file)
for line in open(status_file):
if line.strip():
m = re.match('(\\w+)[:=](.*)', line)
if m:
(key, value) = (m.group(1), m.group(2))
if key.strip():
status[key.strip()] = value.strip()
elif line in ['active', 'inactive', 'failed']:
status['ActiveState'] = line
else:
logg.warning('ignored %s', line.strip())
except:
logg.warning("bad read of status file '%s'", status_file)
conf.status = status
</DeepExtract>
if True:
for key in sorted(status.keys()):
value = status[key]
if key.upper() == 'AS':
key = 'ActiveState'
if key.upper() == 'EXIT':
key = 'ExecMainCode'
if value is None:
try:
del conf.status[key]
except KeyError:
pass
else:
conf.status[key] = value
try:
with open(status_file, 'w') as f:
for key in sorted(conf.status):
value = conf.status[key]
if key == 'MainPID' and str(value) == '0':
logg.warning('ignore writing MainPID=0')
continue
content = '{}={}\n'.format(key, str(value))
logg.debug('writing to %s\n\t%s', status_file, content.strip())
f.write(content)
except IOError as e:
logg.error('writing STATUS %s: %s\n\t to status file %s', status, e, status_file)
return True
|
def write_status_from(conf, **status):
""" if a status_file is known then path is created and the
give status is written as the only content. """
if default is None:
default = self.default_status_file(conf)
if conf is None:
status_file = default
status_file = conf.get('Service', 'StatusFile', default)
status_file = self.expand_special(status_file, conf)
if not status_file:
logg.debug('status %s but no status_file', conf.name())
return False
dirpath = os.path.dirname(os.path.abspath(status_file))
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
if conf.status is None:
status_file = self.status_file_from(conf)
status = {}
if hasattr(defaults, 'keys'):
for key in defaults.keys():
status[key] = defaults[key]
elif isinstance(defaults, string_types):
status['ActiveState'] = defaults
if not status_file:
logg.debug('no status file. returning %s', status)
conf.status = status
if not os.path.isfile(status_file):
logg.debug('no status file: %s\n returning %s', status_file, status)
conf.status = status
if self.truncate_old(status_file):
logg.debug('old status file: %s\n returning %s', status_file, status)
conf.status = status
try:
logg.debug('reading %s', status_file)
for line in open(status_file):
if line.strip():
m = re.match('(\\w+)[:=](.*)', line)
if m:
(key, value) = (m.group(1), m.group(2))
if key.strip():
status[key.strip()] = value.strip()
elif line in ['active', 'inactive', 'failed']:
status['ActiveState'] = line
else:
logg.warning('ignored %s', line.strip())
except:
logg.warning("bad read of status file '%s'", status_file)
conf.status = status
if True:
for key in sorted(status.keys()):
value = status[key]
if key.upper() == 'AS':
key = 'ActiveState'
if key.upper() == 'EXIT':
key = 'ExecMainCode'
if value is None:
try:
del conf.status[key]
except KeyError:
pass
else:
conf.status[key] = value
try:
with open(status_file, 'w') as f:
for key in sorted(conf.status):
value = conf.status[key]
if key == 'MainPID' and str(value) == '0':
logg.warning('ignore writing MainPID=0')
continue
content = '{}={}\n'.format(key, str(value))
logg.debug('writing to %s\n\t%s', status_file, content.strip())
f.write(content)
except IOError as e:
logg.error('writing STATUS %s: %s\n\t to status file %s', status, e, status_file)
return True
|
deployment
|
positive
|
@never_cache
def get(self, request, *args, **kwargs):
<DeepExtract>
self.widgets = self._widgets.values()
</DeepExtract>
return self.template_response(self.get_template_list('views/model_dashboard.html'), self.get_context())
|
@never_cache
def get(self, request, *args, **kwargs):
self.widgets = self._widgets.values()
return self.template_response(self.get_template_list('views/model_dashboard.html'), self.get_context())
|
book
|
positive
|
def test_mixed_case_checksums(self):
logger.info(self.getTestHeader(sys._getframe().f_code.co_name))
bag = bagit.make_bag(self.tmpdir, checksums=['md5'])
hashstr = {}
for key in bag.entries.keys():
if key.startswith('data' + os.sep):
hashstr = bag.entries[key]
hashstr = next(iter(hashstr.values()))
<DeepExtract>
with bagit.open_text_file(j(self.tmpdir, 'manifest-md5.txt')) as f:
manifest = f.read()
</DeepExtract>
manifest = manifest.replace(hashstr, hashstr.upper())
with open(j(self.tmpdir, 'manifest-md5.txt'), 'wb') as m:
m.write(manifest.encode('utf-8'))
hasher = hashlib.new('md5')
contents = slurp_text_file(j(self.tmpdir, 'manifest-md5.txt')).encode('utf-8')
hasher.update(contents)
with open(j(self.tmpdir, 'tagmanifest-md5.txt'), 'r') as tagmanifest:
tagman_contents = tagmanifest.read()
tagman_contents = tagman_contents.replace(bag.entries['manifest-md5.txt']['md5'], hasher.hexdigest())
with open(j(self.tmpdir, 'tagmanifest-md5.txt'), 'w') as tagmanifest:
tagmanifest.write(tagman_contents)
bag = bagit.BDBag(self.tmpdir)
self.assertTrue(self.validate(bag))
|
def test_mixed_case_checksums(self):
logger.info(self.getTestHeader(sys._getframe().f_code.co_name))
bag = bagit.make_bag(self.tmpdir, checksums=['md5'])
hashstr = {}
for key in bag.entries.keys():
if key.startswith('data' + os.sep):
hashstr = bag.entries[key]
hashstr = next(iter(hashstr.values()))
with bagit.open_text_file(j(self.tmpdir, 'manifest-md5.txt')) as f:
manifest = f.read()
manifest = manifest.replace(hashstr, hashstr.upper())
with open(j(self.tmpdir, 'manifest-md5.txt'), 'wb') as m:
m.write(manifest.encode('utf-8'))
hasher = hashlib.new('md5')
contents = slurp_text_file(j(self.tmpdir, 'manifest-md5.txt')).encode('utf-8')
hasher.update(contents)
with open(j(self.tmpdir, 'tagmanifest-md5.txt'), 'r') as tagmanifest:
tagman_contents = tagmanifest.read()
tagman_contents = tagman_contents.replace(bag.entries['manifest-md5.txt']['md5'], hasher.hexdigest())
with open(j(self.tmpdir, 'tagmanifest-md5.txt'), 'w') as tagmanifest:
tagmanifest.write(tagman_contents)
bag = bagit.BDBag(self.tmpdir)
self.assertTrue(self.validate(bag))
|
bdbag
|
positive
|
def processor(dircetory, subset, force_process):
""" download and process """
librispeech_urls = SUBSETS
if subset not in librispeech_urls:
raise ValueError(subset, 'is not in Librispeech')
subset_csv = os.path.join(dircetory, subset + '.csv')
if not force_process and os.path.exists(subset_csv):
return subset_csv
dataset_dir = os.path.join(dircetory, subset)
logging.info('Downloading and process the librispeech in %s' % dataset_dir)
logging.info('Preparing dataset %s' % subset)
<DeepExtract>
if not gfile.Exists(dataset_dir):
gfile.MakeDirs(dataset_dir)
(_, tar_filepath) = tempfile.mkstemp(suffix='.tar.gz')
try:
logging.info('Downloading %s to %s' % (librispeech_urls[subset], tar_filepath))
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading {} {:.1f}%'.format(tar_filepath, 100.0 * count * block_size / total_size))
sys.stdout.flush()
urllib.request.urlretrieve(librispeech_urls[subset], tar_filepath, _progress)
statinfo = os.stat(tar_filepath)
logging.info('Successfully downloaded %s, size(bytes): %d' % (librispeech_urls[subset], statinfo.st_size))
with tarfile.open(tar_filepath, 'r') as tar:
def is_within_directory(directory, target):
abs_directory = os.path.abspath(dataset_dir)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def safe_extract(tar, path='.', members=None, *, numeric_owner=False):
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise Exception('Attempted Path Traversal in Tar File')
tar.extractall(path, members, numeric_owner=numeric_owner)
safe_extract(tar, dataset_dir)
finally:
gfile.Remove(tar_filepath)
</DeepExtract>
<DeepExtract>
logging.info('Processing audio and transcript for %s' % subset)
source_dir = os.path.join(dataset_dir + '/LibriSpeech', subset)
target_dir = os.path.join(dataset_dir + '/LibriSpeech', subset + '-wav')
if not gfile.Exists(target_dir):
gfile.MakeDirs(target_dir)
files = []
tfm = Transformer()
for (root, _, filenames) in gfile.Walk(source_dir):
for filename in fnmatch.filter(filenames, '*.trans.txt'):
trans_file = os.path.join(root, filename)
with codecs.open(trans_file, 'r', 'utf-8') as fin:
for line in fin:
(seqid, transcript) = line.split(' ', 1)
transcript = unicodedata.normalize('NFKD', transcript).encode('ascii', 'ignore').decode('ascii', 'ignore').strip().lower()
flac_file = os.path.join(root, seqid + '.flac')
wav_file = os.path.join(target_dir, seqid + '.wav')
if not gfile.Exists(wav_file):
tfm.build(flac_file, wav_file)
wav_length = get_wave_file_length(wav_file)
speaker = seqid.split('-')[0]
files.append((os.path.abspath(wav_file), wav_length, transcript, speaker))
csv_file_path = os.path.join(dircetory, subset + '.csv')
df = pandas.DataFrame(data=files, columns=['wav_filename', 'wav_length_ms', 'transcript', 'speaker'])
df.to_csv(csv_file_path, index=False, sep='\t')
logging.info('Successfully generated csv file {}'.format(csv_file_path))
</DeepExtract>
logging.info('Finished downloading and processing')
return subset_csv
|
def processor(dircetory, subset, force_process):
""" download and process """
librispeech_urls = SUBSETS
if subset not in librispeech_urls:
raise ValueError(subset, 'is not in Librispeech')
subset_csv = os.path.join(dircetory, subset + '.csv')
if not force_process and os.path.exists(subset_csv):
return subset_csv
dataset_dir = os.path.join(dircetory, subset)
logging.info('Downloading and process the librispeech in %s' % dataset_dir)
logging.info('Preparing dataset %s' % subset)
if not gfile.Exists(dataset_dir):
gfile.MakeDirs(dataset_dir)
(_, tar_filepath) = tempfile.mkstemp(suffix='.tar.gz')
try:
logging.info('Downloading %s to %s' % (librispeech_urls[subset], tar_filepath))
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading {} {:.1f}%'.format(tar_filepath, 100.0 * count * block_size / total_size))
sys.stdout.flush()
urllib.request.urlretrieve(librispeech_urls[subset], tar_filepath, _progress)
statinfo = os.stat(tar_filepath)
logging.info('Successfully downloaded %s, size(bytes): %d' % (librispeech_urls[subset], statinfo.st_size))
with tarfile.open(tar_filepath, 'r') as tar:
def is_within_directory(directory, target):
abs_directory = os.path.abspath(dataset_dir)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def safe_extract(tar, path='.', members=None, *, numeric_owner=False):
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise Exception('Attempted Path Traversal in Tar File')
tar.extractall(path, members, numeric_owner=numeric_owner)
safe_extract(tar, dataset_dir)
finally:
gfile.Remove(tar_filepath)
logging.info('Processing audio and transcript for %s' % subset)
source_dir = os.path.join(dataset_dir + '/LibriSpeech', subset)
target_dir = os.path.join(dataset_dir + '/LibriSpeech', subset + '-wav')
if not gfile.Exists(target_dir):
gfile.MakeDirs(target_dir)
files = []
tfm = Transformer()
for (root, _, filenames) in gfile.Walk(source_dir):
for filename in fnmatch.filter(filenames, '*.trans.txt'):
trans_file = os.path.join(root, filename)
with codecs.open(trans_file, 'r', 'utf-8') as fin:
for line in fin:
(seqid, transcript) = line.split(' ', 1)
transcript = unicodedata.normalize('NFKD', transcript).encode('ascii', 'ignore').decode('ascii', 'ignore').strip().lower()
flac_file = os.path.join(root, seqid + '.flac')
wav_file = os.path.join(target_dir, seqid + '.wav')
if not gfile.Exists(wav_file):
tfm.build(flac_file, wav_file)
wav_length = get_wave_file_length(wav_file)
speaker = seqid.split('-')[0]
files.append((os.path.abspath(wav_file), wav_length, transcript, speaker))
csv_file_path = os.path.join(dircetory, subset + '.csv')
df = pandas.DataFrame(data=files, columns=['wav_filename', 'wav_length_ms', 'transcript', 'speaker'])
df.to_csv(csv_file_path, index=False, sep='\t')
logging.info('Successfully generated csv file {}'.format(csv_file_path))
logging.info('Finished downloading and processing')
return subset_csv
|
athena
|
positive
|
def patients_address_as_of(self, date, returning=None, round_to_nearest=None):
if returning == 'index_of_multiple_deprivation':
assert round_to_nearest == 100
column = 'ImdRankRounded'
elif returning == 'rural_urban_classification':
assert round_to_nearest is None
column = 'RuralUrbanClassificationCode'
elif returning == 'msoa':
column = 'MSOACode'
else:
raise ValueError(f'Unsupported `returning` value: {returning}')
<DeepExtract>
all_join_tables = set()
sql_expressions = []
for date_expression in date_expressions:
assert date_expression is not None
(sql_expression, join_tables) = self.date_ref_to_sql_expr(date_expression)
sql_expressions.append(sql_expression)
all_join_tables.update(join_tables)
joins = [f"LEFT JOIN {join_table}\nON {join_table}.patient_id = {'PatientAddress'}.patient_id" for join_table in all_join_tables]
join_str = '\n'.join(joins)
(date_sql, date_joins) = (*sql_expressions, join_str)
</DeepExtract>
return f"\n SELECT\n t.Patient_ID AS patient_id,\n {column} AS {returning}\n FROM (\n SELECT PatientAddress.Patient_ID, {column},\n ROW_NUMBER() OVER (\n PARTITION BY PatientAddress.Patient_ID\n ORDER BY\n StartDate DESC,\n EndDate DESC,\n IIF(MSOACode = 'NPC', 1, 0),\n PatientAddress_ID\n ) AS rownum\n FROM PatientAddress\n {date_joins}\n WHERE StartDate <= {date_sql} AND EndDate > {date_sql}\n ) t\n WHERE rownum = 1\n "
|
def patients_address_as_of(self, date, returning=None, round_to_nearest=None):
if returning == 'index_of_multiple_deprivation':
assert round_to_nearest == 100
column = 'ImdRankRounded'
elif returning == 'rural_urban_classification':
assert round_to_nearest is None
column = 'RuralUrbanClassificationCode'
elif returning == 'msoa':
column = 'MSOACode'
else:
raise ValueError(f'Unsupported `returning` value: {returning}')
all_join_tables = set()
sql_expressions = []
for date_expression in date_expressions:
assert date_expression is not None
(sql_expression, join_tables) = self.date_ref_to_sql_expr(date_expression)
sql_expressions.append(sql_expression)
all_join_tables.update(join_tables)
joins = [f"LEFT JOIN {join_table}\nON {join_table}.patient_id = {'PatientAddress'}.patient_id" for join_table in all_join_tables]
join_str = '\n'.join(joins)
(date_sql, date_joins) = (*sql_expressions, join_str)
return f"\n SELECT\n t.Patient_ID AS patient_id,\n {column} AS {returning}\n FROM (\n SELECT PatientAddress.Patient_ID, {column},\n ROW_NUMBER() OVER (\n PARTITION BY PatientAddress.Patient_ID\n ORDER BY\n StartDate DESC,\n EndDate DESC,\n IIF(MSOACode = 'NPC', 1, 0),\n PatientAddress_ID\n ) AS rownum\n FROM PatientAddress\n {date_joins}\n WHERE StartDate <= {date_sql} AND EndDate > {date_sql}\n ) t\n WHERE rownum = 1\n "
|
cohort-extractor
|
positive
|
def __init__(self, radius=0, angle=None, distance=None, speed=None, angspeed=None, **kwargs):
if isinstance(radius, cozmo.util.Distance):
radius = radius.distance_mm
if isinstance(angle, cozmo.util.Angle):
angle = angle.degrees
if isinstance(speed, cozmo.util.Speed):
speed = speed.speed_mmps
if isinstance(angspeed, cozmo.util.Angle):
angspeed = angspeed.degrees
<DeepExtract>
if radius != 0:
if angle is not None:
pass
elif distance is not None:
angle = self.dist2ang(distance, radius)
else:
raise ValueError('DriveArc requires an angle or distance.')
if speed is not None:
pass
elif angspeed is not None:
speed = self.ang2dist(angspeed, radius)
else:
speed = 40
if angle < 0:
speed = -speed
self.angle = angle
self.l_wheel_speed = speed * (1 - wheelbase / radius)
self.r_wheel_speed = speed * (1 + wheelbase / radius)
else:
if angspeed is None:
angspeed = 40
s = angspeed
if angle < 0:
s = -s
self.angle = angle
self.l_wheel_speed = -s
self.r_wheel_speed = s
</DeepExtract>
super().__init__(self.l_wheel_speed, self.r_wheel_speed, **kwargs)
self.polling_interval = 0.05
|
def __init__(self, radius=0, angle=None, distance=None, speed=None, angspeed=None, **kwargs):
if isinstance(radius, cozmo.util.Distance):
radius = radius.distance_mm
if isinstance(angle, cozmo.util.Angle):
angle = angle.degrees
if isinstance(speed, cozmo.util.Speed):
speed = speed.speed_mmps
if isinstance(angspeed, cozmo.util.Angle):
angspeed = angspeed.degrees
if radius != 0:
if angle is not None:
pass
elif distance is not None:
angle = self.dist2ang(distance, radius)
else:
raise ValueError('DriveArc requires an angle or distance.')
if speed is not None:
pass
elif angspeed is not None:
speed = self.ang2dist(angspeed, radius)
else:
speed = 40
if angle < 0:
speed = -speed
self.angle = angle
self.l_wheel_speed = speed * (1 - wheelbase / radius)
self.r_wheel_speed = speed * (1 + wheelbase / radius)
else:
if angspeed is None:
angspeed = 40
s = angspeed
if angle < 0:
s = -s
self.angle = angle
self.l_wheel_speed = -s
self.r_wheel_speed = s
super().__init__(self.l_wheel_speed, self.r_wheel_speed, **kwargs)
self.polling_interval = 0.05
|
cozmo-tools
|
positive
|
def build_importer(self, collection_path: str, resampled_path: str, temporary_directory_path: str, transcription_json_file_path: str, get_config_callback, set_config_callback, settings_change_callback: SettingsChangeCallback) -> DataTransformer:
if not Path(collection_path).is_dir():
raise RuntimeError('path to collection does not exist')
if not Path(resampled_path).is_dir():
raise RuntimeError('path to the resampled directory does not exist')
if not Path(temporary_directory_path).is_dir():
raise RuntimeError('path to temporary directory does not exist')
<DeepExtract>
settings = json.loads(json.dumps(self._import_settings))
</DeepExtract>
ui = {'data': self._import_ui_data_config, 'type': self._import_ui_type_config, 'order': self._import_ui_order_config}
dt = DataTransformer(self._name, settings, ui, get_config_callback, set_config_callback, settings_change_callback)
def reset_annotations():
nonlocal dt
dt._annotation_store = {}
return
def add_annotation(id, obj):
nonlocal dt
if type(obj) != dict:
raise TypeError('annotation top level variable must be a dictionary')
fields = {'audio_file_name', 'transcript', 'start_ms', 'stop_ms', 'speaker_id'}
if set(obj.keys()) != fields:
raise TypeError('annotation object contains an incorrect field name')
if id in dt._annotation_store:
dt._annotation_store[id].append(obj)
else:
dt._annotation_store[id] = [obj]
return
add_audio = lambda id, audio_path: dt._audio_store.update({id: audio_path})
if self._import_directory_callback is not None:
f = self._import_directory_callback
def wrapper():
nonlocal dt
nonlocal f
nonlocal collection_path
nonlocal reset_annotations
nonlocal add_annotation
nonlocal add_audio
nonlocal temporary_directory_path
return f(collection_path, copyJSONable(dt.get_settings()), reset_annotations, add_annotation, add_audio, temporary_directory_path)
setattr(dt, self._obj_to_attr_name[f], wrapper)
obj_to_attr_name = self._obj_to_attr_name
def import_directory_process():
nonlocal dt
nonlocal collection_path
nonlocal resampled_path
nonlocal add_annotation
nonlocal add_audio
nonlocal temporary_directory_path
nonlocal f
nonlocal obj_to_attr_name
callback_name = obj_to_attr_name[f]
callback = getattr(dt, callback_name)
callback()
with Path(transcription_json_file_path).open(mode='w') as fout:
annotations = []
for id in dt._annotation_store:
annotations.extend(dt._annotation_store[id])
fout.write(json.dumps(annotations))
return
setattr(dt, 'process', import_directory_process)
else:
for (_ext, f) in self._import_extension_callbacks.items():
def wrapper(file_paths: str):
"""
Attribute that is assigned to the DataTransformer. This
Handler must only import the given files.
"""
nonlocal dt
nonlocal f
nonlocal reset_annotations
nonlocal add_annotation
nonlocal temporary_directory_path
return f(file_paths, copyJSONable(dt.get_settings()), reset_annotations, add_annotation, temporary_directory_path)
setattr(dt, self._obj_to_attr_name[f], wrapper)
audio_processing_callback = self._audio_processing_callback
if audio_processing_callback is None:
audio_processing_callback = _default_audio_resampler
import_extension_callbacks = self._import_extension_callbacks
audio_extention = self._audio_extention
def import_files_process():
"""
Handler that is set to the .process() function.
"""
nonlocal dt
nonlocal collection_path
nonlocal resampled_path
nonlocal reset_annotations
nonlocal add_annotation
nonlocal add_audio
nonlocal temporary_directory_path
nonlocal audio_processing_callback
nonlocal import_extension_callbacks
nonlocal audio_extention
extention_to_files: FilteredPathList = _filter_files_by_extention(collection_path)
if audio_extention in extention_to_files:
audio_paths: PathList = extention_to_files.pop(audio_extention)
audio_processing_callback(audio_paths, resampled_path, add_audio, temporary_directory_path)
for (extention, file_paths) in extention_to_files.items():
callback = import_extension_callbacks.get(extention, None)
if callback is not None:
callback(file_paths, dt.get_settings(), reset_annotations, add_annotation, temporary_directory_path)
with Path(transcription_json_file_path).open(mode='w') as fout:
annotations = []
for id in dt._annotation_store:
annotations.extend(dt._annotation_store[id])
fout.write(json.dumps(annotations))
return
setattr(dt, 'process', import_files_process)
for (ext, f) in self._import_file_validator_callback.items():
dt._validaters[ext] = f
dt._ui_updater = self._update_ui_callback
dt._extentions = self._import_file_validator_callback.keys()
return dt
|
def build_importer(self, collection_path: str, resampled_path: str, temporary_directory_path: str, transcription_json_file_path: str, get_config_callback, set_config_callback, settings_change_callback: SettingsChangeCallback) -> DataTransformer:
if not Path(collection_path).is_dir():
raise RuntimeError('path to collection does not exist')
if not Path(resampled_path).is_dir():
raise RuntimeError('path to the resampled directory does not exist')
if not Path(temporary_directory_path).is_dir():
raise RuntimeError('path to temporary directory does not exist')
settings = json.loads(json.dumps(self._import_settings))
ui = {'data': self._import_ui_data_config, 'type': self._import_ui_type_config, 'order': self._import_ui_order_config}
dt = DataTransformer(self._name, settings, ui, get_config_callback, set_config_callback, settings_change_callback)
def reset_annotations():
nonlocal dt
dt._annotation_store = {}
return
def add_annotation(id, obj):
nonlocal dt
if type(obj) != dict:
raise TypeError('annotation top level variable must be a dictionary')
fields = {'audio_file_name', 'transcript', 'start_ms', 'stop_ms', 'speaker_id'}
if set(obj.keys()) != fields:
raise TypeError('annotation object contains an incorrect field name')
if id in dt._annotation_store:
dt._annotation_store[id].append(obj)
else:
dt._annotation_store[id] = [obj]
return
add_audio = lambda id, audio_path: dt._audio_store.update({id: audio_path})
if self._import_directory_callback is not None:
f = self._import_directory_callback
def wrapper():
nonlocal dt
nonlocal f
nonlocal collection_path
nonlocal reset_annotations
nonlocal add_annotation
nonlocal add_audio
nonlocal temporary_directory_path
return f(collection_path, copyJSONable(dt.get_settings()), reset_annotations, add_annotation, add_audio, temporary_directory_path)
setattr(dt, self._obj_to_attr_name[f], wrapper)
obj_to_attr_name = self._obj_to_attr_name
def import_directory_process():
nonlocal dt
nonlocal collection_path
nonlocal resampled_path
nonlocal add_annotation
nonlocal add_audio
nonlocal temporary_directory_path
nonlocal f
nonlocal obj_to_attr_name
callback_name = obj_to_attr_name[f]
callback = getattr(dt, callback_name)
callback()
with Path(transcription_json_file_path).open(mode='w') as fout:
annotations = []
for id in dt._annotation_store:
annotations.extend(dt._annotation_store[id])
fout.write(json.dumps(annotations))
return
setattr(dt, 'process', import_directory_process)
else:
for (_ext, f) in self._import_extension_callbacks.items():
def wrapper(file_paths: str):
"""
Attribute that is assigned to the DataTransformer. This
Handler must only import the given files.
"""
nonlocal dt
nonlocal f
nonlocal reset_annotations
nonlocal add_annotation
nonlocal temporary_directory_path
return f(file_paths, copyJSONable(dt.get_settings()), reset_annotations, add_annotation, temporary_directory_path)
setattr(dt, self._obj_to_attr_name[f], wrapper)
audio_processing_callback = self._audio_processing_callback
if audio_processing_callback is None:
audio_processing_callback = _default_audio_resampler
import_extension_callbacks = self._import_extension_callbacks
audio_extention = self._audio_extention
def import_files_process():
"""
Handler that is set to the .process() function.
"""
nonlocal dt
nonlocal collection_path
nonlocal resampled_path
nonlocal reset_annotations
nonlocal add_annotation
nonlocal add_audio
nonlocal temporary_directory_path
nonlocal audio_processing_callback
nonlocal import_extension_callbacks
nonlocal audio_extention
extention_to_files: FilteredPathList = _filter_files_by_extention(collection_path)
if audio_extention in extention_to_files:
audio_paths: PathList = extention_to_files.pop(audio_extention)
audio_processing_callback(audio_paths, resampled_path, add_audio, temporary_directory_path)
for (extention, file_paths) in extention_to_files.items():
callback = import_extension_callbacks.get(extention, None)
if callback is not None:
callback(file_paths, dt.get_settings(), reset_annotations, add_annotation, temporary_directory_path)
with Path(transcription_json_file_path).open(mode='w') as fout:
annotations = []
for id in dt._annotation_store:
annotations.extend(dt._annotation_store[id])
fout.write(json.dumps(annotations))
return
setattr(dt, 'process', import_files_process)
for (ext, f) in self._import_file_validator_callback.items():
dt._validaters[ext] = f
dt._ui_updater = self._update_ui_callback
dt._extentions = self._import_file_validator_callback.keys()
return dt
|
elpis
|
positive
|
def main(self, filenames, outname, endian, verbose, opts={}, embedded=False, basedirectory=''):
self.byteorder = endian
self.verbose = verbose
if 'padding' in opts.keys():
self.padding = int(opts['padding'])
else:
self.padding = 128
self.embedded = embedded
self.basedirectory = basedirectory
self.hash_multiplier = 101
self.outname = outname
<DeepExtract>
self.nodes = []
for filename in filenames:
node = SFATnode()
if filename.endswith('.noname.bin'):
node.has_name = 0
node.name = b''
node.inputname = filename
node.hash = int(filename.lstrip('0x').replace('.noname.bin', ''), 16)
else:
node.has_name = 1
node.inputname = filename
filename = filename.replace(self.basedirectory, '').lstrip(os.path.sep).replace(os.path.sep, '/')
node.name = filename.encode('utf-8')
node.hash = self.calc_hash(filename)
self.nodes.append(node)
self.nodes.sort(key=lambda node: node.hash)
</DeepExtract>
return self.repack_sections()
|
def main(self, filenames, outname, endian, verbose, opts={}, embedded=False, basedirectory=''):
self.byteorder = endian
self.verbose = verbose
if 'padding' in opts.keys():
self.padding = int(opts['padding'])
else:
self.padding = 128
self.embedded = embedded
self.basedirectory = basedirectory
self.hash_multiplier = 101
self.outname = outname
self.nodes = []
for filename in filenames:
node = SFATnode()
if filename.endswith('.noname.bin'):
node.has_name = 0
node.name = b''
node.inputname = filename
node.hash = int(filename.lstrip('0x').replace('.noname.bin', ''), 16)
else:
node.has_name = 1
node.inputname = filename
filename = filename.replace(self.basedirectory, '').lstrip(os.path.sep).replace(os.path.sep, '/')
node.name = filename.encode('utf-8')
node.hash = self.calc_hash(filename)
self.nodes.append(node)
self.nodes.sort(key=lambda node: node.hash)
return self.repack_sections()
|
3DSkit
|
positive
|
def run(self):
<DeepExtract>
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py))
except NameError:
pass
root = root
</DeepExtract>
<DeepExtract>
setup_cfg = os.path.join(root, 'setup.cfg')
parser = configparser.SafeConfigParser()
with open(setup_cfg, 'r') as f:
parser.readfp(f)
VCS = parser.get('versioneer', 'VCS')
def get(parser, name):
if parser.has_option('versioneer', name):
cfg = parser.get('versioneer', name)
cfg = None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, 'style') or ''
cfg.versionfile_source = get(parser, 'versionfile_source')
cfg.versionfile_build = get(parser, 'versionfile_build')
cfg.tag_prefix = get(parser, 'tag_prefix')
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ''
cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
cfg.verbose = get(parser, 'verbose')
cfg = cfg
</DeepExtract>
<DeepExtract>
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
versions = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print('got version from VCS %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print('got version from parentdir %s' % ver)
versions = ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
versions = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version', 'date': None}
</DeepExtract>
target_versionfile = cfg.versionfile_source
print('UPDATING %s' % target_versionfile)
<DeepExtract>
os.unlink(target_versionfile)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(',', ': '))
with open(target_versionfile, 'w') as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (target_versionfile, versions['version']))
</DeepExtract>
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, 'w') as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source})
|
def run(self):
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py))
except NameError:
pass
root = root
setup_cfg = os.path.join(root, 'setup.cfg')
parser = configparser.SafeConfigParser()
with open(setup_cfg, 'r') as f:
parser.readfp(f)
VCS = parser.get('versioneer', 'VCS')
def get(parser, name):
if parser.has_option('versioneer', name):
cfg = parser.get('versioneer', name)
cfg = None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, 'style') or ''
cfg.versionfile_source = get(parser, 'versionfile_source')
cfg.versionfile_build = get(parser, 'versionfile_build')
cfg.tag_prefix = get(parser, 'tag_prefix')
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ''
cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
cfg.verbose = get(parser, 'verbose')
cfg = cfg
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
versions = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print('got version from VCS %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print('got version from parentdir %s' % ver)
versions = ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
versions = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version', 'date': None}
target_versionfile = cfg.versionfile_source
print('UPDATING %s' % target_versionfile)
os.unlink(target_versionfile)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(',', ': '))
with open(target_versionfile, 'w') as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (target_versionfile, versions['version']))
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, 'w') as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source})
|
caliban
|
positive
|
def make_string(self):
string = ''
pos_start = self.pos.copy()
escape_character = False
<DeepExtract>
self.idx += 1
self.col += 1
if current_char == '\n':
self.ln += 1
self.col = 0
return self
</DeepExtract>
escape_characters = {'n': '\n', 't': '\t'}
while self.current_char != None and (self.current_char != '"' or escape_character):
if escape_character:
string += escape_characters.get(self.current_char, self.current_char)
elif self.current_char == '\\':
escape_character = True
else:
string += self.current_char
<DeepExtract>
self.idx += 1
self.col += 1
if current_char == '\n':
self.ln += 1
self.col = 0
return self
</DeepExtract>
escape_character = False
<DeepExtract>
self.idx += 1
self.col += 1
if current_char == '\n':
self.ln += 1
self.col = 0
return self
</DeepExtract>
return Token(TT_STRING, string, pos_start, self.pos)
|
def make_string(self):
string = ''
pos_start = self.pos.copy()
escape_character = False
self.idx += 1
self.col += 1
if current_char == '\n':
self.ln += 1
self.col = 0
return self
escape_characters = {'n': '\n', 't': '\t'}
while self.current_char != None and (self.current_char != '"' or escape_character):
if escape_character:
string += escape_characters.get(self.current_char, self.current_char)
elif self.current_char == '\\':
escape_character = True
else:
string += self.current_char
self.idx += 1
self.col += 1
if current_char == '\n':
self.ln += 1
self.col = 0
return self
escape_character = False
self.idx += 1
self.col += 1
if current_char == '\n':
self.ln += 1
self.col = 0
return self
return Token(TT_STRING, string, pos_start, self.pos)
|
Dip
|
positive
|
def create_CharStrValue(oid=1, pv='null', name='String', pv_writable=False):
charval = CharacterStringValueObject(objectIdentifier=('characterstringValue', oid), objectName=name, priorityArray=PriorityArray(), statusFlags=StatusFlags())
<DeepExtract>
for prop in charval.properties:
if prop.identifier == identifier:
prop.mutable = pv_writable
charval = charval
</DeepExtract>
charval.presentValue = CharacterString(pv)
<DeepExtract>
print('*' * 80)
print('create_xx functions are deprecated and will disappear from a future release')
print('BAC0.core.device.local.object using the ObjectFactory will be the new way to define objects')
print('Refer to the doc for details')
print('*' * 80)
</DeepExtract>
return charval
|
def create_CharStrValue(oid=1, pv='null', name='String', pv_writable=False):
charval = CharacterStringValueObject(objectIdentifier=('characterstringValue', oid), objectName=name, priorityArray=PriorityArray(), statusFlags=StatusFlags())
for prop in charval.properties:
if prop.identifier == identifier:
prop.mutable = pv_writable
charval = charval
charval.presentValue = CharacterString(pv)
print('*' * 80)
print('create_xx functions are deprecated and will disappear from a future release')
print('BAC0.core.device.local.object using the ObjectFactory will be the new way to define objects')
print('Refer to the doc for details')
print('*' * 80)
return charval
|
BAC0
|
positive
|
def make_authored_by(name, uri=None, orcid=None):
authored_by = dict()
<DeepExtract>
user_ref = dict()
user_ref['name'] = name
if uri:
user_ref['uri'] = uri
if orcid:
user_ref['orcid'] = orcid
authored_by['authoredBy'] = user_ref
</DeepExtract>
return authored_by
|
def make_authored_by(name, uri=None, orcid=None):
authored_by = dict()
user_ref = dict()
user_ref['name'] = name
if uri:
user_ref['uri'] = uri
if orcid:
user_ref['orcid'] = orcid
authored_by['authoredBy'] = user_ref
return authored_by
|
bdbag
|
positive
|
def rpn_net(self, feature_pyramid):
rpn_delta_boxes_list = []
rpn_scores_list = []
rpn_probs_list = []
with tf.variable_scope('rpn_net'):
with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY)):
for level in cfgs.LEVEL:
if cfgs.SHARE_NET:
reuse_flag = None if level == 'P3' else True
scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'rpn_classification', 'rpn_regression']
else:
reuse_flag = None
scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level, 'rpn_classification_' + level, 'rpn_regression_' + level]
<DeepExtract>
rpn_conv2d_3x3 = feature_pyramid[level]
for i in range(4):
rpn_conv2d_3x3 = slim.conv2d(inputs=rpn_conv2d_3x3, num_outputs=256, kernel_size=[3, 3], stride=1, activation_fn=tf.nn.relu, weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER, scope='{}_{}'.format(scope_list[0], i), reuse=reuse_flag)
rpn_box_scores = slim.conv2d(rpn_conv2d_3x3, num_outputs=cfgs.CLASS_NUM * self.num_anchors_per_location, kernel_size=[3, 3], stride=1, weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.FINAL_CONV_BIAS_INITIALIZER, scope=scope_list[2], activation_fn=None, reuse=reuse_flag)
rpn_box_scores = tf.reshape(rpn_box_scores, [-1, cfgs.CLASS_NUM], name='rpn_{}_classification_reshape'.format(level))
rpn_box_probs = tf.sigmoid(rpn_box_scores, name='rpn_{}_classification_sigmoid'.format(level))
(rpn_box_scores, rpn_box_probs) = (rpn_box_scores, rpn_box_probs)
</DeepExtract>
<DeepExtract>
rpn_delta_boxes = feature_pyramid[level]
for i in range(4):
rpn_delta_boxes = slim.conv2d(inputs=rpn_delta_boxes, num_outputs=256, kernel_size=[3, 3], weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER, stride=1, activation_fn=tf.nn.relu, scope='{}_{}'.format(scope_list[1], i), reuse=reuse_flag)
rpn_delta_boxes = slim.conv2d(rpn_delta_boxes, num_outputs=5 * self.num_anchors_per_location, kernel_size=[3, 3], stride=1, weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER, scope=scope_list[3], activation_fn=None, reuse=reuse_flag)
rpn_delta_boxes = tf.reshape(rpn_delta_boxes, [-1, 5], name='rpn_{}_regression_reshape'.format(level))
rpn_delta_boxes = rpn_delta_boxes
</DeepExtract>
rpn_scores_list.append(rpn_box_scores)
rpn_probs_list.append(rpn_box_probs)
rpn_delta_boxes_list.append(rpn_delta_boxes)
rpn_all_delta_boxes = tf.concat(rpn_delta_boxes_list, axis=0)
rpn_all_boxes_scores = tf.concat(rpn_scores_list, axis=0)
rpn_all_boxes_probs = tf.concat(rpn_probs_list, axis=0)
return (rpn_all_delta_boxes, rpn_all_boxes_scores, rpn_all_boxes_probs)
|
def rpn_net(self, feature_pyramid):
rpn_delta_boxes_list = []
rpn_scores_list = []
rpn_probs_list = []
with tf.variable_scope('rpn_net'):
with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY)):
for level in cfgs.LEVEL:
if cfgs.SHARE_NET:
reuse_flag = None if level == 'P3' else True
scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'rpn_classification', 'rpn_regression']
else:
reuse_flag = None
scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level, 'rpn_classification_' + level, 'rpn_regression_' + level]
rpn_conv2d_3x3 = feature_pyramid[level]
for i in range(4):
rpn_conv2d_3x3 = slim.conv2d(inputs=rpn_conv2d_3x3, num_outputs=256, kernel_size=[3, 3], stride=1, activation_fn=tf.nn.relu, weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER, scope='{}_{}'.format(scope_list[0], i), reuse=reuse_flag)
rpn_box_scores = slim.conv2d(rpn_conv2d_3x3, num_outputs=cfgs.CLASS_NUM * self.num_anchors_per_location, kernel_size=[3, 3], stride=1, weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.FINAL_CONV_BIAS_INITIALIZER, scope=scope_list[2], activation_fn=None, reuse=reuse_flag)
rpn_box_scores = tf.reshape(rpn_box_scores, [-1, cfgs.CLASS_NUM], name='rpn_{}_classification_reshape'.format(level))
rpn_box_probs = tf.sigmoid(rpn_box_scores, name='rpn_{}_classification_sigmoid'.format(level))
(rpn_box_scores, rpn_box_probs) = (rpn_box_scores, rpn_box_probs)
rpn_delta_boxes = feature_pyramid[level]
for i in range(4):
rpn_delta_boxes = slim.conv2d(inputs=rpn_delta_boxes, num_outputs=256, kernel_size=[3, 3], weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER, stride=1, activation_fn=tf.nn.relu, scope='{}_{}'.format(scope_list[1], i), reuse=reuse_flag)
rpn_delta_boxes = slim.conv2d(rpn_delta_boxes, num_outputs=5 * self.num_anchors_per_location, kernel_size=[3, 3], stride=1, weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER, biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER, scope=scope_list[3], activation_fn=None, reuse=reuse_flag)
rpn_delta_boxes = tf.reshape(rpn_delta_boxes, [-1, 5], name='rpn_{}_regression_reshape'.format(level))
rpn_delta_boxes = rpn_delta_boxes
rpn_scores_list.append(rpn_box_scores)
rpn_probs_list.append(rpn_box_probs)
rpn_delta_boxes_list.append(rpn_delta_boxes)
rpn_all_delta_boxes = tf.concat(rpn_delta_boxes_list, axis=0)
rpn_all_boxes_scores = tf.concat(rpn_scores_list, axis=0)
rpn_all_boxes_probs = tf.concat(rpn_probs_list, axis=0)
return (rpn_all_delta_boxes, rpn_all_boxes_scores, rpn_all_boxes_probs)
|
DCL_RetinaNet_Tensorflow
|
positive
|
def make_app_ready(self):
if not getattr(self, 'is_ready', False):
<DeepExtract>
self.write_lock.acquire()
try:
if self.pending_documents:
self.register_documents_with_backend(self.pending_documents)
self.pending_documents = list()
if self.pending_indexes:
self.register_indexes_with_backend(self.pending_indexes)
self.pending_indexes = list()
finally:
self.write_lock.release()
</DeepExtract>
self.is_ready = True
|
def make_app_ready(self):
if not getattr(self, 'is_ready', False):
self.write_lock.acquire()
try:
if self.pending_documents:
self.register_documents_with_backend(self.pending_documents)
self.pending_documents = list()
if self.pending_indexes:
self.register_indexes_with_backend(self.pending_indexes)
self.pending_indexes = list()
finally:
self.write_lock.release()
self.is_ready = True
|
django-dockit
|
positive
|
def retrieve_files_recursively(dir, file_lst):
for d in sorted(os.listdir(dir)):
dd = osp.join(dir, d)
if osp.isdir(dd):
<DeepExtract>
for d in sorted(os.listdir(dd)):
dd = osp.join(dd, d)
if osp.isdir(dd):
retrieve_files_recursively(dd, file_lst)
elif osp.splitext(d)[-1].lower() in ['.' + s for s in suffix]:
file_lst.append(dd)
</DeepExtract>
elif osp.splitext(d)[-1].lower() in ['.' + s for s in suffix]:
file_lst.append(dd)
|
def retrieve_files_recursively(dir, file_lst):
for d in sorted(os.listdir(dir)):
dd = osp.join(dir, d)
if osp.isdir(dd):
for d in sorted(os.listdir(dd)):
dd = osp.join(dd, d)
if osp.isdir(dd):
retrieve_files_recursively(dd, file_lst)
elif osp.splitext(d)[-1].lower() in ['.' + s for s in suffix]:
file_lst.append(dd)
elif osp.splitext(d)[-1].lower() in ['.' + s for s in suffix]:
file_lst.append(dd)
|
EGVSR
|
positive
|
def __init__(self, images, labels, output_shape, chunk_shape, stride=None, tile_shape=(256, 256), tile_overlap=None, max_rand_offset=None):
"""
Parameters
----------
images: ImageSet
Images to train on
labels: ImageSet
Corresponding labels to train on
output_shape: (int, int)
Shape of the corresponding labels for a given chunk or tile size.
chunk_shape: (int, int)
If specified, divide tiles into individual chunks of this shape.
stride: (int, int)
Skip this stride between chunks. Only valid with chunk_shape.
tile_shape: (int, int)
Size of tiles to load from the images at a time.
tile_overlap: (int, int)
If specified, overlap tiles by this amount.
max_rand_offset: int
If specified, in each epoch, offset all tiles by a random amount in x and y
in the range(-max_rand_offset, max_rand_offset).
"""
self._iopool = ThreadPoolExecutor(config.io.threads())
<DeepExtract>
if chunk_shape:
assert len(chunk_shape) == 2, 'Chunk must be two dimensional.'
assert chunk_shape[0] % 2 == chunk_shape[1] % 2 == output_shape[0] % 2 == output_shape[1] % 2, 'Chunk and output shapes must both be even or odd.'
if output_shape:
assert len(output_shape) == 2 or len(output_shape) == 3, 'Output must be two or three dimensional.'
if len(output_shape) == 3:
output_shape = output_shape[0:2]
self._chunk_shape = chunk_shape
self._output_shape = output_shape
</DeepExtract>
self._output_dims = 1
if stride is None:
stride = (1, 1)
self._stride = stride
self._data_type = tf.float32
self._label_type = tf.uint8
self._tile_shape = tile_shape
if tile_overlap is None:
tile_overlap = (0, 0)
self._tile_overlap = tile_overlap
self._max_rand_offset = max_rand_offset if max_rand_offset else 0
if labels:
assert len(images) == len(labels)
self._images = images
self._labels = labels
self._epoch = [0, 0]
self._num_bands = images.load(0).num_bands()
self._random_seed = random.randint(0, 1 << 16)
|
def __init__(self, images, labels, output_shape, chunk_shape, stride=None, tile_shape=(256, 256), tile_overlap=None, max_rand_offset=None):
"""
Parameters
----------
images: ImageSet
Images to train on
labels: ImageSet
Corresponding labels to train on
output_shape: (int, int)
Shape of the corresponding labels for a given chunk or tile size.
chunk_shape: (int, int)
If specified, divide tiles into individual chunks of this shape.
stride: (int, int)
Skip this stride between chunks. Only valid with chunk_shape.
tile_shape: (int, int)
Size of tiles to load from the images at a time.
tile_overlap: (int, int)
If specified, overlap tiles by this amount.
max_rand_offset: int
If specified, in each epoch, offset all tiles by a random amount in x and y
in the range(-max_rand_offset, max_rand_offset).
"""
self._iopool = ThreadPoolExecutor(config.io.threads())
if chunk_shape:
assert len(chunk_shape) == 2, 'Chunk must be two dimensional.'
assert chunk_shape[0] % 2 == chunk_shape[1] % 2 == output_shape[0] % 2 == output_shape[1] % 2, 'Chunk and output shapes must both be even or odd.'
if output_shape:
assert len(output_shape) == 2 or len(output_shape) == 3, 'Output must be two or three dimensional.'
if len(output_shape) == 3:
output_shape = output_shape[0:2]
self._chunk_shape = chunk_shape
self._output_shape = output_shape
self._output_dims = 1
if stride is None:
stride = (1, 1)
self._stride = stride
self._data_type = tf.float32
self._label_type = tf.uint8
self._tile_shape = tile_shape
if tile_overlap is None:
tile_overlap = (0, 0)
self._tile_overlap = tile_overlap
self._max_rand_offset = max_rand_offset if max_rand_offset else 0
if labels:
assert len(images) == len(labels)
self._images = images
self._labels = labels
self._epoch = [0, 0]
self._num_bands = images.load(0).num_bands()
self._random_seed = random.randint(0, 1 << 16)
|
delta
|
positive
|
def _execute_develop_run():
<DeepExtract>
rm(MODULE_INFO_DIR, 'dist', '*.egg-info', '*.egg', 'version.txt')
</DeepExtract>
VersionUtils().write_version()
run_nosetests()
install_dev()
|
def _execute_develop_run():
rm(MODULE_INFO_DIR, 'dist', '*.egg-info', '*.egg', 'version.txt')
VersionUtils().write_version()
run_nosetests()
install_dev()
|
defend_against_fruit
|
positive
|
def worker_harvest_amount(self):
"""The maximum amount of karbonite harvested from a deposit in one turn.
* InappropriateUnitType - the unit is not a worker.
:type self: Unit
:rtype: int
"""
result = _lib.bc_Unit_worker_harvest_amount(self._ptr)
<DeepExtract>
if _lib.bc_has_err():
_lasterror = _ffi.new('char**')
err = _lib.bc_get_last_err(_lasterror)
errtext = _ffi.string(_lasterror[0])
_lib.bc_free_string(_lasterror[0])
raise Exception(errtext)
</DeepExtract>
return result
|
def worker_harvest_amount(self):
"""The maximum amount of karbonite harvested from a deposit in one turn.
* InappropriateUnitType - the unit is not a worker.
:type self: Unit
:rtype: int
"""
result = _lib.bc_Unit_worker_harvest_amount(self._ptr)
if _lib.bc_has_err():
_lasterror = _ffi.new('char**')
err = _lib.bc_get_last_err(_lasterror)
errtext = _ffi.string(_lasterror[0])
_lib.bc_free_string(_lasterror[0])
raise Exception(errtext)
return result
|
bc18-scaffold
|
positive
|
def begin(self, path, timestamp=None, caption=None, update=False):
assert not self._current, 'Tried to begin %s while running %s' % (path, self._current)
if not self._auth_tested:
try:
<DeepExtract>
assert self._url, 'No urd configured for this server'
'%s/test/%s' % (self._url, self._user) = '%s/test/%s' % (self._url, self._user).replace(' ', '%20')
return call('%s/test/%s' % (self._url, self._user), data=True, fmt=fmt, headers=self._headers, server_name='urd')
</DeepExtract>
except UrdPermissionError:
raise Exception('Urd says permission denied, did you forget to set URD_AUTH?')
self._auth_tested = True
<DeepExtract>
if '/' not in path:
path = '%s/%s' % (self._user, path)
self._current = path
</DeepExtract>
<DeepExtract>
if timestamp is None:
self._current_timestamp = None
errmsg = 'Specify timestamps as strings, ints, datetimes or (timestamp, integer), not %r' % (timestamp,)
if isinstance(timestamp, (tuple, list)):
assert len(timestamp) == 2, errmsg
(timestamp, integer) = timestamp
assert isinstance(integer, int), errmsg
else:
integer = None
if isinstance(timestamp, (int, date)):
timestamp = str(timestamp)
assert isinstance(timestamp, str_types), errmsg
assert timestamp, errmsg
if integer is None:
self._current_timestamp = timestamp
else:
self._current_timestamp = '%s+%d' % (timestamp, integer)
</DeepExtract>
self._current_caption = caption
self._update = update
self._deps = {}
self._a.clear_record()
self.joblist = self._a.jobs
self._latest_joblist = None
|
def begin(self, path, timestamp=None, caption=None, update=False):
assert not self._current, 'Tried to begin %s while running %s' % (path, self._current)
if not self._auth_tested:
try:
assert self._url, 'No urd configured for this server'
'%s/test/%s' % (self._url, self._user) = '%s/test/%s' % (self._url, self._user).replace(' ', '%20')
return call('%s/test/%s' % (self._url, self._user), data=True, fmt=fmt, headers=self._headers, server_name='urd')
except UrdPermissionError:
raise Exception('Urd says permission denied, did you forget to set URD_AUTH?')
self._auth_tested = True
if '/' not in path:
path = '%s/%s' % (self._user, path)
self._current = path
if timestamp is None:
self._current_timestamp = None
errmsg = 'Specify timestamps as strings, ints, datetimes or (timestamp, integer), not %r' % (timestamp,)
if isinstance(timestamp, (tuple, list)):
assert len(timestamp) == 2, errmsg
(timestamp, integer) = timestamp
assert isinstance(integer, int), errmsg
else:
integer = None
if isinstance(timestamp, (int, date)):
timestamp = str(timestamp)
assert isinstance(timestamp, str_types), errmsg
assert timestamp, errmsg
if integer is None:
self._current_timestamp = timestamp
else:
self._current_timestamp = '%s+%d' % (timestamp, integer)
self._current_caption = caption
self._update = update
self._deps = {}
self._a.clear_record()
self.joblist = self._a.jobs
self._latest_joblist = None
|
accelerator
|
positive
|
def inference_1conv_end(self, input, eval_data=False):
"""
Args:
input: 4D tensor of [batch_size, WIDTH, HEIGHT, DEPTHS] size.
Returns:
logits: 2D tensor of [batch_size, NUM_CLASSES].
"""
if eval_data:
batch_size = int(graphcnn_input.EVAL_BATCH_SIZE)
else:
batch_size = int(graphcnn_input.TRAIN_BATCH_SIZE)
with tf.variable_scope('fc1') as scope:
input = tf.reshape(input, [batch_size, -1])
inputmaps = input.get_shape()[1].value
outputmaps = 512
<DeepExtract>
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.truncated_normal(shape=[inputmaps, outputmaps], stddev=0.04), name='weights', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='weights' + '_loss')
tf.add_to_collection('losses', weight_decay)
weights = var
</DeepExtract>
<DeepExtract>
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.constant(0.1, shape=[outputmaps]), name='biases', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='biases' + '_loss')
tf.add_to_collection('losses', weight_decay)
biases = var
</DeepExtract>
fc = tf.matmul(input, weights)
fc = tf.nn.bias_add(fc, biases)
output = tf.nn.relu(fc, name=scope.name)
<DeepExtract>
if graphcnn_option.SUMMARYWRITER:
tensor_name = output.op.name
tf.histogram_summary(tensor_name + '/activations', output)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(output))
</DeepExtract>
self._paramaters_list.append(weights)
self._paramaters_list.append(biases)
if not eval_data:
output = tf.nn.dropout(output, graphcnn_option.DROPOUT_FRACTION)
with tf.variable_scope('fc2') as scope:
input = output
inputmaps = outputmaps
outputmaps = 64
<DeepExtract>
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.truncated_normal(shape=[inputmaps, outputmaps], stddev=0.04), name='weights', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='weights' + '_loss')
tf.add_to_collection('losses', weight_decay)
weights = var
</DeepExtract>
<DeepExtract>
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.constant(0.1, shape=[outputmaps]), name='biases', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='biases' + '_loss')
tf.add_to_collection('losses', weight_decay)
biases = var
</DeepExtract>
fc = tf.matmul(input, weights)
fc = tf.nn.bias_add(fc, biases)
output = tf.nn.relu(fc, name=scope.name)
<DeepExtract>
if graphcnn_option.SUMMARYWRITER:
tensor_name = output.op.name
tf.histogram_summary(tensor_name + '/activations', output)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(output))
</DeepExtract>
self._paramaters_list.append(weights)
self._paramaters_list.append(biases)
if not eval_data:
output = tf.nn.dropout(output, graphcnn_option.DROPOUT_FRACTION)
with tf.variable_scope('softmax_linear') as scope:
input = output
inputmaps = outputmaps
outputmaps = 1
<DeepExtract>
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.truncated_normal(shape=[inputmaps, outputmaps], stddev=1.0 / inputmaps), name='weights', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='weights' + '_loss')
tf.add_to_collection('losses', weight_decay)
weights = var
</DeepExtract>
<DeepExtract>
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.constant(0.0, shape=[outputmaps]), name='biases', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='biases' + '_loss')
tf.add_to_collection('losses', weight_decay)
biases = var
</DeepExtract>
fc = tf.matmul(input, weights)
softmax_linear = tf.nn.bias_add(fc, biases, name=scope.name)
<DeepExtract>
if graphcnn_option.SUMMARYWRITER:
tensor_name = softmax_linear.op.name
tf.histogram_summary(tensor_name + '/activations', softmax_linear)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(softmax_linear))
</DeepExtract>
self._paramaters_list.append(weights)
self._paramaters_list.append(biases)
return softmax_linear
|
def inference_1conv_end(self, input, eval_data=False):
"""
Args:
input: 4D tensor of [batch_size, WIDTH, HEIGHT, DEPTHS] size.
Returns:
logits: 2D tensor of [batch_size, NUM_CLASSES].
"""
if eval_data:
batch_size = int(graphcnn_input.EVAL_BATCH_SIZE)
else:
batch_size = int(graphcnn_input.TRAIN_BATCH_SIZE)
with tf.variable_scope('fc1') as scope:
input = tf.reshape(input, [batch_size, -1])
inputmaps = input.get_shape()[1].value
outputmaps = 512
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.truncated_normal(shape=[inputmaps, outputmaps], stddev=0.04), name='weights', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='weights' + '_loss')
tf.add_to_collection('losses', weight_decay)
weights = var
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.constant(0.1, shape=[outputmaps]), name='biases', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='biases' + '_loss')
tf.add_to_collection('losses', weight_decay)
biases = var
fc = tf.matmul(input, weights)
fc = tf.nn.bias_add(fc, biases)
output = tf.nn.relu(fc, name=scope.name)
if graphcnn_option.SUMMARYWRITER:
tensor_name = output.op.name
tf.histogram_summary(tensor_name + '/activations', output)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(output))
self._paramaters_list.append(weights)
self._paramaters_list.append(biases)
if not eval_data:
output = tf.nn.dropout(output, graphcnn_option.DROPOUT_FRACTION)
with tf.variable_scope('fc2') as scope:
input = output
inputmaps = outputmaps
outputmaps = 64
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.truncated_normal(shape=[inputmaps, outputmaps], stddev=0.04), name='weights', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='weights' + '_loss')
tf.add_to_collection('losses', weight_decay)
weights = var
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.constant(0.1, shape=[outputmaps]), name='biases', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='biases' + '_loss')
tf.add_to_collection('losses', weight_decay)
biases = var
fc = tf.matmul(input, weights)
fc = tf.nn.bias_add(fc, biases)
output = tf.nn.relu(fc, name=scope.name)
if graphcnn_option.SUMMARYWRITER:
tensor_name = output.op.name
tf.histogram_summary(tensor_name + '/activations', output)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(output))
self._paramaters_list.append(weights)
self._paramaters_list.append(biases)
if not eval_data:
output = tf.nn.dropout(output, graphcnn_option.DROPOUT_FRACTION)
with tf.variable_scope('softmax_linear') as scope:
input = output
inputmaps = outputmaps
outputmaps = 1
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.truncated_normal(shape=[inputmaps, outputmaps], stddev=1.0 / inputmaps), name='weights', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='weights' + '_loss')
tf.add_to_collection('losses', weight_decay)
weights = var
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.constant(0.0, shape=[outputmaps]), name='biases', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='biases' + '_loss')
tf.add_to_collection('losses', weight_decay)
biases = var
fc = tf.matmul(input, weights)
softmax_linear = tf.nn.bias_add(fc, biases, name=scope.name)
if graphcnn_option.SUMMARYWRITER:
tensor_name = softmax_linear.op.name
tf.histogram_summary(tensor_name + '/activations', softmax_linear)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(softmax_linear))
self._paramaters_list.append(weights)
self._paramaters_list.append(biases)
return softmax_linear
|
DeepGraphCNNforTexts
|
positive
|
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
<DeepExtract>
env = self.defaults.copy()
env.update(env)
env.update({'_stdout': stdout, '_printlist': stdout.extend, 'include': functools.partial(self._include, env), 'rebase': functools.partial(self._rebase, env), '_rebase': None, '_str': self._str, '_escape': self._escape, 'get': env.get, 'setdefault': env.setdefault, 'defined': env.__contains__})
exec(self.co, env)
if env.get('_rebase'):
(subtpl, rargs) = env.pop('_rebase')
rargs['base'] = ''.join(stdout)
del stdout[:]
return self._include(env, subtpl, **rargs)
return env
</DeepExtract>
return ''.join(stdout)
|
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
env = self.defaults.copy()
env.update(env)
env.update({'_stdout': stdout, '_printlist': stdout.extend, 'include': functools.partial(self._include, env), 'rebase': functools.partial(self._rebase, env), '_rebase': None, '_str': self._str, '_escape': self._escape, 'get': env.get, 'setdefault': env.setdefault, 'defined': env.__contains__})
exec(self.co, env)
if env.get('_rebase'):
(subtpl, rargs) = env.pop('_rebase')
rargs['base'] = ''.join(stdout)
del stdout[:]
return self._include(env, subtpl, **rargs)
return env
return ''.join(stdout)
|
bottle
|
positive
|
def pool_3d(input, window_size, strides, pad_mode, pool_mode):
if pad_mode in ['MIRROR', 'mirror']:
<DeepExtract>
assert np.all([window_size.shape[2 + d] - 1 for d in range(3)]) >= 0
padLeft = int([window_size.shape[2 + d] - 1 for d in range(3)][0] // 2)
padRight = int(([window_size.shape[2 + d] - 1 for d in range(3)][0] + 1) // 2)
paddedImage = tf.concat([input[:, :, int([window_size.shape[2 + d] - 1 for d in range(3)][0] // 2) - 1::-1, :, :], input], axis=2) if padLeft > 0 else input
paddedImage = tf.concat([paddedImage, paddedImage[:, :, -1:-1 - int(([window_size.shape[2 + d] - 1 for d in range(3)][0] + 1) // 2):-1, :, :]], axis=2) if padRight > 0 else paddedImage
padLeft = int([window_size.shape[2 + d] - 1 for d in range(3)][1] // 2)
padRight = int(([window_size.shape[2 + d] - 1 for d in range(3)][1] + 1) // 2)
paddedImage = tf.concat([paddedImage[:, :, :, padLeft - 1::-1, :], paddedImage], axis=3) if padLeft > 0 else paddedImage
paddedImage = tf.concat([paddedImage, paddedImage[:, :, :, -1:-1 - padRight:-1, :]], axis=3) if padRight > 0 else paddedImage
padLeft = int([window_size.shape[2 + d] - 1 for d in range(3)][2] // 2)
padRight = int(([window_size.shape[2 + d] - 1 for d in range(3)][2] + 1) // 2)
paddedImage = tf.concat([paddedImage[:, :, :, :, padLeft - 1::-1], paddedImage], axis=4) if padLeft > 0 else paddedImage
paddedImage = tf.concat([paddedImage, paddedImage[:, :, :, :, -1:-1 - padRight:-1]], axis=4) if padRight > 0 else paddedImage
input = paddedImage
</DeepExtract>
pad_mode = 'VALID'
elif padding in ['ZERO', 'zero']:
padding = 'SAME'
elif padding is None or padding in ['none']:
padding = 'VALID'
inp_resh = tf.transpose(input, perm=[0, 4, 3, 2, 1])
pooled_out = tf.nn.pool(input=inp_resh, window_shape=window_size, strides=strides, padding=pad_mode, pooling_type=pool_mode, data_format='NDHWC')
pooled_out = tf.transpose(pooled_out, perm=[0, 4, 3, 2, 1])
return pooled_out
|
def pool_3d(input, window_size, strides, pad_mode, pool_mode):
if pad_mode in ['MIRROR', 'mirror']:
assert np.all([window_size.shape[2 + d] - 1 for d in range(3)]) >= 0
padLeft = int([window_size.shape[2 + d] - 1 for d in range(3)][0] // 2)
padRight = int(([window_size.shape[2 + d] - 1 for d in range(3)][0] + 1) // 2)
paddedImage = tf.concat([input[:, :, int([window_size.shape[2 + d] - 1 for d in range(3)][0] // 2) - 1::-1, :, :], input], axis=2) if padLeft > 0 else input
paddedImage = tf.concat([paddedImage, paddedImage[:, :, -1:-1 - int(([window_size.shape[2 + d] - 1 for d in range(3)][0] + 1) // 2):-1, :, :]], axis=2) if padRight > 0 else paddedImage
padLeft = int([window_size.shape[2 + d] - 1 for d in range(3)][1] // 2)
padRight = int(([window_size.shape[2 + d] - 1 for d in range(3)][1] + 1) // 2)
paddedImage = tf.concat([paddedImage[:, :, :, padLeft - 1::-1, :], paddedImage], axis=3) if padLeft > 0 else paddedImage
paddedImage = tf.concat([paddedImage, paddedImage[:, :, :, -1:-1 - padRight:-1, :]], axis=3) if padRight > 0 else paddedImage
padLeft = int([window_size.shape[2 + d] - 1 for d in range(3)][2] // 2)
padRight = int(([window_size.shape[2 + d] - 1 for d in range(3)][2] + 1) // 2)
paddedImage = tf.concat([paddedImage[:, :, :, :, padLeft - 1::-1], paddedImage], axis=4) if padLeft > 0 else paddedImage
paddedImage = tf.concat([paddedImage, paddedImage[:, :, :, :, -1:-1 - padRight:-1]], axis=4) if padRight > 0 else paddedImage
input = paddedImage
pad_mode = 'VALID'
elif pad_mode in ['ZERO', 'zero']:
pad_mode = 'SAME'
elif pad_mode is None or pad_mode in ['none']:
pad_mode = 'VALID'
inp_resh = tf.transpose(input, perm=[0, 4, 3, 2, 1])
pooled_out = tf.nn.pool(input=inp_resh, window_shape=window_size, strides=strides, padding=pad_mode, pooling_type=pool_mode, data_format='NDHWC')
pooled_out = tf.transpose(pooled_out, perm=[0, 4, 3, 2, 1])
return pooled_out
|
deepmedic
|
positive
|
def _read_byte_array(stream):
<DeepExtract>
(x,) = struct.unpack('!L', stream.read(4))
(byte_array_length, x) = (x, 4)
</DeepExtract>
return (stream.read(byte_array_length), byte_array_length + x)
|
def _read_byte_array(stream):
(x,) = struct.unpack('!L', stream.read(4))
(byte_array_length, x) = (x, 4)
return (stream.read(byte_array_length), byte_array_length + x)
|
asynqp
|
positive
|
def testlist_star_expr_handle(self, original, loc, tokens, is_list=False):
"""Handle naked a, *b."""
<DeepExtract>
groups = [[]]
has_star = False
has_comma = False
for tok_grp in tokens:
if tok_grp == ',':
has_comma = True
elif len(tok_grp) == 1:
groups[-1].append(tok_grp[0])
elif len(tok_grp) == 2:
internal_assert(not tok_grp[0].lstrip('*'), 'invalid star expr item signifier', tok_grp[0])
has_star = True
groups.append(tok_grp[1])
groups.append([])
else:
raise CoconutInternalException('invalid testlist_star_expr tokens', tokens)
if not groups[-1]:
groups.pop()
(groups, has_star, has_comma) = (groups, has_star, has_comma)
</DeepExtract>
is_sequence = has_comma or is_list
if not is_sequence and (not has_star):
<DeepExtract>
if not len(groups) == 1 and len(groups[0]) == 1 or callable(len(groups) == 1 and len(groups[0]) == 1):
internal_assert(len(groups) == 1 and len(groups[0]) == 1, 'invalid single-item testlist_star_expr tokens', tokens, exc_maker=partial(self.make_internal_syntax_err, original, loc))
</DeepExtract>
out = groups[0][0]
elif not has_star:
<DeepExtract>
if not len(groups) == 1 or callable(len(groups) == 1):
internal_assert(len(groups) == 1, 'testlist_star_expr group splitting failed on', tokens, exc_maker=partial(self.make_internal_syntax_err, original, loc))
</DeepExtract>
out = tuple_str_of(groups[0], add_parens=False)
elif is_sequence and self.target_info >= (3, 5):
to_literal = []
for g in groups:
if isinstance(g, list):
to_literal.extend(g)
else:
to_literal.append('*' + g)
out = tuple_str_of(to_literal, add_parens=False)
else:
to_chain = []
for g in groups:
if isinstance(g, list):
if g:
to_chain.append(tuple_str_of(g))
else:
to_chain.append(g)
<DeepExtract>
if not to_chain or callable(to_chain):
internal_assert(to_chain, 'invalid naked a, *b expression', tokens, exc_maker=partial(self.make_internal_syntax_err, original, loc))
</DeepExtract>
if is_list:
return '_coconut.list(_coconut.itertools.chain(' + ', '.join(to_chain) + '))'
else:
return '_coconut.tuple(_coconut.itertools.chain(' + ', '.join(to_chain) + '))'
if is_list:
return '[' + out + ']'
else:
return out
|
def testlist_star_expr_handle(self, original, loc, tokens, is_list=False):
"""Handle naked a, *b."""
groups = [[]]
has_star = False
has_comma = False
for tok_grp in tokens:
if tok_grp == ',':
has_comma = True
elif len(tok_grp) == 1:
groups[-1].append(tok_grp[0])
elif len(tok_grp) == 2:
internal_assert(not tok_grp[0].lstrip('*'), 'invalid star expr item signifier', tok_grp[0])
has_star = True
groups.append(tok_grp[1])
groups.append([])
else:
raise CoconutInternalException('invalid testlist_star_expr tokens', tokens)
if not groups[-1]:
groups.pop()
(groups, has_star, has_comma) = (groups, has_star, has_comma)
is_sequence = has_comma or is_list
if not is_sequence and (not has_star):
if not len(groups) == 1 and len(groups[0]) == 1 or callable(len(groups) == 1 and len(groups[0]) == 1):
internal_assert(len(groups) == 1 and len(groups[0]) == 1, 'invalid single-item testlist_star_expr tokens', tokens, exc_maker=partial(self.make_internal_syntax_err, original, loc))
out = groups[0][0]
elif not has_star:
if not len(groups) == 1 or callable(len(groups) == 1):
internal_assert(len(groups) == 1, 'testlist_star_expr group splitting failed on', tokens, exc_maker=partial(self.make_internal_syntax_err, original, loc))
out = tuple_str_of(groups[0], add_parens=False)
elif is_sequence and self.target_info >= (3, 5):
to_literal = []
for g in groups:
if isinstance(g, list):
to_literal.extend(g)
else:
to_literal.append('*' + g)
out = tuple_str_of(to_literal, add_parens=False)
else:
to_chain = []
for g in groups:
if isinstance(g, list):
if g:
to_chain.append(tuple_str_of(g))
else:
to_chain.append(g)
if not to_chain or callable(to_chain):
internal_assert(to_chain, 'invalid naked a, *b expression', tokens, exc_maker=partial(self.make_internal_syntax_err, original, loc))
if is_list:
return '_coconut.list(_coconut.itertools.chain(' + ', '.join(to_chain) + '))'
else:
return '_coconut.tuple(_coconut.itertools.chain(' + ', '.join(to_chain) + '))'
if is_list:
return '[' + out + ']'
else:
return out
|
coconut
|
positive
|
def test_swap():
sl = setlist('abcdef')
sl.swap(1, 2)
<DeepExtract>
print(sl._list)
print(sl._dict)
for (i, elem) in enumerate(sl):
assert sl._dict[elem] == i
assert len(sl._dict) == len(sl._list)
</DeepExtract>
assert sl == setlist('acbdef')
sl.swap(-1, 1)
<DeepExtract>
print(sl._list)
print(sl._dict)
for (i, elem) in enumerate(sl):
assert sl._dict[elem] == i
assert len(sl._dict) == len(sl._list)
</DeepExtract>
assert sl == setlist('afbdec')
|
def test_swap():
sl = setlist('abcdef')
sl.swap(1, 2)
print(sl._list)
print(sl._dict)
for (i, elem) in enumerate(sl):
assert sl._dict[elem] == i
assert len(sl._dict) == len(sl._list)
assert sl == setlist('acbdef')
sl.swap(-1, 1)
print(sl._list)
print(sl._dict)
for (i, elem) in enumerate(sl):
assert sl._dict[elem] == i
assert len(sl._dict) == len(sl._list)
assert sl == setlist('afbdec')
|
collections-extended
|
positive
|
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
(height, width) = (65, 65)
global_pool = False
<DeepExtract>
if None in [batch, None, None, 3]:
inputs = tf.placeholder(tf.float32, (batch, None, None, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(None), [None, 1]) + np.reshape(np.arange(None), [1, None]), [1, None, None, 1]), [batch, 1, 1, 3]))
</DeepExtract>
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
<DeepExtract>
block = resnet_v1.resnet_v1_block
blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
(output, _) = resnet_v1.resnet_v1(inputs, blocks, None, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, reuse=reuse, scope=scope)
</DeepExtract>
self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
<DeepExtract>
if None in [batch, height, width, 3]:
images = tf.placeholder(tf.float32, (batch, height, width, 3))
else:
images = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width]), [1, height, width, 1]), [batch, 1, 1, 3]))
</DeepExtract>
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
|
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
(height, width) = (65, 65)
global_pool = False
if None in [batch, None, None, 3]:
inputs = tf.placeholder(tf.float32, (batch, None, None, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(None), [None, 1]) + np.reshape(np.arange(None), [1, None]), [1, None, None, 1]), [batch, 1, 1, 3]))
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
block = resnet_v1.resnet_v1_block
blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
(output, _) = resnet_v1.resnet_v1(inputs, blocks, None, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, reuse=reuse, scope=scope)
self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
if None in [batch, height, width, 3]:
images = tf.placeholder(tf.float32, (batch, height, width, 3))
else:
images = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width]), [1, height, width, 1]), [batch, 1, 1, 3]))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
|
CSL_RetinaNet_Tensorflow
|
positive
|
def test_waits_init_min_ready_fails_if_not_controller(self):
def create_pod_wait_min_ready():
<DeepExtract>
chart = {'schema': 'armada/Chart/v{}'.format(str(version)), 'metadata': {'name': 'test'}, const.KEYWORD_DATA: {'wait': {'resources': [{'type': 'pod', 'labels': {'foo': 'bar'}, 'min_ready': 5}]}}}
return wait.ChartWait(k8s=mock.MagicMock(), release_id=helm.HelmReleaseId('test', 'test-test'), chart=chart, k8s_wait_attempts=1, k8s_wait_attempt_sleep=1, timeout=timeout)
</DeepExtract>
self.assertRaises(manifest_exceptions.ManifestException, create_pod_wait_min_ready)
def create_job_wait_min_ready():
<DeepExtract>
chart = {'schema': 'armada/Chart/v{}'.format(str(version)), 'metadata': {'name': 'test'}, const.KEYWORD_DATA: {'wait': {'resources': [{'type': 'job', 'labels': {'foo': 'bar'}, 'min_ready': 5}]}}}
return wait.ChartWait(k8s=mock.MagicMock(), release_id=helm.HelmReleaseId('test', 'test-test'), chart=chart, k8s_wait_attempts=1, k8s_wait_attempt_sleep=1, timeout=timeout)
</DeepExtract>
self.assertRaises(manifest_exceptions.ManifestException, create_job_wait_min_ready)
|
def test_waits_init_min_ready_fails_if_not_controller(self):
def create_pod_wait_min_ready():
chart = {'schema': 'armada/Chart/v{}'.format(str(version)), 'metadata': {'name': 'test'}, const.KEYWORD_DATA: {'wait': {'resources': [{'type': 'pod', 'labels': {'foo': 'bar'}, 'min_ready': 5}]}}}
return wait.ChartWait(k8s=mock.MagicMock(), release_id=helm.HelmReleaseId('test', 'test-test'), chart=chart, k8s_wait_attempts=1, k8s_wait_attempt_sleep=1, timeout=timeout)
self.assertRaises(manifest_exceptions.ManifestException, create_pod_wait_min_ready)
def create_job_wait_min_ready():
chart = {'schema': 'armada/Chart/v{}'.format(str(version)), 'metadata': {'name': 'test'}, const.KEYWORD_DATA: {'wait': {'resources': [{'type': 'job', 'labels': {'foo': 'bar'}, 'min_ready': 5}]}}}
return wait.ChartWait(k8s=mock.MagicMock(), release_id=helm.HelmReleaseId('test', 'test-test'), chart=chart, k8s_wait_attempts=1, k8s_wait_attempt_sleep=1, timeout=timeout)
self.assertRaises(manifest_exceptions.ManifestException, create_job_wait_min_ready)
|
armada
|
positive
|
def enabled_target_services(self, target, sysv='S', igno=[]):
units = []
if self.user_mode():
<DeepExtract>
target = target
if '.' not in target:
target += '.target'
targets = [target]
conf = self.get_target_conf(target)
requires = conf.get(Unit, 'Requires', '')
while requires in target_requires:
targets = [requires] + targets
requires = target_requires[requires]
logg.debug('the %s requires %s', target, targets)
targetlist = targets
</DeepExtract>
logg.debug('check for %s user services : %s', target, targetlist)
for targets in targetlist:
for unit in self.enabled_target_user_local_units(targets, '.target', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.required_target_units(targets, '.socket', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.enabled_target_user_local_units(targets, '.socket', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.required_target_units(targets, '.service', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.enabled_target_user_local_units(targets, '.service', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.enabled_target_user_system_units(targets, '.service', igno):
if unit not in units:
units.append(unit)
else:
<DeepExtract>
target = target
if '.' not in target:
target += '.target'
targets = [target]
conf = self.get_target_conf(target)
requires = conf.get(Unit, 'Requires', '')
while requires in target_requires:
targets = [requires] + targets
requires = target_requires[requires]
logg.debug('the %s requires %s', target, targets)
targetlist = targets
</DeepExtract>
logg.debug('check for %s system services: %s', target, targetlist)
for targets in targetlist:
for unit in self.enabled_target_configured_system_units(targets, '.target', igno + self.igno_targets):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.required_target_units(targets, '.socket', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.enabled_target_installed_system_units(targets, '.socket', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.required_target_units(targets, '.service', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.enabled_target_installed_system_units(targets, '.service', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.enabled_target_sysv_units(targets, sysv, igno):
if unit not in units:
units.append(unit)
return units
|
def enabled_target_services(self, target, sysv='S', igno=[]):
units = []
if self.user_mode():
target = target
if '.' not in target:
target += '.target'
targets = [target]
conf = self.get_target_conf(target)
requires = conf.get(Unit, 'Requires', '')
while requires in target_requires:
targets = [requires] + targets
requires = target_requires[requires]
logg.debug('the %s requires %s', target, targets)
targetlist = targets
logg.debug('check for %s user services : %s', target, targetlist)
for targets in targetlist:
for unit in self.enabled_target_user_local_units(targets, '.target', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.required_target_units(targets, '.socket', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.enabled_target_user_local_units(targets, '.socket', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.required_target_units(targets, '.service', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.enabled_target_user_local_units(targets, '.service', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.enabled_target_user_system_units(targets, '.service', igno):
if unit not in units:
units.append(unit)
else:
target = target
if '.' not in target:
target += '.target'
targets = [target]
conf = self.get_target_conf(target)
requires = conf.get(Unit, 'Requires', '')
while requires in target_requires:
targets = [requires] + targets
requires = target_requires[requires]
logg.debug('the %s requires %s', target, targets)
targetlist = targets
logg.debug('check for %s system services: %s', target, targetlist)
for targets in targetlist:
for unit in self.enabled_target_configured_system_units(targets, '.target', igno + self.igno_targets):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.required_target_units(targets, '.socket', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.enabled_target_installed_system_units(targets, '.socket', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.required_target_units(targets, '.service', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.enabled_target_installed_system_units(targets, '.service', igno):
if unit not in units:
units.append(unit)
for targets in targetlist:
for unit in self.enabled_target_sysv_units(targets, sysv, igno):
if unit not in units:
units.append(unit)
return units
|
docker-systemctl-images
|
positive
|
def run_trials(config_dir, python_cmd, experiment_name, model_name, specific_params, n_inputs, path_prefix, report_errors=False, append_to_csv=False, trial_run=False, trial_run_outfile='', cmd_id=0, conf_cnt=0, sync_gpu=True):
"""
Responsible for recording the time and max memory usage
from running a model (the user must provide a lambda for
actually running the model, because different kinds of models
need different kinds of setup, and a lambda that generates an
input for running that model)
:params:
trial_run: When set to true, no persistent experiment data will be saved. It is used to
run a baseline trial, record how much memory is used, and then set the memory budget
for `ratio` commands of DTR experiments
trial_run_outfile: the temporary file that stores the memory usage data of the baseline run
cmd_id: the command id for the current model, starting from 0 by default
conf_cnt: the id of the configuration generated from `unfold_settings`; this is used for tracking
which exact configuration caused the errors.
sync_gpu: Whether to set PyTorch into synchronous execution mode (synchronize between GPU ops),
useful for profiling
"""
try:
cwd = os.getcwd()
params_file = 'specific_params.json'
try:
write_json(cwd, params_file, specific_params)
if not trial_run:
filename = prepare_out_file(path_prefix, '{}-{}.csv'.format(get_report_prefix(experiment_name, specific_params, cmd_id), model_name))
mode = 'a' if append_to_csv else 'w'
with open(filename, mode, newline='') as csvfile:
<DeepExtract>
fieldnames = ['input', 'rep', 'num_retries'] + MEASURED_KEYS + list(specific_params.keys())
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
</DeepExtract>
if not append_to_csv:
writer.writeheader()
else:
filename = ''
shared_dir = os.path.dirname(os.path.abspath(__file__))
run_script = os.path.join(shared_dir, 'run_torch_trial.py')
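# Each input index runs in a separate run_torch_trial.py subprocess with a per-trial timeout; CUDA_LAUNCH_BLOCKING=1 is set when sync_gpu is requested so GPU ops execute synchronously for profiling.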
for i in range(n_inputs):
try:
subprocess_env = None
if sync_gpu:
subprocess_env = os.environ.copy()
subprocess_env['CUDA_LAUNCH_BLOCKING'] = '1'
subprocess.run([python_cmd, run_script, '--config-dir', config_dir, '--experiment-mode', experiment_name, '--model-name', model_name, '--input-idx', str(i), '--params-file', params_file, '--out-file', filename, '--trial-run', str(trial_run), '--trial-run-outfile', trial_run_outfile], check=True, timeout=specific_params.get('timeout', 60), env=subprocess_env)
except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
if not report_errors:
raise e
if trial_run:
return (False, 'Baseline failed: {}'.format(render_exception(e)))
<DeepExtract>
err_info = {'input': i, 'msg': render_exception(e)}
logged_errors = {}
if check_file_exists(path_prefix, 'errors.json'):
logged_errors = read_json(path_prefix, 'errors.json')
if experiment_name not in logged_errors:
logged_errors[experiment_name] = {}
if model_name not in logged_errors[experiment_name]:
logged_errors[experiment_name][model_name] = []
logged_errors[experiment_name][model_name].append({'err_info': err_info, **specific_params})
write_json(path_prefix, 'errors.json', logged_errors)
</DeepExtract>
return (True, 'successfully caught error')
time.sleep(4)
return (True, 'success')
finally:
os.remove(params_file)
except Exception as e:
return (False, 'Encountered exception on ({}, {}, {}):\n'.format(experiment_name, model_name, specific_params) + render_exception(e))
|
def run_trials(config_dir, python_cmd, experiment_name, model_name, specific_params, n_inputs, path_prefix, report_errors=False, append_to_csv=False, trial_run=False, trial_run_outfile='', cmd_id=0, conf_cnt=0, sync_gpu=True):
"""
Responsible for recording the time and max memory usage
from running a model (the user must provide a lambda for
actually running the model, because different kinds of models
need different kinds of setup, and a lambda that generates an
input for running that model)
:params:
trial_run: When set to true, no persistent experiment data will be saved. It is used to
run a baseline trial, record how much memory is used, and then set the memory budget
for `ratio` commands of DTR experiments
trial_run_outfile: the temporary file that stores the memory usage data of the baseline run
cmd_id: the command id for the current model, starting from 0 by default
conf_cnt: the id of the configuration generated from `unfold_settings`; this is used for tracking
which exact configuration caused the errors.
sync_gpu: Whether to set PyTorch into synchronous execution mode (synchronize between GPU ops),
useful for profiling
"""
try:
cwd = os.getcwd()
params_file = 'specific_params.json'
try:
write_json(cwd, params_file, specific_params)
if not trial_run:
filename = prepare_out_file(path_prefix, '{}-{}.csv'.format(get_report_prefix(experiment_name, specific_params, cmd_id), model_name))
mode = 'a' if append_to_csv else 'w'
with open(filename, mode, newline='') as csvfile:
fieldnames = ['input', 'rep', 'num_retries'] + MEASURED_KEYS + list(specific_params.keys())
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
if not append_to_csv:
writer.writeheader()
else:
filename = ''
shared_dir = os.path.dirname(os.path.abspath(__file__))
run_script = os.path.join(shared_dir, 'run_torch_trial.py')
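# Each input index runs in a separate run_torch_trial.py subprocess with a per-trial timeout; CUDA_LAUNCH_BLOCKING=1 is set when sync_gpu is requested so GPU ops execute synchronously for profiling.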
for i in range(n_inputs):
try:
subprocess_env = None
if sync_gpu:
subprocess_env = os.environ.copy()
subprocess_env['CUDA_LAUNCH_BLOCKING'] = '1'
subprocess.run([python_cmd, run_script, '--config-dir', config_dir, '--experiment-mode', experiment_name, '--model-name', model_name, '--input-idx', str(i), '--params-file', params_file, '--out-file', filename, '--trial-run', str(trial_run), '--trial-run-outfile', trial_run_outfile], check=True, timeout=specific_params.get('timeout', 60), env=subprocess_env)
except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
if not report_errors:
raise e
if trial_run:
return (False, 'Baseline failed: {}'.format(render_exception(e)))
err_info = {'input': i, 'msg': render_exception(e)}
logged_errors = {}
if check_file_exists(path_prefix, 'errors.json'):
logged_errors = read_json(path_prefix, 'errors.json')
if experiment_name not in logged_errors:
logged_errors[experiment_name] = {}
if model_name not in logged_errors[experiment_name]:
logged_errors[experiment_name][model_name] = []
logged_errors[experiment_name][model_name].append({'err_info': err_info, **specific_params})
write_json(path_prefix, 'errors.json', logged_errors)
return (True, 'successfully caught error')
time.sleep(4)
return (True, 'success')
finally:
os.remove(params_file)
except Exception as e:
return (False, 'Encountered exception on ({}, {}, {}):\n'.format(experiment_name, model_name, specific_params) + render_exception(e))
|
dtr-prototype
|
positive
|
def draw(self, stepCount, root):
if self.canvas is None or root is None:
return
<DeepExtract>
(x1, y1) = self.robotPos
</DeepExtract>
x1 = x1 % self.totWidth
if y1 != self.groundY:
raise Exception('Flying Robot!!')
<DeepExtract>
(armCos, armSin) = self.__getCosAndSin(self.armAngle)
(handCos, handSin) = self.__getCosAndSin(self.handAngle)
x = self.armLength * armCos + self.handLength * handCos + self.robotWidth
y = self.armLength * armSin + self.handLength * handSin + self.robotHeight
if y < 0:
rotationAngle = math.atan(-y / x)
rotationAngle = 0.0
</DeepExtract>
<DeepExtract>
(cosRot, sinRot) = (math.cos(rotationAngle), math.sin(rotationAngle))
</DeepExtract>
x2 = x1 + self.robotWidth * cosRot
y2 = y1 - self.robotWidth * sinRot
x3 = x1 - self.robotHeight * sinRot
y3 = y1 - self.robotHeight * cosRot
x4 = x3 + cosRot * self.robotWidth
y4 = y3 - sinRot * self.robotWidth
self.canvas.coords(self.robotBody, x1, y1, x2, y2, x4, y4, x3, y3)
<DeepExtract>
(armCos, armSin) = (math.cos(rotationAngle + self.armAngle), math.sin(rotationAngle + self.armAngle))
</DeepExtract>
xArm = x4 + self.armLength * armCos
yArm = y4 - self.armLength * armSin
self.canvas.coords(self.robotArm, x4, y4, xArm, yArm)
<DeepExtract>
(handCos, handSin) = (math.cos(self.handAngle + rotationAngle), math.sin(self.handAngle + rotationAngle))
</DeepExtract>
xHand = xArm + self.handLength * handCos
yHand = yArm - self.handLength * handSin
self.canvas.coords(self.robotHand, xArm, yArm, xHand, yHand)
steps = stepCount - self.lastStep
pos = self.positions[-1]
velocity = pos - self.positions[-2]
vel2 = (pos - self.positions[0]) / len(self.positions)
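# Exponentially smooth the windowed velocity (weight 0.1 on the new sample) for the 100-step average readout.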
self.velAvg = 0.9 * self.velAvg + 0.1 * vel2
velMsg = '100-step Avg Velocity: %.2f' % self.velAvg
velocityMsg = 'Velocity: %.2f' % velocity
positionMsg = 'Position: %2.f' % pos
stepMsg = 'Step: %d' % stepCount
if 'vel_msg' in dir(self):
self.canvas.delete(self.vel_msg)
self.canvas.delete(self.pos_msg)
self.canvas.delete(self.step_msg)
self.canvas.delete(self.velavg_msg)
self.velavg_msg = self.canvas.create_text(650, 190, text=velMsg)
self.vel_msg = self.canvas.create_text(450, 190, text=velocityMsg)
self.pos_msg = self.canvas.create_text(250, 190, text=positionMsg)
self.step_msg = self.canvas.create_text(50, 190, text=stepMsg)
self.lastStep = stepCount
root.update()
|
def draw(self, stepCount, root):
if self.canvas is None or root is None:
return
(x1, y1) = self.robotPos
x1 = x1 % self.totWidth
if y1 != self.groundY:
raise Exception('Flying Robot!!')
(armCos, armSin) = self.__getCosAndSin(self.armAngle)
(handCos, handSin) = self.__getCosAndSin(self.handAngle)
x = self.armLength * armCos + self.handLength * handCos + self.robotWidth
y = self.armLength * armSin + self.handLength * handSin + self.robotHeight
if y < 0:
rotationAngle = math.atan(-y / x)
rotationAngle = 0.0
(cosRot, sinRot) = (math.cos(rotationAngle), math.sin(rotationAngle))
x2 = x1 + self.robotWidth * cosRot
y2 = y1 - self.robotWidth * sinRot
x3 = x1 - self.robotHeight * sinRot
y3 = y1 - self.robotHeight * cosRot
x4 = x3 + cosRot * self.robotWidth
y4 = y3 - sinRot * self.robotWidth
self.canvas.coords(self.robotBody, x1, y1, x2, y2, x4, y4, x3, y3)
(armCos, armSin) = (math.cos(rotationAngle + self.armAngle), math.sin(rotationAngle + self.armAngle))
xArm = x4 + self.armLength * armCos
yArm = y4 - self.armLength * armSin
self.canvas.coords(self.robotArm, x4, y4, xArm, yArm)
(handCos, handSin) = (math.cos(self.handAngle + rotationAngle), math.sin(self.handAngle + rotationAngle))
xHand = xArm + self.handLength * handCos
yHand = yArm - self.handLength * handSin
self.canvas.coords(self.robotHand, xArm, yArm, xHand, yHand)
steps = stepCount - self.lastStep
pos = self.positions[-1]
velocity = pos - self.positions[-2]
vel2 = (pos - self.positions[0]) / len(self.positions)
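# Exponentially smooth the windowed velocity (weight 0.1 on the new sample) for the 100-step average readout.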
self.velAvg = 0.9 * self.velAvg + 0.1 * vel2
velMsg = '100-step Avg Velocity: %.2f' % self.velAvg
velocityMsg = 'Velocity: %.2f' % velocity
positionMsg = 'Position: %2.f' % pos
stepMsg = 'Step: %d' % stepCount
if 'vel_msg' in dir(self):
self.canvas.delete(self.vel_msg)
self.canvas.delete(self.pos_msg)
self.canvas.delete(self.step_msg)
self.canvas.delete(self.velavg_msg)
self.velavg_msg = self.canvas.create_text(650, 190, text=velMsg)
self.vel_msg = self.canvas.create_text(450, 190, text=velocityMsg)
self.pos_msg = self.canvas.create_text(250, 190, text=positionMsg)
self.step_msg = self.canvas.create_text(50, 190, text=stepMsg)
self.lastStep = stepCount
root.update()
|
Deep-RL-Bootcamp-Labs
|
positive
|
def model_fn_builder(num_labels, learning_rate, num_train_steps, num_warmup_steps, use_tpu):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params):
"""The `model_fn` for TPUEstimator."""
tf.logging.info('*** Features ***')
for name in sorted(features.keys()):
tf.logging.info(' name = %s, shape = %s' % (name, features[name].shape))
input_ids = features['input_ids']
input_mask = features['input_mask']
segment_ids = features['segment_ids']
label_ids = features['label_ids']
is_training = mode == tf.estimator.ModeKeys.TRAIN
<DeepExtract>
tags = set()
if is_training:
tags.add('train')
bert_module = hub.Module(FLAGS.bert_hub_module_handle, tags=tags, trainable=True)
bert_inputs = dict(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids)
bert_outputs = bert_module(inputs=bert_inputs, signature='tokens', as_dict=True)
output_layer = bert_outputs['pooled_output']
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable('output_weights', [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable('output_bias', [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope('loss'):
if is_training:
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(label_ids, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
(total_loss, per_example_loss, logits) = (loss, per_example_loss, logits)
</DeepExtract>
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(label_ids, predictions)
loss = tf.metrics.mean(per_example_loss)
return {'eval_accuracy': accuracy, 'eval_loss': loss}
eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics)
else:
raise ValueError('Only TRAIN and EVAL modes are supported: %s' % mode)
return output_spec
return model_fn
|
def model_fn_builder(num_labels, learning_rate, num_train_steps, num_warmup_steps, use_tpu):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params):
"""The `model_fn` for TPUEstimator."""
tf.logging.info('*** Features ***')
for name in sorted(features.keys()):
tf.logging.info(' name = %s, shape = %s' % (name, features[name].shape))
input_ids = features['input_ids']
input_mask = features['input_mask']
segment_ids = features['segment_ids']
label_ids = features['label_ids']
is_training = mode == tf.estimator.ModeKeys.TRAIN
tags = set()
if is_training:
tags.add('train')
bert_module = hub.Module(FLAGS.bert_hub_module_handle, tags=tags, trainable=True)
bert_inputs = dict(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids)
bert_outputs = bert_module(inputs=bert_inputs, signature='tokens', as_dict=True)
output_layer = bert_outputs['pooled_output']
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable('output_weights', [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable('output_bias', [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope('loss'):
if is_training:
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(label_ids, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
(total_loss, per_example_loss, logits) = (loss, per_example_loss, logits)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(label_ids, predictions)
loss = tf.metrics.mean(per_example_loss)
return {'eval_accuracy': accuracy, 'eval_loss': loss}
eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics)
else:
raise ValueError('Only TRAIN and EVAL modes are supported: %s' % mode)
return output_spec
return model_fn
|
coref
|
positive
|
@extra_pad_end.setter
def extra_pad_end(self, value):
<DeepExtract>
if self.built:
raise RuntimeError("Can't modify layer attributes after it has been built.")
</DeepExtract>
self._extra_pad_end = None if value is None else bool(value)
|
@extra_pad_end.setter
def extra_pad_end(self, value):
if self.built:
raise RuntimeError("Can't modify layer attributes after it has been built.")
self._extra_pad_end = None if value is None else bool(value)
|
compression
|
positive
|
def Main():
global oLogger
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except:
pass
oParser = optparse.OptionParser(usage='usage: %prog [options] [file/directory]\nEnvironment variable for options: PECHECK_OPTIONS\n' + __description__, version='%prog ' + __version__)
oParser.add_option('-m', '--man', action='store_true', default=False, help='Print manual')
oParser.add_option('-d', '--db', default='', help='the PeID user db file, default userdb.txt in same directory as pecheck')
oParser.add_option('-s', '--scan', action='store_true', default=False, help='scan folder')
oParser.add_option('-e', '--entropy', type=float, default=7.0, help='the minimum entropy value for a file to be listed in scan mode (default 7.0)')
oParser.add_option('-y', '--yara', help="YARA rule-file, @file or directory to check streams (YARA search doesn't work with -s option)")
oParser.add_option('--yarastrings', action='store_true', default=False, help='Print YARA strings')
oParser.add_option('-g', '--getdata', type=str, default='', help='Get data from the PE file (example 0x1234,0x100)')
oParser.add_option('-D', '--dump', action='store_true', default=False, help='perform dump')
oParser.add_option('-x', '--hexdump', action='store_true', default=False, help='perform hex dump')
oParser.add_option('-a', '--asciidump', action='store_true', default=False, help='perform ascii dump')
oParser.add_option('-A', '--asciidumprle', action='store_true', default=False, help='perform ascii dump with RLE')
oParser.add_option('-S', '--strings', action='store_true', default=False, help='perform strings dump')
oParser.add_option('-o', '--overview', type=str, default='', help='Accepted value: r for overview of resources, s for sections')
oParser.add_option('-l', '--locate', type=str, default='', help='Locate PE files inside binary data (P for list of MZ/PE headers)')
(options, args) = oParser.parse_args(GetArgumentsUpdatedWithEnvironmentVariable('PECHECK_OPTIONS'))
if options.man:
oParser.print_help()
<DeepExtract>
manual = '\nManual:\n\nThis manual is a work in progress.\n\nUse option -l to locate and select PE files embedded inside the provided file.\nUse -l P to get an overview of all embedded PE files, like this:\n\nC:\\Demo>pecheck.py -l P sample.png.vir\n1: 0x00002ebb DLL 32-bit 0x00016eba 0x000270ba (EOF)\n2: 0x00016ebb DLL 64-bit 0x000270ba 0x000270ba (EOF)\n\nThen select an embedded PE file for further analysis, like this:\n\nC:\\Demo>pecheck.py -l 2 sample.png.vir\n\nUse option -g o (o = overlay) to extract the overlay, and -g s (s = stripped) to extract the PE file without overlay.\n\n'
for line in manual.split('\n'):
print(textwrap.fill(line))
</DeepExtract>
return 0
if len(args) > 1 or (options.overview != '' and options.overview != 'r' and (options.overview != 's')):
oParser.print_help()
print('')
print(' Source code put in the public domain by Didier Stevens, no Copyright')
print(' Use at your own risk')
print(' https://DidierStevens.com')
return
else:
try:
dbfile = options.db
if dbfile == '':
dbfile = os.path.join(os.path.dirname(sys.argv[0]), 'userdb.txt')
if os.path.exists(dbfile):
signatures = peutils.SignatureDatabase(dbfile)
else:
signatures = 'Error: signature database missing'
except:
signatures = 'Error: while reading the signature database: %s' % sys.exc_info()[1].message
if len(args) == 0:
<DeepExtract>
data = ReadFile('')
if options.locate == 'P':
for (index, position) in enumerate(FindAllPEFiles(data)):
print('%d: 0x%08x%s' % (index + 1, position, PrefixIfNeeded(GetInfoCarvedFile(data, position))))
return
elif options.locate != '':
try:
index = int(options.locate)
except:
print('Error with option locate: %s' % options.locate)
return
index -= 1
locations = FindAllPEFiles(data)
if index < 0 or index >= len(locations):
print('Error with index option locate: %s' % options.locate)
return
data = data[locations[index]:]
if options.overview == 'r':
Resources(data, options)
elif options.overview == 's':
Sections(data, options)
elif options.getdata != '':
pe = pefile.PE(data=data)
DumpFunction = GetDumpFunction(options)
if options.getdata == 'o':
overlayOffset = pe.get_overlay_data_start_offset()
if overlayOffset == None:
print('No overlay')
else:
StdoutWriteChunked(DumpFunction(str(pe.write()[overlayOffset:])))
elif options.getdata == 's':
overlayOffset = pe.get_overlay_data_start_offset()
if overlayOffset == None:
StdoutWriteChunked(DumpFunction(str(pe.write())))
else:
StdoutWriteChunked(DumpFunction(str(pe.write()[:overlayOffset])))
else:
parsed = ParseGetData(options.getdata)
if parsed == None:
print('Error getdata syntax error: %s' % options.getdata)
return
StdoutWriteChunked(DumpFunction(pe.get_data(parsed[0], parsed[1])))
else:
SingleFileInfo('', data, signatures, options)
</DeepExtract>
elif options.scan:
oLogger = CSVLogger('pecheck', ('Filename', 'Entropy', 'Sections', 'Executable sections', 'Executable and writable sections', 'Size AuthentiCode', 'Stored CRC', 'Calculated CRC', 'CRC anomaly', 'Compile date', 'CompanyName', 'ProductName', 'MD5'))
<DeepExtract>
try:
if os.path.isdir(args[0]):
for entry in os.listdir(args[0]):
ScanFiles(os.path.join(args[0], entry), signatures, options.entropy)
else:
ScanFile(args[0], signatures, options.entropy)
except WindowsError:
if sys.exc_value.winerror == 5:
pass
else:
print(sys.exc_value)
sys.exit()
</DeepExtract>
else:
<DeepExtract>
data = ReadFile(args[0])
if options.locate == 'P':
for (index, position) in enumerate(FindAllPEFiles(data)):
print('%d: 0x%08x%s' % (index + 1, position, PrefixIfNeeded(GetInfoCarvedFile(data, position))))
return
elif options.locate != '':
try:
index = int(options.locate)
except:
print('Error with option locate: %s' % options.locate)
return
index -= 1
locations = FindAllPEFiles(data)
if index < 0 or index >= len(locations):
print('Error with index option locate: %s' % options.locate)
return
data = data[locations[index]:]
if options.overview == 'r':
Resources(data, options)
elif options.overview == 's':
Sections(data, options)
elif options.getdata != '':
pe = pefile.PE(data=data)
DumpFunction = GetDumpFunction(options)
if options.getdata == 'o':
overlayOffset = pe.get_overlay_data_start_offset()
if overlayOffset == None:
print('No overlay')
else:
StdoutWriteChunked(DumpFunction(str(pe.write()[overlayOffset:])))
elif options.getdata == 's':
overlayOffset = pe.get_overlay_data_start_offset()
if overlayOffset == None:
StdoutWriteChunked(DumpFunction(str(pe.write())))
else:
StdoutWriteChunked(DumpFunction(str(pe.write()[:overlayOffset])))
else:
parsed = ParseGetData(options.getdata)
if parsed == None:
print('Error getdata syntax error: %s' % options.getdata)
return
StdoutWriteChunked(DumpFunction(pe.get_data(parsed[0], parsed[1])))
else:
SingleFileInfo(args[0], data, signatures, options)
</DeepExtract>
|
def Main():
global oLogger
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except:
pass
oParser = optparse.OptionParser(usage='usage: %prog [options] [file/directory]\nEnvironment variable for options: PECHECK_OPTIONS\n' + __description__, version='%prog ' + __version__)
oParser.add_option('-m', '--man', action='store_true', default=False, help='Print manual')
oParser.add_option('-d', '--db', default='', help='the PeID user db file, default userdb.txt in same directory as pecheck')
oParser.add_option('-s', '--scan', action='store_true', default=False, help='scan folder')
oParser.add_option('-e', '--entropy', type=float, default=7.0, help='the minimum entropy value for a file to be listed in scan mode (default 7.0)')
oParser.add_option('-y', '--yara', help="YARA rule-file, @file or directory to check streams (YARA search doesn't work with -s option)")
oParser.add_option('--yarastrings', action='store_true', default=False, help='Print YARA strings')
oParser.add_option('-g', '--getdata', type=str, default='', help='Get data from the PE file (example 0x1234,0x100)')
oParser.add_option('-D', '--dump', action='store_true', default=False, help='perform dump')
oParser.add_option('-x', '--hexdump', action='store_true', default=False, help='perform hex dump')
oParser.add_option('-a', '--asciidump', action='store_true', default=False, help='perform ascii dump')
oParser.add_option('-A', '--asciidumprle', action='store_true', default=False, help='perform ascii dump with RLE')
oParser.add_option('-S', '--strings', action='store_true', default=False, help='perform strings dump')
oParser.add_option('-o', '--overview', type=str, default='', help='Accepted value: r for overview of resources, s for sections')
oParser.add_option('-l', '--locate', type=str, default='', help='Locate PE files inside binary data (P for list of MZ/PE headers)')
(options, args) = oParser.parse_args(GetArgumentsUpdatedWithEnvironmentVariable('PECHECK_OPTIONS'))
if options.man:
oParser.print_help()
manual = '\nManual:\n\nThis manual is a work in progress.\n\nUse option -l to locate and select PE files embedded inside the provided file.\nUse -l P to get an overview of all embedded PE files, like this:\n\nC:\\Demo>pecheck.py -l P sample.png.vir\n1: 0x00002ebb DLL 32-bit 0x00016eba 0x000270ba (EOF)\n2: 0x00016ebb DLL 64-bit 0x000270ba 0x000270ba (EOF)\n\nThen select an embedded PE file for further analysis, like this:\n\nC:\\Demo>pecheck.py -l 2 sample.png.vir\n\nUse option -g o (o = overlay) to extract the overlay, and -g s (s = stripped) to extract the PE file without overlay.\n\n'
for line in manual.split('\n'):
print(textwrap.fill(line))
return 0
if len(args) > 1 or (options.overview != '' and options.overview != 'r' and (options.overview != 's')):
oParser.print_help()
print('')
print(' Source code put in the public domain by Didier Stevens, no Copyright')
print(' Use at your own risk')
print(' https://DidierStevens.com')
return
else:
try:
dbfile = options.db
if dbfile == '':
dbfile = os.path.join(os.path.dirname(sys.argv[0]), 'userdb.txt')
if os.path.exists(dbfile):
signatures = peutils.SignatureDatabase(dbfile)
else:
signatures = 'Error: signature database missing'
except:
signatures = 'Error: while reading the signature database: %s' % sys.exc_info()[1].message
if len(args) == 0:
data = ReadFile('')
if options.locate == 'P':
for (index, position) in enumerate(FindAllPEFiles(data)):
print('%d: 0x%08x%s' % (index + 1, position, PrefixIfNeeded(GetInfoCarvedFile(data, position))))
return
elif options.locate != '':
try:
index = int(options.locate)
except:
print('Error with option locate: %s' % options.locate)
return
index -= 1
locations = FindAllPEFiles(data)
if index < 0 or index >= len(locations):
print('Error with index option locate: %s' % options.locate)
return
data = data[locations[index]:]
if options.overview == 'r':
Resources(data, options)
elif options.overview == 's':
Sections(data, options)
elif options.getdata != '':
pe = pefile.PE(data=data)
DumpFunction = GetDumpFunction(options)
if options.getdata == 'o':
overlayOffset = pe.get_overlay_data_start_offset()
if overlayOffset == None:
print('No overlay')
else:
StdoutWriteChunked(DumpFunction(str(pe.write()[overlayOffset:])))
elif options.getdata == 's':
overlayOffset = pe.get_overlay_data_start_offset()
if overlayOffset == None:
StdoutWriteChunked(DumpFunction(str(pe.write())))
else:
StdoutWriteChunked(DumpFunction(str(pe.write()[:overlayOffset])))
else:
parsed = ParseGetData(options.getdata)
if parsed == None:
print('Error getdata syntax error: %s' % options.getdata)
return
StdoutWriteChunked(DumpFunction(pe.get_data(parsed[0], parsed[1])))
else:
SingleFileInfo('', data, signatures, options)
elif options.scan:
oLogger = CSVLogger('pecheck', ('Filename', 'Entropy', 'Sections', 'Executable sections', 'Executable and writable sections', 'Size AuthentiCode', 'Stored CRC', 'Calculated CRC', 'CRC anomaly', 'Compile date', 'CompanyName', 'ProductName', 'MD5'))
try:
if os.path.isdir(args[0]):
for entry in os.listdir(args[0]):
ScanFiles(os.path.join(args[0], entry), signatures, options.entropy)
else:
ScanFile(args[0], signatures, options.entropy)
except WindowsError:
if sys.exc_value.winerror == 5:
pass
else:
print(sys.exc_value)
sys.exit()
else:
data = ReadFile(args[0])
if options.locate == 'P':
for (index, position) in enumerate(FindAllPEFiles(data)):
print('%d: 0x%08x%s' % (index + 1, position, PrefixIfNeeded(GetInfoCarvedFile(data, position))))
return
elif options.locate != '':
try:
index = int(options.locate)
except:
print('Error with option locate: %s' % options.locate)
return
index -= 1
locations = FindAllPEFiles(data)
if index < 0 or index >= len(locations):
print('Error with index option locate: %s' % options.locate)
return
data = data[locations[index]:]
if options.overview == 'r':
Resources(data, options)
elif options.overview == 's':
Sections(data, options)
elif options.getdata != '':
pe = pefile.PE(data=data)
DumpFunction = GetDumpFunction(options)
if options.getdata == 'o':
overlayOffset = pe.get_overlay_data_start_offset()
if overlayOffset == None:
print('No overlay')
else:
StdoutWriteChunked(DumpFunction(str(pe.write()[overlayOffset:])))
elif options.getdata == 's':
overlayOffset = pe.get_overlay_data_start_offset()
if overlayOffset == None:
StdoutWriteChunked(DumpFunction(str(pe.write())))
else:
StdoutWriteChunked(DumpFunction(str(pe.write()[:overlayOffset])))
else:
parsed = ParseGetData(options.getdata)
if parsed == None:
print('Error getdata syntax error: %s' % options.getdata)
return
StdoutWriteChunked(DumpFunction(pe.get_data(parsed[0], parsed[1])))
else:
SingleFileInfo(args[0], data, signatures, options)
|
analyst-scripts
|
positive
|
def run_query(self, query, user):
logger.debug('Spreadsheet is about to execute query: %s', query)
values = query.split('|')
key = values[0]
worksheet_num = 0 if len(values) != 2 else int(values[1])
try:
<DeepExtract>
scope = ['https://spreadsheets.google.com/feeds']
key = json.loads(b64decode(self.configuration['jsonKeyFile']))
creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)
timeout_session = HTTPSession()
timeout_session.requests_session = TimeoutSession()
spreadsheetservice = gspread.Client(auth=creds, http_session=timeout_session)
spreadsheetservice.login()
spreadsheet_service = spreadsheetservice
</DeepExtract>
spreadsheet = spreadsheet_service.open_by_key(key)
<DeepExtract>
worksheets = spreadsheet.worksheets()
worksheet_count = len(worksheets)
if worksheet_num >= worksheet_count:
raise WorksheetNotFoundError(worksheet_num, worksheet_count)
worksheet = worksheets[worksheet_num].get_all_values()
data = parse_worksheet(worksheet)
</DeepExtract>
json_data = json.dumps(data, cls=JSONEncoder)
error = None
except gspread.SpreadsheetNotFound:
error = 'Spreadsheet ({}) not found. Make sure you used correct id.'.format(key)
json_data = None
return (json_data, error)
|
def run_query(self, query, user):
logger.debug('Spreadsheet is about to execute query: %s', query)
values = query.split('|')
key = values[0]
worksheet_num = 0 if len(values) != 2 else int(values[1])
try:
scope = ['https://spreadsheets.google.com/feeds']
key = json.loads(b64decode(self.configuration['jsonKeyFile']))
creds = ServiceAccountCredentials.from_json_keyfile_dict(key, scope)
timeout_session = HTTPSession()
timeout_session.requests_session = TimeoutSession()
spreadsheetservice = gspread.Client(auth=creds, http_session=timeout_session)
spreadsheetservice.login()
spreadsheet_service = spreadsheetservice
spreadsheet = spreadsheet_service.open_by_key(key)
worksheets = spreadsheet.worksheets()
worksheet_count = len(worksheets)
if worksheet_num >= worksheet_count:
raise WorksheetNotFoundError(worksheet_num, worksheet_count)
worksheet = worksheets[worksheet_num].get_all_values()
data = parse_worksheet(worksheet)
json_data = json.dumps(data, cls=JSONEncoder)
error = None
except gspread.SpreadsheetNotFound:
error = 'Spreadsheet ({}) not found. Make sure you used correct id.'.format(key)
json_data = None
return (json_data, error)
|
docker-redash
|
positive
|
def get_num_examples(bb_task_name, bb_json_metadata=None, bb_json_subtask_names=None):
"""Get number of examples in the task."""
<DeepExtract>
if bb_json_metadata is None:
bb_json_metadata = BigBenchJsonPathsFetcher.bigbench_json_metadata()
bb_json_metadata = bb_json_metadata
</DeepExtract>
<DeepExtract>
if bb_json_subtask_names is None:
bb_json_subtask_names = BigBenchJsonPathsFetcher.bigbench_json_subtask_names()
bb_json_subtask_names = bb_json_subtask_names
</DeepExtract>
if bb_task_name in bb_json_subtask_names:
return sum([get_num_examples(subtask_name, bb_json_metadata=bb_json_metadata, bb_json_subtask_names=bb_json_subtask_names) for subtask_name in bb_json_subtask_names[bb_task_name]])
return max(bb_json_metadata[bb_task_name]['num_generate_text'], bb_json_metadata[bb_task_name]['num_multiple_choice'])
|
def get_num_examples(bb_task_name, bb_json_metadata=None, bb_json_subtask_names=None):
"""Get number of examples in the task."""
if bb_json_metadata is None:
bb_json_metadata = BigBenchJsonPathsFetcher.bigbench_json_metadata()
bb_json_metadata = bb_json_metadata
if bb_json_subtask_names is None:
bb_json_subtask_names = BigBenchJsonPathsFetcher.bigbench_json_subtask_names()
bb_json_subtask_names = bb_json_subtask_names
if bb_task_name in bb_json_subtask_names:
return sum([get_num_examples(subtask_name, bb_json_metadata=bb_json_metadata, bb_json_subtask_names=bb_json_subtask_names) for subtask_name in bb_json_subtask_names[bb_task_name]])
return max(bb_json_metadata[bb_task_name]['num_generate_text'], bb_json_metadata[bb_task_name]['num_multiple_choice'])
|
BIG-bench
|
positive
|
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
<DeepExtract>
output_embeddings = None
</DeepExtract>
if output_embeddings is not None:
<DeepExtract>
if self.config.torchscript:
output_embeddings.weight = nn.Parameter(self.get_input_embeddings().weight.clone())
else:
output_embeddings.weight = self.get_input_embeddings().weight
if hasattr(output_embeddings, 'bias') and output_embeddings.bias is not None:
output_embeddings.bias.data = torch.nn.functional.pad(output_embeddings.bias.data, (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]), 'constant', 0)
if hasattr(output_embeddings, 'out_features') and hasattr(self.get_input_embeddings(), 'num_embeddings'):
output_embeddings.out_features = self.get_input_embeddings().num_embeddings
</DeepExtract>
|
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
output_embeddings = None
if output_embeddings is not None:
if self.config.torchscript:
output_embeddings.weight = nn.Parameter(self.get_input_embeddings().weight.clone())
else:
output_embeddings.weight = self.get_input_embeddings().weight
if hasattr(output_embeddings, 'bias') and output_embeddings.bias is not None:
output_embeddings.bias.data = torch.nn.functional.pad(output_embeddings.bias.data, (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]), 'constant', 0)
if hasattr(output_embeddings, 'out_features') and hasattr(self.get_input_embeddings(), 'num_embeddings'):
output_embeddings.out_features = self.get_input_embeddings().num_embeddings
|
BERT-Relation-Extraction
|
positive
|
@pytest.mark.parametrize('n_jobs', [1, 3, -2])
def test_em_missing_data(self, n_jobs):
"""Test EM algorithm given some "missing" data"""
<DeepExtract>
def mode(lst: Iterable) -> Any:
(df, sm, node_states, true_lv_values) = Counter(lst).most_common()[0][0] if len(lst) > 0 else np.nan
np.random.seed(seed)
par_samples = np.random.choice(2, size=[5000, 1])
if 1 == 0:
true_lv_values = np.random.choice(2, size=[5000, 1])
else:
true_lv_values = np.array([[(mode(el) + np.random.choice(2, p=[0.6, 1 - 0.6])) % 2] for el in par_samples])
child_samples = np.random.random(size=[5000, children])
aux = true_lv_values.repeat(children, axis=1)
child_samples = np.where(child_samples < 0.6, aux, (aux + 1) % 2)
df = pd.concat([pd.DataFrame(par_samples, columns=[f'p_{i}' for i in range(1)]), pd.DataFrame(child_samples, columns=[f'c_{i}' for i in range(children)]), pd.DataFrame(true_lv_values, columns=['z'])], axis=1)
df.loc[int(5000 * 0.25):, 'z'] = np.nan
sm = StructureModel()
sm.add_edges_from([(f'p_{i}', 'z') for i in range(1)])
sm.add_edges_from([('z', f'c_{i}') for i in range(children)])
node_states = {'z': list(range(2))}
for i in range(1):
node_states[f'p_{i}'] = list(range(2))
for i in range(children):
node_states[f'c_{i}'] = list(range(2))
(df, sm, node_states, true_lv_values) = (df, sm, node_states, true_lv_values)
</DeepExtract>
em = EMSingleLatentVariable(data=df, sm=sm, node_states=node_states, lv_name='z', n_jobs=n_jobs)
em.run(n_runs=50, stopping_delta=0.001, verbose=2)
<DeepExtract>
df['z'] = true_lv_values.reshape(-1)
bn = BayesianNetwork(sm)
bn.fit_node_states(states_to_df(node_states))
bn.fit_cpds(df)
max_delta = -1
avg_delta = 0
for node in em.cpds:
deltas = (em.cpds[node] - bn.cpds[node]).abs().values
max_delta = max(max_delta, deltas.max())
avg_delta += np.mean(deltas ** 2)
avg_delta = np.sqrt(avg_delta / len(em.cpds))
(max_error, rmse_error) = (max_delta, avg_delta)
</DeepExtract>
assert max_error < 0.02
assert rmse_error < 0.01
|
@pytest.mark.parametrize('n_jobs', [1, 3, -2])
def test_em_missing_data(self, n_jobs):
"""Test EM algorithm given some "missing" data"""
def mode(lst: Iterable) -> Any:
(df, sm, node_states, true_lv_values) = Counter(lst).most_common()[0][0] if len(lst) > 0 else np.nan
np.random.seed(seed)
par_samples = np.random.choice(2, size=[5000, 1])
if 1 == 0:
true_lv_values = np.random.choice(2, size=[5000, 1])
else:
true_lv_values = np.array([[(mode(el) + np.random.choice(2, p=[0.6, 1 - 0.6])) % 2] for el in par_samples])
child_samples = np.random.random(size=[5000, children])
aux = true_lv_values.repeat(children, axis=1)
child_samples = np.where(child_samples < 0.6, aux, (aux + 1) % 2)
df = pd.concat([pd.DataFrame(par_samples, columns=[f'p_{i}' for i in range(1)]), pd.DataFrame(child_samples, columns=[f'c_{i}' for i in range(children)]), pd.DataFrame(true_lv_values, columns=['z'])], axis=1)
df.loc[int(5000 * 0.25):, 'z'] = np.nan
sm = StructureModel()
sm.add_edges_from([(f'p_{i}', 'z') for i in range(1)])
sm.add_edges_from([('z', f'c_{i}') for i in range(children)])
node_states = {'z': list(range(2))}
for i in range(1):
node_states[f'p_{i}'] = list(range(2))
for i in range(children):
node_states[f'c_{i}'] = list(range(2))
(df, sm, node_states, true_lv_values) = (df, sm, node_states, true_lv_values)
em = EMSingleLatentVariable(data=df, sm=sm, node_states=node_states, lv_name='z', n_jobs=n_jobs)
em.run(n_runs=50, stopping_delta=0.001, verbose=2)
df['z'] = true_lv_values.reshape(-1)
bn = BayesianNetwork(sm)
bn.fit_node_states(states_to_df(node_states))
bn.fit_cpds(df)
max_delta = -1
avg_delta = 0
for node in em.cpds:
deltas = (em.cpds[node] - bn.cpds[node]).abs().values
max_delta = max(max_delta, deltas.max())
avg_delta += np.mean(deltas ** 2)
avg_delta = np.sqrt(avg_delta / len(em.cpds))
(max_error, rmse_error) = (max_delta, avg_delta)
assert max_error < 0.02
assert rmse_error < 0.01
|
causalnex
|
positive
|
def test_distribution_dct_format_other_uri(self):
<DeepExtract>
g = Graph()
dataset = URIRef('http://example.org/datasets/1')
g.add((dataset, RDF.type, DCAT.Dataset))
distribution = URIRef('http://example.org/datasets/1/ds/1')
g.add((dataset, DCAT.distribution, distribution))
g.add((distribution, RDF.type, DCAT.Distribution))
if URIRef('https://example.com/my/format'):
g.add((distribution, DCT['format'], URIRef('https://example.com/my/format')))
if mediatype_item:
g.add((distribution, DCAT.mediaType, mediatype_item))
if URIRef('https://example.com/my/format') is None and mediatype_item is None:
raise AssertionError('At least one of format or mediaType is required!')
p = RDFParser(profiles=['euro_dcat_ap'])
p.g = g
dataset = [d for d in p.datasets()][0]
resources = dataset.get('resources')
</DeepExtract>
assert u'https://example.com/my/format' == resources[0].get('format')
assert None == resources[0].get('mimetype')
|
def test_distribution_dct_format_other_uri(self):
g = Graph()
dataset = URIRef('http://example.org/datasets/1')
g.add((dataset, RDF.type, DCAT.Dataset))
distribution = URIRef('http://example.org/datasets/1/ds/1')
g.add((dataset, DCAT.distribution, distribution))
g.add((distribution, RDF.type, DCAT.Distribution))
if URIRef('https://example.com/my/format'):
g.add((distribution, DCT['format'], URIRef('https://example.com/my/format')))
if mediatype_item:
g.add((distribution, DCAT.mediaType, mediatype_item))
if URIRef('https://example.com/my/format') is None and mediatype_item is None:
raise AssertionError('At least one of format or mediaType is required!')
p = RDFParser(profiles=['euro_dcat_ap'])
p.g = g
dataset = [d for d in p.datasets()][0]
resources = dataset.get('resources')
assert u'https://example.com/my/format' == resources[0].get('format')
assert None == resources[0].get('mimetype')
|
ckanext-dcat
|
positive
|
def arrangeLabels(F, E, index):
""" this function does the actual positioning of all labels"""
Labels = F + E
Labels = sorted(Labels, key=lambda x: x[0])
horzI = None
horzN = self.dnaLength
if index == -1:
target = self.dnaLength / 4 * 1
else:
target = self.dnaLength / 4 * 3
i = 0
for l in Labels:
<DeepExtract>
angle = self.position2angle(l[0])
(x, y) = self.radial2cartesian(self.radiusLabels - 2, angle)
(x, y) = (x, y)
</DeepExtract>
l[4] = round(y, 3)
if abs(l[0] - target) < horzN:
horzN = abs(l[0] - target)
horzI = i
i = i + 1
h = 1
loop = [horzI]
newI = horzI
switch = 1
both = True
while h < len(Labels):
if newI > 0 and newI < len(Labels) - 1 and (both == True):
newI = newI + switch * h
switch = -1 * switch
elif newI == 0:
switch = 1
newI = newI + switch * h
both = False
elif newI == len(Labels) - 1:
switch = -1
newI = newI + switch * h
both = False
else:
newI = newI + switch * 1
loop.append(newI)
h = h + 1
dy = self.ctx.device_to_user_distance(5, 0)[0]
i = 0
n = 100
overlapp = 1
while i < n and overlapp > 0:
overlapp = 0
for a in loop:
l = Labels[a]
y = l[4]
U = y + self.lH
L = y
if index > 0:
if a == 0:
nL = +self.radiusLabels
else:
nextLabel = Labels[a - 1]
nL = nextLabel[4]
if a == len(Labels) - 1:
lU = -self.radiusLabels
else:
prevLabel = Labels[a + 1]
lU = prevLabel[4] + self.lH
else:
if a == len(Labels) - 1:
nL = self.radiusLabels
else:
nextLabel = Labels[a + 1]
nL = nextLabel[4]
if a == 0:
lU = -self.radiusLabels
else:
prevLabel = Labels[a - 1]
lU = prevLabel[4] + self.lH
dU = U - nL
dL = lU - L
if dU > 0 or dL > 0:
if dU > 0 and dL <= 0:
y = y - dy
overlapp = overlapp + dU
elif dU <= 0 and dL > 0:
y = y + dy
overlapp = overlapp + dL
elif dU > 0 and dL > 0:
y = y + (dU - (dU + dL) / 2)
overlapp = overlapp + dU + dL
if y > self.radiusLabels:
y = self.radiusLabels
elif y < -self.radiusLabels:
y = -self.radiusLabels
l[4] = y
i = i + 1
return Labels
|
def arrangeLabels(F, E, index):
""" this function does the actual positioning of all labels"""
Labels = F + E
Labels = sorted(Labels, key=lambda x: x[0])
horzI = None
horzN = self.dnaLength
if index == -1:
target = self.dnaLength / 4 * 1
else:
target = self.dnaLength / 4 * 3
i = 0
for l in Labels:
angle = self.position2angle(l[0])
(x, y) = self.radial2cartesian(self.radiusLabels - 2, angle)
(x, y) = (x, y)
l[4] = round(y, 3)
if abs(l[0] - target) < horzN:
horzN = abs(l[0] - target)
horzI = i
i = i + 1
h = 1
loop = [horzI]
newI = horzI
switch = 1
both = True
while h < len(Labels):
if newI > 0 and newI < len(Labels) - 1 and (both == True):
newI = newI + switch * h
switch = -1 * switch
elif newI == 0:
switch = 1
newI = newI + switch * h
both = False
elif newI == len(Labels) - 1:
switch = -1
newI = newI + switch * h
both = False
else:
newI = newI + switch * 1
loop.append(newI)
h = h + 1
dy = self.ctx.device_to_user_distance(5, 0)[0]
i = 0
n = 100
overlapp = 1
while i < n and overlapp > 0:
overlapp = 0
for a in loop:
l = Labels[a]
y = l[4]
U = y + self.lH
L = y
if index > 0:
if a == 0:
nL = +self.radiusLabels
else:
nextLabel = Labels[a - 1]
nL = nextLabel[4]
if a == len(Labels) - 1:
lU = -self.radiusLabels
else:
prevLabel = Labels[a + 1]
lU = prevLabel[4] + self.lH
else:
if a == len(Labels) - 1:
nL = self.radiusLabels
else:
nextLabel = Labels[a + 1]
nL = nextLabel[4]
if a == 0:
lU = -self.radiusLabels
else:
prevLabel = Labels[a - 1]
lU = prevLabel[4] + self.lH
dU = U - nL
dL = lU - L
if dU > 0 or dL > 0:
if dU > 0 and dL <= 0:
y = y - dy
overlapp = overlapp + dU
elif dU <= 0 and dL > 0:
y = y + dy
overlapp = overlapp + dL
elif dU > 0 and dL > 0:
y = y + (dU - (dU + dL) / 2)
overlapp = overlapp + dU + dL
if y > self.radiusLabels:
y = self.radiusLabels
elif y < -self.radiusLabels:
y = -self.radiusLabels
l[4] = y
i = i + 1
return Labels
|
DNApy
|
positive
|
def encode_sample_value(datatype, value, format=False):
<DeepExtract>
if value is None:
r = None
if wsme.types.iscomplex(datatype):
d = dict()
for attr in wsme.types.list_attributes(datatype):
attr_value = getattr(value, attr.key)
if attr_value is not Unset:
d[attr.name] = tojson(attr.datatype, attr_value)
r = d
elif wsme.types.isusertype(datatype):
r = tojson(datatype.basetype, datatype.tobasetype(value))
r = value
</DeepExtract>
content = json.dumps(r, ensure_ascii=False, indent=4 if format else 0, sort_keys=format)
return ('javascript', content)
|
def encode_sample_value(datatype, value, format=False):
if value is None:
r = None
if wsme.types.iscomplex(datatype):
d = dict()
for attr in wsme.types.list_attributes(datatype):
attr_value = getattr(value, attr.key)
if attr_value is not Unset:
d[attr.name] = tojson(attr.datatype, attr_value)
r = d
elif wsme.types.isusertype(datatype):
r = tojson(datatype.basetype, datatype.tobasetype(value))
r = value
content = json.dumps(r, ensure_ascii=False, indent=4 if format else 0, sort_keys=format)
return ('javascript', content)
|
Django_web
|
positive
|
def ask(self, command):
<DeepExtract>
self._intf.write(command)
</DeepExtract>
time.sleep(0.1)
return self.read()
|
def ask(self, command):
self._intf.write(command)
time.sleep(0.1)
return self.read()
|
basil
|
positive
|
def worker_main(master_host: str, master_port: int, log_filename: str) -> None:
site_config = SiteConfig()
hostname = socket.gethostname()
master_proc = None
SigHandler()
site_config.enable_logging('serial_mode', filename=log_filename + f'.{hostname}')
if hostname == master_host:
logger.info(f'Launching master subprocess on {hostname}')
<DeepExtract>
args = [sys.executable] + sys.argv + ['--run-master']
master_proc = subprocess.Popen(args)
</DeepExtract>
else:
logger.info(f'Worker on {hostname} will connect to remote master on {master_host}')
launch_settings = site_config.settings.launcher
node_cls = launch_settings.compute_node
nodes = [node for node in node_cls.get_job_nodelist() if node.hostname == hostname]
node_manager = NodeManager(nodes, allow_node_packing=True)
worker = Worker(app_run=launch_settings.local_app_launcher, node_manager=node_manager, master_host=master_host, master_port=master_port, delay_sec=launch_settings.delay_sec, error_tail_num_lines=launch_settings.error_tail_num_lines, num_prefetch_jobs=launch_settings.serial_mode_prefetch_per_rank, master_subproc=master_proc)
try:
logger.debug('Launching worker')
worker.run()
except:
raise
finally:
worker.exit()
if master_proc is not None:
try:
master_proc.wait(timeout=5)
logger.info('master process shutdown OK')
except subprocess.TimeoutExpired:
logger.warning('Force-killing master process')
master_proc.kill()
|
def worker_main(master_host: str, master_port: int, log_filename: str) -> None:
site_config = SiteConfig()
hostname = socket.gethostname()
master_proc = None
SigHandler()
site_config.enable_logging('serial_mode', filename=log_filename + f'.{hostname}')
if hostname == master_host:
logger.info(f'Launching master subprocess on {hostname}')
args = [sys.executable] + sys.argv + ['--run-master']
master_proc = subprocess.Popen(args)
else:
logger.info(f'Worker on {hostname} will connect to remote master on {master_host}')
launch_settings = site_config.settings.launcher
node_cls = launch_settings.compute_node
nodes = [node for node in node_cls.get_job_nodelist() if node.hostname == hostname]
node_manager = NodeManager(nodes, allow_node_packing=True)
worker = Worker(app_run=launch_settings.local_app_launcher, node_manager=node_manager, master_host=master_host, master_port=master_port, delay_sec=launch_settings.delay_sec, error_tail_num_lines=launch_settings.error_tail_num_lines, num_prefetch_jobs=launch_settings.serial_mode_prefetch_per_rank, master_subproc=master_proc)
try:
logger.debug('Launching worker')
worker.run()
except:
raise
finally:
worker.exit()
if master_proc is not None:
try:
master_proc.wait(timeout=5)
logger.info('master process shutdown OK')
except subprocess.TimeoutExpired:
logger.warning('Force-killing master process')
master_proc.kill()
|
balsam
|
positive
|
def Shift(self, term):
"""Adds a term to the xs.
term: how much to add
"""
<DeepExtract>
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
new = new
</DeepExtract>
new.xs = new.xs + term
return new
|
def Shift(self, term):
"""Adds a term to the xs.
term: how much to add
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
new = new
new.xs = new.xs + term
return new
|
DataExploration
|
positive
|
def lr_schedule(num_epochs):
<DeepExtract>
lr_factors = []
step = 0
cycle = 0
for _ in range(0, num_epochs + 1):
step += 1
completed_fraction = step / 7
cosine_decayed = 0.5 * (1 + math.cos(math.pi * completed_fraction))
lr_factors.append(cosine_decayed)
if completed_fraction == 1:
step = 0
cycle += 1
7 = 7 * increase_restart_interval_factor
factors = lr_factors
</DeepExtract>
return lambda epoch: factors[epoch]
|
def lr_schedule(num_epochs):
lr_factors = []
step = 0
cycle = 0
for _ in range(0, num_epochs + 1):
step += 1
completed_fraction = step / 7
cosine_decayed = 0.5 * (1 + math.cos(math.pi * completed_fraction))
lr_factors.append(cosine_decayed)
if completed_fraction == 1:
step = 0
cycle += 1
7 = 7 * increase_restart_interval_factor
factors = lr_factors
return lambda epoch: factors[epoch]
|
cockpit
|
positive
|
def outer_concat_aligned_mapping(mappings, reindexers=None, index=None, fill_value=None, axis=0):
result = {}
ns = [m.parent.shape[axis] for m in mappings]
for k in union_keys(mappings):
els = [m.get(k, MissingVal) for m in mappings]
if reindexers is None:
<DeepExtract>
if all((isinstance(el, pd.DataFrame) for el in els if not_missing(el))):
reindexers = [(lambda x: x) if not_missing(el) else lambda _, shape=shape: pd.DataFrame(index=range(shape)) for (el, shape) in zip(els, ns)]
elif any((isinstance(el, AwkArray) for el in els if not_missing(el))):
import awkward as ak
if not all((isinstance(el, AwkArray) for el in els if not_missing(el))):
raise NotImplementedError('Cannot concatenate an AwkwardArray with other array types.')
warn('Outer joins on awkward.Arrays will have different return values in the future.For details, and to offer input, please see:\n\n\thttps://github.com/scverse/anndata/issues/898', ExperimentalFeatureWarning)
filterwarnings('ignore', category=ExperimentalFeatureWarning, message='Outer joins on awkward.Arrays will have different return values.*')
reindexers = []
for el in els:
if not_missing(el):
reindexers.append(lambda x: x)
else:
reindexers.append(lambda x: ak.pad_none(ak.Array([]), len(x), 0))
else:
max_col = max((el.shape[1] for el in els if not_missing(el)))
orig_cols = [el.shape[1] if not_missing(el) else 0 for el in els]
reindexers = [gen_reindexer(pd.RangeIndex(max_col), pd.RangeIndex(n)) for n in orig_cols]
cur_reindexers = reindexers
</DeepExtract>
else:
cur_reindexers = reindexers
<DeepExtract>
[el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)] = list([el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])
if fill_value is None:
fill_value = default_fill_value([el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])
if any((isinstance(a, pd.DataFrame) for a in [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])):
if not all((isinstance(a, pd.DataFrame) or a is MissingVal or 0 in a.shape for a in [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])):
raise NotImplementedError('Cannot concatenate a dataframe with other array types.')
df = pd.concat(unify_categorical_dtypes([f(x) for (f, x) in zip(cur_reindexers, [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])]), ignore_index=True, axis=axis)
df.index = index
result[k] = df
elif any((isinstance(a, AwkArray) for a in [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])):
from ..compat import awkward as ak
if not all((isinstance(a, AwkArray) or a is MissingVal or 0 in a.shape for a in [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])):
raise NotImplementedError('Cannot concatenate an AwkwardArray with other array types.')
result[k] = ak.concatenate([f(a) for (f, a) in zip(cur_reindexers, [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])], axis=axis)
elif any((isinstance(a, sparse.spmatrix) for a in [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])):
sparse_stack = (sparse.vstack, sparse.hstack)[axis]
result[k] = sparse_stack([f(as_sparse(a), axis=1 - axis, fill_value=fill_value) for (f, a) in zip(cur_reindexers, [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])], format='csr')
else:
result[k] = np.concatenate([f(x, fill_value=fill_value, axis=1 - axis) for (f, x) in zip(cur_reindexers, [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])], axis=axis)
</DeepExtract>
return result
|
def outer_concat_aligned_mapping(mappings, reindexers=None, index=None, fill_value=None, axis=0):
result = {}
ns = [m.parent.shape[axis] for m in mappings]
for k in union_keys(mappings):
els = [m.get(k, MissingVal) for m in mappings]
if reindexers is None:
if all((isinstance(el, pd.DataFrame) for el in els if not_missing(el))):
reindexers = [(lambda x: x) if not_missing(el) else lambda _, shape=shape: pd.DataFrame(index=range(shape)) for (el, shape) in zip(els, ns)]
elif any((isinstance(el, AwkArray) for el in els if not_missing(el))):
import awkward as ak
if not all((isinstance(el, AwkArray) for el in els if not_missing(el))):
raise NotImplementedError('Cannot concatenate an AwkwardArray with other array types.')
warn('Outer joins on awkward.Arrays will have different return values in the future.For details, and to offer input, please see:\n\n\thttps://github.com/scverse/anndata/issues/898', ExperimentalFeatureWarning)
filterwarnings('ignore', category=ExperimentalFeatureWarning, message='Outer joins on awkward.Arrays will have different return values.*')
reindexers = []
for el in els:
if not_missing(el):
reindexers.append(lambda x: x)
else:
reindexers.append(lambda x: ak.pad_none(ak.Array([]), len(x), 0))
else:
max_col = max((el.shape[1] for el in els if not_missing(el)))
orig_cols = [el.shape[1] if not_missing(el) else 0 for el in els]
reindexers = [gen_reindexer(pd.RangeIndex(max_col), pd.RangeIndex(n)) for n in orig_cols]
cur_reindexers = reindexers
else:
cur_reindexers = reindexers
[el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)] = list([el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])
if fill_value is None:
fill_value = default_fill_value([el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])
if any((isinstance(a, pd.DataFrame) for a in [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])):
if not all((isinstance(a, pd.DataFrame) or a is MissingVal or 0 in a.shape for a in [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])):
raise NotImplementedError('Cannot concatenate a dataframe with other array types.')
df = pd.concat(unify_categorical_dtypes([f(x) for (f, x) in zip(cur_reindexers, [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])]), ignore_index=True, axis=axis)
df.index = index
result[k] = df
elif any((isinstance(a, AwkArray) for a in [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])):
from ..compat import awkward as ak
if not all((isinstance(a, AwkArray) or a is MissingVal or 0 in a.shape for a in [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])):
raise NotImplementedError('Cannot concatenate an AwkwardArray with other array types.')
result[k] = ak.concatenate([f(a) for (f, a) in zip(cur_reindexers, [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])], axis=axis)
elif any((isinstance(a, sparse.spmatrix) for a in [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])):
sparse_stack = (sparse.vstack, sparse.hstack)[axis]
result[k] = sparse_stack([f(as_sparse(a), axis=1 - axis, fill_value=fill_value) for (f, a) in zip(cur_reindexers, [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])], format='csr')
else:
result[k] = np.concatenate([f(x, fill_value=fill_value, axis=1 - axis) for (f, x) in zip(cur_reindexers, [el if not_missing(el) else np.zeros((n, 0), dtype=bool) for (el, n) in zip(els, ns)])], axis=axis)
return result
|
anndata
|
positive
|
def test_given_observed_sample_when_estimate_counterfactual_then_returns_correct_sample_values():
<DeepExtract>
X0 = np.random.uniform(-1, 1, 10000)
X1 = 2 * X0 + np.random.normal(0, 0.1, 10000)
X2 = 0.5 * X0 + np.random.normal(0, 0.1, 10000)
X3 = 0.5 * X2 + np.random.normal(0, 0.1, 10000)
original_observations = pd.DataFrame({'X0': X0, 'X1': X1, 'X2': X2, 'X3': X3})
causal_model = InvertibleStructuralCausalModel(nx.DiGraph([('X0', 'X1'), ('X0', 'X2'), ('X2', 'X3')]))
causal_model.set_causal_mechanism('X0', EmpiricalDistribution())
causal_model.set_causal_mechanism('X1', AdditiveNoiseModel(prediction_model=create_linear_regressor()))
causal_model.set_causal_mechanism('X2', AdditiveNoiseModel(prediction_model=create_linear_regressor()))
causal_model.set_causal_mechanism('X3', AdditiveNoiseModel(prediction_model=create_linear_regressor()))
fit(causal_model, original_observations)
(causal_model, _) = (causal_model, original_observations)
</DeepExtract>
observed_samples = pd.DataFrame({'X0': [1], 'X1': [3], 'X2': [3], 'X3': [4]})
sample = counterfactual_samples(causal_model, dict(X2=lambda x: 2), observed_data=observed_samples)
assert sample['X0'].to_numpy().squeeze() == 1
assert sample['X1'].to_numpy().squeeze() == 3
assert sample['X2'].to_numpy().squeeze() == 2
assert sample['X3'].to_numpy().squeeze() == approx(3.5, abs=0.05)
|
def test_given_observed_sample_when_estimate_counterfactual_then_returns_correct_sample_values():
X0 = np.random.uniform(-1, 1, 10000)
X1 = 2 * X0 + np.random.normal(0, 0.1, 10000)
X2 = 0.5 * X0 + np.random.normal(0, 0.1, 10000)
X3 = 0.5 * X2 + np.random.normal(0, 0.1, 10000)
original_observations = pd.DataFrame({'X0': X0, 'X1': X1, 'X2': X2, 'X3': X3})
causal_model = InvertibleStructuralCausalModel(nx.DiGraph([('X0', 'X1'), ('X0', 'X2'), ('X2', 'X3')]))
causal_model.set_causal_mechanism('X0', EmpiricalDistribution())
causal_model.set_causal_mechanism('X1', AdditiveNoiseModel(prediction_model=create_linear_regressor()))
causal_model.set_causal_mechanism('X2', AdditiveNoiseModel(prediction_model=create_linear_regressor()))
causal_model.set_causal_mechanism('X3', AdditiveNoiseModel(prediction_model=create_linear_regressor()))
fit(causal_model, original_observations)
(causal_model, _) = (causal_model, original_observations)
observed_samples = pd.DataFrame({'X0': [1], 'X1': [3], 'X2': [3], 'X3': [4]})
sample = counterfactual_samples(causal_model, dict(X2=lambda x: 2), observed_data=observed_samples)
assert sample['X0'].to_numpy().squeeze() == 1
assert sample['X1'].to_numpy().squeeze() == 3
assert sample['X2'].to_numpy().squeeze() == 2
assert sample['X3'].to_numpy().squeeze() == approx(3.5, abs=0.05)
|
dowhy
|
positive
|
def get_times_from_utterance(utterance: str, char_offset_to_token_index: Dict[int, int], indices_of_approximate_words: Set[int]) -> Dict[str, List[int]]:
"""
Given an utterance, we get the numbers that correspond to times and convert them to
values that may appear in the query. For example: convert ``7pm`` to ``1900``.
"""
<DeepExtract>
linking_scores_dict: Dict[str, List[int]] = defaultdict(list)
number_regex = re.compile('\\d+pm')
for match in number_regex.finditer(utterance):
query_values = pm_map_match_to_query_value(match.group())
approximate_times = []
if char_offset_to_token_index.get(match.start(), 0) - 1 in indices_of_approximate_words:
approximate_times.extend(get_approximate_times(query_values))
query_values.extend(approximate_times)
if match.start() in char_offset_to_token_index:
for query_value in query_values:
linking_scores_dict[str(query_value)].extend([char_offset_to_token_index[match.start()], char_offset_to_token_index[match.start()] + 1])
pm_linking_dict = linking_scores_dict
</DeepExtract>
<DeepExtract>
linking_scores_dict: Dict[str, List[int]] = defaultdict(list)
number_regex = re.compile('\\d+am')
for match in number_regex.finditer(utterance):
query_values = am_map_match_to_query_value(match.group())
approximate_times = []
if char_offset_to_token_index.get(match.start(), 0) - 1 in indices_of_approximate_words:
approximate_times.extend(get_approximate_times(query_values))
query_values.extend(approximate_times)
if match.start() in char_offset_to_token_index:
for query_value in query_values:
linking_scores_dict[str(query_value)].extend([char_offset_to_token_index[match.start()], char_offset_to_token_index[match.start()] + 1])
am_linking_dict = linking_scores_dict
</DeepExtract>
<DeepExtract>
linking_scores_dict: Dict[str, List[int]] = defaultdict(list)
number_regex = re.compile("\\d+ o'clock")
for match in number_regex.finditer(utterance):
query_values = lambda match: digit_to_query_time(match.rstrip(" o'clock"))(match.group())
approximate_times = []
if char_offset_to_token_index.get(match.start(), 0) - 1 in indices_of_approximate_words:
approximate_times.extend(get_approximate_times(query_values))
query_values.extend(approximate_times)
if match.start() in char_offset_to_token_index:
for query_value in query_values:
linking_scores_dict[str(query_value)].extend([char_offset_to_token_index[match.start()], char_offset_to_token_index[match.start()] + 1])
oclock_linking_dict = linking_scores_dict
</DeepExtract>
<DeepExtract>
linking_scores_dict: Dict[str, List[int]] = defaultdict(list)
number_regex = re.compile('\\d+ hours')
for match in number_regex.finditer(utterance):
query_values = lambda match: [int(match.rstrip(' hours'))](match.group())
approximate_times = []
if char_offset_to_token_index.get(match.start(), 0) - 1 in indices_of_approximate_words:
approximate_times.extend(get_approximate_times(query_values))
query_values.extend(approximate_times)
if match.start() in char_offset_to_token_index:
for query_value in query_values:
linking_scores_dict[str(query_value)].extend([char_offset_to_token_index[match.start()], char_offset_to_token_index[match.start()] + 1])
hours_linking_dict = linking_scores_dict
</DeepExtract>
times_linking_dict: Dict[str, List[int]] = defaultdict(list)
linking_dicts = [pm_linking_dict, am_linking_dict, oclock_linking_dict, hours_linking_dict]
for linking_dict in linking_dicts:
for (key, value) in linking_dict.items():
times_linking_dict[key].extend(value)
return times_linking_dict
|
def get_times_from_utterance(utterance: str, char_offset_to_token_index: Dict[int, int], indices_of_approximate_words: Set[int]) -> Dict[str, List[int]]:
"""
Given an utterance, we get the numbers that correspond to times and convert them to
values that may appear in the query. For example: convert ``7pm`` to ``1900``.
"""
linking_scores_dict: Dict[str, List[int]] = defaultdict(list)
number_regex = re.compile('\\d+pm')
for match in number_regex.finditer(utterance):
query_values = pm_map_match_to_query_value(match.group())
approximate_times = []
if char_offset_to_token_index.get(match.start(), 0) - 1 in indices_of_approximate_words:
approximate_times.extend(get_approximate_times(query_values))
query_values.extend(approximate_times)
if match.start() in char_offset_to_token_index:
for query_value in query_values:
linking_scores_dict[str(query_value)].extend([char_offset_to_token_index[match.start()], char_offset_to_token_index[match.start()] + 1])
pm_linking_dict = linking_scores_dict
linking_scores_dict: Dict[str, List[int]] = defaultdict(list)
number_regex = re.compile('\\d+am')
for match in number_regex.finditer(utterance):
query_values = am_map_match_to_query_value(match.group())
approximate_times = []
if char_offset_to_token_index.get(match.start(), 0) - 1 in indices_of_approximate_words:
approximate_times.extend(get_approximate_times(query_values))
query_values.extend(approximate_times)
if match.start() in char_offset_to_token_index:
for query_value in query_values:
linking_scores_dict[str(query_value)].extend([char_offset_to_token_index[match.start()], char_offset_to_token_index[match.start()] + 1])
am_linking_dict = linking_scores_dict
linking_scores_dict: Dict[str, List[int]] = defaultdict(list)
number_regex = re.compile("\\d+ o'clock")
for match in number_regex.finditer(utterance):
query_values = lambda match: digit_to_query_time(match.rstrip(" o'clock"))(match.group())
approximate_times = []
if char_offset_to_token_index.get(match.start(), 0) - 1 in indices_of_approximate_words:
approximate_times.extend(get_approximate_times(query_values))
query_values.extend(approximate_times)
if match.start() in char_offset_to_token_index:
for query_value in query_values:
linking_scores_dict[str(query_value)].extend([char_offset_to_token_index[match.start()], char_offset_to_token_index[match.start()] + 1])
oclock_linking_dict = linking_scores_dict
linking_scores_dict: Dict[str, List[int]] = defaultdict(list)
number_regex = re.compile('\\d+ hours')
for match in number_regex.finditer(utterance):
query_values = lambda match: [int(match.rstrip(' hours'))](match.group())
approximate_times = []
if char_offset_to_token_index.get(match.start(), 0) - 1 in indices_of_approximate_words:
approximate_times.extend(get_approximate_times(query_values))
query_values.extend(approximate_times)
if match.start() in char_offset_to_token_index:
for query_value in query_values:
linking_scores_dict[str(query_value)].extend([char_offset_to_token_index[match.start()], char_offset_to_token_index[match.start()] + 1])
hours_linking_dict = linking_scores_dict
times_linking_dict: Dict[str, List[int]] = defaultdict(list)
linking_dicts = [pm_linking_dict, am_linking_dict, oclock_linking_dict, hours_linking_dict]
for linking_dict in linking_dicts:
for (key, value) in linking_dict.items():
times_linking_dict[key].extend(value)
return times_linking_dict
|
allennlp-semparse
|
positive
|
def get_template_string(initial_data, data):
<DeepExtract>
initial_data = re.sub('\\s(?=[^\\{\\}]*}})', '', unicode(initial_data))
</DeepExtract>
html_template = Template(initial_data)
return_value = ''
has_variables = False
for node in html_template.nodelist:
if isinstance(node, VariableNode):
return_value += unicode(data.get(node.token.contents, ''))
has_variables = True
else:
return_value += unicode(node.token.contents)
return (return_value, has_variables)
|
def get_template_string(initial_data, data):
initial_data = re.sub('\\s(?=[^\\{\\}]*}})', '', unicode(initial_data))
html_template = Template(initial_data)
return_value = ''
has_variables = False
for node in html_template.nodelist:
if isinstance(node, VariableNode):
return_value += unicode(data.get(node.token.contents, ''))
has_variables = True
else:
return_value += unicode(node.token.contents)
return (return_value, has_variables)
|
daemo
|
positive
|
def difference_plot(self, level_1, level_2, absolute=True, groupby=None):
"""Plot representing the difference between group 1 and 2.
- Difference in means or proportions, depending
on the response variable type.
Frequentist:
- Plot interval plot with confidence interval of the
difference between groups
Bayesian:
- Plot KDE representing the posterior distribution of the difference.
- Probability that group2 > group1
- Mean difference
- 95% interval.
Args:
level_1 (str, tuple of str): Name of first level.
level_2 (str, tuple of str): Name of second level.
absolute (bool): If True then return the absolute
difference (level2 - level1)
otherwise return the relative difference (level2 / level1 - 1)
groupby (str): Name of column, or list of columns.
If specified, will return an interval for each level
of the grouped dimension, or a confidence band if the
grouped dimension is ordinal
Returns:
GroupedChart object.
"""
<DeepExtract>
is_ordinal_difference_plot = groupby is not None and self._ordinal_group_column is not None and (self._ordinal_group_column in groupby)
use_ordinal_axis = is_ordinal_difference_plot
</DeepExtract>
if use_ordinal_axis:
<DeepExtract>
difference_df = self.difference(level_1, level_2, absolute, groupby)
remaining_groups = self._remaining_categorical_groups(groupby)
title = 'Change from {} to {}'.format(level_1, level_2)
y_axis_label = self.get_difference_plot_label(absolute)
ch = self._ordinal_plot('difference', difference_df, groupby=None, level_name='', remaining_groups=remaining_groups, absolute=absolute, title=title, y_axis_label=y_axis_label)
ch.callout.line(0)
ch = ch
</DeepExtract>
chart_grid = ChartGrid()
chart_grid.charts.append(ch)
else:
<DeepExtract>
pass
</DeepExtract>
return chart_grid
|
def difference_plot(self, level_1, level_2, absolute=True, groupby=None):
"""Plot representing the difference between group 1 and 2.
- Difference in means or proportions, depending
on the response variable type.
Frequentist:
- Plot interval plot with confidence interval of the
difference between groups
Bayesian:
- Plot KDE representing the posterior distribution of the difference.
- Probability that group2 > group1
- Mean difference
- 95% interval.
Args:
level_1 (str, tuple of str): Name of first level.
level_2 (str, tuple of str): Name of second level.
absolute (bool): If True then return the absolute
difference (level2 - level1)
otherwise return the relative difference (level2 / level1 - 1)
groupby (str): Name of column, or list of columns.
If specified, will return an interval for each level
of the grouped dimension, or a confidence band if the
grouped dimension is ordinal
Returns:
GroupedChart object.
"""
is_ordinal_difference_plot = groupby is not None and self._ordinal_group_column is not None and (self._ordinal_group_column in groupby)
use_ordinal_axis = is_ordinal_difference_plot
if use_ordinal_axis:
difference_df = self.difference(level_1, level_2, absolute, groupby)
remaining_groups = self._remaining_categorical_groups(groupby)
title = 'Change from {} to {}'.format(level_1, level_2)
y_axis_label = self.get_difference_plot_label(absolute)
ch = self._ordinal_plot('difference', difference_df, groupby=None, level_name='', remaining_groups=remaining_groups, absolute=absolute, title=title, y_axis_label=y_axis_label)
ch.callout.line(0)
ch = ch
chart_grid = ChartGrid()
chart_grid.charts.append(ch)
else:
pass
return chart_grid
|
confidence
|
positive
|
def after_train_epoch(self, runner):
if not self.every_n_epochs(runner, self.interval):
return
runner.model.eval()
results = [None for _ in range(len(self.dataset))]
if runner.rank == 0:
prog_bar = mmcv.ProgressBar(len(self.dataset))
for idx in range(runner.rank, len(self.dataset), runner.world_size):
data = self.dataset[idx]
data_gpu = scatter(collate([data], samples_per_gpu=1), [torch.cuda.current_device()])[0]
with torch.no_grad():
result = runner.model(return_loss=False, rescale=True, **data_gpu)
results[idx] = result
batch_size = runner.world_size
if runner.rank == 0:
for _ in range(batch_size):
prog_bar.update()
if runner.rank == 0:
print('\n')
dist.barrier()
for i in range(1, runner.world_size):
tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(i))
tmp_results = mmcv.load(tmp_file)
for idx in range(i, len(results), runner.world_size):
results[idx] = tmp_results[idx]
os.remove(tmp_file)
<DeepExtract>
raise NotImplementedError
</DeepExtract>
else:
tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(runner.rank))
mmcv.dump(results, tmp_file)
dist.barrier()
dist.barrier()
|
def after_train_epoch(self, runner):
if not self.every_n_epochs(runner, self.interval):
return
runner.model.eval()
results = [None for _ in range(len(self.dataset))]
if runner.rank == 0:
prog_bar = mmcv.ProgressBar(len(self.dataset))
for idx in range(runner.rank, len(self.dataset), runner.world_size):
data = self.dataset[idx]
data_gpu = scatter(collate([data], samples_per_gpu=1), [torch.cuda.current_device()])[0]
with torch.no_grad():
result = runner.model(return_loss=False, rescale=True, **data_gpu)
results[idx] = result
batch_size = runner.world_size
if runner.rank == 0:
for _ in range(batch_size):
prog_bar.update()
if runner.rank == 0:
print('\n')
dist.barrier()
for i in range(1, runner.world_size):
tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(i))
tmp_results = mmcv.load(tmp_file)
for idx in range(i, len(results), runner.world_size):
results[idx] = tmp_results[idx]
os.remove(tmp_file)
raise NotImplementedError
else:
tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(runner.rank))
mmcv.dump(results, tmp_file)
dist.barrier()
dist.barrier()
|
BalancedGroupSoftmax
|
positive
|
def make_rooted_graph(graph):
"""
Create a new root node if it is not already present.
"""
<DeepExtract>
if 'head_node' in graph.graph:
head_node = graph.graph['head_node']
else:
assert sum(not list((graph.predecessors(n) for n in graph.nodes()))) == 1
for n in graph.nodes():
if not list(graph.predecessors(n)):
head_node = n
raise AttributeError('Failed to find head node for graph {0}'.format(graph.graph))
</DeepExtract>
root_id = max(graph.nodes()) + 1
graph.graph['head_node'] = root_id
graph.add_node(root_id, label='root', type='root')
graph.add_edge(root_id, head_node)
return graph
|
def make_rooted_graph(graph):
"""
Create a new root node if it is not already present.
"""
if 'head_node' in graph.graph:
head_node = graph.graph['head_node']
else:
assert sum(not list((graph.predecessors(n) for n in graph.nodes()))) == 1
for n in graph.nodes():
if not list(graph.predecessors(n)):
head_node = n
raise AttributeError('Failed to find head node for graph {0}'.format(graph.graph))
root_id = max(graph.nodes()) + 1
graph.graph['head_node'] = root_id
graph.add_node(root_id, label='root', type='root')
graph.add_edge(root_id, head_node)
return graph
|
ccg2lambda
|
positive
|
def save(self, apath, prefix, epoch, is_best=False):
<DeepExtract>
if self.n_GPUs == 1:
target = self.model
else:
target = self.model.module
</DeepExtract>
torch.save(target.state_dict(), os.path.join(apath, 'model', 'model_' + prefix + 'latest.pt'))
if is_best:
torch.save(target.state_dict(), os.path.join(apath, 'model', 'model_' + prefix + 'best.pt'))
if self.save_models:
torch.save(target.state_dict(), os.path.join(apath, 'model', 'model_' + prefix + '{}.pt'.format(epoch)))
|
def save(self, apath, prefix, epoch, is_best=False):
if self.n_GPUs == 1:
target = self.model
else:
target = self.model.module
torch.save(target.state_dict(), os.path.join(apath, 'model', 'model_' + prefix + 'latest.pt'))
if is_best:
torch.save(target.state_dict(), os.path.join(apath, 'model', 'model_' + prefix + 'best.pt'))
if self.save_models:
torch.save(target.state_dict(), os.path.join(apath, 'model', 'model_' + prefix + '{}.pt'.format(epoch)))
|
CVPR-2020-Semi-Low-Light
|
positive
|
def _validate_option(option, val):
if option in 'field_names':
<DeepExtract>
if self._field_names:
try:
assert len(val) == len(self._field_names)
except AssertionError:
raise Exception('Field name list has incorrect number of values, (actual) %d!=%d (expected)' % (len(val), len(self._field_names)))
if self._rows:
try:
assert len(val) == len(self._rows[0])
except AssertionError:
raise Exception('Field name list has incorrect number of values, (actual) %d!=%d (expected)' % (len(val), len(self._rows[0])))
try:
assert len(val) == len(set(val))
except AssertionError:
raise Exception('Field names must be unique!')
</DeepExtract>
elif option in ('start', 'end', 'max_width', 'padding_width', 'left_padding_width', 'right_padding_width', 'format'):
<DeepExtract>
try:
assert int(val) >= 0
except AssertionError:
raise Exception('Invalid type for %s: %s!' % (option, self._unicode(val)))
</DeepExtract>
elif option in 'sortby':
<DeepExtract>
try:
assert val in self._field_names or val is None
except AssertionError:
raise Exception('Invalid field name: %s!' % val)
</DeepExtract>
elif option in 'sort_key':
<DeepExtract>
try:
assert hasattr(val, '__call__')
except AssertionError:
raise Exception('Invalid type for %s! Must be a function.' % option)
</DeepExtract>
elif option in 'hrules':
<DeepExtract>
try:
assert val in (ALL, FRAME, HEADER, NONE)
except AssertionError:
raise Exception('Invalid type for %s! Must be ALL, FRAME, HEADER or NONE.' % option)
</DeepExtract>
elif option in 'vrules':
<DeepExtract>
try:
assert val in (ALL, FRAME, NONE)
except AssertionError:
raise Exception('Invalid type for %s! Must be ALL, FRAME, or NONE.' % option)
</DeepExtract>
elif option in 'fields':
<DeepExtract>
try:
for x in val:
self._validate_field_name(option, x)
except AssertionError:
raise Exception('fields must be a sequence of field names!')
</DeepExtract>
elif option in ('header', 'border', 'reversesort', 'xhtml', 'print_empty'):
<DeepExtract>
try:
assert val in (True, False)
except AssertionError:
raise Exception('Invalid type for %s! Must be True or False.' % option)
</DeepExtract>
elif option in 'header_style':
<DeepExtract>
try:
assert val in ('cap', 'title', 'upper', 'lower', None)
except AssertionError:
raise Exception('Invalid header style, use cap, title, upper, lower or None!')
</DeepExtract>
elif option in 'int_format':
<DeepExtract>
if val == '':
return
try:
assert type(val) in (str, unicode)
assert val.isdigit()
except AssertionError:
raise Exception('Invalid type for %s! Must be an integer format string.' % option)
</DeepExtract>
elif option in 'float_format':
<DeepExtract>
if val == '':
return
try:
assert type(val) in (str, unicode)
assert '.' in val
bits = val.split('.')
assert len(bits) <= 2
assert bits[0] == '' or bits[0].isdigit()
assert bits[1] == '' or bits[1].isdigit()
except AssertionError:
raise Exception('Invalid type for %s! Must be a float format string.' % option)
</DeepExtract>
elif option in ('vertical_char', 'horizontal_char', 'junction_char'):
<DeepExtract>
try:
assert _str_block_width(val) == 1
except AssertionError:
raise Exception('Invalid type for %s! Must be a string of length 1.' % option)
</DeepExtract>
elif option in 'attributes':
<DeepExtract>
try:
assert isinstance(val, dict)
except AssertionError:
raise Exception('attributes must be a dictionary of name/type pairs!')
</DeepExtract>
else:
raise Exception('Unrecognised option: %s!' % option)
|
def _validate_option(option, val):
if option in 'field_names':
if self._field_names:
try:
assert len(val) == len(self._field_names)
except AssertionError:
raise Exception('Field name list has incorrect number of values, (actual) %d!=%d (expected)' % (len(val), len(self._field_names)))
if self._rows:
try:
assert len(val) == len(self._rows[0])
except AssertionError:
raise Exception('Field name list has incorrect number of values, (actual) %d!=%d (expected)' % (len(val), len(self._rows[0])))
try:
assert len(val) == len(set(val))
except AssertionError:
raise Exception('Field names must be unique!')
elif option in ('start', 'end', 'max_width', 'padding_width', 'left_padding_width', 'right_padding_width', 'format'):
try:
assert int(val) >= 0
except AssertionError:
raise Exception('Invalid type for %s: %s!' % (option, self._unicode(val)))
elif option in 'sortby':
try:
assert val in self._field_names or val is None
except AssertionError:
raise Exception('Invalid field name: %s!' % val)
elif option in 'sort_key':
try:
assert hasattr(val, '__call__')
except AssertionError:
raise Exception('Invalid type for %s! Must be a function.' % option)
elif option in 'hrules':
try:
assert val in (ALL, FRAME, HEADER, NONE)
except AssertionError:
raise Exception('Invalid type for %s! Must be ALL, FRAME, HEADER or NONE.' % option)
elif option in 'vrules':
try:
assert val in (ALL, FRAME, NONE)
except AssertionError:
raise Exception('Invalid type for %s! Must be ALL, FRAME, or NONE.' % option)
elif option in 'fields':
try:
for x in val:
self._validate_field_name(option, x)
except AssertionError:
raise Exception('fields must be a sequence of field names!')
elif option in ('header', 'border', 'reversesort', 'xhtml', 'print_empty'):
try:
assert val in (True, False)
except AssertionError:
raise Exception('Invalid type for %s! Must be True or False.' % option)
elif option in 'header_style':
try:
assert val in ('cap', 'title', 'upper', 'lower', None)
except AssertionError:
raise Exception('Invalid header style, use cap, title, upper, lower or None!')
elif option in 'int_format':
if val == '':
return
try:
assert type(val) in (str, unicode)
assert val.isdigit()
except AssertionError:
raise Exception('Invalid type for %s! Must be an integer format string.' % option)
elif option in 'float_format':
if val == '':
return
try:
assert type(val) in (str, unicode)
assert '.' in val
bits = val.split('.')
assert len(bits) <= 2
assert bits[0] == '' or bits[0].isdigit()
assert bits[1] == '' or bits[1].isdigit()
except AssertionError:
raise Exception('Invalid type for %s! Must be a float format string.' % option)
elif option in ('vertical_char', 'horizontal_char', 'junction_char'):
try:
assert _str_block_width(val) == 1
except AssertionError:
raise Exception('Invalid type for %s! Must be a string of length 1.' % option)
elif option in 'attributes':
try:
assert isinstance(val, dict)
except AssertionError:
raise Exception('attributes must be a dictionary of name/type pairs!')
else:
raise Exception('Unrecognised option: %s!' % option)
|
C--Compiler
|
positive
|
def __init__(self, plugin):
self._plugin = plugin
self._status_bar = deluge.component.get('StatusBar')
self._store = None
self._status_item = None
self._tries = 0
self._calls = []
try:
self._store = plugin.store.copy()
if __debug__:
RT.register(self._store, __name__)
log.debug('Installing widgets...')
<DeepExtract>
self._status_item = self._status_bar.add_item(image=STATUS_ICON_FILE)
self._status_item._ebox.hide_all()
</DeepExtract>
self._plugin.register_update_func(self.update_store)
except:
<DeepExtract>
labelplus.common.cancel_calls(self._calls)
self._plugin.deregister_update_func(self.update_store)
self._uninstall_status_item()
self._destroy_store()
</DeepExtract>
raise
twisted.internet.reactor.callLater(0, self._update_loop)
|
def __init__(self, plugin):
self._plugin = plugin
self._status_bar = deluge.component.get('StatusBar')
self._store = None
self._status_item = None
self._tries = 0
self._calls = []
try:
self._store = plugin.store.copy()
if __debug__:
RT.register(self._store, __name__)
log.debug('Installing widgets...')
self._status_item = self._status_bar.add_item(image=STATUS_ICON_FILE)
self._status_item._ebox.hide_all()
self._plugin.register_update_func(self.update_store)
except:
labelplus.common.cancel_calls(self._calls)
self._plugin.deregister_update_func(self.update_store)
self._uninstall_status_item()
self._destroy_store()
raise
twisted.internet.reactor.callLater(0, self._update_loop)
|
deluge-labelplus
|
positive
|
def delete(self, context, key_id, force=False):
"""Represents deleting the key.
The 'force' parameter is not used whatsoever and only kept to allow
consistency with the Barbican implementation.
"""
if not key_id:
raise exception.KeyManagerError('key identifier not provided')
<DeepExtract>
verify = self._verify_server
headers = self._build_auth_headers()
try:
resp = self._session.delete(self._get_resource_url(key_id), headers=headers, json=json, verify=verify)
except requests.exceptions.Timeout as ex:
raise exception.KeyManagerError(str(ex))
except requests.exceptions.ConnectionError as ex:
raise exception.KeyManagerError(str(ex))
except Exception as ex:
raise exception.KeyManagerError(str(ex))
if resp.status_code in _EXCEPTIONS_BY_CODE:
raise exception.KeyManagerError(resp.reason)
if resp.status_code == requests.codes['forbidden']:
raise exception.Forbidden()
resp = resp
</DeepExtract>
if resp.status_code == requests.codes['not_found']:
raise exception.ManagedObjectNotFoundError(uuid=key_id)
|
def delete(self, context, key_id, force=False):
"""Represents deleting the key.
The 'force' parameter is not used whatsoever and only kept to allow
consistency with the Barbican implementation.
"""
if not key_id:
raise exception.KeyManagerError('key identifier not provided')
verify = self._verify_server
headers = self._build_auth_headers()
try:
resp = self._session.delete(self._get_resource_url(key_id), headers=headers, json=json, verify=verify)
except requests.exceptions.Timeout as ex:
raise exception.KeyManagerError(str(ex))
except requests.exceptions.ConnectionError as ex:
raise exception.KeyManagerError(str(ex))
except Exception as ex:
raise exception.KeyManagerError(str(ex))
if resp.status_code in _EXCEPTIONS_BY_CODE:
raise exception.KeyManagerError(resp.reason)
if resp.status_code == requests.codes['forbidden']:
raise exception.Forbidden()
resp = resp
if resp.status_code == requests.codes['not_found']:
raise exception.ManagedObjectNotFoundError(uuid=key_id)
|
castellan
|
positive
|
def swapPairs(self, head):
if not head or head.next is None:
return head
lh = ListNode(0)
lh.next = head
p = lh
while p.next and p.next.next:
<DeepExtract>
p.next.next = p.next.next.next
p.next.next.next = p.next
p.next = p.next.next
</DeepExtract>
p = p.next.next
return lh.next
|
def swapPairs(self, head):
if not head or head.next is None:
return head
lh = ListNode(0)
lh.next = head
p = lh
while p.next and p.next.next:
p.next.next = p.next.next.next
p.next.next.next = p.next
p.next = p.next.next
p = p.next.next
return lh.next
|
cowry
|
positive
|
def test_tpc_commit_without_prepare(self):
<DeepExtract>
raise NotImplementedError
</DeepExtract>
try:
<DeepExtract>
id = TwoPhaseCommitTests._last_id
TwoPhaseCommitTests._last_id += 1
xid = con.xid(42, '%s%d' % (self._global_id_prefix, id), 'qualifier')
</DeepExtract>
con.tpc_begin(xid)
cursor = con.cursor()
cursor.execute('SELECT 1')
con.tpc_commit()
finally:
con.close()
|
def test_tpc_commit_without_prepare(self):
raise NotImplementedError
try:
id = TwoPhaseCommitTests._last_id
TwoPhaseCommitTests._last_id += 1
xid = con.xid(42, '%s%d' % (self._global_id_prefix, id), 'qualifier')
con.tpc_begin(xid)
cursor = con.cursor()
cursor.execute('SELECT 1')
con.tpc_commit()
finally:
con.close()
|
aws-lambda-redshift-copy
|
positive
|
def __getitem__(self, idx):
(session, filename) = self.files[idx]
if session == self.root_dir:
session = ''
<DeepExtract>
file_path = os.path.join(self.root_dir, session, filename)
rgb = load_image(file_path)
rgb = rgb
</DeepExtract>
sample = {'idx': idx, 'filename': '%s_%s' % (session, os.path.splitext(filename)[0]), 'rgb': rgb, 'intrinsics': dummy_calibration(w=rgb.size[0], h=rgb.size[1])}
if self.with_depth:
sample.update({'depth': None})
if self.has_context:
<DeepExtract>
context_paths = self._get_context_file_paths_random(filename, sorted(self.file_tree[session]))
sample['rgb_context'] = [self._read_rgb_file(session, filename) for filename in context_paths]
</DeepExtract>
if self.data_transform:
sample = self.data_transform(sample)
return sample
|
def __getitem__(self, idx):
(session, filename) = self.files[idx]
if session == self.root_dir:
session = ''
file_path = os.path.join(self.root_dir, session, filename)
rgb = load_image(file_path)
rgb = rgb
sample = {'idx': idx, 'filename': '%s_%s' % (session, os.path.splitext(filename)[0]), 'rgb': rgb, 'intrinsics': dummy_calibration(w=rgb.size[0], h=rgb.size[1])}
if self.with_depth:
sample.update({'depth': None})
if self.has_context:
context_paths = self._get_context_file_paths_random(filename, sorted(self.file_tree[session]))
sample['rgb_context'] = [self._read_rgb_file(session, filename) for filename in context_paths]
if self.data_transform:
sample = self.data_transform(sample)
return sample
|
dro-sfm
|
positive
|
def __parse_issue_submission(soup):
"""
"""
try:
submission = {}
submitted = soup.find({'label': True}, text=ISSUE_SUBMISSION_PATTERN).findNext('p')
if submitted.a:
submission['name'] = submitted.a.get('title')
submission['id'] = submitted.a.string
<DeepExtract>
dt = parse(ISSUE_SUBMISSION_DATE_PATTERN.match(submitted.contents[2]).group(1)).replace(tzinfo=None)
submission['date'] = dt
</DeepExtract>
else:
submission['name'] = 'nobody'
submission['id'] = 'nobody'
aux1 = submitted.contents[0]
aux2 = aux1[aux1.find(' - '):]
<DeepExtract>
dt = parse(ISSUE_SUBMISSION_DATE_PATTERN.match(aux2).group(1)).replace(tzinfo=None)
submission['date'] = dt
</DeepExtract>
return submission
except:
raise SourceForgeParserError('Error parsing issue submission')
|
def __parse_issue_submission(soup):
"""
"""
try:
submission = {}
submitted = soup.find({'label': True}, text=ISSUE_SUBMISSION_PATTERN).findNext('p')
if submitted.a:
submission['name'] = submitted.a.get('title')
submission['id'] = submitted.a.string
dt = parse(ISSUE_SUBMISSION_DATE_PATTERN.match(submitted.contents[2]).group(1)).replace(tzinfo=None)
submission['date'] = dt
else:
submission['name'] = 'nobody'
submission['id'] = 'nobody'
aux1 = submitted.contents[0]
aux2 = aux1[aux1.find(' - '):]
dt = parse(ISSUE_SUBMISSION_DATE_PATTERN.match(aux2).group(1)).replace(tzinfo=None)
submission['date'] = dt
return submission
except:
raise SourceForgeParserError('Error parsing issue submission')
|
Bicho
|
positive
|
def __removeItem(widgetItem):
parent = widgetItem.parent()
<DeepExtract>
variant = widgetItem.data(3, Qt.UserRole)
widgetItem = variant.toPyObject()
</DeepExtract>
<DeepExtract>
if model.TriggerMode.HOTKEY in item.modes:
self.topLevelWidget().app.hotkey_removed(item)
if isinstance(item, model.Folder):
for subFolder in item.folders:
self.__deleteHotkeys(subFolder)
for item in item.items:
if model.TriggerMode.HOTKEY in item.modes:
self.topLevelWidget().app.hotkey_removed(item)
</DeepExtract>
if parent is None:
removedIndex = self.treeWidget.indexOfTopLevelItem(widgetItem)
self.treeWidget.takeTopLevelItem(removedIndex)
self.configManager.folders.remove(item)
else:
removedIndex = parent.indexOfChild(widgetItem)
parent.removeChild(widgetItem)
if isinstance(item, model.Folder):
item.parent.remove_folder(item)
else:
item.parent.remove_item(item)
item.remove_data()
self.treeWidget.sortItems(0, Qt.AscendingOrder)
if parent is not None:
if parent.childCount() > 0:
newIndex = min([removedIndex, parent.childCount() - 1])
self.treeWidget.setCurrentItem(parent.child(newIndex))
else:
self.treeWidget.setCurrentItem(parent)
else:
newIndex = min([removedIndex, self.treeWidget.topLevelItemCount() - 1])
self.treeWidget.setCurrentItem(self.treeWidget.topLevelItem(newIndex))
|
def __removeItem(widgetItem):
parent = widgetItem.parent()
variant = widgetItem.data(3, Qt.UserRole)
widgetItem = variant.toPyObject()
if model.TriggerMode.HOTKEY in item.modes:
self.topLevelWidget().app.hotkey_removed(item)
if isinstance(item, model.Folder):
for subFolder in item.folders:
self.__deleteHotkeys(subFolder)
for item in item.items:
if model.TriggerMode.HOTKEY in item.modes:
self.topLevelWidget().app.hotkey_removed(item)
if parent is None:
removedIndex = self.treeWidget.indexOfTopLevelItem(widgetItem)
self.treeWidget.takeTopLevelItem(removedIndex)
self.configManager.folders.remove(item)
else:
removedIndex = parent.indexOfChild(widgetItem)
parent.removeChild(widgetItem)
if isinstance(item, model.Folder):
item.parent.remove_folder(item)
else:
item.parent.remove_item(item)
item.remove_data()
self.treeWidget.sortItems(0, Qt.AscendingOrder)
if parent is not None:
if parent.childCount() > 0:
newIndex = min([removedIndex, parent.childCount() - 1])
self.treeWidget.setCurrentItem(parent.child(newIndex))
else:
self.treeWidget.setCurrentItem(parent)
else:
newIndex = min([removedIndex, self.treeWidget.topLevelItemCount() - 1])
self.treeWidget.setCurrentItem(self.treeWidget.topLevelItem(newIndex))
|
autokey-python2
|
positive
|
def fetch_local_data(self, address, size, max_round_trips=None):
"""Immediately read a block of data from the remote device into the local cache.
All cached bytes will stay in the cache permanently, and writes will no longer go to hardware.
Returns the length of the block we actually read, in bytes.
"""
block = read_block(self.device, address, size, max_round_trips=max_round_trips)
<DeepExtract>
self.local_addresses.seek(address)
self.local_addresses.write(b'\xff' * (address + len(block) - 1 - address + 1))
</DeepExtract>
self.local_data.seek(address)
self.local_data.write(block)
return len(block)
|
def fetch_local_data(self, address, size, max_round_trips=None):
"""Immediately read a block of data from the remote device into the local cache.
All cached bytes will stay in the cache permanently, and writes will no longer go to hardware.
Returns the length of the block we actually read, in bytes.
"""
block = read_block(self.device, address, size, max_round_trips=max_round_trips)
self.local_addresses.seek(address)
self.local_addresses.write(b'\xff' * (address + len(block) - 1 - address + 1))
self.local_data.seek(address)
self.local_data.write(block)
return len(block)
|
coastermelt
|
positive
|
def validate_header(header: Header, parent_header: Header) -> None:
"""
Verifies a block header.
In order to consider a block's header valid, the logic for the
quantities in the header should match the logic for the block itself.
For example the header timestamp should be greater than the block's parent
timestamp because the block was created *after* the parent block.
Additionally, the block's number should be directly folowing the parent
block's number since it is the next block in the sequence.
Parameters
----------
header :
Header to check for correctness.
parent_header :
Parent Header of the header to check for correctness
"""
parent_has_ommers = parent_header.ommers_hash != EMPTY_OMMER_HASH
ensure(header.timestamp > parent_header.timestamp, InvalidBlock)
ensure(header.number == parent_header.number + 1, InvalidBlock)
ensure(check_gas_limit(header.gas_limit, parent_header.gas_limit), InvalidBlock)
ensure(len(header.extra_data) <= 32, InvalidBlock)
<DeepExtract>
offset = int(parent_header.difficulty) // 2048 * max((2 if parent_has_ommers else 1) - int(header.timestamp - parent_header.timestamp) // 9, -99)
difficulty = int(parent_header.difficulty) + offset
num_bomb_periods = (int(header.number) - BOMB_DELAY_BLOCKS) // 100000 - 2
if num_bomb_periods >= 0:
difficulty += 2 ** num_bomb_periods
block_difficulty = Uint(max(difficulty, MINIMUM_DIFFICULTY))
</DeepExtract>
ensure(header.difficulty == block_difficulty, InvalidBlock)
block_parent_hash = keccak256(rlp.encode(parent_header))
ensure(header.parent_hash == block_parent_hash, InvalidBlock)
<DeepExtract>
header_hash = generate_header_hash_for_pow(header)
cache = generate_cache(header.number)
(mix_digest, result) = hashimoto_light(header_hash, header.nonce, cache, dataset_size(header.number))
ensure(mix_digest == header.mix_digest, InvalidBlock)
ensure(Uint.from_be_bytes(result) <= U256_CEIL_VALUE // header.difficulty, InvalidBlock)
</DeepExtract>
|
def validate_header(header: Header, parent_header: Header) -> None:
"""
Verifies a block header.
In order to consider a block's header valid, the logic for the
quantities in the header should match the logic for the block itself.
For example the header timestamp should be greater than the block's parent
timestamp because the block was created *after* the parent block.
Additionally, the block's number should be directly folowing the parent
block's number since it is the next block in the sequence.
Parameters
----------
header :
Header to check for correctness.
parent_header :
Parent Header of the header to check for correctness
"""
parent_has_ommers = parent_header.ommers_hash != EMPTY_OMMER_HASH
ensure(header.timestamp > parent_header.timestamp, InvalidBlock)
ensure(header.number == parent_header.number + 1, InvalidBlock)
ensure(check_gas_limit(header.gas_limit, parent_header.gas_limit), InvalidBlock)
ensure(len(header.extra_data) <= 32, InvalidBlock)
offset = int(parent_header.difficulty) // 2048 * max((2 if parent_has_ommers else 1) - int(header.timestamp - parent_header.timestamp) // 9, -99)
difficulty = int(parent_header.difficulty) + offset
num_bomb_periods = (int(header.number) - BOMB_DELAY_BLOCKS) // 100000 - 2
if num_bomb_periods >= 0:
difficulty += 2 ** num_bomb_periods
block_difficulty = Uint(max(difficulty, MINIMUM_DIFFICULTY))
ensure(header.difficulty == block_difficulty, InvalidBlock)
block_parent_hash = keccak256(rlp.encode(parent_header))
ensure(header.parent_hash == block_parent_hash, InvalidBlock)
header_hash = generate_header_hash_for_pow(header)
cache = generate_cache(header.number)
(mix_digest, result) = hashimoto_light(header_hash, header.nonce, cache, dataset_size(header.number))
ensure(mix_digest == header.mix_digest, InvalidBlock)
ensure(Uint.from_be_bytes(result) <= U256_CEIL_VALUE // header.difficulty, InvalidBlock)
|
eth1.0-specs
|
positive
|