| before (stringlengths 0–955k) | after (stringlengths 0–877k) | repo (stringlengths 1–74) | type (stringclasses 1: value) |
|---|---|---|---|
def train_mobile_net(data_dir, resume_dir=None, train_args_collection_name=None):
"""
Should see eval steering error of about 0.1135
Original Deepdrive 2.0 baseline steering error eval was ~0.2,
train steering error: ~0.08
"""
if not check_tensorflow_gpu():
raise RuntimeError('Invalid Tensorflow version detected. See above for details.')
train_args = TRAIN_ARG_COLLECTIONS.get(train_args_collection_name, {})
if not os.path.exists(c.MNET2_PRETRAINED_PATH + '.meta'):
util.download.download(c.MNET2_PRETRAINED_URL + '?cache_bust=1', c.WEIGHTS_DIR, warn_existing=False, overwrite=True)
if not glob.glob(data_dir + '/*.tfrecord'):
if glob.glob(data_dir + '/*/*.hdf5'):
raise RuntimeError('No tfrecords in %s - Run main.py --hdf5-2-tfrecord --recording-dir="%s" to convert hdf5 records' % (data_dir, data_dir))
else:
raise RuntimeError('No tfrecords found in %s - aborting' % data_dir)
if resume_dir is None:
train_dir = datetime.now().strftime(os.path.join(c.TENSORFLOW_OUT_DIR, '%Y-%m-%d__%I-%M-%S%p'))
print('train_dir is ', train_dir)
<DeepExtract>
p = Process(target=fine_tune_new_layers, args=(data_dir, train_dir, train_args.get('fine_tune_new_layers', None)))
p.start()
p.join()
if p.exitcode != 0:
raise RuntimeError("\nProcess finished with error. See above for details. HINTS: \n\n1) If you see CUDA errors like:\n\n Error polling for event status: failed to query event: CUDA_ERROR_LAUNCH_FAILED\n\n try restarting (esp. Windows), then running with \n \n --use-latest-model to resume training from the last checkpoint. \n\n2) Running training outside of IDE's like PyCharm seems to be more stable.")
</DeepExtract>
<DeepExtract>
p = Process(target=eval_mobile_net, args=(data_dir,))
p.start()
p.join()
if p.exitcode != 0:
raise RuntimeError("\nProcess finished with error. See above for details. HINTS: \n\n1) If you see CUDA errors like:\n\n Error polling for event status: failed to query event: CUDA_ERROR_LAUNCH_FAILED\n\n try restarting (esp. Windows), then running with \n \n --use-latest-model to resume training from the last checkpoint. \n\n2) Running training outside of IDE's like PyCharm seems to be more stable.")
</DeepExtract>
else:
train_dir = resume_dir
print('resume_dir is ', resume_dir)
<DeepExtract>
p = Process(target=fine_tune_all_layers, args=(data_dir, train_dir, train_args.get('fine_tune_all_layers', None)))
p.start()
p.join()
if p.exitcode != 0:
raise RuntimeError("\nProcess finished with error. See above for details. HINTS: \n\n1) If you see CUDA errors like:\n\n Error polling for event status: failed to query event: CUDA_ERROR_LAUNCH_FAILED\n\n try restarting (esp. Windows), then running with \n \n --use-latest-model to resume training from the last checkpoint. \n\n2) Running training outside of IDE's like PyCharm seems to be more stable.")
</DeepExtract>
<DeepExtract>
p = Process(target=eval_mobile_net, args=(data_dir,))
p.start()
p.join()
if p.exitcode != 0:
raise RuntimeError("\nProcess finished with error. See above for details. HINTS: \n\n1) If you see CUDA errors like:\n\n Error polling for event status: failed to query event: CUDA_ERROR_LAUNCH_FAILED\n\n try restarting (esp. Windows), then running with \n \n --use-latest-model to resume training from the last checkpoint. \n\n2) Running training outside of IDE's like PyCharm seems to be more stable.")
</DeepExtract>
log.info('Finished training')
|
def train_mobile_net(data_dir, resume_dir=None, train_args_collection_name=None):
"""
Should see eval steering error of about 0.1135
Original Deepdrive 2.0 baseline steering error eval was ~0.2,
train steering error: ~0.08
"""
if not check_tensorflow_gpu():
raise RuntimeError('Invalid Tensorflow version detected. See above for details.')
train_args = TRAIN_ARG_COLLECTIONS.get(train_args_collection_name, {})
if not os.path.exists(c.MNET2_PRETRAINED_PATH + '.meta'):
util.download.download(c.MNET2_PRETRAINED_URL + '?cache_bust=1', c.WEIGHTS_DIR, warn_existing=False, overwrite=True)
if not glob.glob(data_dir + '/*.tfrecord'):
if glob.glob(data_dir + '/*/*.hdf5'):
raise RuntimeError('No tfrecords in %s - Run main.py --hdf5-2-tfrecord --recording-dir="%s" to convert hdf5 records' % (data_dir, data_dir))
else:
raise RuntimeError('No tfrecords found in %s - aborting' % data_dir)
if resume_dir is None:
train_dir = datetime.now().strftime(os.path.join(c.TENSORFLOW_OUT_DIR, '%Y-%m-%d__%I-%M-%S%p'))
print('train_dir is ', train_dir)
p = Process(target=fine_tune_new_layers, args=(data_dir, train_dir, train_args.get('fine_tune_new_layers', None)))
p.start()
p.join()
if p.exitcode != 0:
raise RuntimeError("\nProcess finished with error. See above for details. HINTS: \n\n1) If you see CUDA errors like:\n\n Error polling for event status: failed to query event: CUDA_ERROR_LAUNCH_FAILED\n\n try restarting (esp. Windows), then running with \n \n --use-latest-model to resume training from the last checkpoint. \n\n2) Running training outside of IDE's like PyCharm seems to be more stable.")
p = Process(target=eval_mobile_net, args=(data_dir,))
p.start()
p.join()
if p.exitcode != 0:
raise RuntimeError("\nProcess finished with error. See above for details. HINTS: \n\n1) If you see CUDA errors like:\n\n Error polling for event status: failed to query event: CUDA_ERROR_LAUNCH_FAILED\n\n try restarting (esp. Windows), then running with \n \n --use-latest-model to resume training from the last checkpoint. \n\n2) Running training outside of IDE's like PyCharm seems to be more stable.")
else:
train_dir = resume_dir
print('resume_dir is ', resume_dir)
p = Process(target=fine_tune_all_layers, args=(data_dir, train_dir, train_args.get('fine_tune_all_layers', None)))
p.start()
p.join()
if p.exitcode != 0:
raise RuntimeError("\nProcess finished with error. See above for details. HINTS: \n\n1) If you see CUDA errors like:\n\n Error polling for event status: failed to query event: CUDA_ERROR_LAUNCH_FAILED\n\n try restarting (esp. Windows), then running with \n \n --use-latest-model to resume training from the last checkpoint. \n\n2) Running training outside of IDE's like PyCharm seems to be more stable.")
p = Process(target=eval_mobile_net, args=(data_dir,))
p.start()
p.join()
if p.exitcode != 0:
raise RuntimeError("\nProcess finished with error. See above for details. HINTS: \n\n1) If you see CUDA errors like:\n\n Error polling for event status: failed to query event: CUDA_ERROR_LAUNCH_FAILED\n\n try restarting (esp. Windows), then running with \n \n --use-latest-model to resume training from the last checkpoint. \n\n2) Running training outside of IDE's like PyCharm seems to be more stable.")
log.info('Finished training')
|
deepdrive
|
positive
|
def setup(cfg, writer, logger):
"""
set optimizer and load pretrained model
"""
for net in self.nets:
<DeepExtract>
init_type = cfg['model']['init'].get('init_type', init_type)
init_gain = cfg['model']['init'].get('init_gain', init_gain)
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias.data, 0.0)
elif isinstance(m, SynchronizedBatchNorm2d) or classname.find('BatchNorm2d') != -1 or isinstance(m, nn.GroupNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
print('initialize {} with {}'.format(init_type, net.__class__.__name__))
logger.info('initialize {} with {}'.format(init_type, net.__class__.__name__))
net.apply(init_func)
pass
</DeepExtract>
print('Initializition completed')
if hasattr(net, '_load_pretrained_model') and cfg['model']['pretrained']:
print('loading pretrained model for {}'.format(net.__class__.__name__))
net._load_pretrained_model()
'load pretrained model\n '
if cfg['training']['resume_flag']:
<DeepExtract>
if os.path.isfile(cfg['training']['resume']):
logger.info("Loading model and optimizer from checkpoint '{}'".format(cfg['training']['resume']))
checkpoint = torch.load(cfg['training']['resume'])
_k = -1
for net in self.nets:
name = net.__class__.__name__
_k += 1
if checkpoint.get(name) == None:
continue
if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:
continue
self.adaptive_load_nets(net, checkpoint[name]['model_state'])
if cfg['training']['optimizer_resume']:
self.adaptive_load_nets(self.optimizers[_k], checkpoint[name]['optimizer_state'])
self.adaptive_load_nets(self.schedulers[_k], checkpoint[name]['scheduler_state'])
self.iter = checkpoint['iter']
self.best_iou = checkpoint['best_iou']
logger.info("Loaded checkpoint '{}' (iter {})".format(cfg['training']['resume'], checkpoint['iter']))
else:
raise Exception("No checkpoint found at '{}'".format(cfg['training']['resume']))
</DeepExtract>
pass
|
def setup(cfg, writer, logger):
"""
set optimizer and load pretrained model
"""
for net in self.nets:
init_type = cfg['model']['init'].get('init_type', init_type)
init_gain = cfg['model']['init'].get('init_gain', init_gain)
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias.data, 0.0)
elif isinstance(m, SynchronizedBatchNorm2d) or classname.find('BatchNorm2d') != -1 or isinstance(m, nn.GroupNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
print('initialize {} with {}'.format(init_type, net.__class__.__name__))
logger.info('initialize {} with {}'.format(init_type, net.__class__.__name__))
net.apply(init_func)
pass
print('Initializition completed')
if hasattr(net, '_load_pretrained_model') and cfg['model']['pretrained']:
print('loading pretrained model for {}'.format(net.__class__.__name__))
net._load_pretrained_model()
'load pretrained model\n '
if cfg['training']['resume_flag']:
if os.path.isfile(cfg['training']['resume']):
logger.info("Loading model and optimizer from checkpoint '{}'".format(cfg['training']['resume']))
checkpoint = torch.load(cfg['training']['resume'])
_k = -1
for net in self.nets:
name = net.__class__.__name__
_k += 1
if checkpoint.get(name) == None:
continue
if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:
continue
self.adaptive_load_nets(net, checkpoint[name]['model_state'])
if cfg['training']['optimizer_resume']:
self.adaptive_load_nets(self.optimizers[_k], checkpoint[name]['optimizer_state'])
self.adaptive_load_nets(self.schedulers[_k], checkpoint[name]['scheduler_state'])
self.iter = checkpoint['iter']
self.best_iou = checkpoint['best_iou']
logger.info("Loaded checkpoint '{}' (iter {})".format(cfg['training']['resume'], checkpoint['iter']))
else:
raise Exception("No checkpoint found at '{}'".format(cfg['training']['resume']))
pass
|
CAG_UDA
|
positive
|
def get(self, request, *args, **kwargs):
project_obj = get_object_or_404(Project, pk=self.kwargs.get('project_pk'))
<DeepExtract>
publications_do_delete = [{'title': publication.title, 'year': publication.year, 'unique_id': publication.unique_id} for publication in project_obj.publication_set.all().order_by('-year')]
publications_do_export = publications_do_delete
</DeepExtract>
context = {}
if publications_do_export:
formset = formset_factory(PublicationExportForm, max_num=len(publications_do_export))
formset = formset(initial=publications_do_export, prefix='publicationform')
context['formset'] = formset
context['project'] = project_obj
return render(request, self.template_name, context)
|
def get(self, request, *args, **kwargs):
project_obj = get_object_or_404(Project, pk=self.kwargs.get('project_pk'))
publications_do_delete = [{'title': publication.title, 'year': publication.year, 'unique_id': publication.unique_id} for publication in project_obj.publication_set.all().order_by('-year')]
publications_do_export = publications_do_delete
context = {}
if publications_do_export:
formset = formset_factory(PublicationExportForm, max_num=len(publications_do_export))
formset = formset(initial=publications_do_export, prefix='publicationform')
context['formset'] = formset
context['project'] = project_obj
return render(request, self.template_name, context)
|
coldfront
|
positive
|
@swagger_auto_schema(tags=['Cases'], manual_parameters=swagger_params.cases_list_get_params)
def get(self, request, *args, **kwargs):
<DeepExtract>
params = self.request.post_data
queryset = self.model.objects.filter(org=self.request.org).order_by('-id')
accounts = Account.objects.filter(org=self.request.org).order_by('-id')
contacts = Contact.objects.filter(org=self.request.org).order_by('-id')
profiles = Profile.objects.filter(is_active=True, org=self.request.org)
if self.request.profile.role != 'ADMIN' and (not self.request.profile.is_admin):
queryset = queryset.filter(Q(created_by=self.request.profile) | Q(assigned_to=self.request.profile)).distinct()
accounts = accounts.filter(Q(created_by=self.request.profile) | Q(assigned_to=self.request.profile)).distinct()
contacts = contacts.filter(Q(created_by=self.request.profile) | Q(assigned_to=self.request.profile)).distinct()
profiles = profiles.filter(role='ADMIN')
if params:
if params.get('name'):
queryset = queryset.filter(name__icontains=params.get('name'))
if params.get('status'):
queryset = queryset.filter(status=params.get('status'))
if params.get('priority'):
queryset = queryset.filter(priority=params.get('priority'))
if params.get('account'):
queryset = queryset.filter(account=params.get('account'))
context = {}
results_cases = self.paginate_queryset(queryset, self.request, view=self)
cases = CaseSerializer(results_cases, many=True).data
if results_cases:
offset = queryset.filter(id__gte=results_cases[-1].id).count()
if offset == queryset.count():
offset = None
else:
offset = 0
context.update({'cases_count': self.count, 'offset': offset})
context['cases'] = cases
context['status'] = STATUS_CHOICE
context['priority'] = PRIORITY_CHOICE
context['type_of_case'] = CASE_TYPE
context['accounts_list'] = AccountSerializer(accounts, many=True).data
context['contacts_list'] = ContactSerializer(contacts, many=True).data
context = context
</DeepExtract>
return Response(context)
|
@swagger_auto_schema(tags=['Cases'], manual_parameters=swagger_params.cases_list_get_params)
def get(self, request, *args, **kwargs):
params = self.request.post_data
queryset = self.model.objects.filter(org=self.request.org).order_by('-id')
accounts = Account.objects.filter(org=self.request.org).order_by('-id')
contacts = Contact.objects.filter(org=self.request.org).order_by('-id')
profiles = Profile.objects.filter(is_active=True, org=self.request.org)
if self.request.profile.role != 'ADMIN' and (not self.request.profile.is_admin):
queryset = queryset.filter(Q(created_by=self.request.profile) | Q(assigned_to=self.request.profile)).distinct()
accounts = accounts.filter(Q(created_by=self.request.profile) | Q(assigned_to=self.request.profile)).distinct()
contacts = contacts.filter(Q(created_by=self.request.profile) | Q(assigned_to=self.request.profile)).distinct()
profiles = profiles.filter(role='ADMIN')
if params:
if params.get('name'):
queryset = queryset.filter(name__icontains=params.get('name'))
if params.get('status'):
queryset = queryset.filter(status=params.get('status'))
if params.get('priority'):
queryset = queryset.filter(priority=params.get('priority'))
if params.get('account'):
queryset = queryset.filter(account=params.get('account'))
context = {}
results_cases = self.paginate_queryset(queryset, self.request, view=self)
cases = CaseSerializer(results_cases, many=True).data
if results_cases:
offset = queryset.filter(id__gte=results_cases[-1].id).count()
if offset == queryset.count():
offset = None
else:
offset = 0
context.update({'cases_count': self.count, 'offset': offset})
context['cases'] = cases
context['status'] = STATUS_CHOICE
context['priority'] = PRIORITY_CHOICE
context['type_of_case'] = CASE_TYPE
context['accounts_list'] = AccountSerializer(accounts, many=True).data
context['contacts_list'] = ContactSerializer(contacts, many=True).data
context = context
return Response(context)
|
Django-CRM
|
positive
|
def process(self):
"""Process an html google bookmarks export and import them into bookie
The export format is a tag as a heading, with urls that have that tag
under that heading. If a url has N tags, it will appear N times, once
under each heading.
"""
count = 0
if self.file_handle.closed:
self.file_handle = open(self.file_handle.name)
soup = BeautifulSoup(self.file_handle)
if not soup.contents[0] == 'DOCTYPE NETSCAPE-Bookmark-file-1':
raise Exception('File is not a google bookmarks file')
htmlParser = HTMLParser()
urls = dict()
for tag in soup.findAll('h3'):
links = tag.findNextSibling('dl')
if links is not None:
links = links.findAll('a')
for link in links:
url = link['href']
if url.startswith('javascript:'):
continue
tag_text = tag.text.replace(' ', '-')
if url in urls:
urls[url]['tags'].append(tag_text)
else:
tags = [tag_text] if tag_text != 'Unlabeled' else []
has_extended = link.parent.nextSibling and link.parent.nextSibling.name == 'dd'
if has_extended:
extended = link.parent.nextSibling.text
else:
extended = ''
if link.has_key('add_date'):
if int(link['add_date']) < 9999999999:
timestamp_added = int(link['add_date'])
else:
timestamp_added = float(link['add_date']) / 1000000.0
else:
link['add_date'] = time.time()
urls[url] = {'description': htmlParser.unescape(link.text), 'tags': tags, 'extended': extended, 'date_added': datetime.fromtimestamp(timestamp_added)}
ids = []
for (url, metadata) in urls.items():
try:
<DeepExtract>
if u' '.join(metadata['tags']) and 'private' in u' '.join(metadata['tags']).lower().split(' '):
bmark = None
check_hash = generate_hash(unicode(url))
if check_hash not in self.hash_list:
bmark = BmarkMgr.store(unicode(url), self.username, unicode(metadata['description']), unicode(metadata['extended']), u' '.join(metadata['tags']), dt=metadata['date_added'], inserted_by=IMPORTED, is_private=is_private)
self.hash_list.add(check_hash)
bmark = bmark
bmark = None
</DeepExtract>
DBSession.flush()
except InvalidBookmark:
bmark = None
if bmark:
ids.append(bmark.bid)
if count % COMMIT_SIZE == 0:
transaction.commit()
transaction.begin()
transaction.commit()
from bookie.bcelery import tasks
for bid in ids:
tasks.fetch_bmark_content.delay(bid)
|
def process(self):
"""Process an html google bookmarks export and import them into bookie
The export format is a tag as a heading, with urls that have that tag
under that heading. If a url has N tags, it will appear N times, once
under each heading.
"""
count = 0
if self.file_handle.closed:
self.file_handle = open(self.file_handle.name)
soup = BeautifulSoup(self.file_handle)
if not soup.contents[0] == 'DOCTYPE NETSCAPE-Bookmark-file-1':
raise Exception('File is not a google bookmarks file')
htmlParser = HTMLParser()
urls = dict()
for tag in soup.findAll('h3'):
links = tag.findNextSibling('dl')
if links is not None:
links = links.findAll('a')
for link in links:
url = link['href']
if url.startswith('javascript:'):
continue
tag_text = tag.text.replace(' ', '-')
if url in urls:
urls[url]['tags'].append(tag_text)
else:
tags = [tag_text] if tag_text != 'Unlabeled' else []
has_extended = link.parent.nextSibling and link.parent.nextSibling.name == 'dd'
if has_extended:
extended = link.parent.nextSibling.text
else:
extended = ''
if link.has_key('add_date'):
if int(link['add_date']) < 9999999999:
timestamp_added = int(link['add_date'])
else:
timestamp_added = float(link['add_date']) / 1000000.0
else:
link['add_date'] = time.time()
urls[url] = {'description': htmlParser.unescape(link.text), 'tags': tags, 'extended': extended, 'date_added': datetime.fromtimestamp(timestamp_added)}
ids = []
for (url, metadata) in urls.items():
try:
if u' '.join(metadata['tags']) and 'private' in u' '.join(metadata['tags']).lower().split(' '):
bmark = None
check_hash = generate_hash(unicode(url))
if check_hash not in self.hash_list:
bmark = BmarkMgr.store(unicode(url), self.username, unicode(metadata['description']), unicode(metadata['extended']), u' '.join(metadata['tags']), dt=metadata['date_added'], inserted_by=IMPORTED, is_private=is_private)
self.hash_list.add(check_hash)
bmark = bmark
bmark = None
DBSession.flush()
except InvalidBookmark:
bmark = None
if bmark:
ids.append(bmark.bid)
if count % COMMIT_SIZE == 0:
transaction.commit()
transaction.begin()
transaction.commit()
from bookie.bcelery import tasks
for bid in ids:
tasks.fetch_bmark_content.delay(bid)
|
Bookie
|
positive
|
def run_tests(tests, lit_config, opts, discovered_tests):
workers = min(len(tests), opts.workers)
display = lit.display.create_display(opts, len(tests), discovered_tests, workers)
def progress_callback(test):
display.update(test)
if opts.order == 'failing-first':
<DeepExtract>
if test.isFailure():
os.utime(test.getFilePath(), None)
</DeepExtract>
run = lit.run.Run(tests, lit_config, workers, progress_callback, opts.max_failures, opts.timeout)
display.print_header()
interrupted = False
error = None
try:
<DeepExtract>
tmp_dir = None
if 'LIT_PRESERVES_TMP' not in os.environ:
import tempfile
tmp_dir = tempfile.mkdtemp(prefix='lit_tmp_')
os.environ.update({'TMPDIR': tmp_dir, 'TMP': tmp_dir, 'TEMP': tmp_dir, 'TEMPDIR': tmp_dir})
try:
run.execute()
finally:
if tmp_dir:
try:
import shutil
shutil.rmtree(tmp_dir)
except Exception as e:
lit_config.warning("Failed to delete temp directory '%s', try upgrading your version of Python to fix this" % tmp_dir)
</DeepExtract>
except KeyboardInterrupt:
interrupted = True
error = ' interrupted by user'
except lit.run.MaxFailuresError:
error = 'warning: reached maximum number of test failures'
except lit.run.TimeoutError:
error = 'warning: reached timeout'
display.clear(interrupted)
if error:
sys.stderr.write('%s, skipping remaining tests\n' % error)
|
def run_tests(tests, lit_config, opts, discovered_tests):
workers = min(len(tests), opts.workers)
display = lit.display.create_display(opts, len(tests), discovered_tests, workers)
def progress_callback(test):
display.update(test)
if opts.order == 'failing-first':
if test.isFailure():
os.utime(test.getFilePath(), None)
run = lit.run.Run(tests, lit_config, workers, progress_callback, opts.max_failures, opts.timeout)
display.print_header()
interrupted = False
error = None
try:
tmp_dir = None
if 'LIT_PRESERVES_TMP' not in os.environ:
import tempfile
tmp_dir = tempfile.mkdtemp(prefix='lit_tmp_')
os.environ.update({'TMPDIR': tmp_dir, 'TMP': tmp_dir, 'TEMP': tmp_dir, 'TEMPDIR': tmp_dir})
try:
run.execute()
finally:
if tmp_dir:
try:
import shutil
shutil.rmtree(tmp_dir)
except Exception as e:
lit_config.warning("Failed to delete temp directory '%s', try upgrading your version of Python to fix this" % tmp_dir)
except KeyboardInterrupt:
interrupted = True
error = ' interrupted by user'
except lit.run.MaxFailuresError:
error = 'warning: reached maximum number of test failures'
except lit.run.TimeoutError:
error = 'warning: reached timeout'
display.clear(interrupted)
if error:
sys.stderr.write('%s, skipping remaining tests\n' % error)
|
alive
|
positive
|
def attention_decoder(decoder_inputs, initial_state, attention_states, cell, output_size=None, num_heads=1, loop_function=None, dtype=None, scope=None, initial_state_attention=False):
"""RNN decoder with attention for the sequence-to-sequence model.
In this context "attention" means that, during decoding, the RNN can look up
information in the additional tensor attention_states, and it does this by
focusing on a few entries from the tensor. This model has proven to yield
especially good results in a number of sequence-to-sequence tasks. This
implementation is based on http://arxiv.org/abs/1412.7449 (see below for
details). It is recommended for complex sequence-to-sequence tasks.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
output_size: Size of the output vectors; if None, we use cell.output_size.
num_heads: Number of attention heads that read from attention_states.
loop_function: If not None, this function will be applied to i-th output
in order to generate i+1-th input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
dtype: The dtype to use for the RNN initial state (default: tf.float32).
scope: VariableScope for the created subgraph; default: "attention_decoder".
initial_state_attention: If False (default), initial attentions are zero.
If True, initialize the attentions from the initial state and attention
states -- useful when we wish to resume decoding from a previously
stored decoder state and attention states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors of
shape [batch_size x output_size]. These represent the generated outputs.
Output i is computed from input i (which is either the i-th element
of decoder_inputs or loop_function(output {i-1}, i)) as follows.
First, we run the cell on a combination of the input and previous
attention masks:
cell_output, new_state = cell(linear(input, prev_attn), prev_state).
Then, we calculate new attention masks:
new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
and then we calculate the output:
output = linear(cell_output, new_attn).
state: The state of each decoder cell the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: when num_heads is not positive, there are no inputs, shapes
of attention_states are not set, or input size cannot be inferred
from the input.
"""
if not decoder_inputs:
raise ValueError('Must provide at least 1 input to attention decoder.')
if num_heads < 1:
raise ValueError('With less than 1 heads, use a non-attention decoder.')
if attention_states.get_shape()[2].value is None:
raise ValueError('Shape[2] of attention_states must be known: %s' % attention_states.get_shape())
if output_size is None:
output_size = cell.output_size
with variable_scope.variable_scope(scope or 'attention_decoder', dtype=dtype) as scope:
dtype = scope.dtype
batch_size = array_ops.shape(decoder_inputs[0])[0]
attn_length = attention_states.get_shape()[1].value
if attn_length is None:
attn_length = array_ops.shape(attention_states)[1]
attn_size = attention_states.get_shape()[2].value
hidden = array_ops.reshape(attention_states, [-1, attn_length, 1, attn_size])
hidden_features = []
v = []
attention_vec_size = attn_size
for a in xrange(num_heads):
k = variable_scope.get_variable('AttnW_%d' % a, [1, 1, attn_size, attention_vec_size])
hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], 'SAME'))
v.append(variable_scope.get_variable('AttnV_%d' % a, [attention_vec_size]))
state = initial_state
def attention(query):
"""Put attention masks on hidden using hidden_features and query."""
ds = []
if nest.is_sequence(query):
query_list = nest.flatten(query)
for q in query_list:
ndims = q.get_shape().ndims
if ndims:
assert ndims == 2
query = array_ops.concat(query_list, 1)
for a in xrange(num_heads):
with variable_scope.variable_scope('Attention_%d' % a):
y = linear(query, attention_vec_size, True)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3])
a = nn_ops.softmax(s)
d = math_ops.reduce_sum(array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
return ds
outputs = []
prev = None
batch_attn_size = array_ops.stack([batch_size, attn_size])
attns = [array_ops.zeros(batch_attn_size, dtype=dtype) for _ in xrange(num_heads)]
for a in attns:
a.set_shape([None, attn_size])
if initial_state_attention:
<DeepExtract>
ds = []
if nest.is_sequence(initial_state):
query_list = nest.flatten(initial_state)
for q in query_list:
ndims = q.get_shape().ndims
if ndims:
assert ndims == 2
initial_state = array_ops.concat(query_list, 1)
for a in xrange(num_heads):
with variable_scope.variable_scope('Attention_%d' % a):
y = linear(initial_state, attention_vec_size, True)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3])
a = nn_ops.softmax(s)
d = math_ops.reduce_sum(array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
attns = ds
</DeepExtract>
for (i, inp) in enumerate(decoder_inputs):
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
if loop_function is not None and prev is not None:
with variable_scope.variable_scope('loop_function', reuse=True):
<DeepExtract>
if output_projection is not None:
prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])
prev_symbol = math_ops.argmax(prev, 1)
emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
if not update_embedding:
emb_prev = array_ops.stop_gradient(emb_prev)
inp = emb_prev
</DeepExtract>
input_size = inp.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError('Could not infer input size from input: %s' % inp.name)
x = linear([inp] + attns, input_size, True)
(cell_output, state) = cell(x, state)
if i == 0 and initial_state_attention:
with variable_scope.variable_scope(variable_scope.get_variable_scope(), reuse=True):
<DeepExtract>
ds = []
if nest.is_sequence(state):
query_list = nest.flatten(state)
for q in query_list:
ndims = q.get_shape().ndims
if ndims:
assert ndims == 2
state = array_ops.concat(query_list, 1)
for a in xrange(num_heads):
with variable_scope.variable_scope('Attention_%d' % a):
y = linear(state, attention_vec_size, True)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3])
a = nn_ops.softmax(s)
d = math_ops.reduce_sum(array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
attns = ds
</DeepExtract>
else:
<DeepExtract>
ds = []
if nest.is_sequence(state):
query_list = nest.flatten(state)
for q in query_list:
ndims = q.get_shape().ndims
if ndims:
assert ndims == 2
state = array_ops.concat(query_list, 1)
for a in xrange(num_heads):
with variable_scope.variable_scope('Attention_%d' % a):
y = linear(state, attention_vec_size, True)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3])
a = nn_ops.softmax(s)
d = math_ops.reduce_sum(array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
attns = ds
</DeepExtract>
with variable_scope.variable_scope('AttnOutputProjection'):
output = linear([cell_output] + attns, output_size, True)
if loop_function is not None:
prev = output
outputs.append(output)
return (outputs, state)
|
def attention_decoder(decoder_inputs, initial_state, attention_states, cell, output_size=None, num_heads=1, loop_function=None, dtype=None, scope=None, initial_state_attention=False):
"""RNN decoder with attention for the sequence-to-sequence model.
In this context "attention" means that, during decoding, the RNN can look up
information in the additional tensor attention_states, and it does this by
focusing on a few entries from the tensor. This model has proven to yield
especially good results in a number of sequence-to-sequence tasks. This
implementation is based on http://arxiv.org/abs/1412.7449 (see below for
details). It is recommended for complex sequence-to-sequence tasks.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: tf.nn.rnn_cell.RNNCell defining the cell function and size.
output_size: Size of the output vectors; if None, we use cell.output_size.
num_heads: Number of attention heads that read from attention_states.
loop_function: If not None, this function will be applied to i-th output
in order to generate i+1-th input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
dtype: The dtype to use for the RNN initial state (default: tf.float32).
scope: VariableScope for the created subgraph; default: "attention_decoder".
initial_state_attention: If False (default), initial attentions are zero.
If True, initialize the attentions from the initial state and attention
states -- useful when we wish to resume decoding from a previously
stored decoder state and attention states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors of
shape [batch_size x output_size]. These represent the generated outputs.
Output i is computed from input i (which is either the i-th element
of decoder_inputs or loop_function(output {i-1}, i)) as follows.
First, we run the cell on a combination of the input and previous
attention masks:
cell_output, new_state = cell(linear(input, prev_attn), prev_state).
Then, we calculate new attention masks:
new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
and then we calculate the output:
output = linear(cell_output, new_attn).
state: The state of each decoder cell the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: when num_heads is not positive, there are no inputs, shapes
of attention_states are not set, or input size cannot be inferred
from the input.
"""
if not decoder_inputs:
raise ValueError('Must provide at least 1 input to attention decoder.')
if num_heads < 1:
raise ValueError('With less than 1 heads, use a non-attention decoder.')
if attention_states.get_shape()[2].value is None:
raise ValueError('Shape[2] of attention_states must be known: %s' % attention_states.get_shape())
if output_size is None:
output_size = cell.output_size
with variable_scope.variable_scope(scope or 'attention_decoder', dtype=dtype) as scope:
dtype = scope.dtype
batch_size = array_ops.shape(decoder_inputs[0])[0]
attn_length = attention_states.get_shape()[1].value
if attn_length is None:
attn_length = array_ops.shape(attention_states)[1]
attn_size = attention_states.get_shape()[2].value
hidden = array_ops.reshape(attention_states, [-1, attn_length, 1, attn_size])
hidden_features = []
v = []
attention_vec_size = attn_size
for a in xrange(num_heads):
k = variable_scope.get_variable('AttnW_%d' % a, [1, 1, attn_size, attention_vec_size])
hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], 'SAME'))
v.append(variable_scope.get_variable('AttnV_%d' % a, [attention_vec_size]))
state = initial_state
def attention(query):
"""Put attention masks on hidden using hidden_features and query."""
ds = []
if nest.is_sequence(query):
query_list = nest.flatten(query)
for q in query_list:
ndims = q.get_shape().ndims
if ndims:
assert ndims == 2
query = array_ops.concat(query_list, 1)
for a in xrange(num_heads):
with variable_scope.variable_scope('Attention_%d' % a):
y = linear(query, attention_vec_size, True)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3])
a = nn_ops.softmax(s)
d = math_ops.reduce_sum(array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
return ds
outputs = []
prev = None
batch_attn_size = array_ops.stack([batch_size, attn_size])
attns = [array_ops.zeros(batch_attn_size, dtype=dtype) for _ in xrange(num_heads)]
for a in attns:
a.set_shape([None, attn_size])
if initial_state_attention:
ds = []
if nest.is_sequence(initial_state):
query_list = nest.flatten(initial_state)
for q in query_list:
ndims = q.get_shape().ndims
if ndims:
assert ndims == 2
initial_state = array_ops.concat(query_list, 1)
for a in xrange(num_heads):
with variable_scope.variable_scope('Attention_%d' % a):
y = linear(initial_state, attention_vec_size, True)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3])
a = nn_ops.softmax(s)
d = math_ops.reduce_sum(array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
attns = ds
for (i, inp) in enumerate(decoder_inputs):
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
if loop_function is not None and prev is not None:
with variable_scope.variable_scope('loop_function', reuse=True):
if output_projection is not None:
prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])
prev_symbol = math_ops.argmax(prev, 1)
emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
if not update_embedding:
emb_prev = array_ops.stop_gradient(emb_prev)
inp = emb_prev
input_size = inp.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError('Could not infer input size from input: %s' % inp.name)
x = linear([inp] + attns, input_size, True)
(cell_output, state) = cell(x, state)
if i == 0 and initial_state_attention:
with variable_scope.variable_scope(variable_scope.get_variable_scope(), reuse=True):
ds = []
if nest.is_sequence(state):
query_list = nest.flatten(state)
for q in query_list:
ndims = q.get_shape().ndims
if ndims:
assert ndims == 2
state = array_ops.concat(query_list, 1)
for a in xrange(num_heads):
with variable_scope.variable_scope('Attention_%d' % a):
y = linear(state, attention_vec_size, True)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3])
a = nn_ops.softmax(s)
d = math_ops.reduce_sum(array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
attns = ds
else:
ds = []
if nest.is_sequence(state):
query_list = nest.flatten(state)
for q in query_list:
ndims = q.get_shape().ndims
if ndims:
assert ndims == 2
state = array_ops.concat(query_list, 1)
for a in xrange(num_heads):
with variable_scope.variable_scope('Attention_%d' % a):
y = linear(state, attention_vec_size, True)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3])
a = nn_ops.softmax(s)
d = math_ops.reduce_sum(array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
attns = ds
with variable_scope.variable_scope('AttnOutputProjection'):
output = linear([cell_output] + attns, output_size, True)
if loop_function is not None:
prev = output
outputs.append(output)
return (outputs, state)
|
DeepAffinity
|
positive
|
def conv(self, num_out_channels, k_height, k_width, d_height=1, d_width=1, mode='SAME', input_layer=None, num_channels_in=None, use_batch_norm=None, stddev=None, activation='relu', bias=0.0, kernel_initializer=None, for_shadow=False, specify_padding=None):
"""Construct a conv2d layer on top of cnn."""
if input_layer is None:
input_layer = self.top_layer
if num_channels_in is None:
num_channels_in = self.top_size
if stddev is not None and kernel_initializer is None:
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
if not for_shadow:
name = 'conv' + str(self.counts['conv'])
self.counts['conv'] += 1
else:
name = 'conv' + str(self.counts['conv'] - 1)
with tf.variable_scope(name, reuse=for_shadow):
strides = [1, d_height, d_width, 1]
if self.data_format == 'NCHW':
strides = [strides[0], strides[3], strides[1], strides[2]]
if mode != 'SAME_RESNET':
conv = self._conv2d_impl(input_layer, num_channels_in, num_out_channels, kernel_size=[k_height, k_width], strides=[d_height, d_width], padding=mode, kernel_initializer=kernel_initializer, specify_padding=specify_padding)
elif d_height == 1 and d_width == 1:
conv = self._conv2d_impl(input_layer, num_channels_in, num_out_channels, kernel_size=[k_height, k_width], strides=[d_height, d_width], padding='SAME', kernel_initializer=kernel_initializer)
else:
rate = 1
kernel_height_effective = k_height + (k_height - 1) * (rate - 1)
pad_h_beg = (kernel_height_effective - 1) // 2
pad_h_end = kernel_height_effective - 1 - pad_h_beg
kernel_width_effective = k_width + (k_width - 1) * (rate - 1)
pad_w_beg = (kernel_width_effective - 1) // 2
pad_w_end = kernel_width_effective - 1 - pad_w_beg
padding = [[0, 0], [pad_h_beg, pad_h_end], [pad_w_beg, pad_w_end], [0, 0]]
if self.data_format == 'NCHW':
padding = [padding[0], padding[3], padding[1], padding[2]]
input_layer = tf.pad(input_layer, padding)
conv = self._conv2d_impl(input_layer, num_channels_in, num_out_channels, kernel_size=[k_height, k_width], strides=[d_height, d_width], padding='VALID', kernel_initializer=kernel_initializer)
if use_batch_norm is None:
use_batch_norm = self.use_batch_norm
if not use_batch_norm:
if bias is not None:
biases = self.get_variable('biases', [num_out_channels], self.variable_dtype, self.dtype, initializer=tf.constant_initializer(bias))
biased = tf.reshape(tf.nn.bias_add(conv, biases, data_format=self.data_format), conv.get_shape())
else:
biased = conv
else:
<DeepExtract>
assert conv is not None
if for_shadow:
name = 'batchnorm' + str(self.counts['batchnorm'] - 1)
else:
name = 'batchnorm' + str(self.counts['batchnorm'])
self.counts['batchnorm'] += 1
with tf.variable_scope(name, reuse=for_shadow) as scope:
if self.use_tf_layers:
bn = tf.contrib.layers.batch_norm(conv, decay=decay, scale=scale, epsilon=epsilon, is_training=self.phase_train, fused=True, data_format=self.data_format, scope=scope)
else:
bn = self._batch_norm_without_layers(conv, decay, scale, epsilon)
biased = bn
</DeepExtract>
if activation == 'relu':
conv1 = tf.nn.relu(biased)
elif activation == 'linear' or activation is None:
conv1 = biased
elif activation == 'tanh':
conv1 = tf.nn.tanh(biased)
else:
raise KeyError("Invalid activation type '%s'" % activation)
if for_shadow:
return conv1
if self.cur_layer_idx in self.bds_params.target_layers:
filter_pos = 3 if self.data_format == 'NHWC' else 1
base_ph = tf.placeholder(dtype=tf.float32, shape=conv1.get_shape()[filter_pos])
self.base_masks[self.cur_layer_idx] = base_ph
ds_ph = tf.placeholder(dtype=tf.float32, shape=(conv1.get_shape()[filter_pos],))
self.ds_masks[self.cur_layer_idx] = ds_ph
print('the base mask named {} shape {}'.format(base_ph.name, base_ph.get_shape()))
conv1_shape = [v.value for v in conv1.get_shape()]
if self.data_format == 'NHWC':
print('tile shape:', [conv1_shape[0], conv1_shape[1], conv1_shape[2], 1])
expanded_base = tf.reshape(base_ph, (1, 1, 1, conv1_shape[3]))
tiled_base_ph = tf.manip.tile(expanded_base, [conv1_shape[0], conv1_shape[1], conv1_shape[2], 1], name='base_mask_layer{}'.format(self.cur_layer_idx))
expanded_ds = tf.reshape(ds_ph, (1, 1, 1, conv1_shape[3]))
tiled_ds_ph = tf.manip.tile(expanded_ds, [conv1_shape[0], conv1_shape[1], conv1_shape[2], 1], name='ds_mask_layer{}'.format(self.cur_layer_idx))
else:
assert self.data_format == 'NCHW'
print('tile shape:', [conv1_shape[0], 1, conv1_shape[2], conv1_shape[3]])
expanded_base = tf.reshape(base_ph, (1, conv1_shape[1], 1, 1))
tiled_base_ph = tf.manip.tile(expanded_base, [conv1_shape[0], 1, conv1_shape[2], conv1_shape[3]], name='base_mask_layer{}'.format(self.cur_layer_idx))
expanded_ds = tf.reshape(ds_ph, (1, conv1_shape[1], 1, 1))
tiled_ds_ph = tf.manip.tile(expanded_ds, [conv1_shape[0], 1, conv1_shape[2], conv1_shape[3]], name='ds_mask_layer{}'.format(self.cur_layer_idx))
self.dsed[self.cur_layer_idx] = conv1 * tiled_ds_ph
self.based[self.cur_layer_idx] = conv1 * tiled_base_ph
self.top_layer = self.based[self.cur_layer_idx]
else:
self.top_layer = conv1
self.normal_output[self.cur_layer_idx] = conv1
self.top_size = num_out_channels
if self.cur_layer_idx > 0 and self.cur_layer_idx - 1 in self.bds_params.target_layers:
<DeepExtract>
if self.dsed[self.cur_layer_idx - 1] is None:
self.dsed[self.cur_layer_idx - 1] = self.top_layer
if num_channels_in is None:
num_channels_in = self.top_size
if stddev is not None and kernel_initializer is None:
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
if not True:
name = 'conv' + str(self.counts['conv'])
self.counts['conv'] += 1
else:
name = 'conv' + str(self.counts['conv'] - 1)
with tf.variable_scope(name, reuse=True):
strides = [1, d_height, d_width, 1]
if self.data_format == 'NCHW':
strides = [strides[0], strides[3], strides[1], strides[2]]
if mode != 'SAME_RESNET':
conv = self._conv2d_impl(self.dsed[self.cur_layer_idx - 1], num_channels_in, num_out_channels, kernel_size=[k_height, k_width], strides=[d_height, d_width], padding=mode, kernel_initializer=kernel_initializer, specify_padding=specify_padding)
elif d_height == 1 and d_width == 1:
conv = self._conv2d_impl(self.dsed[self.cur_layer_idx - 1], num_channels_in, num_out_channels, kernel_size=[k_height, k_width], strides=[d_height, d_width], padding='SAME', kernel_initializer=kernel_initializer)
else:
rate = 1
kernel_height_effective = k_height + (k_height - 1) * (rate - 1)
pad_h_beg = (kernel_height_effective - 1) // 2
pad_h_end = kernel_height_effective - 1 - pad_h_beg
kernel_width_effective = k_width + (k_width - 1) * (rate - 1)
pad_w_beg = (kernel_width_effective - 1) // 2
pad_w_end = kernel_width_effective - 1 - pad_w_beg
padding = [[0, 0], [pad_h_beg, pad_h_end], [pad_w_beg, pad_w_end], [0, 0]]
if self.data_format == 'NCHW':
padding = [padding[0], padding[3], padding[1], padding[2]]
self.dsed[self.cur_layer_idx - 1] = tf.pad(self.dsed[self.cur_layer_idx - 1], padding)
conv = self._conv2d_impl(self.dsed[self.cur_layer_idx - 1], num_channels_in, num_out_channels, kernel_size=[k_height, k_width], strides=[d_height, d_width], padding='VALID', kernel_initializer=kernel_initializer)
if use_batch_norm is None:
use_batch_norm = self.use_batch_norm
if not use_batch_norm:
if bias is not None:
biases = self.get_variable('biases', [num_out_channels], self.variable_dtype, self.dtype, initializer=tf.constant_initializer(bias))
biased = tf.reshape(tf.nn.bias_add(conv, biases, data_format=self.data_format), conv.get_shape())
else:
biased = conv
else:
biased = self.batch_norm(**self.batch_norm_config, input_layer=conv, for_shadow=True)
if activation == 'relu':
conv1 = tf.nn.relu(biased)
elif activation == 'linear' or activation is None:
conv1 = biased
elif activation == 'tanh':
conv1 = tf.nn.tanh(biased)
else:
raise KeyError("Invalid activation type '%s'" % activation)
if True:
self.shadow_output[self.cur_layer_idx] = conv1
if self.cur_layer_idx in self.bds_params.target_layers:
filter_pos = 3 if self.data_format == 'NHWC' else 1
base_ph = tf.placeholder(dtype=tf.float32, shape=conv1.get_shape()[filter_pos])
self.base_masks[self.cur_layer_idx] = base_ph
ds_ph = tf.placeholder(dtype=tf.float32, shape=(conv1.get_shape()[filter_pos],))
self.ds_masks[self.cur_layer_idx] = ds_ph
print('the base mask named {} shape {}'.format(base_ph.name, base_ph.get_shape()))
conv1_shape = [v.value for v in conv1.get_shape()]
if self.data_format == 'NHWC':
print('tile shape:', [conv1_shape[0], conv1_shape[1], conv1_shape[2], 1])
expanded_base = tf.reshape(base_ph, (1, 1, 1, conv1_shape[3]))
tiled_base_ph = tf.manip.tile(expanded_base, [conv1_shape[0], conv1_shape[1], conv1_shape[2], 1], name='base_mask_layer{}'.format(self.cur_layer_idx))
expanded_ds = tf.reshape(ds_ph, (1, 1, 1, conv1_shape[3]))
tiled_ds_ph = tf.manip.tile(expanded_ds, [conv1_shape[0], conv1_shape[1], conv1_shape[2], 1], name='ds_mask_layer{}'.format(self.cur_layer_idx))
else:
assert self.data_format == 'NCHW'
print('tile shape:', [conv1_shape[0], 1, conv1_shape[2], conv1_shape[3]])
expanded_base = tf.reshape(base_ph, (1, conv1_shape[1], 1, 1))
tiled_base_ph = tf.manip.tile(expanded_base, [conv1_shape[0], 1, conv1_shape[2], conv1_shape[3]], name='base_mask_layer{}'.format(self.cur_layer_idx))
expanded_ds = tf.reshape(ds_ph, (1, conv1_shape[1], 1, 1))
tiled_ds_ph = tf.manip.tile(expanded_ds, [conv1_shape[0], 1, conv1_shape[2], conv1_shape[3]], name='ds_mask_layer{}'.format(self.cur_layer_idx))
self.dsed[self.cur_layer_idx] = conv1 * tiled_ds_ph
self.based[self.cur_layer_idx] = conv1 * tiled_base_ph
self.top_layer = self.based[self.cur_layer_idx]
else:
self.top_layer = conv1
self.normal_output[self.cur_layer_idx] = conv1
self.top_size = num_out_channels
if self.cur_layer_idx > 0 and self.cur_layer_idx - 1 in self.bds_params.target_layers:
self.shadow_output[self.cur_layer_idx] = self.conv(num_out_channels=num_out_channels, k_height=k_height, k_width=k_width, d_height=d_height, d_width=d_width, mode=mode, input_layer=self.dsed[self.cur_layer_idx - 1], num_channels_in=num_channels_in, use_batch_norm=use_batch_norm, stddev=stddev, activation=activation, bias=bias, kernel_initializer=kernel_initializer, for_shadow=True, specify_padding=specify_padding)
self.cur_layer_idx += 1
self.shadow_output[self.cur_layer_idx] = conv1
</DeepExtract>
self.cur_layer_idx += 1
return conv1
|
def conv(self, num_out_channels, k_height, k_width, d_height=1, d_width=1, mode='SAME', input_layer=None, num_channels_in=None, use_batch_norm=None, stddev=None, activation='relu', bias=0.0, kernel_initializer=None, for_shadow=False, specify_padding=None):
"""Construct a conv2d layer on top of cnn."""
if input_layer is None:
input_layer = self.top_layer
if num_channels_in is None:
num_channels_in = self.top_size
if stddev is not None and kernel_initializer is None:
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
if not for_shadow:
name = 'conv' + str(self.counts['conv'])
self.counts['conv'] += 1
else:
name = 'conv' + str(self.counts['conv'] - 1)
with tf.variable_scope(name, reuse=for_shadow):
strides = [1, d_height, d_width, 1]
if self.data_format == 'NCHW':
strides = [strides[0], strides[3], strides[1], strides[2]]
if mode != 'SAME_RESNET':
conv = self._conv2d_impl(input_layer, num_channels_in, num_out_channels, kernel_size=[k_height, k_width], strides=[d_height, d_width], padding=mode, kernel_initializer=kernel_initializer, specify_padding=specify_padding)
elif d_height == 1 and d_width == 1:
conv = self._conv2d_impl(input_layer, num_channels_in, num_out_channels, kernel_size=[k_height, k_width], strides=[d_height, d_width], padding='SAME', kernel_initializer=kernel_initializer)
else:
rate = 1
kernel_height_effective = k_height + (k_height - 1) * (rate - 1)
pad_h_beg = (kernel_height_effective - 1) // 2
pad_h_end = kernel_height_effective - 1 - pad_h_beg
kernel_width_effective = k_width + (k_width - 1) * (rate - 1)
pad_w_beg = (kernel_width_effective - 1) // 2
pad_w_end = kernel_width_effective - 1 - pad_w_beg
padding = [[0, 0], [pad_h_beg, pad_h_end], [pad_w_beg, pad_w_end], [0, 0]]
if self.data_format == 'NCHW':
padding = [padding[0], padding[3], padding[1], padding[2]]
input_layer = tf.pad(input_layer, padding)
conv = self._conv2d_impl(input_layer, num_channels_in, num_out_channels, kernel_size=[k_height, k_width], strides=[d_height, d_width], padding='VALID', kernel_initializer=kernel_initializer)
if use_batch_norm is None:
use_batch_norm = self.use_batch_norm
if not use_batch_norm:
if bias is not None:
biases = self.get_variable('biases', [num_out_channels], self.variable_dtype, self.dtype, initializer=tf.constant_initializer(bias))
biased = tf.reshape(tf.nn.bias_add(conv, biases, data_format=self.data_format), conv.get_shape())
else:
biased = conv
else:
assert conv is not None
if for_shadow:
name = 'batchnorm' + str(self.counts['batchnorm'] - 1)
else:
name = 'batchnorm' + str(self.counts['batchnorm'])
self.counts['batchnorm'] += 1
with tf.variable_scope(name, reuse=for_shadow) as scope:
if self.use_tf_layers:
bn = tf.contrib.layers.batch_norm(conv, decay=decay, scale=scale, epsilon=epsilon, is_training=self.phase_train, fused=True, data_format=self.data_format, scope=scope)
else:
bn = self._batch_norm_without_layers(conv, decay, scale, epsilon)
biased = bn
if activation == 'relu':
conv1 = tf.nn.relu(biased)
elif activation == 'linear' or activation is None:
conv1 = biased
elif activation == 'tanh':
conv1 = tf.nn.tanh(biased)
else:
raise KeyError("Invalid activation type '%s'" % activation)
if for_shadow:
return conv1
if self.cur_layer_idx in self.bds_params.target_layers:
filter_pos = 3 if self.data_format == 'NHWC' else 1
base_ph = tf.placeholder(dtype=tf.float32, shape=conv1.get_shape()[filter_pos])
self.base_masks[self.cur_layer_idx] = base_ph
ds_ph = tf.placeholder(dtype=tf.float32, shape=(conv1.get_shape()[filter_pos],))
self.ds_masks[self.cur_layer_idx] = ds_ph
print('the base mask named {} shape {}'.format(base_ph.name, base_ph.get_shape()))
conv1_shape = [v.value for v in conv1.get_shape()]
if self.data_format == 'NHWC':
print('tile shape:', [conv1_shape[0], conv1_shape[1], conv1_shape[2], 1])
expanded_base = tf.reshape(base_ph, (1, 1, 1, conv1_shape[3]))
tiled_base_ph = tf.manip.tile(expanded_base, [conv1_shape[0], conv1_shape[1], conv1_shape[2], 1], name='base_mask_layer{}'.format(self.cur_layer_idx))
expanded_ds = tf.reshape(ds_ph, (1, 1, 1, conv1_shape[3]))
tiled_ds_ph = tf.manip.tile(expanded_ds, [conv1_shape[0], conv1_shape[1], conv1_shape[2], 1], name='ds_mask_layer{}'.format(self.cur_layer_idx))
else:
assert self.data_format == 'NCHW'
print('tile shape:', [conv1_shape[0], 1, conv1_shape[2], conv1_shape[3]])
expanded_base = tf.reshape(base_ph, (1, conv1_shape[1], 1, 1))
tiled_base_ph = tf.manip.tile(expanded_base, [conv1_shape[0], 1, conv1_shape[2], conv1_shape[3]], name='base_mask_layer{}'.format(self.cur_layer_idx))
expanded_ds = tf.reshape(ds_ph, (1, conv1_shape[1], 1, 1))
tiled_ds_ph = tf.manip.tile(expanded_ds, [conv1_shape[0], 1, conv1_shape[2], conv1_shape[3]], name='ds_mask_layer{}'.format(self.cur_layer_idx))
self.dsed[self.cur_layer_idx] = conv1 * tiled_ds_ph
self.based[self.cur_layer_idx] = conv1 * tiled_base_ph
self.top_layer = self.based[self.cur_layer_idx]
else:
self.top_layer = conv1
self.normal_output[self.cur_layer_idx] = conv1
self.top_size = num_out_channels
if self.cur_layer_idx > 0 and self.cur_layer_idx - 1 in self.bds_params.target_layers:
if self.dsed[self.cur_layer_idx - 1] is None:
self.dsed[self.cur_layer_idx - 1] = self.top_layer
if num_channels_in is None:
num_channels_in = self.top_size
if stddev is not None and kernel_initializer is None:
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
if not True:
name = 'conv' + str(self.counts['conv'])
self.counts['conv'] += 1
else:
name = 'conv' + str(self.counts['conv'] - 1)
with tf.variable_scope(name, reuse=True):
strides = [1, d_height, d_width, 1]
if self.data_format == 'NCHW':
strides = [strides[0], strides[3], strides[1], strides[2]]
if mode != 'SAME_RESNET':
conv = self._conv2d_impl(self.dsed[self.cur_layer_idx - 1], num_channels_in, num_out_channels, kernel_size=[k_height, k_width], strides=[d_height, d_width], padding=mode, kernel_initializer=kernel_initializer, specify_padding=specify_padding)
elif d_height == 1 and d_width == 1:
conv = self._conv2d_impl(self.dsed[self.cur_layer_idx - 1], num_channels_in, num_out_channels, kernel_size=[k_height, k_width], strides=[d_height, d_width], padding='SAME', kernel_initializer=kernel_initializer)
else:
rate = 1
kernel_height_effective = k_height + (k_height - 1) * (rate - 1)
pad_h_beg = (kernel_height_effective - 1) // 2
pad_h_end = kernel_height_effective - 1 - pad_h_beg
kernel_width_effective = k_width + (k_width - 1) * (rate - 1)
pad_w_beg = (kernel_width_effective - 1) // 2
pad_w_end = kernel_width_effective - 1 - pad_w_beg
padding = [[0, 0], [pad_h_beg, pad_h_end], [pad_w_beg, pad_w_end], [0, 0]]
if self.data_format == 'NCHW':
padding = [padding[0], padding[3], padding[1], padding[2]]
self.dsed[self.cur_layer_idx - 1] = tf.pad(self.dsed[self.cur_layer_idx - 1], padding)
conv = self._conv2d_impl(self.dsed[self.cur_layer_idx - 1], num_channels_in, num_out_channels, kernel_size=[k_height, k_width], strides=[d_height, d_width], padding='VALID', kernel_initializer=kernel_initializer)
if use_batch_norm is None:
use_batch_norm = self.use_batch_norm
if not use_batch_norm:
if bias is not None:
biases = self.get_variable('biases', [num_out_channels], self.variable_dtype, self.dtype, initializer=tf.constant_initializer(bias))
biased = tf.reshape(tf.nn.bias_add(conv, biases, data_format=self.data_format), conv.get_shape())
else:
biased = conv
else:
biased = self.batch_norm(**self.batch_norm_config, input_layer=conv, for_shadow=True)
if activation == 'relu':
conv1 = tf.nn.relu(biased)
elif activation == 'linear' or activation is None:
conv1 = biased
elif activation == 'tanh':
conv1 = tf.nn.tanh(biased)
else:
raise KeyError("Invalid activation type '%s'" % activation)
if True:
self.shadow_output[self.cur_layer_idx] = conv1
if self.cur_layer_idx in self.bds_params.target_layers:
filter_pos = 3 if self.data_format == 'NHWC' else 1
base_ph = tf.placeholder(dtype=tf.float32, shape=conv1.get_shape()[filter_pos])
self.base_masks[self.cur_layer_idx] = base_ph
ds_ph = tf.placeholder(dtype=tf.float32, shape=(conv1.get_shape()[filter_pos],))
self.ds_masks[self.cur_layer_idx] = ds_ph
print('the base mask named {} shape {}'.format(base_ph.name, base_ph.get_shape()))
conv1_shape = [v.value for v in conv1.get_shape()]
if self.data_format == 'NHWC':
print('tile shape:', [conv1_shape[0], conv1_shape[1], conv1_shape[2], 1])
expanded_base = tf.reshape(base_ph, (1, 1, 1, conv1_shape[3]))
tiled_base_ph = tf.manip.tile(expanded_base, [conv1_shape[0], conv1_shape[1], conv1_shape[2], 1], name='base_mask_layer{}'.format(self.cur_layer_idx))
expanded_ds = tf.reshape(ds_ph, (1, 1, 1, conv1_shape[3]))
tiled_ds_ph = tf.manip.tile(expanded_ds, [conv1_shape[0], conv1_shape[1], conv1_shape[2], 1], name='ds_mask_layer{}'.format(self.cur_layer_idx))
else:
assert self.data_format == 'NCHW'
print('tile shape:', [conv1_shape[0], 1, conv1_shape[2], conv1_shape[3]])
expanded_base = tf.reshape(base_ph, (1, conv1_shape[1], 1, 1))
tiled_base_ph = tf.manip.tile(expanded_base, [conv1_shape[0], 1, conv1_shape[2], conv1_shape[3]], name='base_mask_layer{}'.format(self.cur_layer_idx))
expanded_ds = tf.reshape(ds_ph, (1, conv1_shape[1], 1, 1))
tiled_ds_ph = tf.manip.tile(expanded_ds, [conv1_shape[0], 1, conv1_shape[2], conv1_shape[3]], name='ds_mask_layer{}'.format(self.cur_layer_idx))
self.dsed[self.cur_layer_idx] = conv1 * tiled_ds_ph
self.based[self.cur_layer_idx] = conv1 * tiled_base_ph
self.top_layer = self.based[self.cur_layer_idx]
else:
self.top_layer = conv1
self.normal_output[self.cur_layer_idx] = conv1
self.top_size = num_out_channels
if self.cur_layer_idx > 0 and self.cur_layer_idx - 1 in self.bds_params.target_layers:
self.shadow_output[self.cur_layer_idx] = self.conv(num_out_channels=num_out_channels, k_height=k_height, k_width=k_width, d_height=d_height, d_width=d_width, mode=mode, input_layer=self.dsed[self.cur_layer_idx - 1], num_channels_in=num_channels_in, use_batch_norm=use_batch_norm, stddev=stddev, activation=activation, bias=bias, kernel_initializer=kernel_initializer, for_shadow=True, specify_padding=specify_padding)
self.cur_layer_idx += 1
self.shadow_output[self.cur_layer_idx] = conv1
self.cur_layer_idx += 1
return conv1
|
AOFP
|
positive
|
def _remap_feed(self, feed, feed_val=None):
"""
Remap the feeds to the right element in the transformed graph.
For example, there are N copies of a placeholder for N replicas
and we have to feed all of them with tensors.
Args:
feed: feed graph element or name
feed_val: feed value
Returns:
List of (new_feed, new_feed_value) pairs
"""
feed_name = feed if isinstance(feed, str) else feed.name
try:
transformed_feeds = [self._graph_item.graph.as_graph_element(feed_name)]
except KeyError:
transformed_feeds = [self._graph_item.graph.as_graph_element(ops.prepend_name_scope(feed_name, replica_prefix(i))) for i in range(self._graph_transformer.num_local_replicas)]
num_replicated_feeds = self._graph_transformer.num_local_replicas
feed = feed if not isinstance(feed, str) else transformed_feeds[0]
def expand_feed_val(feed_val, feed=feed):
"""Given a original feed or replicated feed, expand the feed value."""
<DeepExtract>
if isinstance(feed, ops.Tensor) and self._graph_transformer.num_local_replicas > 1 and bool(feed.shape) and (not feed.shape.is_fully_defined()):
polymorphic_dim = feed.shape.as_list().index(None)
polymorphic_dim = None
</DeepExtract>
if polymorphic_dim:
feed_vals = np.array_split(np.asarray(feed_val), num_replicated_feeds, axis=polymorphic_dim)
else:
feed_vals = [feed_val for _ in range(num_replicated_feeds)]
return feed_vals
if feed_val is not None:
<DeepExtract>
polymorphic_dim = self._polymorphic_dim(feed)
if polymorphic_dim:
feed_vals = np.array_split(np.asarray(feed_val), num_replicated_feeds, axis=polymorphic_dim)
else:
feed_vals = [feed_val for _ in range(num_replicated_feeds)]
feed_vals = feed_vals
</DeepExtract>
transformed_feeds = list(zip(transformed_feeds, feed_vals))
return (transformed_feeds, expand_feed_val)
|
def _remap_feed(self, feed, feed_val=None):
"""
Remap the feeds to the right element in the transformed graph.
For example, there are N copies of a placeholder for N replicas
and we have to feed all of them with tensors.
Args:
feed: feed graph element or name
feed_val: feed value
Returns:
List of (new_feed, new_feed_value) pairs
"""
feed_name = feed if isinstance(feed, str) else feed.name
try:
transformed_feeds = [self._graph_item.graph.as_graph_element(feed_name)]
except KeyError:
transformed_feeds = [self._graph_item.graph.as_graph_element(ops.prepend_name_scope(feed_name, replica_prefix(i))) for i in range(self._graph_transformer.num_local_replicas)]
num_replicated_feeds = self._graph_transformer.num_local_replicas
feed = feed if not isinstance(feed, str) else transformed_feeds[0]
def expand_feed_val(feed_val, feed=feed):
"""Given a original feed or replicated feed, expand the feed value."""
if isinstance(feed, ops.Tensor) and self._graph_transformer.num_local_replicas > 1 and bool(feed.shape) and (not feed.shape.is_fully_defined()):
polymorphic_dim = feed.shape.as_list().index(None)
polymorphic_dim = None
if polymorphic_dim:
feed_vals = np.array_split(np.asarray(feed_val), num_replicated_feeds, axis=polymorphic_dim)
else:
feed_vals = [feed_val for _ in range(num_replicated_feeds)]
return feed_vals
if feed_val is not None:
polymorphic_dim = self._polymorphic_dim(feed)
if polymorphic_dim:
feed_vals = np.array_split(np.asarray(feed_val), num_replicated_feeds, axis=polymorphic_dim)
else:
feed_vals = [feed_val for _ in range(num_replicated_feeds)]
feed_vals = feed_vals
transformed_feeds = list(zip(transformed_feeds, feed_vals))
return (transformed_feeds, expand_feed_val)
|
autodist
|
positive
|
def cmd_prev_marked(self):
start = self.callbacks['get_var']('selected')
if start:
cur = start.prev_story
else:
start = self.last_story
cur = start
if not cur:
return
curpos = cur.curpos
while not cur or not cur.marked:
if cur == None:
cur = self.last_story
curpos = self.last_story.curpos
else:
<DeepExtract>
ps = cur.prev_sel
o = cur
lines = 0
if not ps:
(cur, lines) = (None, lines)
while o and o != ps:
o = o.prev_obj
lines += o.lines(self.width)
(cur, lines) = (ps, lines)
</DeepExtract>
curpos -= lines
if cur == start:
self.callbacks['set_var']('info_msg', 'No marked items.')
break
<DeepExtract>
sel = self.callbacks['get_var']('selected')
if sel:
sel.unselect()
self.callbacks['set_var']('selected', cur)
if cur:
conf = self.callbacks['get_conf']()
curstyle = conf['taglist']['cursor']
if curstyle['type'] == 'bottom':
curpos = 0
elif curstyle['type'] == 'middle':
curpos = int((self.height - 1) / 2)
elif curstyle['type'] == 'top':
curpos = self.height - 1
tag = self.tag_by_obj(cur)
wl_top = max(curstyle['edge'], tag.lines(self.width))
wl_bottom = self.height - 1 - max(curstyle['edge'], cur.lines(self.width))
if curpos > wl_bottom:
if curstyle['scroll'] == 'scroll':
curpos = wl_bottom
elif curstyle['scroll'] == 'page':
curpos = wl_top
elif curpos < wl_top:
if curstyle['scroll'] == 'scroll':
curpos = wl_top
elif curstyle['scroll'] == 'page':
curpos = wl_bottom
self.callbacks['set_var']('target_obj', cur)
self.callbacks['set_var']('target_offset', curpos)
cur.select()
else:
self.callbacks['set_var']('target_obj', self.first_sel)
if self.first_sel:
self.callbacks['set_var']('target_offset', self.first_sel.curpos)
</DeepExtract>
|
def cmd_prev_marked(self):
start = self.callbacks['get_var']('selected')
if start:
cur = start.prev_story
else:
start = self.last_story
cur = start
if not cur:
return
curpos = cur.curpos
while not cur or not cur.marked:
if cur == None:
cur = self.last_story
curpos = self.last_story.curpos
else:
ps = cur.prev_sel
o = cur
lines = 0
if not ps:
(cur, lines) = (None, lines)
while o and o != ps:
o = o.prev_obj
lines += o.lines(self.width)
(cur, lines) = (ps, lines)
curpos -= lines
if cur == start:
self.callbacks['set_var']('info_msg', 'No marked items.')
break
sel = self.callbacks['get_var']('selected')
if sel:
sel.unselect()
self.callbacks['set_var']('selected', cur)
if cur:
conf = self.callbacks['get_conf']()
curstyle = conf['taglist']['cursor']
if curstyle['type'] == 'bottom':
curpos = 0
elif curstyle['type'] == 'middle':
curpos = int((self.height - 1) / 2)
elif curstyle['type'] == 'top':
curpos = self.height - 1
tag = self.tag_by_obj(cur)
wl_top = max(curstyle['edge'], tag.lines(self.width))
wl_bottom = self.height - 1 - max(curstyle['edge'], cur.lines(self.width))
if curpos > wl_bottom:
if curstyle['scroll'] == 'scroll':
curpos = wl_bottom
elif curstyle['scroll'] == 'page':
curpos = wl_top
elif curpos < wl_top:
if curstyle['scroll'] == 'scroll':
curpos = wl_top
elif curstyle['scroll'] == 'page':
curpos = wl_bottom
self.callbacks['set_var']('target_obj', cur)
self.callbacks['set_var']('target_offset', curpos)
cur.select()
else:
self.callbacks['set_var']('target_obj', self.first_sel)
if self.first_sel:
self.callbacks['set_var']('target_offset', self.first_sel.curpos)
|
canto-curses
|
positive
|
def try_deploy(self, deploy_path):
LOG.info('Configuring settings ...')
self.kill_server()
self.clear_database()
<DeepExtract>
if HTTP_PROXY != '':
with open(os.path.join(self.setting_path, 'wrapper', 'grails-wrapper.properties'), 'a') as my_file:
my_file.write('\n')
(proxy_host, proxy_port) = HTTP_PROXY.split(':')
my_file.write('systemProp.http.proxyHost={}\n'.format(proxy_host))
my_file.write('systemProp.http.proxyPort={}\n'.format(proxy_port))
my_file.write('systemProp.http.nonProxyHosts=localhost,127.0.0.1')
adapter = {'MySQL': 'mysql'}[self.database.name]
jdbc_class = {'MySQL': 'com.mysql.jdbc.Driver'}[self.database.name]
dependency = {'MySQL': "runtime 'mysql:mysql-connector-java:5.1.16'"}[self.database.name]
with open(os.path.join(self.setting_path, 'grails-app', 'conf', 'DataSource.groovy'), 'w') as my_file:
my_file.write(DATABASE_SETTINGS.format(name=self.database_config['name'], username=self.database_config['username'], password=self.database_config['password'], host=self.database_config['host'], adapter=adapter, jdbc_class=jdbc_class))
with open(os.path.join(self.setting_path, 'grails-app', 'conf', 'BuildConfig.groovy'), 'r') as my_file:
build_config = my_file.read()
with open(os.path.join(self.setting_path, 'grails-app', 'conf', 'BuildConfig.groovy'), 'w') as my_file:
def add_repositories(matched):
s = matched.group(0)
s = s[:-1] + REPOSITORIES_SETTINGS + '}'
return s
build_config = re.sub('repositories\\s*?{.*?}', add_repositories, build_config, flags=re.S)
def add_dependency(matched):
s = matched.group(0)
s = s[:-1] + dependency + '}'
return s
build_config = re.sub('dependencies\\s*?{.*?}', add_dependency, build_config, flags=re.S)
my_file.write(build_config)
</DeepExtract>
<DeepExtract>
self.runtime = {'executable': 'java', 'version': '1.7'}
</DeepExtract>
LOG.info(self.runtime)
self.attempt.database = self.get_database()
LOG.info('Database: ' + self.attempt.database.name)
LOG.info('Installing requirements ...')
<DeepExtract>
if deploy_path:
command = '{} && export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 && chmod 777 grailsw && ./grailsw compile'.format(utils.cd(deploy_path))
out = utils.run_command(command)
if out[1] == '':
out = out[2]
else:
out = out[1]
out = ''
</DeepExtract>
LOG.info(out)
<DeepExtract>
self.configure_network()
LOG.info('Running server ...')
command = '{} && export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 && chmod 777 grailsw && ./grailsw -Dserver.port={} run-app > grails_output'.format(utils.cd(deploy_path), self.port)
return utils.run_command_async(command)
</DeepExtract>
retry_times = 0
while retry_times < 5:
time.sleep(30)
attemptStatus = self.check_server()
if attemptStatus == ATTEMPT_STATUS_SUCCESS:
break
retry_times += 1
return attemptStatus
|
def try_deploy(self, deploy_path):
LOG.info('Configuring settings ...')
self.kill_server()
self.clear_database()
if HTTP_PROXY != '':
with open(os.path.join(self.setting_path, 'wrapper', 'grails-wrapper.properties'), 'a') as my_file:
my_file.write('\n')
(proxy_host, proxy_port) = HTTP_PROXY.split(':')
my_file.write('systemProp.http.proxyHost={}\n'.format(proxy_host))
my_file.write('systemProp.http.proxyPort={}\n'.format(proxy_port))
my_file.write('systemProp.http.nonProxyHosts=localhost,127.0.0.1')
adapter = {'MySQL': 'mysql'}[self.database.name]
jdbc_class = {'MySQL': 'com.mysql.jdbc.Driver'}[self.database.name]
dependency = {'MySQL': "runtime 'mysql:mysql-connector-java:5.1.16'"}[self.database.name]
with open(os.path.join(self.setting_path, 'grails-app', 'conf', 'DataSource.groovy'), 'w') as my_file:
my_file.write(DATABASE_SETTINGS.format(name=self.database_config['name'], username=self.database_config['username'], password=self.database_config['password'], host=self.database_config['host'], adapter=adapter, jdbc_class=jdbc_class))
with open(os.path.join(self.setting_path, 'grails-app', 'conf', 'BuildConfig.groovy'), 'r') as my_file:
build_config = my_file.read()
with open(os.path.join(self.setting_path, 'grails-app', 'conf', 'BuildConfig.groovy'), 'w') as my_file:
def add_repositories(matched):
s = matched.group(0)
s = s[:-1] + REPOSITORIES_SETTINGS + '}'
return s
build_config = re.sub('repositories\\s*?{.*?}', add_repositories, build_config, flags=re.S)
def add_dependency(matched):
s = matched.group(0)
s = s[:-1] + dependency + '}'
return s
build_config = re.sub('dependencies\\s*?{.*?}', add_dependency, build_config, flags=re.S)
my_file.write(build_config)
self.runtime = {'executable': 'java', 'version': '1.7'}
LOG.info(self.runtime)
self.attempt.database = self.get_database()
LOG.info('Database: ' + self.attempt.database.name)
LOG.info('Installing requirements ...')
if deploy_path:
command = '{} && export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 && chmod 777 grailsw && ./grailsw compile'.format(utils.cd(deploy_path))
out = utils.run_command(command)
if out[1] == '':
out = out[2]
else:
out = out[1]
out = ''
LOG.info(out)
self.configure_network()
LOG.info('Running server ...')
command = '{} && export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 && chmod 777 grailsw && ./grailsw -Dserver.port={} run-app > grails_output'.format(utils.cd(deploy_path), self.port)
return utils.run_command_async(command)
retry_times = 0
while retry_times < 5:
time.sleep(30)
attemptStatus = self.check_server()
if attemptStatus == ATTEMPT_STATUS_SUCCESS:
break
retry_times += 1
return attemptStatus
|
cmdbac
|
positive
|
def generate_from_loaded_data(model_output: pyseir.run.PyseirOutputDatasets, output: pathlib.Path, selected_dataset: MultiRegionDataset, log):
"""Runs the API generation code using data in parameters, writing results to output."""
log.info('Running test positivity.')
regions_data = test_positivity.run_and_maybe_join_columns(selected_dataset, log)
regions_data = vaccine_backfills.derive_vaccine_pct(regions_data)
log.info(f'Joining inputs by region.')
rt_data_map = dict(model_output.infection_rate.iter_one_regions())
regional_inputs = [RegionalInput.from_one_regions(region, regional_data, rt_data=rt_data_map.get(region)) for (region, regional_data) in regions_data.iter_one_regions()]
log.info('Generating all API Timeseries')
<DeepExtract>
results = parallel_utils.parallel_map(build_timeseries_for_region, regional_inputs)
all_timeseries = [result for result in results if result]
if sort_func:
all_timeseries.sort(key=sort_func)
if limit:
all_timeseries = all_timeseries[:limit]
all_timeseries = all_timeseries
</DeepExtract>
<DeepExtract>
path_builder = APIOutputPathBuilder(output, AggregationLevel.COUNTY)
path_builder.make_directories()
all_timeseries = [output for output in all_timeseries if output.level is AggregationLevel.COUNTY]
all_summaries = [output.region_summary for output in all_timeseries]
if not all_timeseries:
logger.warning(f'No regions detected - skipping.', aggregate_level=AggregationLevel.COUNTY.value)
return
logger.info(f'Deploying {AggregationLevel.COUNTY.value} output to {output}')
for summary in all_summaries:
output_path = path_builder.single_summary(summary, FileType.JSON)
deploy_json_api_output(summary, output_path)
for timeseries in all_timeseries:
output_path = path_builder.single_timeseries(timeseries, FileType.JSON)
deploy_json_api_output(timeseries, output_path)
if AggregationLevel.COUNTY in [AggregationLevel.STATE, AggregationLevel.COUNTRY]:
output_path = path_builder.single_timeseries(timeseries, FileType.CSV)
bulk_timeseries = AggregateRegionSummaryWithTimeseries(__root__=[timeseries])
flattened_timeseries = build_api_v2.build_bulk_flattened_timeseries(bulk_timeseries)
deploy_csv_api_output(flattened_timeseries, output_path, csv_column_ordering.TIMESERIES_ORDER)
deploy_bulk_files(path_builder, all_timeseries, all_summaries)
if AggregationLevel.COUNTY is AggregationLevel.COUNTY:
for state in set((record.state for record in all_summaries)):
state_timeseries = [record for record in all_timeseries if record.state == state]
state_summaries = [record for record in all_summaries if record.state == state]
deploy_bulk_files(path_builder, state_timeseries, state_summaries, state=state)
</DeepExtract>
<DeepExtract>
path_builder = APIOutputPathBuilder(output, AggregationLevel.STATE)
path_builder.make_directories()
all_timeseries = [output for output in all_timeseries if output.level is AggregationLevel.STATE]
all_summaries = [output.region_summary for output in all_timeseries]
if not all_timeseries:
logger.warning(f'No regions detected - skipping.', aggregate_level=AggregationLevel.STATE.value)
return
logger.info(f'Deploying {AggregationLevel.STATE.value} output to {output}')
for summary in all_summaries:
output_path = path_builder.single_summary(summary, FileType.JSON)
deploy_json_api_output(summary, output_path)
for timeseries in all_timeseries:
output_path = path_builder.single_timeseries(timeseries, FileType.JSON)
deploy_json_api_output(timeseries, output_path)
if AggregationLevel.STATE in [AggregationLevel.STATE, AggregationLevel.COUNTRY]:
output_path = path_builder.single_timeseries(timeseries, FileType.CSV)
bulk_timeseries = AggregateRegionSummaryWithTimeseries(__root__=[timeseries])
flattened_timeseries = build_api_v2.build_bulk_flattened_timeseries(bulk_timeseries)
deploy_csv_api_output(flattened_timeseries, output_path, csv_column_ordering.TIMESERIES_ORDER)
deploy_bulk_files(path_builder, all_timeseries, all_summaries)
if AggregationLevel.STATE is AggregationLevel.COUNTY:
for state in set((record.state for record in all_summaries)):
state_timeseries = [record for record in all_timeseries if record.state == state]
state_summaries = [record for record in all_summaries if record.state == state]
deploy_bulk_files(path_builder, state_timeseries, state_summaries, state=state)
</DeepExtract>
<DeepExtract>
path_builder = APIOutputPathBuilder(output, AggregationLevel.CBSA)
path_builder.make_directories()
all_timeseries = [output for output in all_timeseries if output.level is AggregationLevel.CBSA]
all_summaries = [output.region_summary for output in all_timeseries]
if not all_timeseries:
logger.warning(f'No regions detected - skipping.', aggregate_level=AggregationLevel.CBSA.value)
return
logger.info(f'Deploying {AggregationLevel.CBSA.value} output to {output}')
for summary in all_summaries:
output_path = path_builder.single_summary(summary, FileType.JSON)
deploy_json_api_output(summary, output_path)
for timeseries in all_timeseries:
output_path = path_builder.single_timeseries(timeseries, FileType.JSON)
deploy_json_api_output(timeseries, output_path)
if AggregationLevel.CBSA in [AggregationLevel.STATE, AggregationLevel.COUNTRY]:
output_path = path_builder.single_timeseries(timeseries, FileType.CSV)
bulk_timeseries = AggregateRegionSummaryWithTimeseries(__root__=[timeseries])
flattened_timeseries = build_api_v2.build_bulk_flattened_timeseries(bulk_timeseries)
deploy_csv_api_output(flattened_timeseries, output_path, csv_column_ordering.TIMESERIES_ORDER)
deploy_bulk_files(path_builder, all_timeseries, all_summaries)
if AggregationLevel.CBSA is AggregationLevel.COUNTY:
for state in set((record.state for record in all_summaries)):
state_timeseries = [record for record in all_timeseries if record.state == state]
state_summaries = [record for record in all_summaries if record.state == state]
deploy_bulk_files(path_builder, state_timeseries, state_summaries, state=state)
</DeepExtract>
<DeepExtract>
path_builder = APIOutputPathBuilder(output, AggregationLevel.PLACE)
path_builder.make_directories()
all_timeseries = [output for output in all_timeseries if output.level is AggregationLevel.PLACE]
all_summaries = [output.region_summary for output in all_timeseries]
if not all_timeseries:
logger.warning(f'No regions detected - skipping.', aggregate_level=AggregationLevel.PLACE.value)
return
logger.info(f'Deploying {AggregationLevel.PLACE.value} output to {output}')
for summary in all_summaries:
output_path = path_builder.single_summary(summary, FileType.JSON)
deploy_json_api_output(summary, output_path)
for timeseries in all_timeseries:
output_path = path_builder.single_timeseries(timeseries, FileType.JSON)
deploy_json_api_output(timeseries, output_path)
if AggregationLevel.PLACE in [AggregationLevel.STATE, AggregationLevel.COUNTRY]:
output_path = path_builder.single_timeseries(timeseries, FileType.CSV)
bulk_timeseries = AggregateRegionSummaryWithTimeseries(__root__=[timeseries])
flattened_timeseries = build_api_v2.build_bulk_flattened_timeseries(bulk_timeseries)
deploy_csv_api_output(flattened_timeseries, output_path, csv_column_ordering.TIMESERIES_ORDER)
deploy_bulk_files(path_builder, all_timeseries, all_summaries)
if AggregationLevel.PLACE is AggregationLevel.COUNTY:
for state in set((record.state for record in all_summaries)):
state_timeseries = [record for record in all_timeseries if record.state == state]
state_summaries = [record for record in all_summaries if record.state == state]
deploy_bulk_files(path_builder, state_timeseries, state_summaries, state=state)
</DeepExtract>
<DeepExtract>
path_builder = APIOutputPathBuilder(output, AggregationLevel.COUNTRY)
path_builder.make_directories()
all_timeseries = [output for output in all_timeseries if output.level is AggregationLevel.COUNTRY]
all_summaries = [output.region_summary for output in all_timeseries]
if not all_timeseries:
logger.warning(f'No regions detected - skipping.', aggregate_level=AggregationLevel.COUNTRY.value)
return
logger.info(f'Deploying {AggregationLevel.COUNTRY.value} output to {output}')
for summary in all_summaries:
output_path = path_builder.single_summary(summary, FileType.JSON)
deploy_json_api_output(summary, output_path)
for timeseries in all_timeseries:
output_path = path_builder.single_timeseries(timeseries, FileType.JSON)
deploy_json_api_output(timeseries, output_path)
if AggregationLevel.COUNTRY in [AggregationLevel.STATE, AggregationLevel.COUNTRY]:
output_path = path_builder.single_timeseries(timeseries, FileType.CSV)
bulk_timeseries = AggregateRegionSummaryWithTimeseries(__root__=[timeseries])
flattened_timeseries = build_api_v2.build_bulk_flattened_timeseries(bulk_timeseries)
deploy_csv_api_output(flattened_timeseries, output_path, csv_column_ordering.TIMESERIES_ORDER)
deploy_bulk_files(path_builder, all_timeseries, all_summaries)
if AggregationLevel.COUNTRY is AggregationLevel.COUNTY:
for state in set((record.state for record in all_summaries)):
state_timeseries = [record for record in all_timeseries if record.state == state]
state_summaries = [record for record in all_summaries if record.state == state]
deploy_bulk_files(path_builder, state_timeseries, state_summaries, state=state)
</DeepExtract>
log.info('Finished API generation.')
|
def generate_from_loaded_data(model_output: pyseir.run.PyseirOutputDatasets, output: pathlib.Path, selected_dataset: MultiRegionDataset, log):
"""Runs the API generation code using data in parameters, writing results to output."""
log.info('Running test positivity.')
regions_data = test_positivity.run_and_maybe_join_columns(selected_dataset, log)
regions_data = vaccine_backfills.derive_vaccine_pct(regions_data)
log.info(f'Joining inputs by region.')
rt_data_map = dict(model_output.infection_rate.iter_one_regions())
regional_inputs = [RegionalInput.from_one_regions(region, regional_data, rt_data=rt_data_map.get(region)) for (region, regional_data) in regions_data.iter_one_regions()]
log.info('Generating all API Timeseries')
results = parallel_utils.parallel_map(build_timeseries_for_region, regional_inputs)
all_timeseries = [result for result in results if result]
if sort_func:
all_timeseries.sort(key=sort_func)
if limit:
all_timeseries = all_timeseries[:limit]
all_timeseries = all_timeseries
path_builder = APIOutputPathBuilder(output, AggregationLevel.COUNTY)
path_builder.make_directories()
all_timeseries = [output for output in all_timeseries if output.level is AggregationLevel.COUNTY]
all_summaries = [output.region_summary for output in all_timeseries]
if not all_timeseries:
logger.warning(f'No regions detected - skipping.', aggregate_level=AggregationLevel.COUNTY.value)
return
logger.info(f'Deploying {AggregationLevel.COUNTY.value} output to {output}')
for summary in all_summaries:
output_path = path_builder.single_summary(summary, FileType.JSON)
deploy_json_api_output(summary, output_path)
for timeseries in all_timeseries:
output_path = path_builder.single_timeseries(timeseries, FileType.JSON)
deploy_json_api_output(timeseries, output_path)
if AggregationLevel.COUNTY in [AggregationLevel.STATE, AggregationLevel.COUNTRY]:
output_path = path_builder.single_timeseries(timeseries, FileType.CSV)
bulk_timeseries = AggregateRegionSummaryWithTimeseries(__root__=[timeseries])
flattened_timeseries = build_api_v2.build_bulk_flattened_timeseries(bulk_timeseries)
deploy_csv_api_output(flattened_timeseries, output_path, csv_column_ordering.TIMESERIES_ORDER)
deploy_bulk_files(path_builder, all_timeseries, all_summaries)
if AggregationLevel.COUNTY is AggregationLevel.COUNTY:
for state in set((record.state for record in all_summaries)):
state_timeseries = [record for record in all_timeseries if record.state == state]
state_summaries = [record for record in all_summaries if record.state == state]
deploy_bulk_files(path_builder, state_timeseries, state_summaries, state=state)
path_builder = APIOutputPathBuilder(output, AggregationLevel.STATE)
path_builder.make_directories()
all_timeseries = [output for output in all_timeseries if output.level is AggregationLevel.STATE]
all_summaries = [output.region_summary for output in all_timeseries]
if not all_timeseries:
logger.warning(f'No regions detected - skipping.', aggregate_level=AggregationLevel.STATE.value)
return
logger.info(f'Deploying {AggregationLevel.STATE.value} output to {output}')
for summary in all_summaries:
output_path = path_builder.single_summary(summary, FileType.JSON)
deploy_json_api_output(summary, output_path)
for timeseries in all_timeseries:
output_path = path_builder.single_timeseries(timeseries, FileType.JSON)
deploy_json_api_output(timeseries, output_path)
if AggregationLevel.STATE in [AggregationLevel.STATE, AggregationLevel.COUNTRY]:
output_path = path_builder.single_timeseries(timeseries, FileType.CSV)
bulk_timeseries = AggregateRegionSummaryWithTimeseries(__root__=[timeseries])
flattened_timeseries = build_api_v2.build_bulk_flattened_timeseries(bulk_timeseries)
deploy_csv_api_output(flattened_timeseries, output_path, csv_column_ordering.TIMESERIES_ORDER)
deploy_bulk_files(path_builder, all_timeseries, all_summaries)
if AggregationLevel.STATE is AggregationLevel.COUNTY:
for state in set((record.state for record in all_summaries)):
state_timeseries = [record for record in all_timeseries if record.state == state]
state_summaries = [record for record in all_summaries if record.state == state]
deploy_bulk_files(path_builder, state_timeseries, state_summaries, state=state)
path_builder = APIOutputPathBuilder(output, AggregationLevel.CBSA)
path_builder.make_directories()
all_timeseries = [output for output in all_timeseries if output.level is AggregationLevel.CBSA]
all_summaries = [output.region_summary for output in all_timeseries]
if not all_timeseries:
logger.warning(f'No regions detected - skipping.', aggregate_level=AggregationLevel.CBSA.value)
return
logger.info(f'Deploying {AggregationLevel.CBSA.value} output to {output}')
for summary in all_summaries:
output_path = path_builder.single_summary(summary, FileType.JSON)
deploy_json_api_output(summary, output_path)
for timeseries in all_timeseries:
output_path = path_builder.single_timeseries(timeseries, FileType.JSON)
deploy_json_api_output(timeseries, output_path)
if AggregationLevel.CBSA in [AggregationLevel.STATE, AggregationLevel.COUNTRY]:
output_path = path_builder.single_timeseries(timeseries, FileType.CSV)
bulk_timeseries = AggregateRegionSummaryWithTimeseries(__root__=[timeseries])
flattened_timeseries = build_api_v2.build_bulk_flattened_timeseries(bulk_timeseries)
deploy_csv_api_output(flattened_timeseries, output_path, csv_column_ordering.TIMESERIES_ORDER)
deploy_bulk_files(path_builder, all_timeseries, all_summaries)
if AggregationLevel.CBSA is AggregationLevel.COUNTY:
for state in set((record.state for record in all_summaries)):
state_timeseries = [record for record in all_timeseries if record.state == state]
state_summaries = [record for record in all_summaries if record.state == state]
deploy_bulk_files(path_builder, state_timeseries, state_summaries, state=state)
path_builder = APIOutputPathBuilder(output, AggregationLevel.PLACE)
path_builder.make_directories()
all_timeseries = [output for output in all_timeseries if output.level is AggregationLevel.PLACE]
all_summaries = [output.region_summary for output in all_timeseries]
if not all_timeseries:
logger.warning(f'No regions detected - skipping.', aggregate_level=AggregationLevel.PLACE.value)
return
logger.info(f'Deploying {AggregationLevel.PLACE.value} output to {output}')
for summary in all_summaries:
output_path = path_builder.single_summary(summary, FileType.JSON)
deploy_json_api_output(summary, output_path)
for timeseries in all_timeseries:
output_path = path_builder.single_timeseries(timeseries, FileType.JSON)
deploy_json_api_output(timeseries, output_path)
if AggregationLevel.PLACE in [AggregationLevel.STATE, AggregationLevel.COUNTRY]:
output_path = path_builder.single_timeseries(timeseries, FileType.CSV)
bulk_timeseries = AggregateRegionSummaryWithTimeseries(__root__=[timeseries])
flattened_timeseries = build_api_v2.build_bulk_flattened_timeseries(bulk_timeseries)
deploy_csv_api_output(flattened_timeseries, output_path, csv_column_ordering.TIMESERIES_ORDER)
deploy_bulk_files(path_builder, all_timeseries, all_summaries)
if AggregationLevel.PLACE is AggregationLevel.COUNTY:
for state in set((record.state for record in all_summaries)):
state_timeseries = [record for record in all_timeseries if record.state == state]
state_summaries = [record for record in all_summaries if record.state == state]
deploy_bulk_files(path_builder, state_timeseries, state_summaries, state=state)
path_builder = APIOutputPathBuilder(output, AggregationLevel.COUNTRY)
path_builder.make_directories()
all_timeseries = [output for output in all_timeseries if output.level is AggregationLevel.COUNTRY]
all_summaries = [output.region_summary for output in all_timeseries]
if not all_timeseries:
logger.warning(f'No regions detected - skipping.', aggregate_level=AggregationLevel.COUNTRY.value)
return
logger.info(f'Deploying {AggregationLevel.COUNTRY.value} output to {output}')
for summary in all_summaries:
output_path = path_builder.single_summary(summary, FileType.JSON)
deploy_json_api_output(summary, output_path)
for timeseries in all_timeseries:
output_path = path_builder.single_timeseries(timeseries, FileType.JSON)
deploy_json_api_output(timeseries, output_path)
if AggregationLevel.COUNTRY in [AggregationLevel.STATE, AggregationLevel.COUNTRY]:
output_path = path_builder.single_timeseries(timeseries, FileType.CSV)
bulk_timeseries = AggregateRegionSummaryWithTimeseries(__root__=[timeseries])
flattened_timeseries = build_api_v2.build_bulk_flattened_timeseries(bulk_timeseries)
deploy_csv_api_output(flattened_timeseries, output_path, csv_column_ordering.TIMESERIES_ORDER)
deploy_bulk_files(path_builder, all_timeseries, all_summaries)
if AggregationLevel.COUNTRY is AggregationLevel.COUNTY:
for state in set((record.state for record in all_summaries)):
state_timeseries = [record for record in all_timeseries if record.state == state]
state_summaries = [record for record in all_summaries if record.state == state]
deploy_bulk_files(path_builder, state_timeseries, state_summaries, state=state)
log.info('Finished API generation.')
|
covid-data-model
|
positive
|
def protect(event) -> Any:
"""
Will apply the correct protection to apdus depending on the security context
"""
LOG.info(f'Ciphering DLMS Request', apdu=event)
if isinstance(event, (acse.ApplicationAssociationRequest, acse.ReleaseRequest)):
if event.user_information:
<DeepExtract>
if not self.global_encryption_key:
raise ProtectionError('Unable to encrypt plain text. Missing global_encryption_key')
if not self.global_authentication_key:
raise ProtectionError('Unable to encrypt plain text. Missing global_authentication_key')
invocation_counter = self.client_invocation_counter
ciphered_text = security.encrypt(self.security_control, system_title=self.client_system_title, invocation_counter=invocation_counter, key=self.global_encryption_key, auth_key=self.global_authentication_key, plain_text=event.user_information.content.to_bytes())
self.client_invocation_counter += 1
(ciphered_text, ic) = (ciphered_text, invocation_counter)
</DeepExtract>
event.user_information = acse.UserInformation(content=xdlms.GlobalCipherInitiateRequest(security_control=self.security_control, invocation_counter=ic, ciphered_text=ciphered_text))
elif isinstance(event, AbstractXDlmsApdu):
<DeepExtract>
if not self.global_encryption_key:
raise ProtectionError('Unable to encrypt plain text. Missing global_encryption_key')
if not self.global_authentication_key:
raise ProtectionError('Unable to encrypt plain text. Missing global_authentication_key')
invocation_counter = self.client_invocation_counter
ciphered_text = security.encrypt(self.security_control, system_title=self.client_system_title, invocation_counter=invocation_counter, key=self.global_encryption_key, auth_key=self.global_authentication_key, plain_text=event.to_bytes())
self.client_invocation_counter += 1
(ciphered_text, ic) = (ciphered_text, invocation_counter)
</DeepExtract>
event = xdlms.GeneralGlobalCipher(system_title=self.client_system_title, security_control=self.security_control, invocation_counter=ic, ciphered_text=ciphered_text)
else:
raise RuntimeError(f'Unable to handle encryption/protection of {event}')
return event
|
def protect(event) -> Any:
"""
Will apply the correct protection to apdus depending on the security context
"""
LOG.info(f'Ciphering DLMS Request', apdu=event)
if isinstance(event, (acse.ApplicationAssociationRequest, acse.ReleaseRequest)):
if event.user_information:
if not self.global_encryption_key:
raise ProtectionError('Unable to encrypt plain text. Missing global_encryption_key')
if not self.global_authentication_key:
raise ProtectionError('Unable to encrypt plain text. Missing global_authentication_key')
invocation_counter = self.client_invocation_counter
ciphered_text = security.encrypt(self.security_control, system_title=self.client_system_title, invocation_counter=invocation_counter, key=self.global_encryption_key, auth_key=self.global_authentication_key, plain_text=event.user_information.content.to_bytes())
self.client_invocation_counter += 1
(ciphered_text, ic) = (ciphered_text, invocation_counter)
event.user_information = acse.UserInformation(content=xdlms.GlobalCipherInitiateRequest(security_control=self.security_control, invocation_counter=ic, ciphered_text=ciphered_text))
elif isinstance(event, AbstractXDlmsApdu):
if not self.global_encryption_key:
raise ProtectionError('Unable to encrypt plain text. Missing global_encryption_key')
if not self.global_authentication_key:
raise ProtectionError('Unable to encrypt plain text. Missing global_authentication_key')
invocation_counter = self.client_invocation_counter
ciphered_text = security.encrypt(self.security_control, system_title=self.client_system_title, invocation_counter=invocation_counter, key=self.global_encryption_key, auth_key=self.global_authentication_key, plain_text=event.to_bytes())
self.client_invocation_counter += 1
(ciphered_text, ic) = (ciphered_text, invocation_counter)
event = xdlms.GeneralGlobalCipher(system_title=self.client_system_title, security_control=self.security_control, invocation_counter=ic, ciphered_text=ciphered_text)
else:
raise RuntimeError(f'Unable to handle encryption/protection of {event}')
return event
|
dlms-cosem
|
positive
|
def fhook(self, ops, vm, line_num):
if len(ops) == 1:
<DeepExtract>
check_num_args(self.name, ops, 1, line_num)
if sub.__name__ == 'div' and ops[0].get_val(line_num) == 0.0:
raise DivisionZero(line_num)
else:
vm.registers['ST0'] = sub(vm.registers['ST0'], ops[0].get_val(line_num))
</DeepExtract>
elif len(ops) == 2:
<DeepExtract>
check_num_args(self.name, ops, 2, line_num)
(reg_one, reg_two) = [int(x.get_nm()[-1]) for x in ops]
if reg_one != 0 and reg_two != 0:
raise InvalidOperand('Neither registers are ST0', line_num)
r1 = vm.registers[f'ST{reg_one}']
r2 = vm.registers[f'ST{reg_two}']
vm.registers[f'ST{reg_one}'] = checkflag(sub(r1, r2), vm)
</DeepExtract>
vm.pop_from_Float_Stack()
|
def fhook(self, ops, vm, line_num):
if len(ops) == 1:
check_num_args(self.name, ops, 1, line_num)
if sub.__name__ == 'div' and ops[0].get_val(line_num) == 0.0:
raise DivisionZero(line_num)
else:
vm.registers['ST0'] = sub(vm.registers['ST0'], ops[0].get_val(line_num))
elif len(ops) == 2:
check_num_args(self.name, ops, 2, line_num)
(reg_one, reg_two) = [int(x.get_nm()[-1]) for x in ops]
if reg_one != 0 and reg_two != 0:
raise InvalidOperand('Neither registers are ST0', line_num)
r1 = vm.registers[f'ST{reg_one}']
r2 = vm.registers[f'ST{reg_two}']
vm.registers[f'ST{reg_one}'] = checkflag(sub(r1, r2), vm)
vm.pop_from_Float_Stack()
|
Emu86
|
positive
|
def get_schemas(self) -> List[Dict]:
kfp_schema_present = False
airflow_schema_present = False
runtime_schemas = []
<DeepExtract>
schemas = []
for schema in ElyraSchemasProvider.local_schemas:
if schema.get('schemaspace_id') == Runtimes.RUNTIMES_SCHEMASPACE_ID:
schemas.append(schema)
schemas = schemas
</DeepExtract>
for schema in schemas:
if schema['name'] in self._runtime_processor_names:
runtime_schemas.append(schema)
if schema['name'] == 'kfp':
kfp_schema_present = True
elif schema['name'] == 'airflow':
airflow_schema_present = True
else:
self.log.error(f"No entrypoint with name '{schema['name']}' was found in group 'elyra.pipeline.processor' to match the schema with the same name. Skipping...")
if kfp_schema_present:
if not TektonClient:
for schema in runtime_schemas:
if schema['name'] == 'kfp':
engine_enum: list = schema['properties']['metadata']['properties']['engine']['enum']
if 'Tekton' in engine_enum:
engine_enum.remove('Tekton')
schema['properties']['metadata']['properties']['engine']['enum'] = engine_enum
auth_type_enum = SupportedAuthProviders.get_provider_names()
auth_type_default = SupportedAuthProviders.get_default_provider().name
for schema in runtime_schemas:
if schema['name'] == 'kfp':
if schema['properties']['metadata']['properties'].get('auth_type') is not None:
schema['properties']['metadata']['properties']['auth_type']['enum'] = auth_type_enum
schema['properties']['metadata']['properties']['auth_type']['default'] = auth_type_default
if airflow_schema_present:
git_type_enum = list(map(lambda c: c.name, SupportedGitTypes.get_enabled_types()))
git_type_default = SupportedGitTypes.get_default_type().name
for schema in runtime_schemas:
if schema['name'] == 'airflow':
if schema['properties']['metadata']['properties'].get('git_type') is not None:
schema['properties']['metadata']['properties']['git_type']['enum'] = git_type_enum
schema['properties']['metadata']['properties']['git_type']['default'] = git_type_default
return runtime_schemas
|
def get_schemas(self) -> List[Dict]:
kfp_schema_present = False
airflow_schema_present = False
runtime_schemas = []
schemas = []
for schema in ElyraSchemasProvider.local_schemas:
if schema.get('schemaspace_id') == Runtimes.RUNTIMES_SCHEMASPACE_ID:
schemas.append(schema)
schemas = schemas
for schema in schemas:
if schema['name'] in self._runtime_processor_names:
runtime_schemas.append(schema)
if schema['name'] == 'kfp':
kfp_schema_present = True
elif schema['name'] == 'airflow':
airflow_schema_present = True
else:
self.log.error(f"No entrypoint with name '{schema['name']}' was found in group 'elyra.pipeline.processor' to match the schema with the same name. Skipping...")
if kfp_schema_present:
if not TektonClient:
for schema in runtime_schemas:
if schema['name'] == 'kfp':
engine_enum: list = schema['properties']['metadata']['properties']['engine']['enum']
if 'Tekton' in engine_enum:
engine_enum.remove('Tekton')
schema['properties']['metadata']['properties']['engine']['enum'] = engine_enum
auth_type_enum = SupportedAuthProviders.get_provider_names()
auth_type_default = SupportedAuthProviders.get_default_provider().name
for schema in runtime_schemas:
if schema['name'] == 'kfp':
if schema['properties']['metadata']['properties'].get('auth_type') is not None:
schema['properties']['metadata']['properties']['auth_type']['enum'] = auth_type_enum
schema['properties']['metadata']['properties']['auth_type']['default'] = auth_type_default
if airflow_schema_present:
git_type_enum = list(map(lambda c: c.name, SupportedGitTypes.get_enabled_types()))
git_type_default = SupportedGitTypes.get_default_type().name
for schema in runtime_schemas:
if schema['name'] == 'airflow':
if schema['properties']['metadata']['properties'].get('git_type') is not None:
schema['properties']['metadata']['properties']['git_type']['enum'] = git_type_enum
schema['properties']['metadata']['properties']['git_type']['default'] = git_type_default
return runtime_schemas
|
elyra
|
positive
|
def process_file(filename):
print('Processing file:', filename)
with open(filename, 'rb') as f:
<DeepExtract>
print('Low level API...')
elffile = ELFFile(f)
print(' %s sections' % elffile['e_shnum'])
for i in range(elffile['e_shnum']):
section_offset = elffile['e_shoff'] + i * elffile['e_shentsize']
f.seek(section_offset)
section_header = elffile.structs.Elf_Shdr.parse_stream(f)
if section_header['sh_type'] == 'SHT_SYMTAB':
print(' Section name: %s, type: %s' % (section_header['sh_name'], section_header['sh_type']))
break
else:
print(' No symbol table found. Perhaps this ELF has been stripped?')
</DeepExtract>
f.seek(0)
<DeepExtract>
print('High level API...')
elffile = ELFFile(f)
print(' %s sections' % elffile.num_sections())
section = elffile.get_section_by_name(b'.symtab')
if not section:
print(' No symbol table found. Perhaps this ELF has been stripped?')
return
print(' Section name: %s, type: %s' % (bytes2str(section.name), section['sh_type']))
if isinstance(section, SymbolTableSection):
num_symbols = section.num_symbols()
print(" It's a symbol section with %s symbols" % num_symbols)
print(' The name of the last symbol in the section is: %s' % bytes2str(section.get_symbol(num_symbols - 1).name))
</DeepExtract>
|
def process_file(filename):
print('Processing file:', filename)
with open(filename, 'rb') as f:
print('Low level API...')
elffile = ELFFile(f)
print(' %s sections' % elffile['e_shnum'])
for i in range(elffile['e_shnum']):
section_offset = elffile['e_shoff'] + i * elffile['e_shentsize']
f.seek(section_offset)
section_header = elffile.structs.Elf_Shdr.parse_stream(f)
if section_header['sh_type'] == 'SHT_SYMTAB':
print(' Section name: %s, type: %s' % (section_header['sh_name'], section_header['sh_type']))
break
else:
print(' No symbol table found. Perhaps this ELF has been stripped?')
f.seek(0)
print('High level API...')
elffile = ELFFile(f)
print(' %s sections' % elffile.num_sections())
section = elffile.get_section_by_name(b'.symtab')
if not section:
print(' No symbol table found. Perhaps this ELF has been stripped?')
return
print(' Section name: %s, type: %s' % (bytes2str(section.name), section['sh_type']))
if isinstance(section, SymbolTableSection):
num_symbols = section.num_symbols()
print(" It's a symbol section with %s symbols" % num_symbols)
print(' The name of the last symbol in the section is: %s' % bytes2str(section.get_symbol(num_symbols - 1).name))
|
ARMV8_Simulator
|
positive
|
def test_exception_divzero(self):
<DeepExtract>
if self.arch is None:
self.arch = platform.machine()
if os_str is None:
os_str = platform.system()
if os_str == 'Windows' and (not 'do_exception'.endswith('.exe')):
'do_exception' += '.exe'
signed = ''
if os_str == 'Darwin':
signed = '-signed'
base_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(base_path, 'binaries', f'{os_str}-{self.arch}{signed}', 'do_exception'))
fpath = path
</DeepExtract>
bv = BinaryViewType.get_view_of_file(fpath)
dbg = DebuggerController(bv)
if not self.arch == 'arm64':
dbg.cmd_line = 'divzero'
self.assertTrue(dbg.launch())
reason = dbg.go_and_wait()
<DeepExtract>
if platform.system() == 'Linux':
self.assertEqual(reason, DebugStopReason.SignalFpe)
else:
self.assertEqual(reason, DebugStopReason.Calculation)
</DeepExtract>
dbg.quit_and_wait()
|
def test_exception_divzero(self):
if self.arch is None:
self.arch = platform.machine()
if os_str is None:
os_str = platform.system()
if os_str == 'Windows' and (not 'do_exception'.endswith('.exe')):
'do_exception' += '.exe'
signed = ''
if os_str == 'Darwin':
signed = '-signed'
base_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(base_path, 'binaries', f'{os_str}-{self.arch}{signed}', 'do_exception'))
fpath = path
bv = BinaryViewType.get_view_of_file(fpath)
dbg = DebuggerController(bv)
if not self.arch == 'arm64':
dbg.cmd_line = 'divzero'
self.assertTrue(dbg.launch())
reason = dbg.go_and_wait()
if platform.system() == 'Linux':
self.assertEqual(reason, DebugStopReason.SignalFpe)
else:
self.assertEqual(reason, DebugStopReason.Calculation)
dbg.quit_and_wait()
|
debugger
|
positive
|
def step(self):
<DeepExtract>
lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (1 + cos(self.iteration / self.step_size * pi))
self.iteration += 1
if self.iteration == self.step_size:
self.iteration = 0
lr = [lr for base_lr in self.base_lrs]
</DeepExtract>
if self.momentum is not None:
<DeepExtract>
if self.iteration > 2 * self.cycle_step:
momentum = self.momentum[0]
elif self.iteration > self.cycle_step:
cut = 1 - (self.iteration - self.cycle_step) / self.cycle_step
momentum = self.momentum[0] + cut * (self.momentum[1] - self.momentum[0])
else:
cut = self.iteration / self.cycle_step
momentum = self.momentum[0] + cut * (self.momentum[1] - self.momentum[0])
momentum = momentum
</DeepExtract>
self.iteration += 1
if self.iteration == self.step_size:
self.iteration = 0
for group in self.optimizer.param_groups:
group['lr'] = lr
if self.momentum is not None:
group['betas'] = (momentum, group['betas'][1])
return lr
|
def step(self):
lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (1 + cos(self.iteration / self.step_size * pi))
self.iteration += 1
if self.iteration == self.step_size:
self.iteration = 0
lr = [lr for base_lr in self.base_lrs]
if self.momentum is not None:
if self.iteration > 2 * self.cycle_step:
momentum = self.momentum[0]
elif self.iteration > self.cycle_step:
cut = 1 - (self.iteration - self.cycle_step) / self.cycle_step
momentum = self.momentum[0] + cut * (self.momentum[1] - self.momentum[0])
else:
cut = self.iteration / self.cycle_step
momentum = self.momentum[0] + cut * (self.momentum[1] - self.momentum[0])
momentum = momentum
self.iteration += 1
if self.iteration == self.step_size:
self.iteration = 0
for group in self.optimizer.param_groups:
group['lr'] = lr
if self.momentum is not None:
group['betas'] = (momentum, group['betas'][1])
return lr
|
Benchmarks
|
positive
|
def __init__(self):
super().__init__()
f1 = 20
f2 = 40
f_output = 10
b_in = 60
lmax1 = 10
b_l1 = 10
lmax2 = 5
b_l2 = 6
<DeepExtract>
beta = torch.arange(1, n_beta + 1) * max_beta / n_beta
alpha = torch.linspace(0, 2 * math.pi, n_alpha + 1)[:-1]
(a, b) = torch.meshgrid(alpha, beta, indexing='ij')
b = b.flatten()
a = a.flatten()
grid_s2 = torch.stack((a, b))
</DeepExtract>
<DeepExtract>
if n_gamma is None:
n_gamma = n_alpha
beta = torch.arange(1, n_beta + 1) * max_beta / n_beta
alpha = torch.linspace(0, 2 * math.pi, n_alpha)[:-1]
pre_gamma = torch.linspace(-max_gamma, max_gamma, n_gamma)
(A, B, preC) = torch.meshgrid(alpha, beta, pre_gamma, indexing='ij')
C = preC - A
A = A.flatten()
B = B.flatten()
C = C.flatten()
grid_so3 = torch.stack((A, B, C))
</DeepExtract>
self.from_s2 = o3.FromS2Grid((b_in, b_in), lmax1)
self.conv1 = S2Convolution(1, f1, lmax1, kernel_grid=grid_s2)
self.act1 = SO3Activation(lmax1, lmax2, torch.relu, b_l1)
self.conv2 = SO3Convolution(f1, f2, lmax2, kernel_grid=grid_so3)
self.act2 = SO3Activation(lmax2, 0, torch.relu, b_l2)
self.w_out = torch.nn.Parameter(torch.randn(f2, f_output))
|
def __init__(self):
super().__init__()
f1 = 20
f2 = 40
f_output = 10
b_in = 60
lmax1 = 10
b_l1 = 10
lmax2 = 5
b_l2 = 6
beta = torch.arange(1, n_beta + 1) * max_beta / n_beta
alpha = torch.linspace(0, 2 * math.pi, n_alpha + 1)[:-1]
(a, b) = torch.meshgrid(alpha, beta, indexing='ij')
b = b.flatten()
a = a.flatten()
grid_s2 = torch.stack((a, b))
if n_gamma is None:
n_gamma = n_alpha
beta = torch.arange(1, n_beta + 1) * max_beta / n_beta
alpha = torch.linspace(0, 2 * math.pi, n_alpha)[:-1]
pre_gamma = torch.linspace(-max_gamma, max_gamma, n_gamma)
(A, B, preC) = torch.meshgrid(alpha, beta, pre_gamma, indexing='ij')
C = preC - A
A = A.flatten()
B = B.flatten()
C = C.flatten()
grid_so3 = torch.stack((A, B, C))
self.from_s2 = o3.FromS2Grid((b_in, b_in), lmax1)
self.conv1 = S2Convolution(1, f1, lmax1, kernel_grid=grid_s2)
self.act1 = SO3Activation(lmax1, lmax2, torch.relu, b_l1)
self.conv2 = SO3Convolution(f1, f2, lmax2, kernel_grid=grid_so3)
self.act2 = SO3Activation(lmax2, 0, torch.relu, b_l2)
self.w_out = torch.nn.Parameter(torch.randn(f2, f_output))
|
e3nn
|
positive
|
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
true_pos = 0.0
cur_p = 1.0
cur_r = 0.0
precisions = [1.0]
recalls = [0.0]
avg_prec = 0.0
for (i, qid) in enumerate(qid_list):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
cur_p = true_pos / float(i + 1)
cur_r = true_pos / float(num_true_pos)
if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(cur_p)
recalls.append(cur_r)
if out_image:
<DeepExtract>
plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(title)
plt.savefig(out_image)
plt.clf()
</DeepExtract>
return {'ap': 100.0 * avg_prec}
|
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
true_pos = 0.0
cur_p = 1.0
cur_r = 0.0
precisions = [1.0]
recalls = [0.0]
avg_prec = 0.0
for (i, qid) in enumerate(qid_list):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
cur_p = true_pos / float(i + 1)
cur_r = true_pos / float(num_true_pos)
if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(cur_p)
recalls.append(cur_r)
if out_image:
plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(title)
plt.savefig(out_image)
plt.clf()
return {'ap': 100.0 * avg_prec}
|
CCF-BDCI-Sentiment-Analysis-Baseline
|
positive
|
def train_pairwise(model_props, mode='ranking'):
<DeepExtract>
if not already_trained('all_pairs', 'weights_140'):
model_props.set_name('all_pairs')
model_props.set_mode('all_pairs')
model_props.load_weights_from = None
pairwise_learning.train(model_props, n_epochs=150)
if not already_trained('top_pairs', 'weights_40'):
model_props.set_name('top_pairs')
model_props.set_mode('top_pairs')
model_props.load_weights_from = 'all_pairs'
model_props.weights_file = 'weights_140'
pairwise_learning.train(model_props, n_epochs=50)
</DeepExtract>
model_props.set_name(mode)
model_props.set_mode(mode)
model_props.load_weights_from = 'top_pairs'
model_props.weights_file = 'weights_40'
pairwise_learning.train(model_props, n_epochs=100)
|
def train_pairwise(model_props, mode='ranking'):
if not already_trained('all_pairs', 'weights_140'):
model_props.set_name('all_pairs')
model_props.set_mode('all_pairs')
model_props.load_weights_from = None
pairwise_learning.train(model_props, n_epochs=150)
if not already_trained('top_pairs', 'weights_40'):
model_props.set_name('top_pairs')
model_props.set_mode('top_pairs')
model_props.load_weights_from = 'all_pairs'
model_props.weights_file = 'weights_140'
pairwise_learning.train(model_props, n_epochs=50)
model_props.set_name(mode)
model_props.set_mode(mode)
model_props.load_weights_from = 'top_pairs'
model_props.weights_file = 'weights_40'
pairwise_learning.train(model_props, n_epochs=100)
|
deep-coref
|
positive
|
def _add_spark_configuration(self):
spark_configuration = self.cluster_config.spark_configuration
if not spark_configuration:
return
<DeepExtract>
for file in [spark_configuration.spark_defaults_conf, spark_configuration.spark_env_sh, spark_configuration.core_site_xml]:
self.add_file(file, 'conf', False)
</DeepExtract>
self.zipf.writestr('id_rsa.pub', spark_configuration.ssh_key_pair['pub_key'])
self.zipf.writestr('id_rsa', spark_configuration.ssh_key_pair['priv_key'])
if spark_configuration.jars:
for jar in spark_configuration.jars:
<DeepExtract>
if not jar:
return
if isinstance(jar, (str, bytes)):
full_file_path = Path(jar)
with io.open(jar, 'r', encoding='UTF-8') as f:
if True:
self.zipf.write(jar, os.path.join('jars', full_file_path.name))
else:
self.zipf.writestr(os.path.join('jars', full_file_path.name), f.read().replace('\r\n', '\n'))
elif isinstance(jar, models.File):
self.zipf.writestr(os.path.join('jars', jar.name), jar.payload.getvalue())
</DeepExtract>
|
def _add_spark_configuration(self):
spark_configuration = self.cluster_config.spark_configuration
if not spark_configuration:
return
for file in [spark_configuration.spark_defaults_conf, spark_configuration.spark_env_sh, spark_configuration.core_site_xml]:
self.add_file(file, 'conf', False)
self.zipf.writestr('id_rsa.pub', spark_configuration.ssh_key_pair['pub_key'])
self.zipf.writestr('id_rsa', spark_configuration.ssh_key_pair['priv_key'])
if spark_configuration.jars:
for jar in spark_configuration.jars:
if not jar:
return
if isinstance(jar, (str, bytes)):
full_file_path = Path(jar)
with io.open(jar, 'r', encoding='UTF-8') as f:
if True:
self.zipf.write(jar, os.path.join('jars', full_file_path.name))
else:
self.zipf.writestr(os.path.join('jars', full_file_path.name), f.read().replace('\r\n', '\n'))
elif isinstance(jar, models.File):
self.zipf.writestr(os.path.join('jars', jar.name), jar.payload.getvalue())
|
aztk
|
positive
|
def collect_distributed_training_examples(inputs: Dict[str, SemanticTensor], model_id: str, dataset_path: str) -> tf.Operation:
"""Exports feature values to file in the partial dataset cache format.
For distributed training, multiple tasks (with task="worker" and different
task index) can collect feature values at the same time and in the same
`dataset_path` location.
Once feature values are done being collected,
"finalize_distributed_dataset_collection" should be called.
Args:
inputs: Feature values to collect.
model_id: Id of the model.
dataset_path: Directory path to the output partial dataset cache.
Returns:
Op triggering the collection.
"""
in_order_inputs = list(inputs.items())
in_order_inputs.sort(key=lambda x: x[0])
ops = []
for (feature_idx, (feature_name, semantic_tensor)) in enumerate(in_order_inputs):
def raise_non_supported():
raise Exception(f'Non supported tensor dtype {semantic_tensor.tensor.dtype} and semantic {semantic_tensor.semantic} for feature {feature_name} for distributed training')
<DeepExtract>
input_id = model_id + '_' + feature_name.replace('|', '||').replace(',', '|c')
if ',' in input_id:
raise ValueError(f'Internal error: Found comma in input_id {input_id}')
if not True:
input_id += _FEATURE_RESOURCE_VALIDATION_SUFFIX
resource_id = input_id
</DeepExtract>
if semantic_tensor.semantic == Semantic.NUMERICAL:
if semantic_tensor.tensor.dtype == NormalizedNumericalType:
ops.append(training_op.SimpleMLNumericalFeatureOnFile(value=semantic_tensor.tensor, resource_id=resource_id, feature_name=feature_name, feature_idx=feature_idx, dataset_path=dataset_path))
else:
<DeepExtract>
raise Exception('Non supported tensor dtype {} and semantic {} for feature {}'.format(semantic_tensor.tensor.dtype, semantic_tensor.semantic, key))
</DeepExtract>
elif semantic_tensor.semantic == Semantic.CATEGORICAL:
if semantic_tensor.tensor.dtype == NormalizedCategoricalIntType:
ops.append(training_op.SimpleMLCategoricalIntFeatureOnFile(value=semantic_tensor.tensor, resource_id=resource_id, feature_name=feature_name, feature_idx=feature_idx, dataset_path=dataset_path))
elif semantic_tensor.tensor.dtype == NormalizedCategoricalStringType:
ops.append(training_op.SimpleMLCategoricalStringFeatureOnFile(value=semantic_tensor.tensor, resource_id=resource_id, feature_name=feature_name, feature_idx=feature_idx, dataset_path=dataset_path))
else:
<DeepExtract>
raise Exception('Non supported tensor dtype {} and semantic {} for feature {}'.format(semantic_tensor.tensor.dtype, semantic_tensor.semantic, key))
</DeepExtract>
else:
<DeepExtract>
raise Exception('Non supported tensor dtype {} and semantic {} for feature {}'.format(semantic_tensor.tensor.dtype, semantic_tensor.semantic, key))
</DeepExtract>
return tf.group(ops)
|
def collect_distributed_training_examples(inputs: Dict[str, SemanticTensor], model_id: str, dataset_path: str) -> tf.Operation:
"""Exports feature values to file in the partial dataset cache format.
For distributed training, multiple tasks (with task="worker" and different
task index) can collect feature values at the same time and in the same
`dataset_path` location.
Once feature values are done being collected,
"finalize_distributed_dataset_collection" should be called.
Args:
inputs: Feature values to collect.
model_id: Id of the model.
dataset_path: Directory path to the output partial dataset cache.
Returns:
Op triggering the collection.
"""
in_order_inputs = list(inputs.items())
in_order_inputs.sort(key=lambda x: x[0])
ops = []
for (feature_idx, (feature_name, semantic_tensor)) in enumerate(in_order_inputs):
def raise_non_supported():
raise Exception(f'Non supported tensor dtype {semantic_tensor.tensor.dtype} and semantic {semantic_tensor.semantic} for feature {feature_name} for distributed training')
input_id = model_id + '_' + feature_name.replace('|', '||').replace(',', '|c')
if ',' in input_id:
raise ValueError(f'Internal error: Found comma in input_id {input_id}')
if not True:
input_id += _FEATURE_RESOURCE_VALIDATION_SUFFIX
resource_id = input_id
if semantic_tensor.semantic == Semantic.NUMERICAL:
if semantic_tensor.tensor.dtype == NormalizedNumericalType:
ops.append(training_op.SimpleMLNumericalFeatureOnFile(value=semantic_tensor.tensor, resource_id=resource_id, feature_name=feature_name, feature_idx=feature_idx, dataset_path=dataset_path))
else:
raise Exception('Non supported tensor dtype {} and semantic {} for feature {}'.format(semantic_tensor.tensor.dtype, semantic_tensor.semantic, key))
elif semantic_tensor.semantic == Semantic.CATEGORICAL:
if semantic_tensor.tensor.dtype == NormalizedCategoricalIntType:
ops.append(training_op.SimpleMLCategoricalIntFeatureOnFile(value=semantic_tensor.tensor, resource_id=resource_id, feature_name=feature_name, feature_idx=feature_idx, dataset_path=dataset_path))
elif semantic_tensor.tensor.dtype == NormalizedCategoricalStringType:
ops.append(training_op.SimpleMLCategoricalStringFeatureOnFile(value=semantic_tensor.tensor, resource_id=resource_id, feature_name=feature_name, feature_idx=feature_idx, dataset_path=dataset_path))
else:
raise Exception('Non supported tensor dtype {} and semantic {} for feature {}'.format(semantic_tensor.tensor.dtype, semantic_tensor.semantic, key))
else:
raise Exception('Non supported tensor dtype {} and semantic {} for feature {}'.format(semantic_tensor.tensor.dtype, semantic_tensor.semantic, key))
return tf.group(ops)
|
decision-forests
|
positive
|
def sync(self):
"""Write dict to disk"""
if self.flag == 'r':
return
filename = self.filename
tempname = filename + '.tmp'
fileobj = open(tempname, 'wb' if self.format == 'pickle' else 'w')
try:
<DeepExtract>
if self.format == 'csv':
csv.writer(fileobj).writerows(self.items())
elif self.format == 'json':
json.dump(self, fileobj, separators=(',', ':'))
elif self.format == 'pickle':
pickle.dump(dict(self), fileobj, 2)
else:
raise NotImplementedError('Unknown format: ' + repr(self.format))
</DeepExtract>
except Exception:
os.remove(tempname)
raise
finally:
fileobj.close()
shutil.move(tempname, self.filename)
if self.mode is not None:
os.chmod(self.filename, self.mode)
|
def sync(self):
"""Write dict to disk"""
if self.flag == 'r':
return
filename = self.filename
tempname = filename + '.tmp'
fileobj = open(tempname, 'wb' if self.format == 'pickle' else 'w')
try:
if self.format == 'csv':
csv.writer(fileobj).writerows(self.items())
elif self.format == 'json':
json.dump(self, fileobj, separators=(',', ':'))
elif self.format == 'pickle':
pickle.dump(dict(self), fileobj, 2)
else:
raise NotImplementedError('Unknown format: ' + repr(self.format))
except Exception:
os.remove(tempname)
raise
finally:
fileobj.close()
shutil.move(tempname, self.filename)
if self.mode is not None:
os.chmod(self.filename, self.mode)
|
autogoal
|
positive
|
def run_standalone_app(layout, callbacks, header_colors, filename):
"""Run demo app (tests/dashbio_demos/*/app.py) as standalone app."""
app = dash.Dash(__name__)
app.scripts.config.serve_locally = True
app.config['suppress_callback_exceptions'] = True
app_name = os.getenv('DASH_APP_NAME', '')
if app_name == '':
app_name = os.path.basename(os.path.dirname(filename))
app_name = app_name.replace('dash-', '')
app_title = '{}'.format(app_name.replace('-', ' ').title())
<DeepExtract>
app.layout = html.Div(id='main_page', children=[dcc.Location(id='url', refresh=False), html.Div(id='app-page-header', children=[html.A(id='dashbio-logo', children=[html.Img(src='data:image/png;base64,{}'.format(base64.b64encode(open('./assets/plotly-dash-bio-logo.png', 'rb').read()).decode()))], href='/Portal' if True else '/dash-bio'), html.H2(app_title), html.A(id='gh-link', children=['View on GitHub'], href='http://github.com/plotly/dash-bio/blob/master/tests/dashbio_demos/dash-{}/app.py'.format(app_name), style={'color': 'white' if light_logo else 'black', 'border': 'solid 1px white' if light_logo else 'solid 1px black'}), html.Img(src='data:image/png;base64,{}'.format(base64.b64encode(open('./assets/GitHub-Mark-{}64px.png'.format('Light-' if light_logo else ''), 'rb').read()).decode()))], style={'background': bg_color, 'color': font_color}), html.Div(id='app-page-content', children=layout())])
</DeepExtract>
callbacks(app)
return app
|
def run_standalone_app(layout, callbacks, header_colors, filename):
"""Run demo app (tests/dashbio_demos/*/app.py) as standalone app."""
app = dash.Dash(__name__)
app.scripts.config.serve_locally = True
app.config['suppress_callback_exceptions'] = True
app_name = os.getenv('DASH_APP_NAME', '')
if app_name == '':
app_name = os.path.basename(os.path.dirname(filename))
app_name = app_name.replace('dash-', '')
app_title = '{}'.format(app_name.replace('-', ' ').title())
app.layout = html.Div(id='main_page', children=[dcc.Location(id='url', refresh=False), html.Div(id='app-page-header', children=[html.A(id='dashbio-logo', children=[html.Img(src='data:image/png;base64,{}'.format(base64.b64encode(open('./assets/plotly-dash-bio-logo.png', 'rb').read()).decode()))], href='/Portal' if True else '/dash-bio'), html.H2(app_title), html.A(id='gh-link', children=['View on GitHub'], href='http://github.com/plotly/dash-bio/blob/master/tests/dashbio_demos/dash-{}/app.py'.format(app_name), style={'color': 'white' if light_logo else 'black', 'border': 'solid 1px white' if light_logo else 'solid 1px black'}), html.Img(src='data:image/png;base64,{}'.format(base64.b64encode(open('./assets/GitHub-Mark-{}64px.png'.format('Light-' if light_logo else ''), 'rb').read()).decode()))], style={'background': bg_color, 'color': font_color}), html.Div(id='app-page-content', children=layout())])
callbacks(app)
return app
|
dash-bio
|
positive
|
def do_train(model, data_loader, optimizer, scheduler, checkpointer, device, checkpoint_period, arguments):
logger = logging.getLogger('maskrcnn_benchmark.trainer')
logger.info('Start training')
meters = MetricLogger(delimiter=' ')
max_iter = len(data_loader)
start_iter = arguments['iteration']
model.train()
start_training_time = time.time()
end = time.time()
for (iteration, (images, targets, _)) in enumerate(data_loader, start_iter):
data_time = time.time() - end
iteration = iteration + 1
arguments['iteration'] = iteration
scheduler.step()
images = images.to(device)
if isinstance(targets[0], list):
targets = [[target[0].to(device) for target in targets], [target[1].to(device) for target in targets]]
else:
targets = [target.to(device) for target in targets]
loss_dict = model(images, targets)
losses = sum((loss for loss in loss_dict.values()))
<DeepExtract>
world_size = get_world_size()
if world_size < 2:
loss_dict_reduced = loss_dict
with torch.no_grad():
loss_names = []
all_losses = []
for k in sorted(loss_dict.keys()):
loss_names.append(k)
all_losses.append(loss_dict[k])
all_losses = torch.stack(all_losses, dim=0)
dist.reduce(all_losses, dst=0)
if dist.get_rank() == 0:
all_losses /= world_size
reduced_losses = {k: v for (k, v) in zip(loss_names, all_losses)}
loss_dict_reduced = reduced_losses
</DeepExtract>
losses_reduced = sum((loss for loss in loss_dict_reduced.values()))
meters.update(loss=losses_reduced, **loss_dict_reduced)
optimizer.zero_grad()
losses.backward()
optimizer.step()
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
eta_seconds = meters.time.global_avg * (max_iter - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if iteration % 20 == 0 or iteration == max_iter:
logger.info(meters.delimiter.join(['eta: {eta}', 'iter: {iter}', '{meters}', 'lr: {lr:.6f}', 'max mem: {memory:.0f}']).format(eta=eta_string, iter=iteration, meters=str(meters), lr=optimizer.param_groups[0]['lr'], memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0))
if iteration % checkpoint_period == 0:
checkpointer.save('model_{:07d}'.format(iteration), **arguments)
if iteration == max_iter:
checkpointer.save('model_final', **arguments)
total_training_time = time.time() - start_training_time
total_time_str = str(datetime.timedelta(seconds=total_training_time))
logger.info('Total training time: {} ({:.4f} s / it)'.format(total_time_str, total_training_time / max_iter))
|
def do_train(model, data_loader, optimizer, scheduler, checkpointer, device, checkpoint_period, arguments):
logger = logging.getLogger('maskrcnn_benchmark.trainer')
logger.info('Start training')
meters = MetricLogger(delimiter=' ')
max_iter = len(data_loader)
start_iter = arguments['iteration']
model.train()
start_training_time = time.time()
end = time.time()
for (iteration, (images, targets, _)) in enumerate(data_loader, start_iter):
data_time = time.time() - end
iteration = iteration + 1
arguments['iteration'] = iteration
scheduler.step()
images = images.to(device)
if isinstance(targets[0], list):
targets = [[target[0].to(device) for target in targets], [target[1].to(device) for target in targets]]
else:
targets = [target.to(device) for target in targets]
loss_dict = model(images, targets)
losses = sum((loss for loss in loss_dict.values()))
world_size = get_world_size()
if world_size < 2:
loss_dict_reduced = loss_dict
with torch.no_grad():
loss_names = []
all_losses = []
for k in sorted(loss_dict.keys()):
loss_names.append(k)
all_losses.append(loss_dict[k])
all_losses = torch.stack(all_losses, dim=0)
dist.reduce(all_losses, dst=0)
if dist.get_rank() == 0:
all_losses /= world_size
reduced_losses = {k: v for (k, v) in zip(loss_names, all_losses)}
loss_dict_reduced = reduced_losses
losses_reduced = sum((loss for loss in loss_dict_reduced.values()))
meters.update(loss=losses_reduced, **loss_dict_reduced)
optimizer.zero_grad()
losses.backward()
optimizer.step()
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
eta_seconds = meters.time.global_avg * (max_iter - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if iteration % 20 == 0 or iteration == max_iter:
logger.info(meters.delimiter.join(['eta: {eta}', 'iter: {iter}', '{meters}', 'lr: {lr:.6f}', 'max mem: {memory:.0f}']).format(eta=eta_string, iter=iteration, meters=str(meters), lr=optimizer.param_groups[0]['lr'], memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0))
if iteration % checkpoint_period == 0:
checkpointer.save('model_{:07d}'.format(iteration), **arguments)
if iteration == max_iter:
checkpointer.save('model_final', **arguments)
total_training_time = time.time() - start_training_time
total_time_str = str(datetime.timedelta(seconds=total_training_time))
logger.info('Total training time: {} ({:.4f} s / it)'.format(total_time_str, total_training_time / max_iter))
|
Box_Discretization_Network
|
positive
|
def scoring(self):
"""
Add scoring function in the starter kit for participant's reference
"""
def sdr(references, estimates):
delta = 1e-07
num = np.sum(np.square(references), axis=(1, 2))
den = np.sum(np.square(references - estimates), axis=(1, 2))
num += delta
den += delta
return 10 * np.log10(num / den)
<DeepExtract>
valid_music_names = None
if self.partial_run:
valid_music_names = self.partial_run.split(',')
music_names = []
for folder in listdir(self.test_data_path):
if not isfile(join(self.test_data_path, folder)):
if valid_music_names is None or folder in valid_music_names:
music_names.append(folder)
music_names = music_names
</DeepExtract>
instruments = ['bass', 'drums', 'other', 'vocals']
scores = {}
for music_name in music_names:
print('Evaluating for: %s' % music_name)
scores[music_name] = {}
references = []
estimates = []
for instrument in instruments:
reference_file = join(self.test_data_path, music_name, instrument + '.wav')
<DeepExtract>
if instrument is None:
instrument = 'mixture'
estimate_file = join(self.test_data_path, music_name, instrument + '.wav')
if not os.path.exists(self.results_data_path):
os.makedirs(self.results_data_path)
if not os.path.exists(join(self.results_data_path, music_name)):
os.makedirs(join(self.results_data_path, music_name))
estimate_file = join(self.results_data_path, music_name, instrument + '.wav')
</DeepExtract>
(reference, _) = sf.read(reference_file)
(estimate, _) = sf.read(estimate_file)
references.append(reference)
estimates.append(estimate)
references = np.stack(references)
estimates = np.stack(estimates)
references = references.astype(np.float32)
estimates = estimates.astype(np.float32)
song_score = sdr(references, estimates).tolist()
scores[music_name]['sdr_bass'] = song_score[0]
scores[music_name]['sdr_drums'] = song_score[1]
scores[music_name]['sdr_other'] = song_score[2]
scores[music_name]['sdr_vocals'] = song_score[3]
scores[music_name]['sdr'] = np.mean(song_score)
return scores
|
def scoring(self):
"""
Add scoring function in the starter kit for participant's reference
"""
def sdr(references, estimates):
delta = 1e-07
num = np.sum(np.square(references), axis=(1, 2))
den = np.sum(np.square(references - estimates), axis=(1, 2))
num += delta
den += delta
return 10 * np.log10(num / den)
valid_music_names = None
if self.partial_run:
valid_music_names = self.partial_run.split(',')
music_names = []
for folder in listdir(self.test_data_path):
if not isfile(join(self.test_data_path, folder)):
if valid_music_names is None or folder in valid_music_names:
music_names.append(folder)
music_names = music_names
instruments = ['bass', 'drums', 'other', 'vocals']
scores = {}
for music_name in music_names:
print('Evaluating for: %s' % music_name)
scores[music_name] = {}
references = []
estimates = []
for instrument in instruments:
reference_file = join(self.test_data_path, music_name, instrument + '.wav')
if instrument is None:
instrument = 'mixture'
estimate_file = join(self.test_data_path, music_name, instrument + '.wav')
if not os.path.exists(self.results_data_path):
os.makedirs(self.results_data_path)
if not os.path.exists(join(self.results_data_path, music_name)):
os.makedirs(join(self.results_data_path, music_name))
estimate_file = join(self.results_data_path, music_name, instrument + '.wav')
(reference, _) = sf.read(reference_file)
(estimate, _) = sf.read(estimate_file)
references.append(reference)
estimates.append(estimate)
references = np.stack(references)
estimates = np.stack(estimates)
references = references.astype(np.float32)
estimates = estimates.astype(np.float32)
song_score = sdr(references, estimates).tolist()
scores[music_name]['sdr_bass'] = song_score[0]
scores[music_name]['sdr_drums'] = song_score[1]
scores[music_name]['sdr_other'] = song_score[2]
scores[music_name]['sdr_vocals'] = song_score[3]
scores[music_name]['sdr'] = np.mean(song_score)
return scores
|
DNN-based_source_separation
|
positive
|
def _render_window(board, window_surface, reward):
"""Render the game onto the window surface."""
standard_font = pygame.font.SysFont('Courier', 24)
instructions_font = pygame.font.SysFont('Courier', 16)
num_rows = board.shape[0]
num_cols = board.shape[1]
window_surface.fill(_BLACK)
header = '* ' * (num_cols + 2)
<DeepExtract>
text = standard_font.render(header, True, _WHITE)
text_rect = text.get_rect()
text_rect.left = 50
text_rect.top = 30 + 0 * 30
window_surface.blit(text, text_rect)
</DeepExtract>
for board_index in range(num_rows):
row = board[board_index]
row_str = '* '
for c in row:
row_str += 'x ' if c == 1.0 else ' '
row_str += '* '
<DeepExtract>
text = standard_font.render(row_str, True, _WHITE)
text_rect = text.get_rect()
text_rect.left = 50
text_rect.top = 30 + board_index + 1 * 30
window_surface.blit(text, text_rect)
</DeepExtract>
<DeepExtract>
text = standard_font.render(header, True, _WHITE)
text_rect = text.get_rect()
text_rect.left = 50
text_rect.top = 30 + num_rows + 1 * 30
window_surface.blit(text, text_rect)
</DeepExtract>
reward_str = 'Reward: {}'.format(reward)
<DeepExtract>
text = standard_font.render(reward_str, True, _WHITE)
text_rect = text.get_rect()
text_rect.left = 50
text_rect.top = 30 + num_rows + 3 * 30
window_surface.blit(text, text_rect)
</DeepExtract>
instructions = 'Instructions: Left/Right arrow keys to move paddle, Escape to exit.'
<DeepExtract>
text = instructions_font.render(instructions, True, _WHITE)
text_rect = text.get_rect()
text_rect.left = 50
text_rect.top = 30 + num_rows + 5 * 30
window_surface.blit(text, text_rect)
</DeepExtract>
|
def _render_window(board, window_surface, reward):
"""Render the game onto the window surface."""
standard_font = pygame.font.SysFont('Courier', 24)
instructions_font = pygame.font.SysFont('Courier', 16)
num_rows = board.shape[0]
num_cols = board.shape[1]
window_surface.fill(_BLACK)
header = '* ' * (num_cols + 2)
text = standard_font.render(header, True, _WHITE)
text_rect = text.get_rect()
text_rect.left = 50
text_rect.top = 30 + 0 * 30
window_surface.blit(text, text_rect)
for board_index in range(num_rows):
row = board[board_index]
row_str = '* '
for c in row:
row_str += 'x ' if c == 1.0 else ' '
row_str += '* '
text = standard_font.render(row_str, True, _WHITE)
text_rect = text.get_rect()
text_rect.left = 50
text_rect.top = 30 + board_index + 1 * 30
window_surface.blit(text, text_rect)
text = standard_font.render(header, True, _WHITE)
text_rect = text.get_rect()
text_rect.left = 50
text_rect.top = 30 + num_rows + 1 * 30
window_surface.blit(text, text_rect)
reward_str = 'Reward: {}'.format(reward)
text = standard_font.render(reward_str, True, _WHITE)
text_rect = text.get_rect()
text_rect.left = 50
text_rect.top = 30 + num_rows + 3 * 30
window_surface.blit(text, text_rect)
instructions = 'Instructions: Left/Right arrow keys to move paddle, Escape to exit.'
text = instructions_font.render(instructions, True, _WHITE)
text_rect = text.get_rect()
text_rect.left = 50
text_rect.top = 30 + num_rows + 5 * 30
window_surface.blit(text, text_rect)
|
dm_env_rpc
|
positive
|
def clear_database():
global datastore, database_setup
if not database_setup:
<DeepExtract>
global datastore, database_setup
if not database_setup:
datastore = create_datastore(os.environ.get('MONGOURL', 'mim:///depottest'))
mainsession.bind = datastore
ming.odm.Mapper.compile_all()
</DeepExtract>
try:
datastore.conn.drop_all()
except TypeError:
datastore.conn.drop_database(datastore.db)
|
def clear_database():
global datastore, database_setup
if not database_setup:
global datastore, database_setup
if not database_setup:
datastore = create_datastore(os.environ.get('MONGOURL', 'mim:///depottest'))
mainsession.bind = datastore
ming.odm.Mapper.compile_all()
try:
datastore.conn.drop_all()
except TypeError:
datastore.conn.drop_database(datastore.db)
|
depot
|
positive
|
def test_initial_call():
context_builder = ContextBuilder('test_fan_out_fan_in_function')
result = get_orchestration_state_result(context_builder, generator_function)
<DeepExtract>
expected_state = OrchestratorState(is_done=False, actions=[], output=output, replay_schema=replay_schema)
</DeepExtract>
<DeepExtract>
action = CallActivityAction(function_name='GetActivityCount', input_=None)
expected_state.actions.append([action])
</DeepExtract>
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
|
def test_initial_call():
context_builder = ContextBuilder('test_fan_out_fan_in_function')
result = get_orchestration_state_result(context_builder, generator_function)
expected_state = OrchestratorState(is_done=False, actions=[], output=output, replay_schema=replay_schema)
action = CallActivityAction(function_name='GetActivityCount', input_=None)
expected_state.actions.append([action])
expected = expected_state.to_json()
assert_valid_schema(result)
assert_orchestration_state_equals(expected, result)
|
azure-functions-durable-python
|
positive
|
def mul(x0, x1):
<DeepExtract>
if np.isscalar(x1):
x1 = np.array(x1)
x1 = x1
</DeepExtract>
return Mul()(x0, x1)
|
def mul(x0, x1):
if np.isscalar(x1):
x1 = np.array(x1)
x1 = x1
return Mul()(x0, x1)
|
deep-learning-from-scratch-3
|
positive
|
def test_osbs_builder_with_rhpam_3(tmpdir, caplog):
"""
Verify that multi-stage build has Cachito instructions enabled.
"""
caplog.set_level(logging.DEBUG, logger='cekit')
shutil.copytree(os.path.join(os.path.dirname(__file__), 'images', 'rhpam3'), os.path.join(str(tmpdir), 'rhpam'))
cfgcontents = '\n[common]\nredhat = True\n '
cfgfile = os.path.join(str(tmpdir), 'config.cfg')
with open(cfgfile, 'w') as _file:
_file.write(cfgcontents)
<DeepExtract>
with Chdir(os.path.join(str(tmpdir), 'rhpam')):
result = CliRunner().invoke(cli, ['--config', cfgfile, '-v', '--work-dir', str(tmpdir), 'build', '--dry-run', 'osbs'], catch_exceptions=False)
sys.stdout.write('\n')
sys.stdout.write(result.output)
assert result.exit_code == return_code
if message:
assert message in result.output
return result
</DeepExtract>
dockerfile_path = os.path.join(str(tmpdir), 'rhpam', 'target', 'image', 'Dockerfile')
assert os.path.exists(dockerfile_path) is True
with open(dockerfile_path, 'r') as _file:
dockerfile = _file.read()
print('\n' + dockerfile + '\n')
assert '# This is a Dockerfile for the rhpam-7/rhpam-kogito-operator:7.11 image.\n\n## START builder image operator-builder-one:7.11\n## \\\n FROM registry.access.redhat.com/ubi8/go-toolset:1.14.12 AS operator-builder-one\n USER root\n\n###### START module \'org.kie.kogito.builder:7.11\'\n###### \\\n # Copy \'org.kie.kogito.builder\' module general artifacts to \'/workspace/\' destination\n COPY \\\n main.go \\\n /workspace/\n # Copy \'org.kie.kogito.builder\' module content\n COPY modules/org.kie.kogito.builder /tmp/scripts/org.kie.kogito.builder\n # Custom scripts from \'org.kie.kogito.builder\' module\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/org.kie.kogito.builder/install.sh" ]\n###### /\n###### END module \'org.kie.kogito.builder:7.11\'\n\n###### START image \'operator-builder-one:7.11\'\n###### \\\n # Set \'operator-builder-one\' image defined environment variables\n ENV \\\n JBOSS_IMAGE_NAME="rhpam-7/rhpam-kogito-operator" \\\n JBOSS_IMAGE_VERSION="7.11"\n # Set \'operator-builder-one\' image defined labels\n LABEL \\\n name="rhpam-7/rhpam-kogito-operator" \\\n version="7.11"\n###### /\n###### END image \'operator-builder-one:7.11\'\n\n\n## /\n## END builder image\n## START builder image operator-builder-two:7.11\n## \\\n FROM registry.access.redhat.com/ubi8/go-toolset:1.14.12 AS operator-builder-two\n USER root\n\n COPY $REMOTE_SOURCE $REMOTE_SOURCE_DIR\n WORKDIR $REMOTE_SOURCE_DIR/app\n\n###### START image \'operator-builder-two:7.11\'\n###### \\\n # Set \'operator-builder-two\' image defined environment variables\n ENV \\\n JBOSS_IMAGE_NAME="rhpam-7/rhpam-kogito-operator" \\\n JBOSS_IMAGE_VERSION="7.11"\n # Set \'operator-builder-two\' image defined labels\n LABEL \\\n name="rhpam-7/rhpam-kogito-operator" \\\n version="7.11"\n###### /\n###### END image \'operator-builder-two:7.11\'\n\n RUN rm -rf $REMOTE_SOURCE_DIR\n\n## /\n## END builder image\n\n## START target image rhpam-7/rhpam-kogito-operator:7.11\n## \\\n FROM registry.access.redhat.com/ubi8/ubi-minimal:latest\n\n\n USER root\n\n###### START image \'rhpam-7/rhpam-kogito-operator:7.11\'\n###### \\\n # Copy \'rhpam-7/rhpam-kogito-operator\' image stage artifacts\n COPY --from=operator-builder /workspace/rhpam-kogito-operator-manager /usr/local/bin/rhpam-kogito-operator-manager\n # Set \'rhpam-7/rhpam-kogito-operator\' image defined environment variables\n ENV \\\n JBOSS_IMAGE_NAME="rhpam-7/rhpam-kogito-operator" \\\n JBOSS_IMAGE_VERSION="7.11"\n # Set \'rhpam-7/rhpam-kogito-operator\' image defined labels\n LABEL \\\n com.redhat.component="rhpam-7-kogito-rhel8-operator-container" \\\n description="Runtime Image for the RHPAM Kogito Operator" \\\n io.cekit.version="VVVVV" \\\n io.k8s.description="Operator for deploying RHPAM Kogito Application" \\\n io.k8s.display-name="Red Hat PAM Kogito Operator" \\\n io.openshift.tags="rhpam,kogito,operator" \\\n maintainer="bsig-cloud@redhat.com" \\\n name="rhpam-7/rhpam-kogito-operator" \\\n summary="Runtime Image for the RHPAM Kogito Operator" \\\n version="7.11"\n###### /\n###### END image \'rhpam-7/rhpam-kogito-operator:7.11\'\n\n\n\n # Switch to \'root\' user and remove artifacts and modules\n USER root\n RUN [ ! -d /tmp/scripts ] || rm -rf /tmp/scripts\n RUN [ ! -d /tmp/artifacts ] || rm -rf /tmp/artifacts\n # Define the user\n USER 1001\n## /\n## END target image'.replace('VVVVV', __version__) in dockerfile
container_path = os.path.join(str(tmpdir), 'rhpam', 'target', 'image', 'container.yaml')
assert os.path.exists(container_path) is True
with open(container_path, 'r') as _file:
containerfile = _file.read()
print('\n' + containerfile + '\n')
assert 'image_build_method: imagebuilder\nplatforms:\n only:\n - x86_64\nremote_source:\n pkg_managers:\n - gomod\n ref: db4a5d18f5f52a64083d8f1bd1776ad60a46904c\n repo: https://github.com/kiegroup/rhpam-kogito-operator' in containerfile
|
def test_osbs_builder_with_rhpam_3(tmpdir, caplog):
"""
Verify that multi-stage build has Cachito instructions enabled.
"""
caplog.set_level(logging.DEBUG, logger='cekit')
shutil.copytree(os.path.join(os.path.dirname(__file__), 'images', 'rhpam3'), os.path.join(str(tmpdir), 'rhpam'))
cfgcontents = '\n[common]\nredhat = True\n '
cfgfile = os.path.join(str(tmpdir), 'config.cfg')
with open(cfgfile, 'w') as _file:
_file.write(cfgcontents)
with Chdir(os.path.join(str(tmpdir), 'rhpam')):
result = CliRunner().invoke(cli, ['--config', cfgfile, '-v', '--work-dir', str(tmpdir), 'build', '--dry-run', 'osbs'], catch_exceptions=False)
sys.stdout.write('\n')
sys.stdout.write(result.output)
assert result.exit_code == return_code
if message:
assert message in result.output
return result
dockerfile_path = os.path.join(str(tmpdir), 'rhpam', 'target', 'image', 'Dockerfile')
assert os.path.exists(dockerfile_path) is True
with open(dockerfile_path, 'r') as _file:
dockerfile = _file.read()
print('\n' + dockerfile + '\n')
assert '# This is a Dockerfile for the rhpam-7/rhpam-kogito-operator:7.11 image.\n\n## START builder image operator-builder-one:7.11\n## \\\n FROM registry.access.redhat.com/ubi8/go-toolset:1.14.12 AS operator-builder-one\n USER root\n\n###### START module \'org.kie.kogito.builder:7.11\'\n###### \\\n # Copy \'org.kie.kogito.builder\' module general artifacts to \'/workspace/\' destination\n COPY \\\n main.go \\\n /workspace/\n # Copy \'org.kie.kogito.builder\' module content\n COPY modules/org.kie.kogito.builder /tmp/scripts/org.kie.kogito.builder\n # Custom scripts from \'org.kie.kogito.builder\' module\n USER root\n RUN [ "sh", "-x", "/tmp/scripts/org.kie.kogito.builder/install.sh" ]\n###### /\n###### END module \'org.kie.kogito.builder:7.11\'\n\n###### START image \'operator-builder-one:7.11\'\n###### \\\n # Set \'operator-builder-one\' image defined environment variables\n ENV \\\n JBOSS_IMAGE_NAME="rhpam-7/rhpam-kogito-operator" \\\n JBOSS_IMAGE_VERSION="7.11"\n # Set \'operator-builder-one\' image defined labels\n LABEL \\\n name="rhpam-7/rhpam-kogito-operator" \\\n version="7.11"\n###### /\n###### END image \'operator-builder-one:7.11\'\n\n\n## /\n## END builder image\n## START builder image operator-builder-two:7.11\n## \\\n FROM registry.access.redhat.com/ubi8/go-toolset:1.14.12 AS operator-builder-two\n USER root\n\n COPY $REMOTE_SOURCE $REMOTE_SOURCE_DIR\n WORKDIR $REMOTE_SOURCE_DIR/app\n\n###### START image \'operator-builder-two:7.11\'\n###### \\\n # Set \'operator-builder-two\' image defined environment variables\n ENV \\\n JBOSS_IMAGE_NAME="rhpam-7/rhpam-kogito-operator" \\\n JBOSS_IMAGE_VERSION="7.11"\n # Set \'operator-builder-two\' image defined labels\n LABEL \\\n name="rhpam-7/rhpam-kogito-operator" \\\n version="7.11"\n###### /\n###### END image \'operator-builder-two:7.11\'\n\n RUN rm -rf $REMOTE_SOURCE_DIR\n\n## /\n## END builder image\n\n## START target image rhpam-7/rhpam-kogito-operator:7.11\n## \\\n FROM registry.access.redhat.com/ubi8/ubi-minimal:latest\n\n\n USER root\n\n###### START image \'rhpam-7/rhpam-kogito-operator:7.11\'\n###### \\\n # Copy \'rhpam-7/rhpam-kogito-operator\' image stage artifacts\n COPY --from=operator-builder /workspace/rhpam-kogito-operator-manager /usr/local/bin/rhpam-kogito-operator-manager\n # Set \'rhpam-7/rhpam-kogito-operator\' image defined environment variables\n ENV \\\n JBOSS_IMAGE_NAME="rhpam-7/rhpam-kogito-operator" \\\n JBOSS_IMAGE_VERSION="7.11"\n # Set \'rhpam-7/rhpam-kogito-operator\' image defined labels\n LABEL \\\n com.redhat.component="rhpam-7-kogito-rhel8-operator-container" \\\n description="Runtime Image for the RHPAM Kogito Operator" \\\n io.cekit.version="VVVVV" \\\n io.k8s.description="Operator for deploying RHPAM Kogito Application" \\\n io.k8s.display-name="Red Hat PAM Kogito Operator" \\\n io.openshift.tags="rhpam,kogito,operator" \\\n maintainer="bsig-cloud@redhat.com" \\\n name="rhpam-7/rhpam-kogito-operator" \\\n summary="Runtime Image for the RHPAM Kogito Operator" \\\n version="7.11"\n###### /\n###### END image \'rhpam-7/rhpam-kogito-operator:7.11\'\n\n\n\n # Switch to \'root\' user and remove artifacts and modules\n USER root\n RUN [ ! -d /tmp/scripts ] || rm -rf /tmp/scripts\n RUN [ ! -d /tmp/artifacts ] || rm -rf /tmp/artifacts\n # Define the user\n USER 1001\n## /\n## END target image'.replace('VVVVV', __version__) in dockerfile
container_path = os.path.join(str(tmpdir), 'rhpam', 'target', 'image', 'container.yaml')
assert os.path.exists(container_path) is True
with open(container_path, 'r') as _file:
containerfile = _file.read()
print('\n' + containerfile + '\n')
assert 'image_build_method: imagebuilder\nplatforms:\n only:\n - x86_64\nremote_source:\n pkg_managers:\n - gomod\n ref: db4a5d18f5f52a64083d8f1bd1776ad60a46904c\n repo: https://github.com/kiegroup/rhpam-kogito-operator' in containerfile
|
cekit
|
positive
|
def run_benchmark_train_test_cv_ml(n_jobs=1):
print('Fit by cv_ml & Evaluation')
model_dict_cv_ml = {'CKDE_cv_ml': {'estimator': ['ConditionalKernelDensityEstimation'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'bandwidth': ['cv_ml'], 'random_seed': [22]}, 'NKDE_cv_ml': {'estimator': ['NeighborKernelDensityEstimation'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'param_selection': ['cv_ml'], 'random_seed': [22]}}
<DeepExtract>
model_configs = {}
for (model_key, conf_dict) in model_dict_cv_ml.items():
model_configs[model_key] = [dict(zip(conf_dict.keys(), value_tuple)) for value_tuple in list(itertools.product(*list(conf_dict.values())))]
configs_initialized = {}
for (model_key, model_conf_list) in model_configs.items():
configs_initialized[model_key] = []
for (i, conf) in enumerate(model_conf_list):
conf['name'] = model_name_prefix + model_key.replace(' ', '_') + '_%i' % i
if VERBOSE:
print('instantiating ', conf['name'])
' remove estimator entry from dict to instantiate it'
estimator = conf.pop('estimator')
configs_initialized[model_key].append(globals()[estimator](**conf))
model_dict_cv_ml = configs_initialized
</DeepExtract>
model_dict = OrderedDict(list(model_dict.items()))
<DeepExtract>
result_dict = {}
manager = Manager()
result_list_model = manager.list()
if n_jobs == -1:
n_jobs = len(SEEDS)
if multiprocessing:
executor = AsyncExecutor(n_jobs=n_jobs)
eval = lambda est: result_list_model.append(empirical_evaluation(est, VALIDATION_PORTION, moment_r2=True, eval_by_fc=False, fit_by_cv=False))
for (model_name, models) in model_dict.items():
print('Running likelihood fit and validation for %s' % model_name)
t = time.time()
if multiprocessing:
executor.run(eval, models)
else:
for est in models:
eval(est)
assert len(result_list_model) == len(models)
(mean_logli_list, mu_rmse_list, std_rmse_list, std_intraday_rmse_list) = list(zip(*list(result_list_model)))
for _ in range(len(result_list_model)):
del result_list_model[0]
assert len(result_list_model) == 0
(mean_logli, mean_logli_dev) = (np.mean(mean_logli_list), np.std(mean_logli_list))
(mu_rmse, mu_rmse_dev) = (np.mean(mu_rmse_list), np.std(mu_rmse_list))
(std_rmse, std_rmse_dev) = (np.mean(std_rmse_list), np.std(std_rmse_list))
(std_intraday_rmse, std_intraday_rmse_dev) = (np.mean(std_intraday_rmse_list), np.std(std_intraday_rmse_list))
result_dict[model_name] = (mean_logli, mean_logli_dev, mu_rmse, mu_rmse_dev, std_rmse, std_rmse_dev, std_intraday_rmse, std_intraday_rmse_dev)
print('%s results:' % model_name, result_dict[model_name])
print('Duration of %s:' % model_name, time.time() - t)
df = pd.DataFrame.from_dict(result_dict, 'index')
df.columns = ['log_likelihood', 'log_likelihood_dev', 'rmse_mean', 'rmse_mean_dev', 'rmse_std', 'rmse_std_dev', 'rmse_std_intraday', 'rmse_std_intraday_dev']
result_df_cv_ml = df
</DeepExtract>
print(result_df_cv_ml)
print(result_df_cv_ml.to_latex())
|
def run_benchmark_train_test_cv_ml(n_jobs=1):
print('Fit by cv_ml & Evaluation')
model_dict_cv_ml = {'CKDE_cv_ml': {'estimator': ['ConditionalKernelDensityEstimation'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'bandwidth': ['cv_ml'], 'random_seed': [22]}, 'NKDE_cv_ml': {'estimator': ['NeighborKernelDensityEstimation'], 'ndim_x': [ndim_x], 'ndim_y': [ndim_y], 'param_selection': ['cv_ml'], 'random_seed': [22]}}
model_configs = {}
for (model_key, conf_dict) in model_dict_cv_ml.items():
model_configs[model_key] = [dict(zip(conf_dict.keys(), value_tuple)) for value_tuple in list(itertools.product(*list(conf_dict.values())))]
configs_initialized = {}
for (model_key, model_conf_list) in model_configs.items():
configs_initialized[model_key] = []
for (i, conf) in enumerate(model_conf_list):
conf['name'] = model_name_prefix + model_key.replace(' ', '_') + '_%i' % i
if VERBOSE:
print('instantiating ', conf['name'])
' remove estimator entry from dict to instantiate it'
estimator = conf.pop('estimator')
configs_initialized[model_key].append(globals()[estimator](**conf))
model_dict_cv_ml = configs_initialized
model_dict = OrderedDict(list(model_dict.items()))
result_dict = {}
manager = Manager()
result_list_model = manager.list()
if n_jobs == -1:
n_jobs = len(SEEDS)
if multiprocessing:
executor = AsyncExecutor(n_jobs=n_jobs)
eval = lambda est: result_list_model.append(empirical_evaluation(est, VALIDATION_PORTION, moment_r2=True, eval_by_fc=False, fit_by_cv=False))
for (model_name, models) in model_dict.items():
print('Running likelihood fit and validation for %s' % model_name)
t = time.time()
if multiprocessing:
executor.run(eval, models)
else:
for est in models:
eval(est)
assert len(result_list_model) == len(models)
(mean_logli_list, mu_rmse_list, std_rmse_list, std_intraday_rmse_list) = list(zip(*list(result_list_model)))
for _ in range(len(result_list_model)):
del result_list_model[0]
assert len(result_list_model) == 0
(mean_logli, mean_logli_dev) = (np.mean(mean_logli_list), np.std(mean_logli_list))
(mu_rmse, mu_rmse_dev) = (np.mean(mu_rmse_list), np.std(mu_rmse_list))
(std_rmse, std_rmse_dev) = (np.mean(std_rmse_list), np.std(std_rmse_list))
(std_intraday_rmse, std_intraday_rmse_dev) = (np.mean(std_intraday_rmse_list), np.std(std_intraday_rmse_list))
result_dict[model_name] = (mean_logli, mean_logli_dev, mu_rmse, mu_rmse_dev, std_rmse, std_rmse_dev, std_intraday_rmse, std_intraday_rmse_dev)
print('%s results:' % model_name, result_dict[model_name])
print('Duration of %s:' % model_name, time.time() - t)
df = pd.DataFrame.from_dict(result_dict, 'index')
df.columns = ['log_likelihood', 'log_likelihood_dev', 'rmse_mean', 'rmse_mean_dev', 'rmse_std', 'rmse_std_dev', 'rmse_std_intraday', 'rmse_std_intraday_dev']
result_df_cv_ml = df
print(result_df_cv_ml)
print(result_df_cv_ml.to_latex())
|
Conditional_Density_Estimation
|
positive
|
def getGroupedInstrumentUpdates(self):
allInstrumentUpdates = []
for instrumentId in self._instrumentIds:
print('Processing data for stock: %s' % instrumentId)
fileName = self.getFileName(instrumentId)
if not self.downloadAndAdjustData(instrumentId, fileName):
continue
with open(fileName) as f:
records = csv.DictReader(f)
for row in records:
try:
<DeepExtract>
raise NotImplementedError
</DeepExtract>
allInstrumentUpdates.append(inst)
except:
continue
(timeUpdates, groupedInstrumentUpdates) = groupAndSortByTimeUpdates(allInstrumentUpdates)
return (timeUpdates, groupedInstrumentUpdates)
|
def getGroupedInstrumentUpdates(self):
allInstrumentUpdates = []
for instrumentId in self._instrumentIds:
print('Processing data for stock: %s' % instrumentId)
fileName = self.getFileName(instrumentId)
if not self.downloadAndAdjustData(instrumentId, fileName):
continue
with open(fileName) as f:
records = csv.DictReader(f)
for row in records:
try:
raise NotImplementedError
allInstrumentUpdates.append(inst)
except:
continue
(timeUpdates, groupedInstrumentUpdates) = groupAndSortByTimeUpdates(allInstrumentUpdates)
return (timeUpdates, groupedInstrumentUpdates)
|
auquantoolbox
|
positive
|
@power.setter
def power(self, value):
"""Runs tests and converts power rgb values before setting"""
<DeepExtract>
if type(value) in [Decimal, float, int, str]:
try:
value = self._check_single_value(value, 'power', negative_allow)
except (TypeError, ValueError):
raise
else:
set_value = [value] * 3
elif type(value) in [list, tuple]:
try:
value = self._check_rgb_values(value, 'power', negative_allow)
except (TypeError, ValueError):
raise
else:
set_value = value
else:
raise TypeError('{name} cannot be set directly with objects of type: "{type}". Value given: "{value}".'.format(name='power'.title(), type=type(value), value=value))
value = set_value
</DeepExtract>
self._power = value
|
@power.setter
def power(self, value):
"""Runs tests and converts power rgb values before setting"""
if type(value) in [Decimal, float, int, str]:
try:
value = self._check_single_value(value, 'power', negative_allow)
except (TypeError, ValueError):
raise
else:
set_value = [value] * 3
elif type(value) in [list, tuple]:
try:
value = self._check_rgb_values(value, 'power', negative_allow)
except (TypeError, ValueError):
raise
else:
set_value = value
else:
raise TypeError('{name} cannot be set directly with objects of type: "{type}". Value given: "{value}".'.format(name='power'.title(), type=type(value), value=value))
value = set_value
self._power = value
|
cdl_convert
|
positive
|
def get_plots_32(env_list):
with open('report.md', 'a') as f:
f.write('### Question 3.2\n\n<p float="left">\n')
for env_name in env_list:
<DeepExtract>
expert = load_json(os.path.join(DATA_DIR, env_name + '.json'))
exp_dir = os.path.join(EXPERIMENTS_DIR, env_name)
bc = load_json(os.path.join(exp_dir, 'behavioral_cloning', env_name + '_results.json'))
dagger = load_json(os.path.join(exp_dir, 'dagger', env_name + '_results.json'))
ax = plt.figure().gca()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.plot(np.arange(1, len(bc['epochs']) + 1), np.mean(bc['returns'], axis=-1), color='b', label='Behavioral Cloning')
plt.errorbar(np.arange(1, len(bc['epochs']) + 1), np.mean(bc['returns'], axis=-1), np.std(bc['returns'], axis=-1), fmt='.', color='b')
plt.plot(np.arange(1, len(dagger['epochs']) + 1), np.mean(dagger['returns'], axis=-1), color='r', label='DAgger')
plt.errorbar(np.arange(1, len(dagger['epochs']) + 1), np.mean(dagger['returns'], axis=-1), np.std(dagger['returns'], axis=-1), fmt='.', color='r')
plt.fill_between(np.arange(1, len(bc['epochs']) + 1), expert['mean_return'] - expert['std_return'], expert['mean_return'] + expert['std_return'], label='Expert', color='g')
plt.xlabel('DAgger iterations')
plt.ylabel('Return')
plt.legend(loc='best')
plt.title(env_name)
plt.savefig(os.path.join(exp_dir, env_name + '.png'), bbox_inches='tight', transparent=True, pad_inches=0.1)
path = os.path.join(exp_dir, env_name + '.png')
</DeepExtract>
f.write(' <img src="{}" width="350"/>\n'.format(path))
f.write('</p>\n\n')
|
def get_plots_32(env_list):
with open('report.md', 'a') as f:
f.write('### Question 3.2\n\n<p float="left">\n')
for env_name in env_list:
expert = load_json(os.path.join(DATA_DIR, env_name + '.json'))
exp_dir = os.path.join(EXPERIMENTS_DIR, env_name)
bc = load_json(os.path.join(exp_dir, 'behavioral_cloning', env_name + '_results.json'))
dagger = load_json(os.path.join(exp_dir, 'dagger', env_name + '_results.json'))
ax = plt.figure().gca()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.plot(np.arange(1, len(bc['epochs']) + 1), np.mean(bc['returns'], axis=-1), color='b', label='Behavioral Cloning')
plt.errorbar(np.arange(1, len(bc['epochs']) + 1), np.mean(bc['returns'], axis=-1), np.std(bc['returns'], axis=-1), fmt='.', color='b')
plt.plot(np.arange(1, len(dagger['epochs']) + 1), np.mean(dagger['returns'], axis=-1), color='r', label='DAgger')
plt.errorbar(np.arange(1, len(dagger['epochs']) + 1), np.mean(dagger['returns'], axis=-1), np.std(dagger['returns'], axis=-1), fmt='.', color='r')
plt.fill_between(np.arange(1, len(bc['epochs']) + 1), expert['mean_return'] - expert['std_return'], expert['mean_return'] + expert['std_return'], label='Expert', color='g')
plt.xlabel('DAgger iterations')
plt.ylabel('Return')
plt.legend(loc='best')
plt.title(env_name)
plt.savefig(os.path.join(exp_dir, env_name + '.png'), bbox_inches='tight', transparent=True, pad_inches=0.1)
path = os.path.join(exp_dir, env_name + '.png')
f.write(' <img src="{}" width="350"/>\n'.format(path))
f.write('</p>\n\n')
|
cs294-112_hws
|
positive
|
def iterate_nodes(self, string_key, distinct=True):
"""Given a string key it returns the nodes as a generator that can hold the key.
The generator iterates one time through the ring
starting at the correct position.
if `distinct` is set, then the nodes returned will be unique,
i.e. no virtual copies will be returned.
"""
if not self.ring:
yield (None, None)
returned_values = set()
def distinct_filter(value):
if str(value) not in returned_values:
returned_values.add(str(value))
return value
<DeepExtract>
if not self.ring:
pos = None
key = self.gen_key(string_key)
nodes = self._sorted_keys
pos = bisect(nodes, key)
if pos == len(nodes):
pos = 0
else:
pos = pos
</DeepExtract>
for key in self._sorted_keys[pos:]:
<DeepExtract>
if str(self.ring[key]) not in returned_values:
returned_values.add(str(self.ring[key]))
val = self.ring[key]
</DeepExtract>
if val:
yield val
for (i, key) in enumerate(self._sorted_keys):
if i < pos:
<DeepExtract>
if str(self.ring[key]) not in returned_values:
returned_values.add(str(self.ring[key]))
val = self.ring[key]
</DeepExtract>
if val:
yield val
|
def iterate_nodes(self, string_key, distinct=True):
"""Given a string key it returns the nodes as a generator that can hold the key.
The generator iterates one time through the ring
starting at the correct position.
if `distinct` is set, then the nodes returned will be unique,
i.e. no virtual copies will be returned.
"""
if not self.ring:
yield (None, None)
returned_values = set()
def distinct_filter(value):
if str(value) not in returned_values:
returned_values.add(str(value))
return value
if not self.ring:
pos = None
key = self.gen_key(string_key)
nodes = self._sorted_keys
pos = bisect(nodes, key)
if pos == len(nodes):
pos = 0
else:
pos = pos
for key in self._sorted_keys[pos:]:
if str(self.ring[key]) not in returned_values:
returned_values.add(str(self.ring[key]))
val = self.ring[key]
if val:
yield val
for (i, key) in enumerate(self._sorted_keys):
if i < pos:
if str(self.ring[key]) not in returned_values:
returned_values.add(str(self.ring[key]))
val = self.ring[key]
if val:
yield val
|
cola
|
positive
|
def test_counter_labels(self):
cmf = CounterMetricFamily('c_total', 'help', labels=['a', 'c_total'])
cmf.add_metric(['b', 'd'], 2)
<DeepExtract>
class CustomCollector:
def collect(self):
return [cmf]
self.registry.register(CustomCollector())
</DeepExtract>
self.assertEqual(2, self.registry.get_sample_value('c_total', {'a': 'b', 'c_total': 'd'}))
|
def test_counter_labels(self):
cmf = CounterMetricFamily('c_total', 'help', labels=['a', 'c_total'])
cmf.add_metric(['b', 'd'], 2)
class CustomCollector:
def collect(self):
return [cmf]
self.registry.register(CustomCollector())
self.assertEqual(2, self.registry.get_sample_value('c_total', {'a': 'b', 'c_total': 'd'}))
|
client_python
|
positive
|
def test_restart(self):
"""Test stopping and restarting the stopwatch."""
t1 = time.time()
cpu1 = time.clock()
self.sw.start()
<DeepExtract>
cube_util.synchronize(time.time() + random.gauss(1.0, 0.24))
</DeepExtract>
t2 = time.time()
cpu2 = time.clock()
self.sw.stop()
<DeepExtract>
cube_util.synchronize(time.time() + random.gauss(1.0, 0.24))
</DeepExtract>
t3 = time.time()
cpu3 = time.clock()
self.sw.start()
<DeepExtract>
cube_util.synchronize(time.time() + random.gauss(1.0, 0.24))
</DeepExtract>
t4 = time.time()
cpu4 = time.clock()
self.sw.stop()
estimated_time = t2 - t1 + (t4 - t3)
estimated_cpu = cpu2 - cpu1 + (cpu4 - cpu3)
<DeepExtract>
(elapsed_time, cpu_time) = self.sw.read()
self.assertAlmostEqual(estimated_time, elapsed_time, delta=self.DELTA, msg='elapsed_time does not match.')
self.assertAlmostEqual(estimated_cpu, cpu_time, delta=self.DELTA, msg='cpu_time does not match.')
</DeepExtract>
|
def test_restart(self):
"""Test stopping and restarting the stopwatch."""
t1 = time.time()
cpu1 = time.clock()
self.sw.start()
cube_util.synchronize(time.time() + random.gauss(1.0, 0.24))
t2 = time.time()
cpu2 = time.clock()
self.sw.stop()
cube_util.synchronize(time.time() + random.gauss(1.0, 0.24))
t3 = time.time()
cpu3 = time.clock()
self.sw.start()
cube_util.synchronize(time.time() + random.gauss(1.0, 0.24))
t4 = time.time()
cpu4 = time.clock()
self.sw.stop()
estimated_time = t2 - t1 + (t4 - t3)
estimated_cpu = cpu2 - cpu1 + (cpu4 - cpu3)
(elapsed_time, cpu_time) = self.sw.read()
self.assertAlmostEqual(estimated_time, elapsed_time, delta=self.DELTA, msg='elapsed_time does not match.')
self.assertAlmostEqual(estimated_cpu, cpu_time, delta=self.DELTA, msg='cpu_time does not match.')
|
agdc
|
positive
|
def evaluate(self, data: List[ContextAndQuestion], true_len, **kargs):
if self.text_eval == 'triviaqa':
<DeepExtract>
scores = np.zeros((len(data), 4))
for i in range(len(data)):
para = data[i]
ans = para.answer
pred_span = kargs['spans'][i]
pred_text = ' '.join(para.get_context()[pred_span[0]:pred_span[1] + 1])
span_correct = False
span_max_f1 = 0
text_correct = 0
text_max_f1 = 0
for (word_start, word_end) in ans.answer_spans:
answer_span = (word_start, word_end)
span_max_f1 = max(span_max_f1, compute_span_f1(answer_span, pred_span))
if answer_span == tuple(pred_span):
span_correct = True
for text in ans.answer_text:
f1 = triviaqa_f1_score(pred_text, text)
correct = triviaqa_em_score(pred_text, text)
text_correct = max(text_correct, correct)
text_max_f1 = max(text_max_f1, f1)
scores[i] = [span_correct, span_max_f1, text_correct, text_max_f1]
scores = scores
</DeepExtract>
elif self.text_eval == 'squad':
<DeepExtract>
scores = np.zeros((len(data), 4))
for i in range(len(data)):
para = data[i]
pred_span = tuple(kargs['spans'][i])
pred_text = para.get_original_text(pred_span[0], pred_span[1])
span_correct = False
span_max_f1 = 0
text_correct = 0
text_max_f1 = 0
answer = data[i].answer
for ((start, end), text) in zip(answer.answer_spans, answer.answer_text):
answer_span = (start, end)
span_max_f1 = max(span_max_f1, compute_span_f1(answer_span, pred_span))
if answer_span == pred_span:
span_correct = True
f1 = squad_official_f1_score(pred_text, text)
correct = squad_official_em_score(pred_text, text)
text_correct = max(text_correct, correct)
text_max_f1 = max(text_max_f1, f1)
scores[i] = [span_correct, span_max_f1, text_correct, text_max_f1]
scores = scores
</DeepExtract>
else:
raise RuntimeError()
has_answer = [len(x.answer.answer_spans) > 0 for x in data]
aggregated_scores = scores[has_answer].mean(axis=0)
prefix = 'b%d/' % self.bound
scalars = {prefix + 'accuracy': aggregated_scores[0], prefix + 'f1': aggregated_scores[1], prefix + 'text-accuracy': aggregated_scores[2], prefix + 'text-f1': aggregated_scores[3]}
if self.rank_metric == 'spr':
metric = spearmanr
elif self.rank_metric == 'k-tau':
metric = kendalltau
else:
raise ValueError()
if 'none_prob' in kargs:
none_conf = kargs['none_prob']
scalars[prefix + 'none-text-f1-' + self.rank_metric] = metric(none_conf, scores[:, 3])[0]
scalars[prefix + 'none-span-accuracy-' + self.rank_metric] = metric(none_conf, scores[:, 0])[0]
conf = kargs['conf']
scalars[prefix + 'score-text-f1-' + self.rank_metric] = metric(conf, scores[:, 3])[0]
scalars[prefix + 'score-span-accuracy-' + self.rank_metric] = metric(conf, scores[:, 0])[0]
return Evaluation(scalars)
|
def evaluate(self, data: List[ContextAndQuestion], true_len, **kargs):
if self.text_eval == 'triviaqa':
scores = np.zeros((len(data), 4))
for i in range(len(data)):
para = data[i]
ans = para.answer
pred_span = kargs['spans'][i]
pred_text = ' '.join(para.get_context()[pred_span[0]:pred_span[1] + 1])
span_correct = False
span_max_f1 = 0
text_correct = 0
text_max_f1 = 0
for (word_start, word_end) in ans.answer_spans:
answer_span = (word_start, word_end)
span_max_f1 = max(span_max_f1, compute_span_f1(answer_span, pred_span))
if answer_span == tuple(pred_span):
span_correct = True
for text in ans.answer_text:
f1 = triviaqa_f1_score(pred_text, text)
correct = triviaqa_em_score(pred_text, text)
text_correct = max(text_correct, correct)
text_max_f1 = max(text_max_f1, f1)
scores[i] = [span_correct, span_max_f1, text_correct, text_max_f1]
scores = scores
elif self.text_eval == 'squad':
scores = np.zeros((len(data), 4))
for i in range(len(data)):
para = data[i]
pred_span = tuple(kargs['spans'][i])
pred_text = para.get_original_text(pred_span[0], pred_span[1])
span_correct = False
span_max_f1 = 0
text_correct = 0
text_max_f1 = 0
answer = data[i].answer
for ((start, end), text) in zip(answer.answer_spans, answer.answer_text):
answer_span = (start, end)
span_max_f1 = max(span_max_f1, compute_span_f1(answer_span, pred_span))
if answer_span == pred_span:
span_correct = True
f1 = squad_official_f1_score(pred_text, text)
correct = squad_official_em_score(pred_text, text)
text_correct = max(text_correct, correct)
text_max_f1 = max(text_max_f1, f1)
scores[i] = [span_correct, span_max_f1, text_correct, text_max_f1]
scores = scores
else:
raise RuntimeError()
has_answer = [len(x.answer.answer_spans) > 0 for x in data]
aggregated_scores = scores[has_answer].mean(axis=0)
prefix = 'b%d/' % self.bound
scalars = {prefix + 'accuracy': aggregated_scores[0], prefix + 'f1': aggregated_scores[1], prefix + 'text-accuracy': aggregated_scores[2], prefix + 'text-f1': aggregated_scores[3]}
if self.rank_metric == 'spr':
metric = spearmanr
elif self.rank_metric == 'k-tau':
metric = kendalltau
else:
raise ValueError()
if 'none_prob' in kargs:
none_conf = kargs['none_prob']
scalars[prefix + 'none-text-f1-' + self.rank_metric] = metric(none_conf, scores[:, 3])[0]
scalars[prefix + 'none-span-accuracy-' + self.rank_metric] = metric(none_conf, scores[:, 0])[0]
conf = kargs['conf']
scalars[prefix + 'score-text-f1-' + self.rank_metric] = metric(conf, scores[:, 3])[0]
scalars[prefix + 'score-span-accuracy-' + self.rank_metric] = metric(conf, scores[:, 0])[0]
return Evaluation(scalars)
|
document-qa
|
positive
|
def _check_against(text, expected_type, allowable_netcodes):
try:
data = encoding.a2b_hashed_base58(text)
<DeepExtract>
d = {}
for length in (4, 1):
for (network, the_type) in NETWORK_PREFIXES.get(data[:length], []):
d[network.code] = the_type
for netcode in allowable_netcodes:
v = d.get(netcode)
if v:
(netcode, the_type) = (netcode, v)
raise encoding.EncodingError('unknown prefix')
</DeepExtract>
if the_type in expected_type and netcode in allowable_netcodes:
return netcode
except encoding.EncodingError:
pass
return None
|
def _check_against(text, expected_type, allowable_netcodes):
try:
data = encoding.a2b_hashed_base58(text)
d = {}
for length in (4, 1):
for (network, the_type) in NETWORK_PREFIXES.get(data[:length], []):
d[network.code] = the_type
for netcode in allowable_netcodes:
v = d.get(netcode)
if v:
(netcode, the_type) = (netcode, v)
raise encoding.EncodingError('unknown prefix')
if the_type in expected_type and netcode in allowable_netcodes:
return netcode
except encoding.EncodingError:
pass
return None
|
dashman
|
positive
|
@with_dimension
@pytest.mark.parametrize('provide_simplex', [True, False])
def test_adding_point_inside_standard_simplex(dim, provide_simplex):
t = Triangulation(_make_standard_simplex(dim))
first_simplex = tuple(range(dim + 1))
inside_simplex = (0.1,) * dim
if provide_simplex:
<DeepExtract>
old_simplices = t.simplices.copy()
(deleted_simplices, created_simplices) = t.add_point(inside_simplex, simplex=first_simplex)
new_simplices = t.simplices.copy()
assert deleted_simplices == old_simplices - new_simplices
assert created_simplices == new_simplices - old_simplices
</DeepExtract>
else:
<DeepExtract>
old_simplices = t.simplices.copy()
(deleted_simplices, created_simplices) = t.add_point(inside_simplex, simplex=simplex)
new_simplices = t.simplices.copy()
assert deleted_simplices == old_simplices - new_simplices
assert created_simplices == new_simplices - old_simplices
</DeepExtract>
added_point = dim + 1
<DeepExtract>
_check_simplices_are_valid(t)
_check_faces_are_valid(t)
_check_hull_is_valid(t)
</DeepExtract>
other_points = list(range(dim + 1))
expected_simplices = {(*points, added_point) for points in itertools.combinations(other_points, dim)}
assert expected_simplices == t.simplices
assert np.isclose(np.sum(t.volumes()), _standard_simplex_volume(dim))
|
@with_dimension
@pytest.mark.parametrize('provide_simplex', [True, False])
def test_adding_point_inside_standard_simplex(dim, provide_simplex):
t = Triangulation(_make_standard_simplex(dim))
first_simplex = tuple(range(dim + 1))
inside_simplex = (0.1,) * dim
if provide_simplex:
old_simplices = t.simplices.copy()
(deleted_simplices, created_simplices) = t.add_point(inside_simplex, simplex=first_simplex)
new_simplices = t.simplices.copy()
assert deleted_simplices == old_simplices - new_simplices
assert created_simplices == new_simplices - old_simplices
else:
old_simplices = t.simplices.copy()
(deleted_simplices, created_simplices) = t.add_point(inside_simplex, simplex=simplex)
new_simplices = t.simplices.copy()
assert deleted_simplices == old_simplices - new_simplices
assert created_simplices == new_simplices - old_simplices
added_point = dim + 1
_check_simplices_are_valid(t)
_check_faces_are_valid(t)
_check_hull_is_valid(t)
other_points = list(range(dim + 1))
expected_simplices = {(*points, added_point) for points in itertools.combinations(other_points, dim)}
assert expected_simplices == t.simplices
assert np.isclose(np.sum(t.volumes()), _standard_simplex_volume(dim))
|
adaptive
|
positive
|
def test_api_get_registration_subject_not_found(kafka_cluster, load_file):
"""
Attempts to obtain information about a schema's subject registration for
an unknown subject.
Args:
kafka_cluster (KafkaClusterFixture): Kafka Cluster fixture
load_file (callable(str)): Schema fixture constructor
"""
sr = kafka_cluster.schema_registry()
schema = Schema(load_file('basic_schema.avsc'), schema_type='AVRO')
<DeepExtract>
subject = 'registration_subject_not_found' + '-' + str(uuid1())
</DeepExtract>
with pytest.raises(SchemaRegistryError, match='Subject .*not found.*') as e:
sr.lookup_schema(subject, schema)
assert e.value.http_status_code == 404
assert e.value.error_code == 40401
|
def test_api_get_registration_subject_not_found(kafka_cluster, load_file):
"""
Attempts to obtain information about a schema's subject registration for
an unknown subject.
Args:
kafka_cluster (KafkaClusterFixture): Kafka Cluster fixture
load_file (callable(str)): Schema fixture constructor
"""
sr = kafka_cluster.schema_registry()
schema = Schema(load_file('basic_schema.avsc'), schema_type='AVRO')
subject = 'registration_subject_not_found' + '-' + str(uuid1())
with pytest.raises(SchemaRegistryError, match='Subject .*not found.*') as e:
sr.lookup_schema(subject, schema)
assert e.value.http_status_code == 404
assert e.value.error_code == 40401
|
confluent-kafka-python
|
positive
|
def __init__(self, incl_col_slicer, excl_col_slicer, is_optimized_for_all_cols, verbosity):
self.incl_col_slicer = incl_col_slicer
self.excl_col_slicer = excl_col_slicer
self.verbosity = verbosity
self.index: List[int] = []
self.is_valid = False
self.col_default_range: bool
start_time = time.time()
<DeepExtract>
if self.incl_rec_slicer.indexer.valid and self.excl_rec_slicer.indexer.valid and (len(self.excl_rec_slicer.index) <= 10000):
for spec_item in self.incl_rec_slicer.index:
if spec_item in self.excl_rec_slicer.index:
continue
else:
if len(self.index) > MAX_MEM_INDEX_CNT:
self.index = []
break
self.index.append(spec_item)
else:
self.stop_rec = max(self.index, default=0)
self.is_valid = True
</DeepExtract>
self.duration = time.time() - start_time
if verbosity == 'debug':
<DeepExtract>
print(' ')
print('Column-Index Optimizations: ')
print(f' self.incl_rec_slicer.has_all_inclusions={self.incl_rec_slicer.has_all_inclusions!r}')
print(f' self.incl_rec_slicer.indexer.valid={self.incl_rec_slicer.indexer.valid!r}')
print(f' len(self.incl_rec_slicer.index)={len(self.incl_rec_slicer.index)!r}')
print(f' self.incl_rec_slicer.includes_out_of_order={self.incl_rec_slicer.includes_out_of_order!r}')
print(f' self.incl_rec_slicer.includes_repeats={self.incl_rec_slicer.includes_repeats!r}')
print(f' self.incl_rec_slicer.includes_reverse={self.incl_rec_slicer.includes_reverse!r}')
print(' ')
print(f' self.excl_rec_slicer.has_all_inclusions={self.excl_rec_slicer.has_all_inclusions!r}')
print(f' self.excl_rec_slicer.indexer.valid={self.excl_rec_slicer.indexer.valid!r}')
print(f' len(self.excl_rec_slicer.index)={len(self.excl_rec_slicer.index)!r}')
print(' ')
print(f'--------> setup_index_optimization duration: {self.duration:.2f}')
</DeepExtract>
|
def __init__(self, incl_col_slicer, excl_col_slicer, is_optimized_for_all_cols, verbosity):
self.incl_col_slicer = incl_col_slicer
self.excl_col_slicer = excl_col_slicer
self.verbosity = verbosity
self.index: List[int] = []
self.is_valid = False
self.col_default_range: bool
start_time = time.time()
if self.incl_rec_slicer.indexer.valid and self.excl_rec_slicer.indexer.valid and (len(self.excl_rec_slicer.index) <= 10000):
for spec_item in self.incl_rec_slicer.index:
if spec_item in self.excl_rec_slicer.index:
continue
else:
if len(self.index) > MAX_MEM_INDEX_CNT:
self.index = []
break
self.index.append(spec_item)
else:
self.stop_rec = max(self.index, default=0)
self.is_valid = True
self.duration = time.time() - start_time
if verbosity == 'debug':
print(' ')
print('Column-Index Optimizations: ')
print(f' self.incl_rec_slicer.has_all_inclusions={self.incl_rec_slicer.has_all_inclusions!r}')
print(f' self.incl_rec_slicer.indexer.valid={self.incl_rec_slicer.indexer.valid!r}')
print(f' len(self.incl_rec_slicer.index)={len(self.incl_rec_slicer.index)!r}')
print(f' self.incl_rec_slicer.includes_out_of_order={self.incl_rec_slicer.includes_out_of_order!r}')
print(f' self.incl_rec_slicer.includes_repeats={self.incl_rec_slicer.includes_repeats!r}')
print(f' self.incl_rec_slicer.includes_reverse={self.incl_rec_slicer.includes_reverse!r}')
print(' ')
print(f' self.excl_rec_slicer.has_all_inclusions={self.excl_rec_slicer.has_all_inclusions!r}')
print(f' self.excl_rec_slicer.indexer.valid={self.excl_rec_slicer.indexer.valid!r}')
print(f' len(self.excl_rec_slicer.index)={len(self.excl_rec_slicer.index)!r}')
print(' ')
print(f'--------> setup_index_optimization duration: {self.duration:.2f}')
|
DataGristle
|
positive
|
def inference_1conv_head(self, input, eval_data=False):
"""
Args:
input: 4D tensor of [batch_size, WIDTH, HEIGHT, DEPTHS] size.
Returns:
logits: 2D tensor of [batch_size, NUM_CLASSES].
"""
if eval_data:
batch_size = int(graphcnn_input.EVAL_BATCH_SIZE)
else:
batch_size = int(graphcnn_input.TRAIN_BATCH_SIZE)
with tf.variable_scope('conv1') as scope:
inputmaps = graphcnn_input.NUM_CHANNELS
outputmaps = 64
<DeepExtract>
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.truncated_normal(shape=[1, 5, inputmaps, outputmaps], stddev=0.05), name='weights', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='weights' + '_loss')
tf.add_to_collection('losses', weight_decay)
weights = var
</DeepExtract>
<DeepExtract>
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.constant(0.0, shape=[outputmaps]), name='biases', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='biases' + '_loss')
tf.add_to_collection('losses', weight_decay)
biases = var
</DeepExtract>
conv = tf.nn.conv2d(input, weights, [1, 1, 5, 1], padding='VALID')
conv = tf.nn.bias_add(conv, biases)
output = tf.nn.relu(conv, name=scope.name)
<DeepExtract>
if graphcnn_option.SUMMARYWRITER:
tensor_name = output.op.name
tf.histogram_summary(tensor_name + '/activations', output)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(output))
</DeepExtract>
with tf.variable_scope('norm1') as scope:
output = tf.nn.lrn(output, depth_radius=5, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=scope.name)
return output
|
def inference_1conv_head(self, input, eval_data=False):
"""
Args:
input: 4D tensor of [batch_size, WIDTH, HEIGHT, DEPTHS] size.
Returns:
logits: 2D tensor of [batch_size, NUM_CLASSES].
"""
if eval_data:
batch_size = int(graphcnn_input.EVAL_BATCH_SIZE)
else:
batch_size = int(graphcnn_input.TRAIN_BATCH_SIZE)
with tf.variable_scope('conv1') as scope:
inputmaps = graphcnn_input.NUM_CHANNELS
outputmaps = 64
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.truncated_normal(shape=[1, 5, inputmaps, outputmaps], stddev=0.05), name='weights', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='weights' + '_loss')
tf.add_to_collection('losses', weight_decay)
weights = var
dtype = tf.float16 if graphcnn_option.USE_FP16 else tf.float32
var = tf.Variable(initial_value=tf.constant(0.0, shape=[outputmaps]), name='biases', dtype=dtype)
if graphcnn_option.WEIGHT_DECAY is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), graphcnn_option.WEIGHT_DECAY, name='biases' + '_loss')
tf.add_to_collection('losses', weight_decay)
biases = var
conv = tf.nn.conv2d(input, weights, [1, 1, 5, 1], padding='VALID')
conv = tf.nn.bias_add(conv, biases)
output = tf.nn.relu(conv, name=scope.name)
if graphcnn_option.SUMMARYWRITER:
tensor_name = output.op.name
tf.histogram_summary(tensor_name + '/activations', output)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(output))
with tf.variable_scope('norm1') as scope:
output = tf.nn.lrn(output, depth_radius=5, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=scope.name)
return output
|
DeepGraphCNNforTexts
|
positive
|
def do_init(self, args):
"""Initialize an empty repository"""
print('Initializing repository at "%s"' % args.repository.orig)
<DeepExtract>
if args.repository.proto == 'ssh':
repository = RemoteRepository(args.repository, create=True)
else:
repository = Repository(args.repository.path, create=True, exclusive=True)
repository._location = args.repository
repository = repository
</DeepExtract>
key = key_creator(repository, args)
manifest = Manifest(key, repository)
manifest.key = key
manifest.write()
repository.commit()
Cache(repository, key, manifest, warn_if_unencrypted=False)
return self.exit_code
|
def do_init(self, args):
"""Initialize an empty repository"""
print('Initializing repository at "%s"' % args.repository.orig)
if args.repository.proto == 'ssh':
repository = RemoteRepository(args.repository, create=True)
else:
repository = Repository(args.repository.path, create=True, exclusive=True)
repository._location = args.repository
repository = repository
key = key_creator(repository, args)
manifest = Manifest(key, repository)
manifest.key = key
manifest.write()
repository.commit()
Cache(repository, key, manifest, warn_if_unencrypted=False)
return self.exit_code
|
attic
|
positive
|
@filter_hook
def get_response(self):
<DeepExtract>
add = self.org_obj is None
change = self.org_obj is not None
new_context = {'form': self.form_obj, 'original': self.org_obj, 'show_delete': self.org_obj is not None, 'add': add, 'change': change, 'errors': self.get_error_list(), 'has_add_permission': self.has_add_permission(), 'has_view_permission': self.has_view_permission(), 'has_change_permission': self.has_change_permission(self.org_obj), 'has_delete_permission': self.has_delete_permission(self.org_obj), 'has_file_field': True, 'has_absolute_url': hasattr(self.model, 'get_absolute_url'), 'form_url': '', 'content_type_id': ContentType.objects.get_for_model(self.model).id, 'save_as': self.save_as, 'save_on_top': self.save_on_top}
new_context.update({'onclick_attrib': '', 'show_delete_link': new_context['has_delete_permission'] and (change or new_context['show_delete']), 'show_save_as_new': change and self.save_as, 'show_save_and_add_another': new_context['has_add_permission'] and (not self.save_as or add), 'show_save_and_continue': new_context['has_change_permission'], 'show_save': True})
if self.org_obj and new_context['show_delete_link']:
new_context['delete_url'] = self.model_admin_url('delete', self.org_obj.pk)
context = super(ModelFormAdminView, self).get_context()
context.update(new_context)
context = context
</DeepExtract>
context.update(self.kwargs or {})
return TemplateResponse(self.request, self.add_form_template or self.get_template_list('views/model_form.html'), context)
|
@filter_hook
def get_response(self):
add = self.org_obj is None
change = self.org_obj is not None
new_context = {'form': self.form_obj, 'original': self.org_obj, 'show_delete': self.org_obj is not None, 'add': add, 'change': change, 'errors': self.get_error_list(), 'has_add_permission': self.has_add_permission(), 'has_view_permission': self.has_view_permission(), 'has_change_permission': self.has_change_permission(self.org_obj), 'has_delete_permission': self.has_delete_permission(self.org_obj), 'has_file_field': True, 'has_absolute_url': hasattr(self.model, 'get_absolute_url'), 'form_url': '', 'content_type_id': ContentType.objects.get_for_model(self.model).id, 'save_as': self.save_as, 'save_on_top': self.save_on_top}
new_context.update({'onclick_attrib': '', 'show_delete_link': new_context['has_delete_permission'] and (change or new_context['show_delete']), 'show_save_as_new': change and self.save_as, 'show_save_and_add_another': new_context['has_add_permission'] and (not self.save_as or add), 'show_save_and_continue': new_context['has_change_permission'], 'show_save': True})
if self.org_obj and new_context['show_delete_link']:
new_context['delete_url'] = self.model_admin_url('delete', self.org_obj.pk)
context = super(ModelFormAdminView, self).get_context()
context.update(new_context)
context = context
context.update(self.kwargs or {})
return TemplateResponse(self.request, self.add_form_template or self.get_template_list('views/model_form.html'), context)
|
CTF_AWD_Platform
|
positive
|
def deserialize(self, data, hashmap={}):
"""
This method is called to reconstruct a ``Block`` when loading a saved JSON
file containing all relevant information to recreate the ``Scene`` with all
its items.
:param data: a Dictionary of essential information for reconstructing a ``Block``
:type data: OrderedDict, required
:param hashmap: a Dictionary for directly mapping the essential block parameters
to this instance of ``Block``, without having to individually map each parameter
:type hashmap: Dict, required
:return: True when completed successfully
:rtype: Boolean
"""
self.id = data['id']
try:
if self.block_type not in ['Connector', 'CONNECTOR']:
self.title = data['title']
self.inputsNum = data['inputsNum']
self.outputsNum = data['outputsNum']
self.width = data['width']
self.height = data['height']
try:
if data['flipped']:
self.flipped = data['flipped']
except KeyError:
pass
<DeepExtract>
self.grBlock.setPos(data['pos_x'], data['pos_y'])
</DeepExtract>
if self.inputs:
self.inputs[0].removeSockets('Input')
if self.outputs:
self.outputs[0].removeSockets('Output')
self.inputs = []
self.outputs = []
if self.block_type not in ['Connector', 'CONNECTOR']:
i = 0
for (paramName, paramVal) in data['parameters']:
if DEBUG:
print('----------------------')
if DEBUG:
print('Cautionary check')
if DEBUG:
print('current value:', [self.parameters[i][0], self.parameters[i][1], self.parameters[i][2]])
if DEBUG:
print('setting to value:', [paramName, self.parameters[i][1], paramVal])
self.parameters[i][0] = paramName
self.parameters[i][2] = paramVal
if self.block_type in ['SUBSYSTEM', 'OUTPORT', 'INPORT']:
if paramName == 'inport labels':
if paramVal:
self.input_names = [str(j) for j in paramVal]
if paramName == 'outport labels':
if paramVal:
self.output_names = [str(j) for j in paramVal]
i += 1
for (i, socket_data) in enumerate(data['inputs']):
try:
if self.input_names:
new_socket = Socket(node=self, index=socket_data['index'], position=socket_data['position'], socket_type=socket_data['socket_type'], socket_label=self.input_names[i])
else:
new_socket = Socket(node=self, index=socket_data['index'], position=socket_data['position'], socket_type=socket_data['socket_type'])
except (AttributeError, IndexError):
new_socket = Socket(node=self, index=socket_data['index'], position=socket_data['position'], socket_type=socket_data['socket_type'])
new_socket.deserialize(socket_data, hashmap)
self.inputs.append(new_socket)
<DeepExtract>
if self.block_type == 'PROD' or self.block_type == 'SUM' or self.block_type == 'Prod' or (self.block_type == 'Sum'):
for parameter in self.parameters:
if parameter[0] == 'ops' or parameter[0] == 'signs':
index = 0
for sign in parameter[2]:
self.inputs[index].socket_sign = sign
index += 1
</DeepExtract>
for (i, socket_data) in enumerate(data['outputs']):
try:
if self.output_names:
new_socket = Socket(node=self, index=socket_data['index'], position=socket_data['position'], socket_type=socket_data['socket_type'], socket_label=self.output_names[i])
else:
new_socket = Socket(node=self, index=socket_data['index'], position=socket_data['position'], socket_type=socket_data['socket_type'])
except (AttributeError, IndexError):
new_socket = Socket(node=self, index=socket_data['index'], position=socket_data['position'], socket_type=socket_data['socket_type'])
new_socket.deserialize(socket_data, hashmap)
self.outputs.append(new_socket)
if self.block_type not in ['Connector', 'CONNECTOR']:
if self.parameters:
<DeepExtract>
self.parameterWindow = ParamWindow(self)
self.parameterWindow.setVisible(self._param_visible)
self.window.addWidget(self.parameterWindow, 1, 10, 9, 1)
</DeepExtract>
return True
except (ValueError, NameError, IndexError):
print(f'error deserializing block [{self.block_type}::{self.title}] - maybe JSON file has old function parameters')
|
def deserialize(self, data, hashmap={}):
"""
This method is called to reconstruct a ``Block`` when loading a saved JSON
file containing all relevant information to recreate the ``Scene`` with all
its items.
:param data: a Dictionary of essential information for reconstructing a ``Block``
:type data: OrderedDict, required
:param hashmap: a Dictionary for directly mapping the essential block parameters
to this instance of ``Block``, without having to individually map each parameter
:type hashmap: Dict, required
:return: True when completed successfully
:rtype: Boolean
"""
self.id = data['id']
try:
if self.block_type not in ['Connector', 'CONNECTOR']:
self.title = data['title']
self.inputsNum = data['inputsNum']
self.outputsNum = data['outputsNum']
self.width = data['width']
self.height = data['height']
try:
if data['flipped']:
self.flipped = data['flipped']
except KeyError:
pass
self.grBlock.setPos(data['pos_x'], data['pos_y'])
if self.inputs:
self.inputs[0].removeSockets('Input')
if self.outputs:
self.outputs[0].removeSockets('Output')
self.inputs = []
self.outputs = []
if self.block_type not in ['Connector', 'CONNECTOR']:
i = 0
for (paramName, paramVal) in data['parameters']:
if DEBUG:
print('----------------------')
if DEBUG:
print('Cautionary check')
if DEBUG:
print('current value:', [self.parameters[i][0], self.parameters[i][1], self.parameters[i][2]])
if DEBUG:
print('setting to value:', [paramName, self.parameters[i][1], paramVal])
self.parameters[i][0] = paramName
self.parameters[i][2] = paramVal
if self.block_type in ['SUBSYSTEM', 'OUTPORT', 'INPORT']:
if paramName == 'inport labels':
if paramVal:
self.input_names = [str(j) for j in paramVal]
if paramName == 'outport labels':
if paramVal:
self.output_names = [str(j) for j in paramVal]
i += 1
for (i, socket_data) in enumerate(data['inputs']):
try:
if self.input_names:
new_socket = Socket(node=self, index=socket_data['index'], position=socket_data['position'], socket_type=socket_data['socket_type'], socket_label=self.input_names[i])
else:
new_socket = Socket(node=self, index=socket_data['index'], position=socket_data['position'], socket_type=socket_data['socket_type'])
except (AttributeError, IndexError):
new_socket = Socket(node=self, index=socket_data['index'], position=socket_data['position'], socket_type=socket_data['socket_type'])
new_socket.deserialize(socket_data, hashmap)
self.inputs.append(new_socket)
if self.block_type == 'PROD' or self.block_type == 'SUM' or self.block_type == 'Prod' or (self.block_type == 'Sum'):
for parameter in self.parameters:
if parameter[0] == 'ops' or parameter[0] == 'signs':
index = 0
for sign in parameter[2]:
self.inputs[index].socket_sign = sign
index += 1
for (i, socket_data) in enumerate(data['outputs']):
try:
if self.output_names:
new_socket = Socket(node=self, index=socket_data['index'], position=socket_data['position'], socket_type=socket_data['socket_type'], socket_label=self.output_names[i])
else:
new_socket = Socket(node=self, index=socket_data['index'], position=socket_data['position'], socket_type=socket_data['socket_type'])
except (AttributeError, IndexError):
new_socket = Socket(node=self, index=socket_data['index'], position=socket_data['position'], socket_type=socket_data['socket_type'])
new_socket.deserialize(socket_data, hashmap)
self.outputs.append(new_socket)
if self.block_type not in ['Connector', 'CONNECTOR']:
if self.parameters:
self.parameterWindow = ParamWindow(self)
self.parameterWindow.setVisible(self._param_visible)
self.window.addWidget(self.parameterWindow, 1, 10, 9, 1)
return True
except (ValueError, NameError, IndexError):
print(f'error deserializing block [{self.block_type}::{self.title}] - maybe JSON file has old function parameters')
|
bdsim
|
positive
|
def sanitize_guanidine(mol):
for atom in mol.GetAtoms():
<DeepExtract>
if atom.GetSymbol() == 'C':
terminal_NR2s = get_terminal_NR2s(atom)
mol = atom.GetOwningMol()
if len(terminal_NR2s) == 3:
if verbose:
print('Guanidyl group detected, sanitizing it...')
bond1 = mol.GetBondBetweenAtoms(atom.GetIdx(), terminal_NR2s[0].GetIdx())
bond1.SetBondType(Chem.rdchem.BondType.SINGLE)
terminal_NR2s[0].SetFormalCharge(-1)
bond2 = mol.GetBondBetweenAtoms(atom.GetIdx(), terminal_NR2s[1].GetIdx())
bond2.SetBondType(Chem.rdchem.BondType.SINGLE)
terminal_NR2s[1].SetFormalCharge(0)
bond3 = mol.GetBondBetweenAtoms(atom.GetIdx(), terminal_NR2s[2].GetIdx())
bond3.SetBondType(Chem.rdchem.BondType.DOUBLE)
terminal_NR2s[2].SetFormalCharge(1)
</DeepExtract>
return mol
|
def sanitize_guanidine(mol):
for atom in mol.GetAtoms():
if atom.GetSymbol() == 'C':
terminal_NR2s = get_terminal_NR2s(atom)
mol = atom.GetOwningMol()
if len(terminal_NR2s) == 3:
if verbose:
print('Guanidyl group detected, sanitizing it...')
bond1 = mol.GetBondBetweenAtoms(atom.GetIdx(), terminal_NR2s[0].GetIdx())
bond1.SetBondType(Chem.rdchem.BondType.SINGLE)
terminal_NR2s[0].SetFormalCharge(-1)
bond2 = mol.GetBondBetweenAtoms(atom.GetIdx(), terminal_NR2s[1].GetIdx())
bond2.SetBondType(Chem.rdchem.BondType.SINGLE)
terminal_NR2s[1].SetFormalCharge(0)
bond3 = mol.GetBondBetweenAtoms(atom.GetIdx(), terminal_NR2s[2].GetIdx())
bond3.SetBondType(Chem.rdchem.BondType.DOUBLE)
terminal_NR2s[2].SetFormalCharge(1)
return mol
|
dpdata
|
positive
|
def is_oversized_changed_notification(message_type):
<DeepExtract>
if not message_type:
raise Exception('Error: ', 'messageType', 'is not defined')
return message_type
</DeepExtract>
return message_type == 'OversizedConfigurationItemChangeNotification'
|
def is_oversized_changed_notification(message_type):
if not message_type:
raise Exception('Error: ', 'messageType', 'is not defined')
return message_type
return message_type == 'OversizedConfigurationItemChangeNotification'
|
aws-deployment-framework
|
positive
|
def last_requestline(sent_data):
"""
Find the last line in sent_data that can be parsed with parse_requestline
"""
for line in reversed(sent_data):
try:
<DeepExtract>
methods = '|'.join(HttpBaseClass.METHODS)
m = re.match('(' + methods + ')\\s+(.*)\\s+HTTP/(1.[0|1])', decode_utf8(line), re.I)
if m:
return (m.group(1).upper(), m.group(2), m.group(3))
else:
raise ValueError('Not a Request-Line')
</DeepExtract>
except ValueError:
pass
else:
return line
|
def last_requestline(sent_data):
"""
Find the last line in sent_data that can be parsed with parse_requestline
"""
for line in reversed(sent_data):
try:
methods = '|'.join(HttpBaseClass.METHODS)
m = re.match('(' + methods + ')\\s+(.*)\\s+HTTP/(1.[0|1])', decode_utf8(line), re.I)
if m:
return (m.group(1).upper(), m.group(2), m.group(3))
else:
raise ValueError('Not a Request-Line')
except ValueError:
pass
else:
return line
|
Deadline
|
positive
|
def test_removed_execfile(self):
"""Builtin execfile is removed - use exec() and compile()."""
code = 'execfile("some_filename")'
<DeepExtract>
(_, after) = (up_to_version((3, 0)), from_version((3, 0)))
</DeepExtract>
<DeepExtract>
[] = sorted(listify([], [], str))
(error_type, error_msg) = NAMEERROR
details = 'Running following code :\n---\n{0}\n---'.format(code)
if PythonEnvRange(after, interpreters).contains_current_env():
exc = get_exception(code)
self.assertFalse(exc is None, 'No exc thrown.' + details)
(type_caught, value, traceback) = exc
suggestions = sorted(get_suggestions_for_exception(value, traceback))
self.log_exception(code, exc, suggestions)
self.assertTrue(isinstance(value, type_caught))
self.assertTrue(issubclass(type_caught, error_type), '{0} ({1}) not a subclass of {2}'.format(type_caught, value, error_type) + details)
msg = next((a for a in value.args if isinstance(a, str)), '')
if error_msg:
self.assertRegexp(msg, error_msg, details)
self.assertEqual(suggestions, [], details)
</DeepExtract>
|
def test_removed_execfile(self):
"""Builtin execfile is removed - use exec() and compile()."""
code = 'execfile("some_filename")'
(_, after) = (up_to_version((3, 0)), from_version((3, 0)))
[] = sorted(listify([], [], str))
(error_type, error_msg) = NAMEERROR
details = 'Running following code :\n---\n{0}\n---'.format(code)
if PythonEnvRange(after, interpreters).contains_current_env():
exc = get_exception(code)
self.assertFalse(exc is None, 'No exc thrown.' + details)
(type_caught, value, traceback) = exc
suggestions = sorted(get_suggestions_for_exception(value, traceback))
self.log_exception(code, exc, suggestions)
self.assertTrue(isinstance(value, type_caught))
self.assertTrue(issubclass(type_caught, error_type), '{0} ({1}) not a subclass of {2}'.format(type_caught, value, error_type) + details)
msg = next((a for a in value.args if isinstance(a, str)), '')
if error_msg:
self.assertRegexp(msg, error_msg, details)
self.assertEqual(suggestions, [], details)
|
DidYouMean-Python
|
positive
|
def run(self, args, options):
def setBorder(layer, width, color, colorClass):
fb.evaluateEffect('[%s setBorderWidth:(CGFloat)%s]' % (layer, width))
fb.evaluateEffect('[%s setBorderColor:(CGColorRef)[(id)[%s %sColor] CGColor]]' % (layer, colorClass, color))
obj = fb.evaluateInputExpression(args[0])
depth = int(options.depth)
isMac = runtimeHelpers.isMacintoshArch()
color = options.color
assert color in self.colors, 'Color must be one of the following: {}'.format(' '.join(self.colors))
colorClassName = 'UIColor'
if isMac:
colorClassName = 'NSColor'
if viewHelpers.isView(obj):
prevLevel = 0
for (view, level) in viewHelpers.subviewsOfView(obj):
if level > depth:
break
if prevLevel != level:
<DeepExtract>
assert color in self.colors, '{} is not a supported color'.format(color)
color = self.colors[(self.colors.index(color) + 1) % len(self.colors)]
</DeepExtract>
prevLevel = level
layer = viewHelpers.convertToLayer(view)
<DeepExtract>
fb.evaluateEffect('[%s setBorderWidth:(CGFloat)%s]' % (layer, options.width))
fb.evaluateEffect('[%s setBorderColor:(CGColorRef)[(id)[%s %sColor] CGColor]]' % (layer, colorClassName, color))
</DeepExtract>
else:
assert depth <= 0, 'Recursive bordering is only supported for UIViews or NSViews'
layer = viewHelpers.convertToLayer(obj)
<DeepExtract>
fb.evaluateEffect('[%s setBorderWidth:(CGFloat)%s]' % (layer, options.width))
fb.evaluateEffect('[%s setBorderColor:(CGColorRef)[(id)[%s %sColor] CGColor]]' % (layer, colorClassName, color))
</DeepExtract>
lldb.debugger.HandleCommand('caflush')
|
def run(self, args, options):
def setBorder(layer, width, color, colorClass):
fb.evaluateEffect('[%s setBorderWidth:(CGFloat)%s]' % (layer, width))
fb.evaluateEffect('[%s setBorderColor:(CGColorRef)[(id)[%s %sColor] CGColor]]' % (layer, colorClass, color))
obj = fb.evaluateInputExpression(args[0])
depth = int(options.depth)
isMac = runtimeHelpers.isMacintoshArch()
color = options.color
assert color in self.colors, 'Color must be one of the following: {}'.format(' '.join(self.colors))
colorClassName = 'UIColor'
if isMac:
colorClassName = 'NSColor'
if viewHelpers.isView(obj):
prevLevel = 0
for (view, level) in viewHelpers.subviewsOfView(obj):
if level > depth:
break
if prevLevel != level:
assert color in self.colors, '{} is not a supported color'.format(color)
color = self.colors[(self.colors.index(color) + 1) % len(self.colors)]
prevLevel = level
layer = viewHelpers.convertToLayer(view)
fb.evaluateEffect('[%s setBorderWidth:(CGFloat)%s]' % (layer, options.width))
fb.evaluateEffect('[%s setBorderColor:(CGColorRef)[(id)[%s %sColor] CGColor]]' % (layer, colorClassName, color))
else:
assert depth <= 0, 'Recursive bordering is only supported for UIViews or NSViews'
layer = viewHelpers.convertToLayer(obj)
fb.evaluateEffect('[%s setBorderWidth:(CGFloat)%s]' % (layer, options.width))
fb.evaluateEffect('[%s setBorderColor:(CGColorRef)[(id)[%s %sColor] CGColor]]' % (layer, colorClassName, color))
lldb.debugger.HandleCommand('caflush')
|
chisel
|
positive
|
def forward(self, lrs):
"""
Args:
lrs: Input lr frames: (b, 7, 3, h, w).
Returns:
Tensor: SR frame: (b, 3, h, w).
"""
if self.adapt_official_weights:
lrs = lrs[:, [3, 0, 1, 2, 4, 5, 6], :, :, :]
(num_batches, num_lrs, _, h, w) = lrs.size()
<DeepExtract>
lrs = (lrs.view(-1, 3, h, w) - self.mean) / self.std
</DeepExtract>
lrs = lrs.view(num_batches, num_lrs, 3, h, w)
lr_ref = lrs[:, self.ref_idx, :, :, :]
lr_aligned = []
for i in range(7):
if i == self.ref_idx:
lr_aligned.append(lr_ref)
else:
lr_supp = lrs[:, i, :, :, :]
flow = self.spynet(lr_ref, lr_supp)
lr_aligned.append(flow_warp(lr_supp, flow.permute(0, 2, 3, 1)))
hr = torch.stack(lr_aligned, dim=1)
hr = hr.view(num_batches, -1, h, w)
hr = self.relu(self.conv_1(hr))
hr = self.relu(self.conv_2(hr))
hr = self.relu(self.conv_3(hr))
hr = self.conv_4(hr) + lr_ref
return self.denormalize(hr)
|
def forward(self, lrs):
"""
Args:
lrs: Input lr frames: (b, 7, 3, h, w).
Returns:
Tensor: SR frame: (b, 3, h, w).
"""
if self.adapt_official_weights:
lrs = lrs[:, [3, 0, 1, 2, 4, 5, 6], :, :, :]
(num_batches, num_lrs, _, h, w) = lrs.size()
lrs = (lrs.view(-1, 3, h, w) - self.mean) / self.std
lrs = lrs.view(num_batches, num_lrs, 3, h, w)
lr_ref = lrs[:, self.ref_idx, :, :, :]
lr_aligned = []
for i in range(7):
if i == self.ref_idx:
lr_aligned.append(lr_ref)
else:
lr_supp = lrs[:, i, :, :, :]
flow = self.spynet(lr_ref, lr_supp)
lr_aligned.append(flow_warp(lr_supp, flow.permute(0, 2, 3, 1)))
hr = torch.stack(lr_aligned, dim=1)
hr = hr.view(num_batches, -1, h, w)
hr = self.relu(self.conv_1(hr))
hr = self.relu(self.conv_2(hr))
hr = self.relu(self.conv_3(hr))
hr = self.conv_4(hr) + lr_ref
return self.denormalize(hr)
|
BasicSR
|
positive
|
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
"""
Copy from [insightface](https://github.com/deepinsight/insightface)
:param thresholds:
:param embeddings1:
:param embeddings2:
:param actual_issame:
:param far_target:
:param nrof_folds:
:return:
"""
assert embeddings1.shape[0] == embeddings2.shape[0]
assert embeddings1.shape[1] == embeddings2.shape[1]
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
indices = np.arange(nrof_pairs)
for (fold_idx, (train_set, test_set)) in enumerate(k_fold.split(indices)):
far_train = np.zeros(nrof_thresholds)
for (threshold_idx, threshold) in enumerate(thresholds):
<DeepExtract>
predict_issame = np.less(dist[train_set], threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame[train_set]))
false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame[train_set])))
n_same = np.sum(actual_issame[train_set])
n_diff = np.sum(np.logical_not(actual_issame[train_set]))
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
(_, far_train[threshold_idx]) = (val, far)
</DeepExtract>
if np.max(far_train) >= far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
<DeepExtract>
predict_issame = np.less(dist[test_set], threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame[test_set]))
false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame[test_set])))
n_same = np.sum(actual_issame[test_set])
n_diff = np.sum(np.logical_not(actual_issame[test_set]))
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
(val[fold_idx], far[fold_idx]) = (val, far)
</DeepExtract>
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return (val_mean, val_std, far_mean)
|
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
"""
Copy from [insightface](https://github.com/deepinsight/insightface)
:param thresholds:
:param embeddings1:
:param embeddings2:
:param actual_issame:
:param far_target:
:param nrof_folds:
:return:
"""
assert embeddings1.shape[0] == embeddings2.shape[0]
assert embeddings1.shape[1] == embeddings2.shape[1]
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
indices = np.arange(nrof_pairs)
for (fold_idx, (train_set, test_set)) in enumerate(k_fold.split(indices)):
far_train = np.zeros(nrof_thresholds)
for (threshold_idx, threshold) in enumerate(thresholds):
predict_issame = np.less(dist[train_set], threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame[train_set]))
false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame[train_set])))
n_same = np.sum(actual_issame[train_set])
n_diff = np.sum(np.logical_not(actual_issame[train_set]))
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
(_, far_train[threshold_idx]) = (val, far)
if np.max(far_train) >= far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
predict_issame = np.less(dist[test_set], threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame[test_set]))
false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame[test_set])))
n_same = np.sum(actual_issame[test_set])
n_diff = np.sum(np.logical_not(actual_issame[test_set]))
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
(val[fold_idx], far[fold_idx]) = (val, far)
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return (val_mean, val_std, far_mean)
|
cavaface.pytorch
|
positive
|
def _handle_request(self, data, user, request):
form = forms.ApiFinnaNearestPhotosForm(data)
if form.is_valid():
lon = round(form.cleaned_data['longitude'], 4)
lat = round(form.cleaned_data['latitude'], 4)
query = form.cleaned_data['query'] or ''
album = form.cleaned_data['album'] or ''
if query == '':
distance = 0.1
else:
distance = 100000
if album == 'signebrander':
return self._get_signe_results(lat, lon, query, user, request)
photos = []
<DeepExtract>
finna_filters = ['free_online_boolean:"1"', '~format:"0/Place/"', '~format:"0/Image/"', '~usage_rights_str_mv:"usage_B"', '-topic_facet:"ilmakuvat"', '-topic_facet:"arkeologia"', '-topic_facet:"lentokoneet"', '-topic_facet:"ilmakulkuneuvot lentokoneet"', '-author_facet:"Koivisto Andreas"', '-author_facet:"Kauppinen Anne"', '{!geofilt sfield=location_geo pt=%f,%f d=%f}' % (lat, lon, distance)]
if album == '1918':
finna_filters.append('~era_facet:"1918"')
finna_result_json = requests.get(self.search_url, {'lookfor': query, 'type': 'AllFields', 'limit': self.page_size, 'lng': 'en-gb', 'streetsearch': 1, 'field[]': ['id', 'title', 'images', 'imageRights', 'authors', 'source', 'geoLocations', 'recordPage', 'year', 'summary', 'rawData'], 'filter[]': finna_filters})
finna_result = finna_result_json.json()
if 'records' in finna_result:
records = finna_result['records']
elif distance < 1000:
records = self._get_finna_results(lat, lon, query, album, distance * 100)
else:
records = []
</DeepExtract>
for p in records:
comma = ', '
authors = []
if 'authors' in p:
if p['authors']['primary']:
for (k, each) in p['authors']['primary'].items():
authors.append(k)
if 'geoLocations' in p:
for each in p['geoLocations']:
if 'POINT' in each:
point_parts = each.split(' ')
lon = point_parts[0][6:]
lat = point_parts[1][:-1]
try:
p['longitude'] = float(lon)
p['latitude'] = float(lat)
break
except:
p['longitude'] = None
p['latitude'] = None
ir_description = ''
image_rights = p.get('imageRights', None)
if image_rights:
ir_description = image_rights.get('description')
title = p.get('title', '')
if 'rawData' in p and 'title_alt' in p['rawData']:
title_alt = next(iter(p['rawData']['title_alt'] or []), None)
if title < title_alt:
title = title_alt
elif 'rawData' in p and 'geographic' in p['rawData']:
title_geo = next(iter(p['rawData']['geographic'] or []), None)
if title_geo:
title = f'{title} ; {title_geo}'
year = p.get('year', None)
if year:
title = '%s (%s)' % (title, p.get('year', ''))
photo = {'id': 'https://www.finna.fi%s' % p.get('recordPage', None), 'image': 'https://www.finna.fi/Cover/Show?id=%s' % quote(p.get('id', None)), 'height': 768, 'width': 583, 'title': title, 'date': p.get('year', None), 'author': comma.join(authors), 'source': {'url': 'https://www.finna.fi%s' % p.get('recordPage', None), 'name': ir_description}, 'rephotos': [], 'favorited': False}
photos.append(photo)
return Response({'error': RESPONSE_STATUSES['OK'], 'photos': photos})
else:
return Response({'error': RESPONSE_STATUSES['INVALID_PARAMETERS'], 'foo': 'bar', 'photos': []})
|
def _handle_request(self, data, user, request):
form = forms.ApiFinnaNearestPhotosForm(data)
if form.is_valid():
lon = round(form.cleaned_data['longitude'], 4)
lat = round(form.cleaned_data['latitude'], 4)
query = form.cleaned_data['query'] or ''
album = form.cleaned_data['album'] or ''
if query == '':
distance = 0.1
else:
distance = 100000
if album == 'signebrander':
return self._get_signe_results(lat, lon, query, user, request)
photos = []
finna_filters = ['free_online_boolean:"1"', '~format:"0/Place/"', '~format:"0/Image/"', '~usage_rights_str_mv:"usage_B"', '-topic_facet:"ilmakuvat"', '-topic_facet:"arkeologia"', '-topic_facet:"lentokoneet"', '-topic_facet:"ilmakulkuneuvot lentokoneet"', '-author_facet:"Koivisto Andreas"', '-author_facet:"Kauppinen Anne"', '{!geofilt sfield=location_geo pt=%f,%f d=%f}' % (lat, lon, distance)]
if album == '1918':
finna_filters.append('~era_facet:"1918"')
finna_result_json = requests.get(self.search_url, {'lookfor': query, 'type': 'AllFields', 'limit': self.page_size, 'lng': 'en-gb', 'streetsearch': 1, 'field[]': ['id', 'title', 'images', 'imageRights', 'authors', 'source', 'geoLocations', 'recordPage', 'year', 'summary', 'rawData'], 'filter[]': finna_filters})
finna_result = finna_result_json.json()
if 'records' in finna_result:
records = finna_result['records']
elif distance < 1000:
records = self._get_finna_results(lat, lon, query, album, distance * 100)
else:
records = []
for p in records:
comma = ', '
authors = []
if 'authors' in p:
if p['authors']['primary']:
for (k, each) in p['authors']['primary'].items():
authors.append(k)
if 'geoLocations' in p:
for each in p['geoLocations']:
if 'POINT' in each:
point_parts = each.split(' ')
lon = point_parts[0][6:]
lat = point_parts[1][:-1]
try:
p['longitude'] = float(lon)
p['latitude'] = float(lat)
break
except:
p['longitude'] = None
p['latitude'] = None
ir_description = ''
image_rights = p.get('imageRights', None)
if image_rights:
ir_description = image_rights.get('description')
title = p.get('title', '')
if 'rawData' in p and 'title_alt' in p['rawData']:
title_alt = next(iter(p['rawData']['title_alt'] or []), None)
if title < title_alt:
title = title_alt
elif 'rawData' in p and 'geographic' in p['rawData']:
title_geo = next(iter(p['rawData']['geographic'] or []), None)
if title_geo:
title = f'{title} ; {title_geo}'
year = p.get('year', None)
if year:
title = '%s (%s)' % (title, p.get('year', ''))
photo = {'id': 'https://www.finna.fi%s' % p.get('recordPage', None), 'image': 'https://www.finna.fi/Cover/Show?id=%s' % quote(p.get('id', None)), 'height': 768, 'width': 583, 'title': title, 'date': p.get('year', None), 'author': comma.join(authors), 'source': {'url': 'https://www.finna.fi%s' % p.get('recordPage', None), 'name': ir_description}, 'rephotos': [], 'favorited': False}
photos.append(photo)
return Response({'error': RESPONSE_STATUSES['OK'], 'photos': photos})
else:
return Response({'error': RESPONSE_STATUSES['INVALID_PARAMETERS'], 'foo': 'bar', 'photos': []})
|
ajapaik-web
|
positive
|
def test_zero(self):
<DeepExtract>
return super(Any, self).assert_match(__unit__.Any(), 0)
</DeepExtract>
<DeepExtract>
return super(Matching, self).assert_no_match(__unit__.Matching(self.ODD), 0)
</DeepExtract>
|
def test_zero(self):
return super(Any, self).assert_match(__unit__.Any(), 0)
return super(Matching, self).assert_no_match(__unit__.Matching(self.ODD), 0)
|
callee
|
positive
|
def createAppResult(self, Id, name, desc, samples=None, appSessionId=None):
"""
Create an AppResult object.
:param Id: The id of the project in which the AppResult is to be added
:param name: The name of the AppResult
:param desc: A description of the AppResult
:param samples: (Optional) A list of one or more Samples Ids that the AppResult is related to
:param appSessionId: (Optional) If no appSessionId is given, the id used to initialize the BaseSpaceAPI instance will be used. If appSessionId is set equal to an empty string, a new appsession will be created for the appresult object
:raises Exception: when attempting to create AppResult in an AppSession that has a status other than 'running'.
:returns: a newly created AppResult instance
"""
if not self.appSessionId and appSessionId == None:
raise Exception('This BaseSpaceAPI instance has no appSessionId set and no alternative id was supplied for method createAppResult')
if samples is None:
samples = []
resourcePath = '/projects/{ProjectId}/appresults'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
resourcePath = resourcePath.replace('{ProjectId}', Id)
queryParams = {}
headerParams = {}
postData = {}
if appSessionId:
queryParams['appsessionid'] = appSessionId
if appSessionId == None:
queryParams['appsessionid'] = self.appSessionId
if len(samples):
ref = []
for s in samples:
d = {'Rel': 'using', 'Type': 'Sample', 'HrefContent': self.version + '/samples/' + s.Id}
ref.append(d)
postData['References'] = ref
if queryParams.has_key('appsessionid'):
<DeepExtract>
if queryParams['appsessionid'] is None:
queryParams['appsessionid'] = self.appSessionId
if not queryParams['appsessionid']:
raise AppSessionException('An AppSession Id is required')
resourcePath = '/appsessions/{AppSessionId}'
resourcePath = resourcePath.replace('{AppSessionId}', queryParams['appsessionid'])
method = 'GET'
headerParams = {}
queryParams = {}
session = self.__singleRequest__(AppSessionResponse.AppSessionResponse, resourcePath, method, queryParams, headerParams)
</DeepExtract>
if not session.canWorkOn():
raise Exception('AppSession status must be "running," to create an AppResults. Current status is ' + session.Status)
postData['Name'] = name
postData['Description'] = desc
return self.__singleRequest__(AppResultResponse.AppResultResponse, resourcePath, method, queryParams, headerParams, postData=postData)
|
def createAppResult(self, Id, name, desc, samples=None, appSessionId=None):
"""
Create an AppResult object.
:param Id: The id of the project in which the AppResult is to be added
:param name: The name of the AppResult
:param desc: A description of the AppResult
:param samples: (Optional) A list of one or more Samples Ids that the AppResult is related to
:param appSessionId: (Optional) If no appSessionId is given, the id used to initialize the BaseSpaceAPI instance will be used. If appSessionId is set equal to an empty string, a new appsession will be created for the appresult object
:raises Exception: when attempting to create AppResult in an AppSession that has a status other than 'running'.
:returns: a newly created AppResult instance
"""
if not self.appSessionId and appSessionId == None:
raise Exception('This BaseSpaceAPI instance has no appSessionId set and no alternative id was supplied for method createAppResult')
if samples is None:
samples = []
resourcePath = '/projects/{ProjectId}/appresults'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
resourcePath = resourcePath.replace('{ProjectId}', Id)
queryParams = {}
headerParams = {}
postData = {}
if appSessionId:
queryParams['appsessionid'] = appSessionId
if appSessionId == None:
queryParams['appsessionid'] = self.appSessionId
if len(samples):
ref = []
for s in samples:
d = {'Rel': 'using', 'Type': 'Sample', 'HrefContent': self.version + '/samples/' + s.Id}
ref.append(d)
postData['References'] = ref
if queryParams.has_key('appsessionid'):
if queryParams['appsessionid'] is None:
queryParams['appsessionid'] = self.appSessionId
if not queryParams['appsessionid']:
raise AppSessionException('An AppSession Id is required')
resourcePath = '/appsessions/{AppSessionId}'
resourcePath = resourcePath.replace('{AppSessionId}', queryParams['appsessionid'])
method = 'GET'
headerParams = {}
queryParams = {}
session = self.__singleRequest__(AppSessionResponse.AppSessionResponse, resourcePath, method, queryParams, headerParams)
if not session.canWorkOn():
raise Exception('AppSession status must be "running," to create an AppResults. Current status is ' + session.Status)
postData['Name'] = name
postData['Description'] = desc
return self.__singleRequest__(AppResultResponse.AppResultResponse, resourcePath, method, queryParams, headerParams, postData=postData)
|
basespace-python-sdk
|
positive
|
def forward(self, source, memory_bank, memory_lengths=None, coverage=None):
"""
Args:
source (`FloatTensor`): query vectors `[batch x tgt_len x dim]`
memory_bank (`FloatTensor`): source vectors `[batch x src_len x dim]`
memory_lengths (`LongTensor`): the source context lengths `[batch]`
coverage (`FloatTensor`): None (not supported yet)
Returns:
(`FloatTensor`, `FloatTensor`):
* Computed vector `[tgt_len x batch x dim]`
* Attention distributions for each query
`[tgt_len x batch x src_len]`
"""
if source.dim() == 2:
one_step = True
source = source.unsqueeze(1)
else:
one_step = False
(batch, source_l, dim) = memory_bank.size()
(batch_, target_l, dim_) = source.size()
aeq(batch, batch_)
aeq(dim, dim_)
aeq(self.dim, dim)
if coverage is not None:
(batch_, source_l_) = coverage.size()
aeq(batch, batch_)
aeq(source_l, source_l_)
if coverage is not None:
cover = coverage.view(-1).unsqueeze(1)
memory_bank = memory_bank + self.linear_cover(cover).view_as(memory_bank)
memory_bank = torch.tanh(memory_bank)
<DeepExtract>
(src_batch, src_len, src_dim) = memory_bank.size()
(tgt_batch, tgt_len, tgt_dim) = source.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ['general', 'dot']:
if self.attn_type == 'general':
h_t_ = source.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
source = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = memory_bank.transpose(1, 2)
align = torch.bmm(source, h_s_)
else:
dim = self.dim
wq = self.linear_query(source.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(memory_bank.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
wquh = torch.tanh(wq + uh)
align = self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
</DeepExtract>
if memory_lengths is not None:
mask = sequence_mask(memory_lengths, max_len=align.size(-1))
mask = mask.unsqueeze(1)
align.masked_fill_(1 - mask, -float('inf'))
if self.attn_func == 'softmax':
align_vectors = F.softmax(align.view(batch * target_l, source_l), -1)
else:
align_vectors = sparsemax(align.view(batch * target_l, source_l), -1)
align_vectors = align_vectors.view(batch, target_l, source_l)
c = torch.bmm(align_vectors, memory_bank)
concat_c = torch.cat([c, source], 2).view(batch * target_l, dim * 2)
attn_h = self.linear_out(concat_c).view(batch, target_l, dim)
if self.attn_type in ['general', 'dot']:
attn_h = torch.tanh(attn_h)
if one_step:
attn_h = attn_h.squeeze(1)
align_vectors = align_vectors.squeeze(1)
(batch_, dim_) = attn_h.size()
aeq(batch, batch_)
aeq(dim, dim_)
(batch_, source_l_) = align_vectors.size()
aeq(batch, batch_)
aeq(source_l, source_l_)
else:
attn_h = attn_h.transpose(0, 1).contiguous()
align_vectors = align_vectors.transpose(0, 1).contiguous()
(target_l_, batch_, dim_) = attn_h.size()
aeq(target_l, target_l_)
aeq(batch, batch_)
aeq(dim, dim_)
(target_l_, batch_, source_l_) = align_vectors.size()
aeq(target_l, target_l_)
aeq(batch, batch_)
aeq(source_l, source_l_)
return (attn_h, align_vectors)
|
def forward(self, source, memory_bank, memory_lengths=None, coverage=None):
"""
Args:
source (`FloatTensor`): query vectors `[batch x tgt_len x dim]`
memory_bank (`FloatTensor`): source vectors `[batch x src_len x dim]`
memory_lengths (`LongTensor`): the source context lengths `[batch]`
coverage (`FloatTensor`): None (not supported yet)
Returns:
(`FloatTensor`, `FloatTensor`):
* Computed vector `[tgt_len x batch x dim]`
* Attention distributions for each query
`[tgt_len x batch x src_len]`
"""
if source.dim() == 2:
one_step = True
source = source.unsqueeze(1)
else:
one_step = False
(batch, source_l, dim) = memory_bank.size()
(batch_, target_l, dim_) = source.size()
aeq(batch, batch_)
aeq(dim, dim_)
aeq(self.dim, dim)
if coverage is not None:
(batch_, source_l_) = coverage.size()
aeq(batch, batch_)
aeq(source_l, source_l_)
if coverage is not None:
cover = coverage.view(-1).unsqueeze(1)
memory_bank = memory_bank + self.linear_cover(cover).view_as(memory_bank)
memory_bank = torch.tanh(memory_bank)
(src_batch, src_len, src_dim) = memory_bank.size()
(tgt_batch, tgt_len, tgt_dim) = source.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ['general', 'dot']:
if self.attn_type == 'general':
h_t_ = source.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
source = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = memory_bank.transpose(1, 2)
align = torch.bmm(source, h_s_)
else:
dim = self.dim
wq = self.linear_query(source.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(memory_bank.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
wquh = torch.tanh(wq + uh)
align = self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
if memory_lengths is not None:
mask = sequence_mask(memory_lengths, max_len=align.size(-1))
mask = mask.unsqueeze(1)
align.masked_fill_(1 - mask, -float('inf'))
if self.attn_func == 'softmax':
align_vectors = F.softmax(align.view(batch * target_l, source_l), -1)
else:
align_vectors = sparsemax(align.view(batch * target_l, source_l), -1)
align_vectors = align_vectors.view(batch, target_l, source_l)
c = torch.bmm(align_vectors, memory_bank)
concat_c = torch.cat([c, source], 2).view(batch * target_l, dim * 2)
attn_h = self.linear_out(concat_c).view(batch, target_l, dim)
if self.attn_type in ['general', 'dot']:
attn_h = torch.tanh(attn_h)
if one_step:
attn_h = attn_h.squeeze(1)
align_vectors = align_vectors.squeeze(1)
(batch_, dim_) = attn_h.size()
aeq(batch, batch_)
aeq(dim, dim_)
(batch_, source_l_) = align_vectors.size()
aeq(batch, batch_)
aeq(source_l, source_l_)
else:
attn_h = attn_h.transpose(0, 1).contiguous()
align_vectors = align_vectors.transpose(0, 1).contiguous()
(target_l_, batch_, dim_) = attn_h.size()
aeq(target_l, target_l_)
aeq(batch, batch_)
aeq(dim, dim_)
(target_l_, batch_, source_l_) = align_vectors.size()
aeq(target_l, target_l_)
aeq(batch, batch_)
aeq(source_l, source_l_)
return (attn_h, align_vectors)
|
ExHiRD-DKG
|
positive
|
def _remove_callable(callback):
for reference in self._callbacks_refs:
if not isinstance(reference, tuple):
if reference() == callback:
<DeepExtract>
for ref in [reference]:
self._callbacks_refs.remove(ref)
</DeepExtract>
break
|
def _remove_callable(callback):
for reference in self._callbacks_refs:
if not isinstance(reference, tuple):
if reference() == callback:
for ref in [reference]:
self._callbacks_refs.remove(ref)
break
|
core
|
positive
|
def _str2time(day, mon, yr, hr, min, sec, tz):
try:
mon = months_lower.index(mon.lower()) + 1
except ValueError:
try:
imon = int(mon)
except ValueError:
return None
if 1 <= imon <= 12:
mon = imon
else:
return None
if hr is None:
hr = 0
if min is None:
min = 0
if sec is None:
sec = 0
yr = int(yr)
day = int(day)
hr = int(hr)
min = int(min)
sec = int(sec)
if yr < 1000:
cur_yr = time.localtime(time.time())[0]
m = cur_yr % 100
tmp = yr
yr = yr + cur_yr - m
m = m - tmp
if abs(m) > 50:
if m > 0:
yr = yr + 100
else:
yr = yr - 100
<DeepExtract>
(year, month, mday, hour, min, sec) = (yr, mon, day, hr, min, sec, tz)[:6]
if year >= EPOCH and 1 <= month <= 12 and (1 <= mday <= 31) and (0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61):
t = timegm((yr, mon, day, hr, min, sec, tz))
else:
t = None
</DeepExtract>
if t is not None:
if tz is None:
tz = 'UTC'
tz = tz.upper()
<DeepExtract>
offset = None
if UTC_ZONES.has_key(tz):
offset = 0
else:
m = timezone_re.search(tz)
if m:
offset = 3600 * int(m.group(2))
if m.group(3):
offset = offset + 60 * int(m.group(3))
if m.group(1) == '-':
offset = -offset
offset = offset
</DeepExtract>
if offset is None:
return None
t = t - offset
return t
|
def _str2time(day, mon, yr, hr, min, sec, tz):
try:
mon = months_lower.index(mon.lower()) + 1
except ValueError:
try:
imon = int(mon)
except ValueError:
return None
if 1 <= imon <= 12:
mon = imon
else:
return None
if hr is None:
hr = 0
if min is None:
min = 0
if sec is None:
sec = 0
yr = int(yr)
day = int(day)
hr = int(hr)
min = int(min)
sec = int(sec)
if yr < 1000:
cur_yr = time.localtime(time.time())[0]
m = cur_yr % 100
tmp = yr
yr = yr + cur_yr - m
m = m - tmp
if abs(m) > 50:
if m > 0:
yr = yr + 100
else:
yr = yr - 100
(year, month, mday, hour, min, sec) = (yr, mon, day, hr, min, sec, tz)[:6]
if year >= EPOCH and 1 <= month <= 12 and (1 <= mday <= 31) and (0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61):
t = timegm((yr, mon, day, hr, min, sec, tz))
else:
t = None
if t is not None:
if tz is None:
tz = 'UTC'
tz = tz.upper()
offset = None
if UTC_ZONES.has_key(tz):
offset = 0
else:
m = timezone_re.search(tz)
if m:
offset = 3600 * int(m.group(2))
if m.group(3):
offset = offset + 60 * int(m.group(3))
if m.group(1) == '-':
offset = -offset
offset = offset
if offset is None:
return None
t = t - offset
return t
|
BruteXSS
|
positive
|
def __init__(self, model, statistics_calc, backend, simulations_net=None, parameters_net=None, embedding_dimension=None, n_samples=1000, n_samples_val=0, parameters=None, simulations=None, parameters_val=None, simulations_val=None, lower_bound_simulations=None, upper_bound_simulations=None, sliced=True, noise_type='radermacher', variance_reduction=False, n_epochs=100, batch_size=16, scale_samples=True, scale_parameters=False, early_stopping=False, epochs_early_stopping_interval=1, start_epoch_early_stopping=10, cuda=None, load_all_data_GPU=False, seed=None, nonlinearity_simulations=None, nonlinearity_parameters=None, batch_norm=True, batch_norm_momentum=0.1, batch_norm_update_before_test=False, lr_simulations=0.001, lr_parameters=0.001, lam=0.0, optimizer_simulations=None, optimizer_parameters=None, scheduler_simulations=None, scheduler_parameters=None, start_epoch_training=0, optimizer_simulations_kwargs={}, optimizer_parameters_kwargs={}, scheduler_simulations_kwargs={}, scheduler_parameters_kwargs={}, use_tqdm=True):
"""
Parameters
----------
model: abcpy.models.Model
Model object that conforms to the Model class.
statistics_calc: abcpy.statistics.Statistics
Statistics object that conforms to the Statistics class, applied before learning the transformation.
backend: abcpy.backends.Backend
Backend object that conforms to the Backend class.
simulations_net: torch.nn object or list, optional
The neural network which transforms the simulations to the summary statistics of the exponential family.
At the end of the training routine, the output of `simulations_net` (except for the last component)
will be the learned summary statistics.
It can be a torch.nn object with input size corresponding to size of model output
(after being transformed by `statistics_calc`) or, alternatively, a list
with integer numbers denoting the width of the hidden layers, from which a fully connected network with
that structure is created, having the input size corresponding to size of model output
(after being transformed by `statistics_calc`) and the output size determined by
`embedding_dimension` (see below). Importantly, the output size of `simulations_net` needs to be equal to
the output size of `parameters_net` increased by one, as the two are used together in the code. If both nets
are left to their default values, this is done automatically.
In case this is None, a fully connected neural network with three
hidden layers is used; the width of the hidden layers is given by
``[int(input_size * 1.5), int(input_size * 0.75 + output_size * 3), int(output_size * 5)]``,
where `input_size` is the size of the data after being transformed by `statistics_calc`, while
`output_size` is determined by `embedding_dimension`. For further details check
:func:`abcpy.NN_utilities.networks.createDefaultNN`. By default, this is None.
parameters_net: torch.nn object or list, optional
The neural network which maps the parameters to the natural parametrization form of the exponential family.
It can be a torch.nn object with input size corresponding to the number of parameters
or, alternatively, a list
with integer numbers denoting the width of the hidden layers, from which a fully connected network with
that structure is created, having the input size corresponding to the number of parameters
and the output size determined by
`embedding_dimension` (see below). Importantly, the output size of `parameters_net` needs to be equal to
the output size of `simulations_net` decreased by one, as the two are used together in the code.
If both nets are left to their default values, this is done automatically.
In case this is None, a fully connected neural network with three
hidden layers is used; the width of the hidden layers is given by
``[int(input_size * 1.5), int(input_size * 0.75 + output_size * 3), int(output_size * 5)]``,
where `input_size` is the number of parameters, while `output_size` is determined by `embedding_dimension`.
For further details check
:func:`abcpy.NN_utilities.networks.createDefaultNN`. By default, this is None.
embedding_dimension: integer, optional
Size of the learned summary statistics if `simulations_net` is None or a list.
Specifically, in these cases
`simulations_net` is automatically created having output size `embedding_dimension + 1`, of which all but
            the last component will represent the learned summaries (the last one instead is a learned base measure).
If also `parameters_net` is None or a list, it will be automatically created with output size equal to
`embedding_dimension`. By default `embedding_dimension` is None, in which case it is fixed to the number
of parameters in the model.
n_samples: int, optional
            The number of (parameter, simulated data) tuples to be generated to learn the summary statistics in the pilot
step. The default value is 1000.
This is ignored if `simulations` and `parameters` are provided.
n_samples_val: int, optional
            The number of (parameter, simulated data) tuples to be generated and used as a validation set in the pilot
step. The default value is 0, which means no validation set is used.
This is ignored if `simulations_val` and `parameters_val` are provided.
parameters: array, optional
A numpy array with shape (n_samples, n_parameters) that is used, together with `simulations` to fit the
summary selection learning algorithm. It has to be provided together with `simulations`, in which case no
other simulations are performed to generate the training data. Default value is None.
simulations: array, optional
A numpy array with shape (n_samples, output_size) that is used, together with `parameters` to fit the
summary selection learning algorithm. It has to be provided together with `parameters`, in which case no
other simulations are performed to generate the training data. These are transformed by the
`statistics_calc` statistics before the learning step is done. Default value is None.
parameters_val: array, optional
A numpy array with shape (n_samples_val, n_parameters) that is used, together with `simulations_val` as a
validation set in the summary selection learning algorithm. It has to be provided together with
`simulations_val`, in which case no other simulations are performed to generate the validation set. Default
value is None.
simulations_val: array, optional
A numpy array with shape (n_samples_val, output_size) that is used, together with `parameters_val` as a
validation set in the summary selection learning algorithm. It has to be provided together with
`parameters_val`, in which case no other simulations are performed to generate the validation set. Default
value is None.
lower_bound_simulations: np.ndarray, optional
Array of the same length of the simulations on which the statistics will be learned (therefore, after
`statistics_calc` has been applied). It contains the lower bounds of the simulations, with each entry
being either None or a number. It works together with `upper_bound_simulations` to determine the
nonlinear transformation mapping the bounded space to an unbounded one: if both upper and lower
bounds for a given entry are None, no transformation is applied to that entry. If both of them are numbers,
a transformation mapping a compact domain to an unbounded one is applied. If instead the lower bound is a
number and the upper one is None, a transformation for lower bounded variables is applied. More details on
the transformations can be found at :class:`abcpy.transformers.BoundedVarTransformer`. By default,
`lower_bound_simulations` is None, in which case all variables are assumed to not be lower bounded.
upper_bound_simulations: np.ndarray, optional
Array of the same length of the simulations on which the statistics will be learned (therefore, after
`statistics_calc` has been applied). It contains the upper bounds of the simulations, with each entry
being either None or a number. It works together with `lower_bound_simulations` to determine the
nonlinear transformation mapping the bounded space to an unbounded one: if both upper and lower
bounds for a given entry are None, no transformation is applied to that entry. If both of them are numbers,
a transformation mapping a compact domain to an unbounded one is applied. If instead the lower bound is a
number and the upper one is None, a transformation for lower bounded variables is applied. More details on
the transformations can be found at :class:`abcpy.transformers.BoundedVarTransformer`. By default,
`upper_bound_simulations` is None, in which case all variables are assumed to not be upper bounded.
sliced: boolean, optional
If True, the exponential family is fit with the sliced Score Matching approach, which is a faster
(stochastic) version of Score Matching. If False, the full Score Matching approach is used. Default is True.
noise_type: basestring, optional
Denotes the noise type used in the sliced Score Matching version. It can be 'radermacher', 'gaussian' or
'sphere', with 'radermacher' being the default one. Ignored if `sliced=False`.
variance_reduction: boolean, optional
If True, use the variance reduction version of Sliced Score Matching (when that is used), which replaces a
            term with its exact expectation over the noise distribution. Cannot be used when `noise_type='sphere'`.
Default is False, ignored if `sliced=False`.
n_epochs: integer, optional
the number of epochs used for training the neural network. Default is 100
batch_size: integer, optional
the batch size used for training the neural network. Default is 16
scale_samples: boolean, optional
If True, the simulations are scaled to the (0,1) range before the transformation is learned (i.e., before
being fed to the neural network). This happens after the simulations have been transformed with
`statistics_calc` and after the (optional) nonlinear transformation governed by `lower_bound_simulations`
and `upper_bound_simulations` is applied. This relies on a wrapping of `sklearn.preprocessing.MinMaxScaler`.
The validation set will also be rescaled in the same fashion.
When calling the `get_statistics` and the `get_simulations_network` methods,
the network will be wrapped by :class:`abcpy.NN_utilities.networks.ScalerAndNet`; this automatically
takes care of transforming the data with the scaler before applying the neural network.
It is highly recommended to use a scaler, as neural networks are sensitive to the range of input data. A
case in which you may not want to use a scaler is timeseries data, as the scaler works independently on each
feature of the data.
Default value is True.
scale_parameters: boolean, optional
If True, the parameters are scaled to the (0,1) range before the natural parameters transformation
is learned (i.e., before being fed to the neural network).
This relies on a wrapping of `sklearn.preprocessing.MinMaxScaler`.
The validation set will also be rescaled in the same fashion.
When calling the `get_statistics` and the `get_parameters_network` methods,
the network will be wrapped by :class:`abcpy.NN_utilities.networks.ScalerAndNet`; this automatically
takes care of transforming the data with the scaler before applying the neural network.
            For parameters, the scaler is not as critical as for simulations, as parameters usually have smaller ranges.
            If however the different parameters differ by orders of magnitude, using a scaler is recommended.
Default value is False.
early_stopping: boolean, optional
If True, the validation set (which needs to be either provided through the arguments `parameters_val` and
`simulations_val` or generated by setting `n_samples_val` to a value larger than 0) is used to early stop
the training of the neural network as soon as the loss on the validation set starts to increase. Default
value is False.
epochs_early_stopping_interval: integer, optional
The frequency at which the validation error is compared in order to decide whether to early stop the
training or not. Namely, if `epochs_early_stopping_interval=10`, early stopping can happen only at epochs
multiple of 10. Default value is 1.
start_epoch_early_stopping: integer, optional
The epoch after which early stopping can happen; in fact, as soon as training starts, there may be a
transient period in which the loss increases. Default value is 10.
cuda: boolean, optional
            If cuda=None, the GPU is selected when available. Alternatively, you can pass True to use the GPU or False to use the CPU.
load_all_data_GPU: boolean, optional
            If True and a GPU is used, the whole dataset is loaded on the GPU before training begins; this may
            speed up training as it avoids transfers between CPU and GPU, but that is not guaranteed. Note that
            setting this to True causes a crash if the dataset is too large to fit in GPU memory.
            Defaults to False; you should not rely too much on this.
seed: integer, optional
Optional initial seed for the random number generator. The default value is generated randomly.
nonlinearity_simulations: torch.nn class, optional
            If the neural network for the simulations is built automatically (i.e. when `simulations_net` is either a
            list or None), then this is the nonlinearity used. Default is `torch.nn.Softplus`. This is because the Score
Matching routine (when `sliced=False`) needs the output of the simulations net to have a non-zero second
derivative with respect to data, which does not happen when using the common ReLU nonlinearity.
nonlinearity_parameters: torch.nn class, optional
            If the neural network for the parameters is built automatically (i.e. when `parameters_net` is either a
            list or None), then this is the nonlinearity used. Default is `torch.nn.ReLU`.
batch_norm: boolean, optional
If True, a batch normalization layer is put on top of the parameters net when that is built automatically.
This improves the performance of the method as it reduces the degeneracy of the
(summary statistics) * (natural parameters) product. Default is True.
batch_norm_momentum: float, optional
Momentum value with which the batch estimates in the batch norm layer are updated at each batch; see
`torch.nn.BatchNorm1d` for more information. Default is 0.1. Ignored if `batch_norm` is False, or if
an actual `parameters_net` is provided.
batch_norm_update_before_test: boolean, optional
            When using a batch norm layer on the test set, the resulting test loss evaluation can be noisy as the
            batch norm estimates change during the train phase. To reduce this issue, it is enough to perform a simple
            forward pass of the full train set (without backprop or loss evaluation) before the testing phase is
started. Set `batch_norm_update_before_test=True` to do that. Default is False.
Ignored if `batch_norm` is False, if an actual `parameters_net` is provided, as well as if no test set
is present.
lr_simulations: float, optional
The learning rate to be used in the iterative training scheme for the simulations neural network.
Default to 1e-3.
lr_parameters: float, optional
The learning rate to be used in the iterative training scheme for the parameters neural network.
Default to 1e-3.
lam: float, optional
            If the full Score Matching approach is used (i.e. `sliced=False`), this denotes the amount of
            second derivative regularization added to the Score Matching loss in the way proposed in Kingma & LeCun
            (2010). Default is 0, corresponding to no regularization.
optimizer_simulations: torch Optimizer class, optional
A torch Optimizer class, for instance `SGD` or `Adam`, to be used for the simulations network.
Default to `Adam`. Additional parameters may be passed through the `optimizer_simulations_kwargs` argument.
optimizer_parameters: torch Optimizer class, optional
A torch Optimizer class, for instance `SGD` or `Adam`, to be used for the parameters network.
Default to `Adam`. Additional parameters may be passed through the `optimizer_parameters_kwargs` argument.
scheduler_simulations: torch _LRScheduler class, optional
A torch _LRScheduler class, used to modify the learning rate across epochs for the simulations net.
By default, a :class:`torch.optim.lr_scheduler.ExponentialLR` scheduler with `gamma=0.99` is used.
Additional arguments may be passed through the `scheduler_simulations_kwargs` parameter.
scheduler_parameters: torch _LRScheduler class, optional
A torch _LRScheduler class, used to modify the learning rate across epochs for the parameters net.
By default, a :class:`torch.optim.lr_scheduler.ExponentialLR` scheduler with `gamma=0.99` is used.
Additional arguments may be passed through the `scheduler_parameters_kwargs` parameter.
start_epoch_training: integer, optional
            If a scheduler is used, for the first `start_epoch_training` epochs the scheduler is applied to modify
the learning rate without training the network. From then on, the training proceeds normally, applying both
the scheduler and the optimizer at each epoch. Default to 0.
optimizer_simulations_kwargs: Python dictionary, optional
dictionary containing optional keyword arguments for the optimizer used for the simulations network.
optimizer_parameters_kwargs: Python dictionary, optional
dictionary containing optional keyword arguments for the optimizer used for the parameters network.
scheduler_simulations_kwargs: Python dictionary, optional
dictionary containing optional keyword arguments for the simulations scheduler.
scheduler_parameters_kwargs: Python dictionary, optional
dictionary containing optional keyword arguments for the parameters scheduler.
use_tqdm : boolean, optional
Whether using tqdm or not to display progress. Defaults to True.
"""
self.logger = logging.getLogger(__name__)
self.scale_samples = scale_samples
self.scale_parameters = scale_parameters
self.sliced = sliced
if lower_bound_simulations is not None and (not hasattr(lower_bound_simulations, 'shape')):
raise RuntimeError('Provided lower bounds need to be a numpy array.')
if upper_bound_simulations is not None and (not hasattr(upper_bound_simulations, 'shape')):
raise RuntimeError('Provided upper bounds need to be a numpy array.')
if upper_bound_simulations is not None and lower_bound_simulations is not None and (lower_bound_simulations.shape != upper_bound_simulations.shape):
raise RuntimeError('Provided lower and upper bounds need to have same shape.')
if not has_torch:
raise ImportError('Pytorch is required to instantiate an element of the {} class, in order to handle neural networks. Please install it. '.format(self.__class__.__name__))
if seed is not None:
torch.manual_seed(seed)
if cuda is None:
cuda = torch.cuda.is_available()
elif cuda and (not torch.cuda.is_available()):
cuda = False
self.logger.warning('You requested to use GPU but no GPU is available! The computation will proceed on CPU.')
self.device = 'cuda' if cuda and torch.cuda.is_available() else 'cpu'
if self.device == 'cuda':
self.logger.debug('We are using GPU to train the network.')
else:
self.logger.debug('We are using CPU to train the network.')
super(ExponentialFamilyScoreMatching, self).__init__(model, statistics_calc, backend, n_samples, n_samples_val, 1, parameters, simulations, seed=seed, parameters_val=parameters_val, simulations_val=simulations_val)
self.has_val_set = hasattr(self, 'sample_parameters_val') and len(self.sample_parameters_val) > 0
self.logger.info('Learning of the transformation...')
(parameters, simulations) = (self.sample_parameters, self.sample_statistics)
if self.has_val_set:
(parameters_val, simulations_val) = (self.sample_parameters_val, self.sample_statistics_val)
else:
(parameters_val, simulations_val) = (None, None)
if lower_bound_simulations is None and upper_bound_simulations is None and (not scale_samples):
self.has_scaler_for_simulations = False
else:
self.has_scaler_for_simulations = True
if lower_bound_simulations is None:
lower_bound_simulations = np.array([None] * simulations.shape[1])
if upper_bound_simulations is None:
upper_bound_simulations = np.array([None] * simulations.shape[1])
self.scaler_simulations = BoundedVarScaler(lower_bound_simulations, upper_bound_simulations, rescale_transformed_vars=self.scale_samples).fit(simulations)
simulations = self.scaler_simulations.transform(simulations)
if self.has_val_set:
simulations_val = self.scaler_simulations.transform(simulations_val)
if self.scale_parameters:
self.scaler_parameters = MinMaxScaler().fit(parameters)
parameters = self.scaler_parameters.transform(parameters)
if self.has_val_set:
parameters_val = self.scaler_parameters.transform(parameters_val)
simulations = torch.tensor(simulations.astype('float32'), requires_grad=True)
parameters = torch.tensor(parameters.astype('float32'), requires_grad=False)
if self.has_val_set:
simulations_val = torch.tensor(simulations_val.astype('float32'), requires_grad=True)
parameters_val = torch.tensor(parameters_val.astype('float32'), requires_grad=False)
if embedding_dimension is None:
embedding_dimension = parameters.shape[1]
if isinstance(simulations_net, torch.nn.Module):
self.simulations_net = simulations_net
self.logger.debug('We use the provided neural network for the summary statistics')
elif isinstance(simulations_net, list) or simulations_net is None:
self.simulations_net = createDefaultNNWithDerivatives(input_size=simulations.shape[1], output_size=embedding_dimension + 1, hidden_sizes=simulations_net, nonlinearity=torch.nn.Softplus if nonlinearity_simulations is None else nonlinearity_simulations)()
self.logger.debug('We generate a default neural network for the summary statistics')
else:
raise RuntimeError("'simulations_net' needs to be either a torch.nn.Module, or a list, or None.")
if isinstance(parameters_net, torch.nn.Module):
self.parameters_net = parameters_net
self.logger.debug('We use the provided neural network for the parameters')
elif isinstance(parameters_net, list) or parameters_net is None:
self.parameters_net = createDefaultNN(input_size=parameters.shape[1], output_size=embedding_dimension, hidden_sizes=parameters_net, nonlinearity=torch.nn.ReLU() if nonlinearity_parameters is None else nonlinearity_parameters(), batch_norm_last_layer=batch_norm, batch_norm_last_layer_momentum=batch_norm_momentum)()
self.logger.debug('We generate a default neural network for the parameters')
else:
raise RuntimeError("'parameters_net' needs to be either a torch.nn.Module, or a list, or None.")
if cuda:
self.simulations_net.cuda()
self.parameters_net.cuda()
self.logger.debug('We now run the training routine')
<DeepExtract>
if self.sliced:
batch_steps = lambda samples, etas: self._single_sliced_score_matching(samples, etas, noise_type=noise_type, variance_reduction=variance_reduction)
else:
batch_steps = lambda samples, etas: self._batch_Fisher_div_with_c_x(samples, etas, lam=lam)
if load_all_data_GPU:
simulations = simulations.to(self.device)
if simulations_val is not None:
simulations_val = simulations_val.to(self.device)
parameters = parameters.to(self.device)
if parameters_val is not None:
parameters_val = parameters_val.to(self.device)
compute_test_loss = False
if parameters_val is not None and simulations_val is not None:
test_loss_list = []
compute_test_loss = True
n_theta_test = parameters_val.shape[0]
if optimizer_simulations is None:
optimizer_simulations = Adam(self.simulations_net.parameters(), lr=lr_simulations, **optimizer_simulations_kwargs)
else:
optimizer_simulations = optimizer_simulations(self.simulations_net.parameters(), lr=lr_simulations, **optimizer_simulations_kwargs)
if optimizer_parameters is None:
optimizer_parameters = Adam(self.parameters_net.parameters(), lr=lr_parameters, **optimizer_parameters_kwargs)
else:
optimizer_parameters = optimizer_parameters(self.parameters_net.parameters(), lr=lr_parameters, **optimizer_parameters_kwargs)
if batch_size is None:
batch_size = parameters.shape[0]
n_theta = parameters.shape[0]
loss_list = []
enable_scheduler_simulations = True
enable_scheduler_parameters = True
if scheduler_simulations is False:
enable_scheduler_simulations = False
else:
if scheduler_simulations is None:
scheduler_simulations = lr_scheduler.ExponentialLR
if len(scheduler_simulations_kwargs) == 0:
scheduler_simulations_kwargs = dict(gamma=0.99)
scheduler_simulations = scheduler_simulations(optimizer_simulations, **scheduler_simulations_kwargs)
if scheduler_parameters is False:
enable_scheduler_parameters = False
else:
if scheduler_parameters is None:
scheduler_parameters = lr_scheduler.ExponentialLR
if len(scheduler_parameters_kwargs) == 0:
scheduler_parameters_kwargs = dict(gamma=0.99)
scheduler_parameters = scheduler_parameters(optimizer_parameters, **scheduler_parameters_kwargs)
net_state_dict = None
net_state_dict_theta = None
for epoch in range(0, start_epoch_training):
if enable_scheduler_simulations:
scheduler_simulations.step()
if enable_scheduler_parameters:
scheduler_parameters.step()
for epoch in tqdm(range(start_epoch_training, n_epochs), disable=not use_tqdm):
self.simulations_net.train()
self.parameters_net.train()
indeces = self.rng.permutation(n_theta)
batch_index = 0
total_train_loss_epoch = 0
while batch_size * batch_index < n_theta:
optimizer_simulations.zero_grad()
optimizer_parameters.zero_grad()
batch_indeces = indeces[batch_size * batch_index:batch_size * (batch_index + 1)]
thetas_batch = parameters[batch_indeces].to(self.device)
etas = self.parameters_net(thetas_batch)
samples_batch = simulations[batch_indeces].to(self.device)
batch_loss = batch_steps(samples_batch, etas)
total_train_loss_epoch += batch_loss.item()
if lr_simulations == 0:
set_requires_grad(self.simulations_net, False)
if lr_parameters == 0:
set_requires_grad(self.parameters_net, False)
batch_loss.backward()
if lr_simulations == 0:
set_requires_grad(self.simulations_net, True)
if lr_parameters == 0:
set_requires_grad(self.parameters_net, True)
optimizer_simulations.step()
optimizer_parameters.step()
batch_index += 1
loss_list.append(total_train_loss_epoch / (batch_index + 1))
if compute_test_loss:
if batch_norm_update_before_test:
with torch.no_grad():
batch_index = 0
while batch_size * batch_index < n_theta:
thetas_batch = parameters[batch_size * batch_index:batch_size * (batch_index + 1)].to(self.device)
_ = self.parameters_net(thetas_batch)
batch_index += 1
self.simulations_net.eval()
self.parameters_net.eval()
batch_index = 0
total_test_loss_epoch = 0
while batch_size * batch_index < n_theta_test:
thetas_batch = parameters_val[batch_size * batch_index:batch_size * (batch_index + 1)].to(self.device)
samples_batch = simulations_val[batch_size * batch_index:batch_size * (batch_index + 1)].to(self.device)
etas_test = self.parameters_net(thetas_batch)
total_test_loss_epoch += batch_steps(samples_batch, etas_test).item()
batch_index += 1
test_loss_list.append(total_test_loss_epoch / (batch_index + 1))
if early_stopping and (epoch + 1) % epochs_early_stopping_interval == 0:
if epoch + 1 > start_epoch_early_stopping and net_state_dict is not None:
if test_loss_list[-1] > test_loss_list[-1 - epochs_early_stopping_interval]:
self.logger.info('Training has been early stopped at epoch {}.'.format(epoch + 1))
self.simulations_net.load_state_dict(net_state_dict)
self.parameters_net.load_state_dict(net_state_dict_theta)
break
net_state_dict = self.simulations_net.state_dict()
net_state_dict_theta = self.parameters_net.state_dict()
if enable_scheduler_simulations:
scheduler_simulations.step()
if enable_scheduler_parameters:
scheduler_parameters.step()
self.simulations_net.eval()
self.parameters_net.eval()
if compute_test_loss:
(self.train_losses, self.test_losses) = (loss_list, test_loss_list)
else:
(self.train_losses, self.test_losses) = (loss_list, None)
</DeepExtract>
self.logger.info('Finished learning the transformation.')
self.simulations_net.cpu()
self.parameters_net.cpu()
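# A minimal sketch of the bounded-to-unbounded maps described in the docstring above for
# `lower_bound_simulations` / `upper_bound_simulations` (illustrative only; the actual
# transformation used by this class is abcpy.transformers.BoundedVarTransformer via the
# BoundedVarScaler wrapper, and `to_unbounded` below is a hypothetical helper name):
import numpy as np

def to_unbounded(x, lower=None, upper=None):
    if lower is None and upper is None:
        # no bounds: the variable is left untouched
        return x
    if upper is None:
        # lower bound only: log transform maps (lower, inf) to the real line
        return np.log(x - lower)
    # both bounds: logit transform maps the compact interval (lower, upper) to the real line
    # (the upper-bound-only case is not described in the docstring and is omitted here)
    z = (x - lower) / (upper - lower)
    return np.log(z) - np.log(1.0 - z)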
|
def __init__(self, model, statistics_calc, backend, simulations_net=None, parameters_net=None, embedding_dimension=None, n_samples=1000, n_samples_val=0, parameters=None, simulations=None, parameters_val=None, simulations_val=None, lower_bound_simulations=None, upper_bound_simulations=None, sliced=True, noise_type='radermacher', variance_reduction=False, n_epochs=100, batch_size=16, scale_samples=True, scale_parameters=False, early_stopping=False, epochs_early_stopping_interval=1, start_epoch_early_stopping=10, cuda=None, load_all_data_GPU=False, seed=None, nonlinearity_simulations=None, nonlinearity_parameters=None, batch_norm=True, batch_norm_momentum=0.1, batch_norm_update_before_test=False, lr_simulations=0.001, lr_parameters=0.001, lam=0.0, optimizer_simulations=None, optimizer_parameters=None, scheduler_simulations=None, scheduler_parameters=None, start_epoch_training=0, optimizer_simulations_kwargs={}, optimizer_parameters_kwargs={}, scheduler_simulations_kwargs={}, scheduler_parameters_kwargs={}, use_tqdm=True):
"""
Parameters
----------
model: abcpy.models.Model
Model object that conforms to the Model class.
statistics_calc: abcpy.statistics.Statistics
Statistics object that conforms to the Statistics class, applied before learning the transformation.
backend: abcpy.backends.Backend
Backend object that conforms to the Backend class.
simulations_net: torch.nn object or list, optional
The neural network which transforms the simulations to the summary statistics of the exponential family.
At the end of the training routine, the output of `simulations_net` (except for the last component)
will be the learned summary statistics.
It can be a torch.nn object with input size corresponding to size of model output
(after being transformed by `statistics_calc`) or, alternatively, a list
with integer numbers denoting the width of the hidden layers, from which a fully connected network with
that structure is created, having the input size corresponding to size of model output
(after being transformed by `statistics_calc`) and the output size determined by
`embedding_dimension` (see below). Importantly, the output size of `simulations_net` needs to be equal to
the output size of `parameters_net` increased by one, as the two are used together in the code. If both nets
are left to their default values, this is done automatically.
In case this is None, a fully connected neural network with three
hidden layers is used; the width of the hidden layers is given by
``[int(input_size * 1.5), int(input_size * 0.75 + output_size * 3), int(output_size * 5)]``,
where `input_size` is the size of the data after being transformed by `statistics_calc`, while
`output_size` is determined by `embedding_dimension`. For further details check
:func:`abcpy.NN_utilities.networks.createDefaultNN`. By default, this is None.
parameters_net: torch.nn object or list, optional
The neural network which maps the parameters to the natural parametrization form of the exponential family.
It can be a torch.nn object with input size corresponding to the number of parameters
or, alternatively, a list
with integer numbers denoting the width of the hidden layers, from which a fully connected network with
that structure is created, having the input size corresponding to the number of parameters
and the output size determined by
`embedding_dimension` (see below). Importantly, the output size of `parameters_net` needs to be equal to
the output size of `simulations_net` decreased by one, as the two are used together in the code.
If both nets are left to their default values, this is done automatically.
In case this is None, a fully connected neural network with three
hidden layers is used; the width of the hidden layers is given by
``[int(input_size * 1.5), int(input_size * 0.75 + output_size * 3), int(output_size * 5)]``,
where `input_size` is the number of parameters, while `output_size` is determined by `embedding_dimension`.
For further details check
:func:`abcpy.NN_utilities.networks.createDefaultNN`. By default, this is None.
embedding_dimension: integer, optional
Size of the learned summary statistics if `simulations_net` is None or a list.
Specifically, in these cases
`simulations_net` is automatically created having output size `embedding_dimension + 1`, of which all but
            the last component will represent the learned summaries (the last one instead is a learned base measure).
If also `parameters_net` is None or a list, it will be automatically created with output size equal to
`embedding_dimension`. By default `embedding_dimension` is None, in which case it is fixed to the number
of parameters in the model.
n_samples: int, optional
            The number of (parameter, simulated data) tuples to be generated to learn the summary statistics in the pilot
step. The default value is 1000.
This is ignored if `simulations` and `parameters` are provided.
n_samples_val: int, optional
            The number of (parameter, simulated data) tuples to be generated and used as a validation set in the pilot
step. The default value is 0, which means no validation set is used.
This is ignored if `simulations_val` and `parameters_val` are provided.
parameters: array, optional
A numpy array with shape (n_samples, n_parameters) that is used, together with `simulations` to fit the
summary selection learning algorithm. It has to be provided together with `simulations`, in which case no
other simulations are performed to generate the training data. Default value is None.
simulations: array, optional
A numpy array with shape (n_samples, output_size) that is used, together with `parameters` to fit the
summary selection learning algorithm. It has to be provided together with `parameters`, in which case no
other simulations are performed to generate the training data. These are transformed by the
`statistics_calc` statistics before the learning step is done. Default value is None.
parameters_val: array, optional
A numpy array with shape (n_samples_val, n_parameters) that is used, together with `simulations_val` as a
validation set in the summary selection learning algorithm. It has to be provided together with
`simulations_val`, in which case no other simulations are performed to generate the validation set. Default
value is None.
simulations_val: array, optional
A numpy array with shape (n_samples_val, output_size) that is used, together with `parameters_val` as a
validation set in the summary selection learning algorithm. It has to be provided together with
`parameters_val`, in which case no other simulations are performed to generate the validation set. Default
value is None.
lower_bound_simulations: np.ndarray, optional
Array of the same length of the simulations on which the statistics will be learned (therefore, after
`statistics_calc` has been applied). It contains the lower bounds of the simulations, with each entry
being either None or a number. It works together with `upper_bound_simulations` to determine the
nonlinear transformation mapping the bounded space to an unbounded one: if both upper and lower
bounds for a given entry are None, no transformation is applied to that entry. If both of them are numbers,
a transformation mapping a compact domain to an unbounded one is applied. If instead the lower bound is a
number and the upper one is None, a transformation for lower bounded variables is applied. More details on
the transformations can be found at :class:`abcpy.transformers.BoundedVarTransformer`. By default,
`lower_bound_simulations` is None, in which case all variables are assumed to not be lower bounded.
upper_bound_simulations: np.ndarray, optional
Array of the same length of the simulations on which the statistics will be learned (therefore, after
`statistics_calc` has been applied). It contains the upper bounds of the simulations, with each entry
being either None or a number. It works together with `lower_bound_simulations` to determine the
nonlinear transformation mapping the bounded space to an unbounded one: if both upper and lower
bounds for a given entry are None, no transformation is applied to that entry. If both of them are numbers,
a transformation mapping a compact domain to an unbounded one is applied. If instead the lower bound is a
number and the upper one is None, a transformation for lower bounded variables is applied. More details on
the transformations can be found at :class:`abcpy.transformers.BoundedVarTransformer`. By default,
`upper_bound_simulations` is None, in which case all variables are assumed to not be upper bounded.
sliced: boolean, optional
If True, the exponential family is fit with the sliced Score Matching approach, which is a faster
(stochastic) version of Score Matching. If False, the full Score Matching approach is used. Default is True.
noise_type: basestring, optional
Denotes the noise type used in the sliced Score Matching version. It can be 'radermacher', 'gaussian' or
'sphere', with 'radermacher' being the default one. Ignored if `sliced=False`.
variance_reduction: boolean, optional
If True, use the variance reduction version of Sliced Score Matching (when that is used), which replaces a
            term with its exact expectation over the noise distribution. Cannot be used when `noise_type='sphere'`.
Default is False, ignored if `sliced=False`.
n_epochs: integer, optional
the number of epochs used for training the neural network. Default is 100
batch_size: integer, optional
the batch size used for training the neural network. Default is 16
scale_samples: boolean, optional
If True, the simulations are scaled to the (0,1) range before the transformation is learned (i.e., before
being fed to the neural network). This happens after the simulations have been transformed with
`statistics_calc` and after the (optional) nonlinear transformation governed by `lower_bound_simulations`
and `upper_bound_simulations` is applied. This relies on a wrapping of `sklearn.preprocessing.MinMaxScaler`.
The validation set will also be rescaled in the same fashion.
When calling the `get_statistics` and the `get_simulations_network` methods,
the network will be wrapped by :class:`abcpy.NN_utilities.networks.ScalerAndNet`; this automatically
takes care of transforming the data with the scaler before applying the neural network.
It is highly recommended to use a scaler, as neural networks are sensitive to the range of input data. A
case in which you may not want to use a scaler is timeseries data, as the scaler works independently on each
feature of the data.
Default value is True.
scale_parameters: boolean, optional
If True, the parameters are scaled to the (0,1) range before the natural parameters transformation
is learned (i.e., before being fed to the neural network).
This relies on a wrapping of `sklearn.preprocessing.MinMaxScaler`.
The validation set will also be rescaled in the same fashion.
When calling the `get_statistics` and the `get_parameters_network` methods,
the network will be wrapped by :class:`abcpy.NN_utilities.networks.ScalerAndNet`; this automatically
takes care of transforming the data with the scaler before applying the neural network.
            For parameters, the scaler is not as critical as for simulations, as parameters usually have smaller ranges.
            If however the different parameters differ by orders of magnitude, using a scaler is recommended.
Default value is False.
early_stopping: boolean, optional
If True, the validation set (which needs to be either provided through the arguments `parameters_val` and
`simulations_val` or generated by setting `n_samples_val` to a value larger than 0) is used to early stop
the training of the neural network as soon as the loss on the validation set starts to increase. Default
value is False.
epochs_early_stopping_interval: integer, optional
The frequency at which the validation error is compared in order to decide whether to early stop the
training or not. Namely, if `epochs_early_stopping_interval=10`, early stopping can happen only at epochs
multiple of 10. Default value is 1.
start_epoch_early_stopping: integer, optional
The epoch after which early stopping can happen; in fact, as soon as training starts, there may be a
transient period in which the loss increases. Default value is 10.
cuda: boolean, optional
            If cuda=None, the GPU is selected when available. Alternatively, you can pass True to use the GPU or False to use the CPU.
load_all_data_GPU: boolean, optional
            If True and a GPU is used, the whole dataset is loaded on the GPU before training begins; this may
            speed up training as it avoids transfers between CPU and GPU, but that is not guaranteed. Note that
            setting this to True causes a crash if the dataset is too large to fit in GPU memory.
            Defaults to False; you should not rely too much on this.
seed: integer, optional
Optional initial seed for the random number generator. The default value is generated randomly.
nonlinearity_simulations: torch.nn class, optional
            If the neural network for the simulations is built automatically (i.e. when `simulations_net` is either a
            list or None), then this is the nonlinearity used. Default is `torch.nn.Softplus`. This is because the Score
Matching routine (when `sliced=False`) needs the output of the simulations net to have a non-zero second
derivative with respect to data, which does not happen when using the common ReLU nonlinearity.
nonlinearity_parameters: torch.nn class, optional
            If the neural network for the parameters is built automatically (i.e. when `parameters_net` is either a
            list or None), then this is the nonlinearity used. Default is `torch.nn.ReLU`.
batch_norm: boolean, optional
If True, a batch normalization layer is put on top of the parameters net when that is built automatically.
This improves the performance of the method as it reduces the degeneracy of the
(summary statistics) * (natural parameters) product. Default is True.
batch_norm_momentum: float, optional
Momentum value with which the batch estimates in the batch norm layer are updated at each batch; see
`torch.nn.BatchNorm1d` for more information. Default is 0.1. Ignored if `batch_norm` is False, or if
an actual `parameters_net` is provided.
batch_norm_update_before_test: boolean, optional
            When using a batch norm layer on the test set, the resulting test loss evaluation can be noisy as the
            batch norm estimates change during the train phase. To reduce this issue, it is enough to perform a simple
            forward pass of the full train set (without backprop or loss evaluation) before the testing phase is
started. Set `batch_norm_update_before_test=True` to do that. Default is False.
Ignored if `batch_norm` is False, if an actual `parameters_net` is provided, as well as if no test set
is present.
lr_simulations: float, optional
The learning rate to be used in the iterative training scheme for the simulations neural network.
Default to 1e-3.
lr_parameters: float, optional
The learning rate to be used in the iterative training scheme for the parameters neural network.
Default to 1e-3.
lam: float, optional
            If the full Score Matching approach is used (i.e. `sliced=False`), this denotes the amount of
            second derivative regularization added to the Score Matching loss in the way proposed in Kingma & LeCun
            (2010). Default is 0, corresponding to no regularization.
optimizer_simulations: torch Optimizer class, optional
A torch Optimizer class, for instance `SGD` or `Adam`, to be used for the simulations network.
Default to `Adam`. Additional parameters may be passed through the `optimizer_simulations_kwargs` argument.
optimizer_parameters: torch Optimizer class, optional
A torch Optimizer class, for instance `SGD` or `Adam`, to be used for the parameters network.
Default to `Adam`. Additional parameters may be passed through the `optimizer_parameters_kwargs` argument.
scheduler_simulations: torch _LRScheduler class, optional
A torch _LRScheduler class, used to modify the learning rate across epochs for the simulations net.
By default, a :class:`torch.optim.lr_scheduler.ExponentialLR` scheduler with `gamma=0.99` is used.
Additional arguments may be passed through the `scheduler_simulations_kwargs` parameter.
scheduler_parameters: torch _LRScheduler class, optional
A torch _LRScheduler class, used to modify the learning rate across epochs for the parameters net.
By default, a :class:`torch.optim.lr_scheduler.ExponentialLR` scheduler with `gamma=0.99` is used.
Additional arguments may be passed through the `scheduler_parameters_kwargs` parameter.
start_epoch_training: integer, optional
            If a scheduler is used, for the first `start_epoch_training` epochs the scheduler is applied to modify
the learning rate without training the network. From then on, the training proceeds normally, applying both
the scheduler and the optimizer at each epoch. Default to 0.
optimizer_simulations_kwargs: Python dictionary, optional
dictionary containing optional keyword arguments for the optimizer used for the simulations network.
optimizer_parameters_kwargs: Python dictionary, optional
dictionary containing optional keyword arguments for the optimizer used for the parameters network.
scheduler_simulations_kwargs: Python dictionary, optional
dictionary containing optional keyword arguments for the simulations scheduler.
scheduler_parameters_kwargs: Python dictionary, optional
dictionary containing optional keyword arguments for the parameters scheduler.
use_tqdm : boolean, optional
Whether using tqdm or not to display progress. Defaults to True.
"""
self.logger = logging.getLogger(__name__)
self.scale_samples = scale_samples
self.scale_parameters = scale_parameters
self.sliced = sliced
if lower_bound_simulations is not None and (not hasattr(lower_bound_simulations, 'shape')):
raise RuntimeError('Provided lower bounds need to be a numpy array.')
if upper_bound_simulations is not None and (not hasattr(upper_bound_simulations, 'shape')):
raise RuntimeError('Provided upper bounds need to be a numpy array.')
if upper_bound_simulations is not None and lower_bound_simulations is not None and (lower_bound_simulations.shape != upper_bound_simulations.shape):
raise RuntimeError('Provided lower and upper bounds need to have same shape.')
if not has_torch:
raise ImportError('Pytorch is required to instantiate an element of the {} class, in order to handle neural networks. Please install it. '.format(self.__class__.__name__))
if seed is not None:
torch.manual_seed(seed)
if cuda is None:
cuda = torch.cuda.is_available()
elif cuda and (not torch.cuda.is_available()):
cuda = False
self.logger.warning('You requested to use GPU but no GPU is available! The computation will proceed on CPU.')
self.device = 'cuda' if cuda and torch.cuda.is_available() else 'cpu'
if self.device == 'cuda':
self.logger.debug('We are using GPU to train the network.')
else:
self.logger.debug('We are using CPU to train the network.')
super(ExponentialFamilyScoreMatching, self).__init__(model, statistics_calc, backend, n_samples, n_samples_val, 1, parameters, simulations, seed=seed, parameters_val=parameters_val, simulations_val=simulations_val)
self.has_val_set = hasattr(self, 'sample_parameters_val') and len(self.sample_parameters_val) > 0
self.logger.info('Learning of the transformation...')
(parameters, simulations) = (self.sample_parameters, self.sample_statistics)
if self.has_val_set:
(parameters_val, simulations_val) = (self.sample_parameters_val, self.sample_statistics_val)
else:
(parameters_val, simulations_val) = (None, None)
if lower_bound_simulations is None and upper_bound_simulations is None and (not scale_samples):
self.has_scaler_for_simulations = False
else:
self.has_scaler_for_simulations = True
if lower_bound_simulations is None:
lower_bound_simulations = np.array([None] * simulations.shape[1])
if upper_bound_simulations is None:
upper_bound_simulations = np.array([None] * simulations.shape[1])
self.scaler_simulations = BoundedVarScaler(lower_bound_simulations, upper_bound_simulations, rescale_transformed_vars=self.scale_samples).fit(simulations)
simulations = self.scaler_simulations.transform(simulations)
if self.has_val_set:
simulations_val = self.scaler_simulations.transform(simulations_val)
if self.scale_parameters:
self.scaler_parameters = MinMaxScaler().fit(parameters)
parameters = self.scaler_parameters.transform(parameters)
if self.has_val_set:
parameters_val = self.scaler_parameters.transform(parameters_val)
simulations = torch.tensor(simulations.astype('float32'), requires_grad=True)
parameters = torch.tensor(parameters.astype('float32'), requires_grad=False)
if self.has_val_set:
simulations_val = torch.tensor(simulations_val.astype('float32'), requires_grad=True)
parameters_val = torch.tensor(parameters_val.astype('float32'), requires_grad=False)
if embedding_dimension is None:
embedding_dimension = parameters.shape[1]
if isinstance(simulations_net, torch.nn.Module):
self.simulations_net = simulations_net
self.logger.debug('We use the provided neural network for the summary statistics')
elif isinstance(simulations_net, list) or simulations_net is None:
self.simulations_net = createDefaultNNWithDerivatives(input_size=simulations.shape[1], output_size=embedding_dimension + 1, hidden_sizes=simulations_net, nonlinearity=torch.nn.Softplus if nonlinearity_simulations is None else nonlinearity_simulations)()
self.logger.debug('We generate a default neural network for the summary statistics')
else:
raise RuntimeError("'simulations_net' needs to be either a torch.nn.Module, or a list, or None.")
if isinstance(parameters_net, torch.nn.Module):
self.parameters_net = parameters_net
self.logger.debug('We use the provided neural network for the parameters')
elif isinstance(parameters_net, list) or parameters_net is None:
self.parameters_net = createDefaultNN(input_size=parameters.shape[1], output_size=embedding_dimension, hidden_sizes=parameters_net, nonlinearity=torch.nn.ReLU() if nonlinearity_parameters is None else nonlinearity_parameters(), batch_norm_last_layer=batch_norm, batch_norm_last_layer_momentum=batch_norm_momentum)()
self.logger.debug('We generate a default neural network for the parameters')
else:
raise RuntimeError("'parameters_net' needs to be either a torch.nn.Module, or a list, or None.")
if cuda:
self.simulations_net.cuda()
self.parameters_net.cuda()
self.logger.debug('We now run the training routine')
if self.sliced:
batch_steps = lambda samples, etas: self._single_sliced_score_matching(samples, etas, noise_type=noise_type, variance_reduction=variance_reduction)
else:
batch_steps = lambda samples, etas: self._batch_Fisher_div_with_c_x(samples, etas, lam=lam)
if load_all_data_GPU:
simulations = simulations.to(self.device)
if simulations_val is not None:
simulations_val = simulations_val.to(self.device)
parameters = parameters.to(self.device)
if parameters_val is not None:
parameters_val = parameters_val.to(self.device)
compute_test_loss = False
if parameters_val is not None and simulations_val is not None:
test_loss_list = []
compute_test_loss = True
n_theta_test = parameters_val.shape[0]
if optimizer_simulations is None:
optimizer_simulations = Adam(self.simulations_net.parameters(), lr=lr_simulations, **optimizer_simulations_kwargs)
else:
optimizer_simulations = optimizer_simulations(self.simulations_net.parameters(), lr=lr_simulations, **optimizer_simulations_kwargs)
if optimizer_parameters is None:
optimizer_parameters = Adam(self.parameters_net.parameters(), lr=lr_parameters, **optimizer_parameters_kwargs)
else:
optimizer_parameters = optimizer_parameters(self.parameters_net.parameters(), lr=lr_parameters, **optimizer_parameters_kwargs)
if batch_size is None:
batch_size = parameters.shape[0]
n_theta = parameters.shape[0]
loss_list = []
enable_scheduler_simulations = True
enable_scheduler_parameters = True
if scheduler_simulations is False:
enable_scheduler_simulations = False
else:
if scheduler_simulations is None:
scheduler_simulations = lr_scheduler.ExponentialLR
if len(scheduler_simulations_kwargs) == 0:
scheduler_simulations_kwargs = dict(gamma=0.99)
scheduler_simulations = scheduler_simulations(optimizer_simulations, **scheduler_simulations_kwargs)
if scheduler_parameters is False:
enable_scheduler_parameters = False
else:
if scheduler_parameters is None:
scheduler_parameters = lr_scheduler.ExponentialLR
if len(scheduler_parameters_kwargs) == 0:
scheduler_parameters_kwargs = dict(gamma=0.99)
scheduler_parameters = scheduler_parameters(optimizer_parameters, **scheduler_parameters_kwargs)
net_state_dict = None
net_state_dict_theta = None
for epoch in range(0, start_epoch_training):
if enable_scheduler_simulations:
scheduler_simulations.step()
if enable_scheduler_parameters:
scheduler_parameters.step()
for epoch in tqdm(range(start_epoch_training, n_epochs), disable=not use_tqdm):
self.simulations_net.train()
self.parameters_net.train()
indeces = self.rng.permutation(n_theta)
batch_index = 0
total_train_loss_epoch = 0
while batch_size * batch_index < n_theta:
optimizer_simulations.zero_grad()
optimizer_parameters.zero_grad()
batch_indeces = indeces[batch_size * batch_index:batch_size * (batch_index + 1)]
thetas_batch = parameters[batch_indeces].to(self.device)
etas = self.parameters_net(thetas_batch)
samples_batch = simulations[batch_indeces].to(self.device)
batch_loss = batch_steps(samples_batch, etas)
total_train_loss_epoch += batch_loss.item()
if lr_simulations == 0:
set_requires_grad(self.simulations_net, False)
if lr_parameters == 0:
set_requires_grad(self.parameters_net, False)
batch_loss.backward()
if lr_simulations == 0:
set_requires_grad(self.simulations_net, True)
if lr_parameters == 0:
set_requires_grad(self.parameters_net, True)
optimizer_simulations.step()
optimizer_parameters.step()
batch_index += 1
loss_list.append(total_train_loss_epoch / (batch_index + 1))
if compute_test_loss:
if batch_norm_update_before_test:
with torch.no_grad():
batch_index = 0
while batch_size * batch_index < n_theta:
thetas_batch = parameters[batch_size * batch_index:batch_size * (batch_index + 1)].to(self.device)
_ = self.parameters_net(thetas_batch)
batch_index += 1
self.simulations_net.eval()
self.parameters_net.eval()
batch_index = 0
total_test_loss_epoch = 0
while batch_size * batch_index < n_theta_test:
thetas_batch = parameters_val[batch_size * batch_index:batch_size * (batch_index + 1)].to(self.device)
samples_batch = simulations_val[batch_size * batch_index:batch_size * (batch_index + 1)].to(self.device)
etas_test = self.parameters_net(thetas_batch)
total_test_loss_epoch += batch_steps(samples_batch, etas_test).item()
batch_index += 1
test_loss_list.append(total_test_loss_epoch / (batch_index + 1))
if early_stopping and (epoch + 1) % epochs_early_stopping_interval == 0:
if epoch + 1 > start_epoch_early_stopping and net_state_dict is not None:
if test_loss_list[-1] > test_loss_list[-1 - epochs_early_stopping_interval]:
self.logger.info('Training has been early stopped at epoch {}.'.format(epoch + 1))
self.simulations_net.load_state_dict(net_state_dict)
self.parameters_net.load_state_dict(net_state_dict_theta)
break
net_state_dict = self.simulations_net.state_dict()
net_state_dict_theta = self.parameters_net.state_dict()
if enable_scheduler_simulations:
scheduler_simulations.step()
if enable_scheduler_parameters:
scheduler_parameters.step()
self.simulations_net.eval()
self.parameters_net.eval()
if compute_test_loss:
(self.train_losses, self.test_losses) = (loss_list, test_loss_list)
else:
(self.train_losses, self.test_losses) = (loss_list, None)
self.logger.info('Finished learning the transformation.')
self.simulations_net.cpu()
self.parameters_net.cpu()
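# A self-contained sketch of one sliced Score Matching evaluation, the objective referred to
# by the `sliced`, `noise_type` and `variance_reduction` arguments documented above
# (illustrative only; it is not this class's internal _single_sliced_score_matching routine,
# and `sliced_score_matching_loss` / `score_fn` are hypothetical names):
import torch

def sliced_score_matching_loss(score_fn, x, noise_type='radermacher'):
    # x: (batch, dim) tensor with requires_grad=True; score_fn(x) returns d log p(x) / dx
    v = torch.randn_like(x)
    if noise_type == 'radermacher':
        v = v.sign()  # Rademacher +/-1 projections
    s = score_fn(x)
    # Hutchinson-style estimate of v^T (ds/dx) v using one extra backward pass
    grad_sv = torch.autograd.grad((s * v).sum(), x, create_graph=True)[0]
    term1 = (grad_sv * v).sum(dim=1)
    term2 = 0.5 * (s * v).sum(dim=1) ** 2
    return (term1 + term2).mean()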
|
abcpy
|
positive
|
def strip_tags(self, html):
<DeepExtract>
try:
u = unescape
except NameError:
h = HTMLParser()
u = h.unescape
html = u(html)
</DeepExtract>
s = MLStripper()
s.feed(html)
return s.get_data()
|
def strip_tags(self, html):
try:
u = unescape
except NameError:
h = HTMLParser()
u = h.unescape
html = u(html)
s = MLStripper()
s.feed(html)
return s.get_data()
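# Illustrative usage of the method above (a sketch added for clarity; it assumes MLStripper
# and the HTMLParser-based unescape fallback are defined in the surrounding module, and that
# strip_tags is called on an instance since its first argument is self):
#   self.strip_tags('<b>ham &amp; eggs</b>')    # -> 'ham & eggs'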
|
CorpBot.py
|
positive
|
def test_net_on_dataset(weights_file, dataset_name, proposal_file, output_dir, multi_gpu=False, gpu_id=0):
"""Run inference on a dataset."""
dataset = JsonDataset(dataset_name)
test_timer = Timer()
test_timer.tic()
if multi_gpu:
num_images = len(dataset.get_roidb())
<DeepExtract>
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, 'test_net' + binary_ext)
assert os.path.exists(binary), "Binary '{}' not found".format(binary)
opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
opts += ['TEST.WEIGHTS', weights_file]
if proposal_file:
opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]
outputs = subprocess_utils.process_in_parallel('detection', num_images, binary, output_dir, opts)
all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
for det_data in outputs:
all_boxes_batch = det_data['all_boxes']
all_segms_batch = det_data['all_segms']
all_keyps_batch = det_data['all_keyps']
for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
all_boxes[cls_idx] += all_boxes_batch[cls_idx]
all_segms[cls_idx] += all_segms_batch[cls_idx]
all_keyps[cls_idx] += all_keyps_batch[cls_idx]
det_file = os.path.join(output_dir, 'detections.pkl')
cfg_yaml = yaml.dump(cfg)
save_object(dict(all_boxes=all_boxes, all_segms=all_segms, all_keyps=all_keyps, cfg=cfg_yaml), det_file)
logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
(all_boxes, all_segms, all_keyps) = (all_boxes, all_segms, all_keyps)
</DeepExtract>
else:
<DeepExtract>
assert not cfg.MODEL.RPN_ONLY, 'Use rpn_generate to generate proposals from RPN-only models'
        ind_range = None  # assumed: full-dataset inference (ind_range is not a parameter of this inlined variant)
        (roidb, dataset, start_ind, end_ind, total_num_images) = get_roidb_and_dataset(dataset_name, proposal_file, ind_range)
model = initialize_model_from_cfg(weights_file, gpu_id=gpu_id)
num_images = len(roidb)
num_classes = cfg.MODEL.NUM_CLASSES
(all_boxes, all_segms, all_keyps) = empty_results(num_classes, num_images)
timers = defaultdict(Timer)
for (i, entry) in enumerate(roidb):
if cfg.TEST.PRECOMPUTED_PROPOSALS:
box_proposals = entry['boxes'][entry['gt_classes'] == 0]
if len(box_proposals) == 0:
continue
else:
box_proposals = None
im = cv2.imread(entry['image'])
with c2_utils.NamedCudaScope(gpu_id):
(cls_boxes_i, cls_segms_i, cls_keyps_i) = im_detect_all(model, im, box_proposals, timers)
extend_results(i, all_boxes, cls_boxes_i)
if cls_segms_i is not None:
extend_results(i, all_segms, cls_segms_i)
if cls_keyps_i is not None:
extend_results(i, all_keyps, cls_keyps_i)
if i % 10 == 0:
ave_total_time = np.sum([t.average_time for t in timers.values()])
eta_seconds = ave_total_time * (num_images - i - 1)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
det_time = timers['im_detect_bbox'].average_time + timers['im_detect_mask'].average_time + timers['im_detect_keypoints'].average_time
misc_time = timers['misc_bbox'].average_time + timers['misc_mask'].average_time + timers['misc_keypoints'].average_time
logger.info('im_detect: range [{:d}, {:d}] of {:d}: {:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'.format(start_ind + 1, end_ind, total_num_images, start_ind + i + 1, start_ind + num_images, det_time, misc_time, eta))
if cfg.VIS:
im_name = os.path.splitext(os.path.basename(entry['image']))[0]
vis_utils.vis_one_image(im[:, :, ::-1], '{:d}_{:s}'.format(i, im_name), os.path.join(output_dir, 'vis'), cls_boxes_i, segms=cls_segms_i, keypoints=cls_keyps_i, thresh=cfg.VIS_TH, box_alpha=0.8, dataset=dataset, show_class=True)
cfg_yaml = yaml.dump(cfg)
if ind_range is not None:
det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
else:
det_name = 'detections.pkl'
det_file = os.path.join(output_dir, det_name)
save_object(dict(all_boxes=all_boxes, all_segms=all_segms, all_keyps=all_keyps, cfg=cfg_yaml), det_file)
logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
(all_boxes, all_segms, all_keyps) = (all_boxes, all_segms, all_keyps)
</DeepExtract>
test_timer.toc()
logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms, all_keyps, output_dir)
return results
|
def test_net_on_dataset(weights_file, dataset_name, proposal_file, output_dir, multi_gpu=False, gpu_id=0):
"""Run inference on a dataset."""
dataset = JsonDataset(dataset_name)
test_timer = Timer()
test_timer.tic()
if multi_gpu:
num_images = len(dataset.get_roidb())
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, 'test_net' + binary_ext)
assert os.path.exists(binary), "Binary '{}' not found".format(binary)
opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
opts += ['TEST.WEIGHTS', weights_file]
if proposal_file:
opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]
outputs = subprocess_utils.process_in_parallel('detection', num_images, binary, output_dir, opts)
all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
for det_data in outputs:
all_boxes_batch = det_data['all_boxes']
all_segms_batch = det_data['all_segms']
all_keyps_batch = det_data['all_keyps']
for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
all_boxes[cls_idx] += all_boxes_batch[cls_idx]
all_segms[cls_idx] += all_segms_batch[cls_idx]
all_keyps[cls_idx] += all_keyps_batch[cls_idx]
det_file = os.path.join(output_dir, 'detections.pkl')
cfg_yaml = yaml.dump(cfg)
save_object(dict(all_boxes=all_boxes, all_segms=all_segms, all_keyps=all_keyps, cfg=cfg_yaml), det_file)
logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
(all_boxes, all_segms, all_keyps) = (all_boxes, all_segms, all_keyps)
else:
assert not cfg.MODEL.RPN_ONLY, 'Use rpn_generate to generate proposals from RPN-only models'
(roidb, dataset, start_ind, end_ind, total_num_images) = get_roidb_and_dataset(dataset_name, proposal_file, ind_range)
model = initialize_model_from_cfg(weights_file, gpu_id=gpu_id)
num_images = len(roidb)
num_classes = cfg.MODEL.NUM_CLASSES
(all_boxes, all_segms, all_keyps) = empty_results(num_classes, num_images)
timers = defaultdict(Timer)
for (i, entry) in enumerate(roidb):
if cfg.TEST.PRECOMPUTED_PROPOSALS:
box_proposals = entry['boxes'][entry['gt_classes'] == 0]
if len(box_proposals) == 0:
continue
else:
box_proposals = None
im = cv2.imread(entry['image'])
with c2_utils.NamedCudaScope(gpu_id):
(cls_boxes_i, cls_segms_i, cls_keyps_i) = im_detect_all(model, im, box_proposals, timers)
extend_results(i, all_boxes, cls_boxes_i)
if cls_segms_i is not None:
extend_results(i, all_segms, cls_segms_i)
if cls_keyps_i is not None:
extend_results(i, all_keyps, cls_keyps_i)
if i % 10 == 0:
ave_total_time = np.sum([t.average_time for t in timers.values()])
eta_seconds = ave_total_time * (num_images - i - 1)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
det_time = timers['im_detect_bbox'].average_time + timers['im_detect_mask'].average_time + timers['im_detect_keypoints'].average_time
misc_time = timers['misc_bbox'].average_time + timers['misc_mask'].average_time + timers['misc_keypoints'].average_time
logger.info('im_detect: range [{:d}, {:d}] of {:d}: {:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'.format(start_ind + 1, end_ind, total_num_images, start_ind + i + 1, start_ind + num_images, det_time, misc_time, eta))
if cfg.VIS:
im_name = os.path.splitext(os.path.basename(entry['image']))[0]
vis_utils.vis_one_image(im[:, :, ::-1], '{:d}_{:s}'.format(i, im_name), os.path.join(output_dir, 'vis'), cls_boxes_i, segms=cls_segms_i, keypoints=cls_keyps_i, thresh=cfg.VIS_TH, box_alpha=0.8, dataset=dataset, show_class=True)
cfg_yaml = yaml.dump(cfg)
if ind_range is not None:
det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
else:
det_name = 'detections.pkl'
det_file = os.path.join(output_dir, det_name)
save_object(dict(all_boxes=all_boxes, all_segms=all_segms, all_keyps=all_keyps, cfg=cfg_yaml), det_file)
logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
(all_boxes, all_segms, all_keyps) = (all_boxes, all_segms, all_keyps)
test_timer.toc()
logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms, all_keyps, output_dir)
return results
|
CBNet
|
positive
|
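The multi-GPU branch of `test_net_on_dataset` above gathers per-class detection lists from several worker processes and concatenates them class by class. A small self-contained sketch of that class-wise merge, using made-up placeholder detections rather than Detectron data structures:

def merge_per_class(worker_outputs, num_classes):
    # Concatenate each worker's per-class lists, skipping class 0 (background),
    # as in the gather loop of the multi-GPU branch above.
    merged = [[] for _ in range(num_classes)]
    for det in worker_outputs:
        for cls_idx in range(1, num_classes):
            merged[cls_idx] += det[cls_idx]
    return merged

workers = [[[], ['box_a1'], ['box_b1']], [[], ['box_a2'], []]]
print(merge_per_class(workers, 3))  # [[], ['box_a1', 'box_a2'], ['box_b1']]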
def getBolusWizardCarbRatios(self):
logger.info('# Get Pump Status')
mtMessage = PumpBolusWizardCarbRatiosRequestMessage(self.session)
bayerMessage = BayerBinaryMessage(18, self.session, mtMessage.encode())
<DeepExtract>
self.clearMessage(timeout_ms=self.PRESEND_CLEAR_TIMEOUT_MS)
for packet in [bayerMessage.encode()[i:i + 60] for i in range(0, len(bayerMessage.encode()), 60)]:
message = struct.pack('>3sB', self.MAGIC_HEADER, len(packet)) + packet
self.device.write(bytearray(message))
</DeepExtract>
<DeepExtract>
logger.debug('## readResponse0x81')
try:
while True:
payload = self.readMessage()
if len(payload) < 33:
logger.warning('## readResponse0x81: message size less then expected, length = {0}'.format(len(payload)))
elif payload[18] & 255 != 129:
logger.warning('## readResponse0x81: message not a 0x81, got a 0x{0:x}'.format(payload[18]))
else:
break
except TimeoutException:
logger.error('readResponse0x81: timeout waiting for 0x81 response')
raise TimeoutException('Timeout waiting for 0x81 response')
if len(payload) <= 33:
logger.error('readResponse0x81: message size <= 0x21')
self.clearMessage()
raise UnexpectedMessageException('0x81 response was empty, connection lost')
elif len(payload) != 33 + payload[28] & 255 | payload[29] << 8 & 65280:
logger.error('readResponse0x81: message size mismatch')
self.clearMessage()
raise UnexpectedMessageException('0x81 response message size mismatch')
elif payload[33] != 85:
logger.error('readResponse0x81: message no internal 0x55')
self.clearMessage()
raise UnexpectedMessageException('0x81 response was not a 0x55 message')
if len(payload) == 48:
if payload[45] == 4:
logger.warning('## readResponse0x81: message [0x2D]==0x04 (noisy/busy)')
elif payload[45] != 2:
logger.error('readResponse0x81: message [0x2D]!=0x02 (unknown state)')
self.clearMessage()
raise UnexpectedMessageException('0x81 unknown state flag')
elif len(payload) == 39 and payload[35] == 0 and (payload[36] == 0):
logger.warning("## readResponse0x81: message containing '55 04 00 00' (network not connected)")
else:
logger.warning('## readResponse0x81: unknown 0x55 message type')
return payload
</DeepExtract>
<DeepExtract>
messageReceived = False
medMessage = None
while messageReceived == False:
message = self.readResponse0x80()
medMessage = MedtronicReceiveMessage.decode(message.payload, self.session)
if medMessage.messageType in [COM_D_COMMAND.READ_BOLUS_WIZARD_CARB_RATIOS_RESPONSE]:
messageReceived = True
else:
logger.warning('## getMedtronicMessage: waiting for message of [{0}], got 0x{1:x}'.format(''.join(('%04x ' % i for i in [COM_D_COMMAND.READ_BOLUS_WIZARD_CARB_RATIOS_RESPONSE])), medMessage.messageType))
response = medMessage
</DeepExtract>
return response
|
def getBolusWizardCarbRatios(self):
logger.info('# Get Pump Status')
mtMessage = PumpBolusWizardCarbRatiosRequestMessage(self.session)
bayerMessage = BayerBinaryMessage(18, self.session, mtMessage.encode())
self.clearMessage(timeout_ms=self.PRESEND_CLEAR_TIMEOUT_MS)
for packet in [bayerMessage.encode()[i:i + 60] for i in range(0, len(bayerMessage.encode()), 60)]:
message = struct.pack('>3sB', self.MAGIC_HEADER, len(packet)) + packet
self.device.write(bytearray(message))
logger.debug('## readResponse0x81')
try:
while True:
payload = self.readMessage()
if len(payload) < 33:
logger.warning('## readResponse0x81: message size less then expected, length = {0}'.format(len(payload)))
elif payload[18] & 255 != 129:
logger.warning('## readResponse0x81: message not a 0x81, got a 0x{0:x}'.format(payload[18]))
else:
break
except TimeoutException:
logger.error('readResponse0x81: timeout waiting for 0x81 response')
raise TimeoutException('Timeout waiting for 0x81 response')
if len(payload) <= 33:
logger.error('readResponse0x81: message size <= 0x21')
self.clearMessage()
raise UnexpectedMessageException('0x81 response was empty, connection lost')
elif len(payload) != 33 + payload[28] & 255 | payload[29] << 8 & 65280:
logger.error('readResponse0x81: message size mismatch')
self.clearMessage()
raise UnexpectedMessageException('0x81 response message size mismatch')
elif payload[33] != 85:
logger.error('readResponse0x81: message no internal 0x55')
self.clearMessage()
raise UnexpectedMessageException('0x81 response was not a 0x55 message')
if len(payload) == 48:
if payload[45] == 4:
logger.warning('## readResponse0x81: message [0x2D]==0x04 (noisy/busy)')
elif payload[45] != 2:
logger.error('readResponse0x81: message [0x2D]!=0x02 (unknown state)')
self.clearMessage()
raise UnexpectedMessageException('0x81 unknown state flag')
elif len(payload) == 39 and payload[35] == 0 and (payload[36] == 0):
logger.warning("## readResponse0x81: message containing '55 04 00 00' (network not connected)")
else:
logger.warning('## readResponse0x81: unknown 0x55 message type')
return payload
messageReceived = False
medMessage = None
while messageReceived == False:
message = self.readResponse0x80()
medMessage = MedtronicReceiveMessage.decode(message.payload, self.session)
if medMessage.messageType in [COM_D_COMMAND.READ_BOLUS_WIZARD_CARB_RATIOS_RESPONSE]:
messageReceived = True
else:
logger.warning('## getMedtronicMessage: waiting for message of [{0}], got 0x{1:x}'.format(''.join(('%04x ' % i for i in [COM_D_COMMAND.READ_BOLUS_WIZARD_CARB_RATIOS_RESPONSE])), medMessage.messageType))
response = medMessage
return response
|
decoding-contour-next-link
|
positive
|
def loss_def(self):
config = self.get_config()
(pos_h, pos_t, pos_r) = self.get_positive_instance(in_batch=True)
(neg_h, neg_t, neg_r) = self.get_negative_instance(in_batch=True)
pos_h_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, pos_h), [-1, config.ent_size, 1])
pos_t_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, pos_t), [-1, config.ent_size, 1])
pos_r_e = tf.reshape(tf.nn.embedding_lookup(self.rel_embeddings, pos_r), [-1, config.rel_size])
neg_h_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, neg_h), [-1, config.ent_size, 1])
neg_t_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, neg_t), [-1, config.ent_size, 1])
neg_r_e = tf.reshape(tf.nn.embedding_lookup(self.rel_embeddings, neg_r), [-1, config.rel_size])
pos_matrix = tf.reshape(tf.nn.embedding_lookup(self.transfer_matrix, pos_r), [-1, config.rel_size, config.ent_size])
neg_matrix = tf.reshape(tf.nn.embedding_lookup(self.transfer_matrix, neg_r), [-1, config.rel_size, config.ent_size])
p_h = tf.reshape(self._transfer(pos_matrix, pos_h_e), [-1, config.rel_size])
p_t = tf.reshape(self._transfer(pos_matrix, pos_t_e), [-1, config.rel_size])
p_r = pos_r_e
n_h = tf.reshape(self._transfer(neg_matrix, neg_h_e), [-1, config.rel_size])
n_t = tf.reshape(self._transfer(neg_matrix, neg_t_e), [-1, config.rel_size])
n_r = neg_r_e
<DeepExtract>
_p_score = abs(p_h + p_r - p_t)
</DeepExtract>
_p_score = tf.reshape(_p_score, [-1, 1, config.rel_size])
<DeepExtract>
_n_score = abs(n_h + n_r - n_t)
</DeepExtract>
_n_score = tf.reshape(_n_score, [-1, config.negative_ent + config.negative_rel, config.rel_size])
p_score = tf.reduce_sum(tf.reduce_mean(_p_score, 1, keep_dims=False), 1, keep_dims=True)
n_score = tf.reduce_sum(tf.reduce_mean(_n_score, 1, keep_dims=False), 1, keep_dims=True)
self.loss = tf.reduce_sum(tf.maximum(p_score - n_score + config.margin, 0))
|
def loss_def(self):
config = self.get_config()
(pos_h, pos_t, pos_r) = self.get_positive_instance(in_batch=True)
(neg_h, neg_t, neg_r) = self.get_negative_instance(in_batch=True)
pos_h_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, pos_h), [-1, config.ent_size, 1])
pos_t_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, pos_t), [-1, config.ent_size, 1])
pos_r_e = tf.reshape(tf.nn.embedding_lookup(self.rel_embeddings, pos_r), [-1, config.rel_size])
neg_h_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, neg_h), [-1, config.ent_size, 1])
neg_t_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, neg_t), [-1, config.ent_size, 1])
neg_r_e = tf.reshape(tf.nn.embedding_lookup(self.rel_embeddings, neg_r), [-1, config.rel_size])
pos_matrix = tf.reshape(tf.nn.embedding_lookup(self.transfer_matrix, pos_r), [-1, config.rel_size, config.ent_size])
neg_matrix = tf.reshape(tf.nn.embedding_lookup(self.transfer_matrix, neg_r), [-1, config.rel_size, config.ent_size])
p_h = tf.reshape(self._transfer(pos_matrix, pos_h_e), [-1, config.rel_size])
p_t = tf.reshape(self._transfer(pos_matrix, pos_t_e), [-1, config.rel_size])
p_r = pos_r_e
n_h = tf.reshape(self._transfer(neg_matrix, neg_h_e), [-1, config.rel_size])
n_t = tf.reshape(self._transfer(neg_matrix, neg_t_e), [-1, config.rel_size])
n_r = neg_r_e
_p_score = abs(p_h + p_r - p_t)
_p_score = tf.reshape(_p_score, [-1, 1, config.rel_size])
_n_score = abs(n_h + n_r - n_t)
_n_score = tf.reshape(_n_score, [-1, config.negative_ent + config.negative_rel, config.rel_size])
p_score = tf.reduce_sum(tf.reduce_mean(_p_score, 1, keep_dims=False), 1, keep_dims=True)
n_score = tf.reduce_sum(tf.reduce_mean(_n_score, 1, keep_dims=False), 1, keep_dims=True)
self.loss = tf.reduce_sum(tf.maximum(p_score - n_score + config.margin, 0))
|
CPL
|
positive
|
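The CPL `loss_def` pair above scores a triple with a translation-style distance |h + r - t| and trains with a margin ranking loss between positive and corrupted triples. A minimal NumPy sketch of that loss on toy embeddings (illustrative only, not the TensorFlow graph used in the repo):

import numpy as np

def margin_ranking_loss(h, r, t, h_neg, t_neg, margin=1.0):
    # Smaller |h + r - t| means a more plausible triple.
    pos = np.sum(np.abs(h + r - t), axis=-1)
    neg = np.sum(np.abs(h_neg + r - t_neg), axis=-1)
    # Hinge: push positive scores below negative scores by at least `margin`.
    return np.sum(np.maximum(pos - neg + margin, 0.0))

rng = np.random.default_rng(0)
h, r, t = (rng.normal(size=(4, 8)) for _ in range(3))        # toy embeddings
h_neg, t_neg = (rng.normal(size=(4, 8)) for _ in range(2))   # corrupted triples
print(margin_ranking_loss(h, r, t, h_neg, t_neg))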
def test_catalog_item_required(self):
"""Catalog item specification is required."""
self.data.pop('catalog_item_1')
<DeepExtract>
form = self.form_class(package=self.package, data=self.data)
</DeepExtract>
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertTrue('catalog_item' in form.errors)
self.assertEqual(PackageItem.objects.count(), 0)
|
def test_catalog_item_required(self):
"""Catalog item specification is required."""
self.data.pop('catalog_item_1')
form = self.form_class(package=self.package, data=self.data)
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertTrue('catalog_item' in form.errors)
self.assertEqual(PackageItem.objects.count(), 0)
|
CTS
|
positive
|
def visit_Subscript(self, node):
<DeepExtract>
if node.value is None:
return None
if isinstance(node.value, tuple):
return tuple([self.visit(n) for n in node.value])
try:
self.blame_stack.append((node.value.lineno, node.value.col_offset))
info = True
except AttributeError:
info = False
visitor = getattr(self, 'visit_%s' % node.value.__class__.__name__, None)
if visitor is None:
raise Exception('No handler for ``{}`` ({}).'.format(node.value.__class__.__name__, repr(node.value)))
ret = visitor(node.value)
if info:
self.blame_stack.pop()
return ret
</DeepExtract>
<DeepExtract>
if len('[') == 0:
return
if len(self.blame_stack) == 0:
if self.last is not None:
self.last = None
self.line_info.append((len(self.line), self.last))
elif self.last != self.blame_stack[-1]:
self.last = self.blame_stack[-1]
self.line_info.append((len(self.line), self.last))
self.line += '['
</DeepExtract>
if isinstance(node.slice, ast.Tuple) and node.slice.elts:
<DeepExtract>
if node.slice.elts[0] is None:
return None
if isinstance(node.slice.elts[0], tuple):
return tuple([self.visit(n) for n in node.slice.elts[0]])
try:
self.blame_stack.append((node.slice.elts[0].lineno, node.slice.elts[0].col_offset))
info = True
except AttributeError:
info = False
visitor = getattr(self, 'visit_%s' % node.slice.elts[0].__class__.__name__, None)
if visitor is None:
raise Exception('No handler for ``{}`` ({}).'.format(node.slice.elts[0].__class__.__name__, repr(node.slice.elts[0])))
ret = visitor(node.slice.elts[0])
if info:
self.blame_stack.pop()
return ret
</DeepExtract>
if len(node.slice.elts) == 1:
<DeepExtract>
if len(', ') == 0:
return
if len(self.blame_stack) == 0:
if self.last is not None:
self.last = None
self.line_info.append((len(self.line), self.last))
elif self.last != self.blame_stack[-1]:
self.last = self.blame_stack[-1]
self.line_info.append((len(self.line), self.last))
self.line += ', '
</DeepExtract>
else:
for dim in node.slice.elts[1:]:
<DeepExtract>
if len(', ') == 0:
return
if len(self.blame_stack) == 0:
if self.last is not None:
self.last = None
self.line_info.append((len(self.line), self.last))
elif self.last != self.blame_stack[-1]:
self.last = self.blame_stack[-1]
self.line_info.append((len(self.line), self.last))
self.line += ', '
</DeepExtract>
<DeepExtract>
if dim is None:
return None
if isinstance(dim, tuple):
return tuple([self.visit(n) for n in dim])
try:
self.blame_stack.append((dim.lineno, dim.col_offset))
info = True
except AttributeError:
info = False
visitor = getattr(self, 'visit_%s' % dim.__class__.__name__, None)
if visitor is None:
raise Exception('No handler for ``{}`` ({}).'.format(dim.__class__.__name__, repr(dim)))
ret = visitor(dim)
if info:
self.blame_stack.pop()
return ret
</DeepExtract>
elif isinstance(node.slice, ast.Slice):
<DeepExtract>
if True:
if getattr(node.slice, 'lower', None) is not None:
self.visit(node.slice.lower)
self._write(':')
if getattr(node.slice, 'upper', None) is not None:
self.visit(node.slice.upper)
if getattr(node.slice, 'step', None) is not None:
self._write(':')
self.visit(node.slice.step)
else:
self._write('slice(')
self.visit(getattr(node.slice, 'lower', None) or AST_NONE)
self._write(', ')
self.visit(getattr(node.slice, 'upper', None) or AST_NONE)
self._write(', ')
self.visit(getattr(node.slice, 'step', None) or AST_NONE)
self._write(')')
</DeepExtract>
else:
<DeepExtract>
if node.slice is None:
return None
if isinstance(node.slice, tuple):
return tuple([self.visit(n) for n in node.slice])
try:
self.blame_stack.append((node.slice.lineno, node.slice.col_offset))
info = True
except AttributeError:
info = False
visitor = getattr(self, 'visit_%s' % node.slice.__class__.__name__, None)
if visitor is None:
raise Exception('No handler for ``{}`` ({}).'.format(node.slice.__class__.__name__, repr(node.slice)))
ret = visitor(node.slice)
if info:
self.blame_stack.pop()
return ret
</DeepExtract>
<DeepExtract>
if len(']') == 0:
return
if len(self.blame_stack) == 0:
if self.last is not None:
self.last = None
self.line_info.append((len(self.line), self.last))
elif self.last != self.blame_stack[-1]:
self.last = self.blame_stack[-1]
self.line_info.append((len(self.line), self.last))
self.line += ']'
</DeepExtract>
|
def visit_Subscript(self, node):
if node.value is None:
return None
if isinstance(node.value, tuple):
return tuple([self.visit(n) for n in node.value])
try:
self.blame_stack.append((node.value.lineno, node.value.col_offset))
info = True
except AttributeError:
info = False
visitor = getattr(self, 'visit_%s' % node.value.__class__.__name__, None)
if visitor is None:
raise Exception('No handler for ``{}`` ({}).'.format(node.value.__class__.__name__, repr(node.value)))
ret = visitor(node.value)
if info:
self.blame_stack.pop()
return ret
if len('[') == 0:
return
if len(self.blame_stack) == 0:
if self.last is not None:
self.last = None
self.line_info.append((len(self.line), self.last))
elif self.last != self.blame_stack[-1]:
self.last = self.blame_stack[-1]
self.line_info.append((len(self.line), self.last))
self.line += '['
if isinstance(node.slice, ast.Tuple) and node.slice.elts:
if node.slice.elts[0] is None:
return None
if isinstance(node.slice.elts[0], tuple):
return tuple([self.visit(n) for n in node.slice.elts[0]])
try:
self.blame_stack.append((node.slice.elts[0].lineno, node.slice.elts[0].col_offset))
info = True
except AttributeError:
info = False
visitor = getattr(self, 'visit_%s' % node.slice.elts[0].__class__.__name__, None)
if visitor is None:
raise Exception('No handler for ``{}`` ({}).'.format(node.slice.elts[0].__class__.__name__, repr(node.slice.elts[0])))
ret = visitor(node.slice.elts[0])
if info:
self.blame_stack.pop()
return ret
if len(node.slice.elts) == 1:
if len(', ') == 0:
return
if len(self.blame_stack) == 0:
if self.last is not None:
self.last = None
self.line_info.append((len(self.line), self.last))
elif self.last != self.blame_stack[-1]:
self.last = self.blame_stack[-1]
self.line_info.append((len(self.line), self.last))
self.line += ', '
else:
for dim in node.slice.elts[1:]:
if len(', ') == 0:
return
if len(self.blame_stack) == 0:
if self.last is not None:
self.last = None
self.line_info.append((len(self.line), self.last))
elif self.last != self.blame_stack[-1]:
self.last = self.blame_stack[-1]
self.line_info.append((len(self.line), self.last))
self.line += ', '
if dim is None:
return None
if isinstance(dim, tuple):
return tuple([self.visit(n) for n in dim])
try:
self.blame_stack.append((dim.lineno, dim.col_offset))
info = True
except AttributeError:
info = False
visitor = getattr(self, 'visit_%s' % dim.__class__.__name__, None)
if visitor is None:
raise Exception('No handler for ``{}`` ({}).'.format(dim.__class__.__name__, repr(dim)))
ret = visitor(dim)
if info:
self.blame_stack.pop()
return ret
elif isinstance(node.slice, ast.Slice):
if True:
if getattr(node.slice, 'lower', None) is not None:
self.visit(node.slice.lower)
self._write(':')
if getattr(node.slice, 'upper', None) is not None:
self.visit(node.slice.upper)
if getattr(node.slice, 'step', None) is not None:
self._write(':')
self.visit(node.slice.step)
else:
self._write('slice(')
self.visit(getattr(node.slice, 'lower', None) or AST_NONE)
self._write(', ')
self.visit(getattr(node.slice, 'upper', None) or AST_NONE)
self._write(', ')
self.visit(getattr(node.slice, 'step', None) or AST_NONE)
self._write(')')
else:
if node.slice is None:
return None
if isinstance(node.slice, tuple):
return tuple([self.visit(n) for n in node.slice])
try:
self.blame_stack.append((node.slice.lineno, node.slice.col_offset))
info = True
except AttributeError:
info = False
visitor = getattr(self, 'visit_%s' % node.slice.__class__.__name__, None)
if visitor is None:
raise Exception('No handler for ``{}`` ({}).'.format(node.slice.__class__.__name__, repr(node.slice)))
ret = visitor(node.slice)
if info:
self.blame_stack.pop()
return ret
if len(']') == 0:
return
if len(self.blame_stack) == 0:
if self.last is not None:
self.last = None
self.line_info.append((len(self.line), self.last))
elif self.last != self.blame_stack[-1]:
self.last = self.blame_stack[-1]
self.line_info.append((len(self.line), self.last))
self.line += ']'
|
chameleon
|
positive
|
def run(*args, **kwargs):
from cuppa.log import initialise_logging
from cuppa.log import mask_secrets
import SCons.Errors
import cuppa.output
caller = getframeinfo(stack()[1][0])
sconstruct_path = caller.filename
initialise_logging()
try:
import cuppa.construct
cuppa.construct.run(sconstruct_path, *args, **kwargs)
except SCons.Errors.BuildError as error:
<DeepExtract>
from cuppa.log import logger
from cuppa.colourise import as_info
if not suppress:
logger.fatal('Cuppa terminated by exception [{}: {}]'.format(as_info(error.__class__.__name__), as_info(str(error))))
if not logger.isEnabledFor(logging.EXCEPTION):
logger.warn('Use {} (or above) to see the stack'.format(as_info('--verbosity=exception')))
logger.exception(traceback.format_exc())
</DeepExtract>
if len(error.args) >= 1:
error.args = (mask_secrets(str(error.args[0])),) + error.args[1:]
raise
except SCons.Errors.StopError as error:
<DeepExtract>
from cuppa.log import logger
from cuppa.colourise import as_info
if not suppress:
logger.fatal('Cuppa terminated by exception [{}: {}]'.format(as_info(error.__class__.__name__), as_info(str(error))))
if not logger.isEnabledFor(logging.EXCEPTION):
logger.warn('Use {} (or above) to see the stack'.format(as_info('--verbosity=exception')))
logger.exception(traceback.format_exc())
</DeepExtract>
if len(error.args) >= 1:
error.args = (mask_secrets(str(error.args[0])),) + error.args[1:]
raise
except SCons.Errors.UserError as error:
<DeepExtract>
from cuppa.log import logger
from cuppa.colourise import as_info
if not suppress:
logger.fatal('Cuppa terminated by exception [{}: {}]'.format(as_info(error.__class__.__name__), as_info(str(error))))
if not logger.isEnabledFor(logging.EXCEPTION):
logger.warn('Use {} (or above) to see the stack'.format(as_info('--verbosity=exception')))
logger.exception(traceback.format_exc())
</DeepExtract>
if len(error.args) >= 1:
error.args = (mask_secrets(str(error.args[0])),) + error.args[1:]
raise
except Exception as error:
<DeepExtract>
from cuppa.log import logger
from cuppa.colourise import as_info
if not suppress:
logger.fatal('Cuppa terminated by exception [{}: {}]'.format(as_info(error.__class__.__name__), as_info(str(error))))
if not logger.isEnabledFor(logging.EXCEPTION):
logger.warn('Use {} (or above) to see the stack'.format(as_info('--verbosity=exception')))
logger.exception(traceback.format_exc())
</DeepExtract>
if len(error.args) >= 1:
error.args = (mask_secrets(str(error.args[0])),) + error.args[1:]
raise SCons.Errors.StopError(error)
|
def run(*args, **kwargs):
from cuppa.log import initialise_logging
from cuppa.log import mask_secrets
import SCons.Errors
import cuppa.output
caller = getframeinfo(stack()[1][0])
sconstruct_path = caller.filename
initialise_logging()
try:
import cuppa.construct
cuppa.construct.run(sconstruct_path, *args, **kwargs)
except SCons.Errors.BuildError as error:
from cuppa.log import logger
from cuppa.colourise import as_info
if not suppress:
logger.fatal('Cuppa terminated by exception [{}: {}]'.format(as_info(error.__class__.__name__), as_info(str(error))))
if not logger.isEnabledFor(logging.EXCEPTION):
logger.warn('Use {} (or above) to see the stack'.format(as_info('--verbosity=exception')))
logger.exception(traceback.format_exc())
if len(error.args) >= 1:
error.args = (mask_secrets(str(error.args[0])),) + error.args[1:]
raise
except SCons.Errors.StopError as error:
from cuppa.log import logger
from cuppa.colourise import as_info
if not suppress:
logger.fatal('Cuppa terminated by exception [{}: {}]'.format(as_info(error.__class__.__name__), as_info(str(error))))
if not logger.isEnabledFor(logging.EXCEPTION):
logger.warn('Use {} (or above) to see the stack'.format(as_info('--verbosity=exception')))
logger.exception(traceback.format_exc())
if len(error.args) >= 1:
error.args = (mask_secrets(str(error.args[0])),) + error.args[1:]
raise
except SCons.Errors.UserError as error:
from cuppa.log import logger
from cuppa.colourise import as_info
if not suppress:
logger.fatal('Cuppa terminated by exception [{}: {}]'.format(as_info(error.__class__.__name__), as_info(str(error))))
if not logger.isEnabledFor(logging.EXCEPTION):
logger.warn('Use {} (or above) to see the stack'.format(as_info('--verbosity=exception')))
logger.exception(traceback.format_exc())
if len(error.args) >= 1:
error.args = (mask_secrets(str(error.args[0])),) + error.args[1:]
raise
except Exception as error:
from cuppa.log import logger
from cuppa.colourise import as_info
if not suppress:
logger.fatal('Cuppa terminated by exception [{}: {}]'.format(as_info(error.__class__.__name__), as_info(str(error))))
if not logger.isEnabledFor(logging.EXCEPTION):
logger.warn('Use {} (or above) to see the stack'.format(as_info('--verbosity=exception')))
logger.exception(traceback.format_exc())
if len(error.args) >= 1:
error.args = (mask_secrets(str(error.args[0])),) + error.args[1:]
raise SCons.Errors.StopError(error)
|
cuppa
|
positive
|
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8('Dialog'))
Dialog.resize(423, 172)
Dialog.setSizeGripEnabled(True)
self.gridLayout = QtGui.QGridLayout(Dialog)
self.gridLayout.setObjectName(_fromUtf8('gridLayout'))
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8('verticalLayout_2'))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8('horizontalLayout_2'))
self.label = QtGui.QLabel(Dialog)
self.label.setMinimumSize(QtCore.QSize(75, 0))
self.label.setObjectName(_fromUtf8('label'))
self.horizontalLayout_2.addWidget(self.label)
self.txtDomain = QtGui.QLineEdit(Dialog)
self.txtDomain.setObjectName(_fromUtf8('txtDomain'))
self.horizontalLayout_2.addWidget(self.txtDomain)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8('horizontalLayout_3'))
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setMinimumSize(QtCore.QSize(75, 0))
self.label_2.setObjectName(_fromUtf8('label_2'))
self.horizontalLayout_3.addWidget(self.label_2)
self.txtLootFileName = QtGui.QLineEdit(Dialog)
self.txtLootFileName.setObjectName(_fromUtf8('txtLootFileName'))
self.horizontalLayout_3.addWidget(self.txtLootFileName)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8('horizontalLayout'))
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.btnOk = QtGui.QPushButton(Dialog)
self.btnOk.setObjectName(_fromUtf8('btnOk'))
self.horizontalLayout.addWidget(self.btnOk)
self.btnCancel = QtGui.QPushButton(Dialog)
self.btnCancel.setObjectName(_fromUtf8('btnCancel'))
self.horizontalLayout.addWidget(self.btnCancel)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.gridLayout.addLayout(self.verticalLayout_2, 0, 0, 1, 1)
<DeepExtract>
Dialog.setWindowTitle(_translate('Dialog', 'Dialog', None))
self.label.setText(_translate('Dialog', 'Domain', None))
self.label_2.setText(_translate('Dialog', 'Loot File', None))
self.btnOk.setText(_translate('Dialog', 'OK', None))
self.btnCancel.setText(_translate('Dialog', 'Cancel', None))
</DeepExtract>
QtCore.QMetaObject.connectSlotsByName(Dialog)
|
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8('Dialog'))
Dialog.resize(423, 172)
Dialog.setSizeGripEnabled(True)
self.gridLayout = QtGui.QGridLayout(Dialog)
self.gridLayout.setObjectName(_fromUtf8('gridLayout'))
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8('verticalLayout_2'))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8('horizontalLayout_2'))
self.label = QtGui.QLabel(Dialog)
self.label.setMinimumSize(QtCore.QSize(75, 0))
self.label.setObjectName(_fromUtf8('label'))
self.horizontalLayout_2.addWidget(self.label)
self.txtDomain = QtGui.QLineEdit(Dialog)
self.txtDomain.setObjectName(_fromUtf8('txtDomain'))
self.horizontalLayout_2.addWidget(self.txtDomain)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8('horizontalLayout_3'))
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setMinimumSize(QtCore.QSize(75, 0))
self.label_2.setObjectName(_fromUtf8('label_2'))
self.horizontalLayout_3.addWidget(self.label_2)
self.txtLootFileName = QtGui.QLineEdit(Dialog)
self.txtLootFileName.setObjectName(_fromUtf8('txtLootFileName'))
self.horizontalLayout_3.addWidget(self.txtLootFileName)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8('horizontalLayout'))
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.btnOk = QtGui.QPushButton(Dialog)
self.btnOk.setObjectName(_fromUtf8('btnOk'))
self.horizontalLayout.addWidget(self.btnOk)
self.btnCancel = QtGui.QPushButton(Dialog)
self.btnCancel.setObjectName(_fromUtf8('btnCancel'))
self.horizontalLayout.addWidget(self.btnCancel)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.gridLayout.addLayout(self.verticalLayout_2, 0, 0, 1, 1)
Dialog.setWindowTitle(_translate('Dialog', 'Dialog', None))
self.label.setText(_translate('Dialog', 'Domain', None))
self.label_2.setText(_translate('Dialog', 'Loot File', None))
self.btnOk.setText(_translate('Dialog', 'OK', None))
self.btnCancel.setText(_translate('Dialog', 'Cancel', None))
QtCore.QMetaObject.connectSlotsByName(Dialog)
|
autoDANE
|
positive
|
def _targets_request(self, targets):
"""Formats each target for the request"""
targets_request = []
for target in targets:
target_request = scrub_none_parameters(snake_dict_to_camel_dict(target, True))
if target_request.get('Input', None):
<DeepExtract>
try:
json.loads(target_request['Input'])
target_request['Input'] = target_request['Input']
except json.decoder.JSONDecodeError:
target_request['Input'] = str(json.dumps(target_request['Input']))
</DeepExtract>
if target_request.get('InputTransformer', None):
if target_request.get('InputTransformer').get('InputTemplate', None):
<DeepExtract>
try:
json.loads(target_request['InputTransformer']['InputTemplate'])
target_request['InputTransformer']['InputTemplate'] = target_request['InputTransformer']['InputTemplate']
except json.decoder.JSONDecodeError:
target_request['InputTransformer']['InputTemplate'] = str(json.dumps(target_request['InputTransformer']['InputTemplate']))
</DeepExtract>
if target_request.get('InputTransformer').get('InputPathsMap', None):
target_request['InputTransformer']['InputPathsMap'] = target['input_transformer']['input_paths_map']
targets_request.append(target_request)
return targets_request
|
def _targets_request(self, targets):
"""Formats each target for the request"""
targets_request = []
for target in targets:
target_request = scrub_none_parameters(snake_dict_to_camel_dict(target, True))
if target_request.get('Input', None):
try:
json.loads(target_request['Input'])
target_request['Input'] = target_request['Input']
except json.decoder.JSONDecodeError:
target_request['Input'] = str(json.dumps(target_request['Input']))
if target_request.get('InputTransformer', None):
if target_request.get('InputTransformer').get('InputTemplate', None):
try:
json.loads(target_request['InputTransformer']['InputTemplate'])
target_request['InputTransformer']['InputTemplate'] = target_request['InputTransformer']['InputTemplate']
except json.decoder.JSONDecodeError:
target_request['InputTransformer']['InputTemplate'] = str(json.dumps(target_request['InputTransformer']['InputTemplate']))
if target_request.get('InputTransformer').get('InputPathsMap', None):
target_request['InputTransformer']['InputPathsMap'] = target['input_transformer']['input_paths_map']
targets_request.append(target_request)
return targets_request
|
amazon.aws
|
positive
|
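The `_targets_request` pair above normalizes the `Input` and `InputTemplate` fields so they always end up as JSON strings: a value that already parses as JSON passes through, anything else is serialized. A small sketch of that normalization (the helper name is hypothetical and the exception handling is slightly broader than in the module):

import json

def ensure_json_string(value):
    # Keep strings that already contain valid JSON; serialize everything else.
    try:
        json.loads(value)
        return value
    except (TypeError, json.JSONDecodeError):
        return json.dumps(value)

print(ensure_json_string('{"a": 1}'))  # kept as-is
print(ensure_json_string({'a': 1}))    # serialized to '{"a": 1}'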
def regularize(C12, E, idx, no_uniq_jitter):
"""Jitter (add noise).
After resampling some of the particles will be identical.
Therefore, if noise.is_deterministic: some noise must be added.
This is adjusted by the regularization 'reg' factor
    (so-named because Dirac-deltas are approximated by Gaussian kernels),
which controls the strength of the jitter.
This causes a bias. But, as N-->∞, the reg. bandwidth-->0, i.e. bias-->0.
Ref: `bib.doucet2001sequential`, section 12.2.2.
"""
E = E[idx]
if no_uniq_jitter:
<DeepExtract>
duplicates = idx == np.roll(idx, 1)
duplicates |= idx == np.roll(idx, -1)
dups = duplicates
</DeepExtract>
<DeepExtract>
        (N_, M) = C12.shape
        N = sum(dups)
        if N is None:
            N = N_
        if N_ > 2 * M:
            cholR = chol_reduce(C12)
            D = rnd.randn(N, cholR.shape[0])
            chi2 = np.sum(D ** 2, axis=1)
            sample = D @ cholR
        else:
            chi2_compensate_for_rank = min(M / N_, 1.0)
            D = rnd.randn(N, N_)
            chi2 = np.sum(D ** 2, axis=1) * chi2_compensate_for_rank
            sample = D @ C12
        (sample, chi2) = (sample, chi2)
</DeepExtract>
E[dups] += sample
else:
<DeepExtract>
        (N_, M) = C12.shape
        N = len(E)
        if N is None:
            N = N_
        if N_ > 2 * M:
            cholR = chol_reduce(C12)
            D = rnd.randn(N, cholR.shape[0])
            chi2 = np.sum(D ** 2, axis=1)
            sample = D @ cholR
        else:
            chi2_compensate_for_rank = min(M / N_, 1.0)
            D = rnd.randn(N, N_)
            chi2 = np.sum(D ** 2, axis=1) * chi2_compensate_for_rank
            sample = D @ C12
        (sample, chi2) = (sample, chi2)
</DeepExtract>
E += sample
return (E, chi2)
|
def regularize(C12, E, idx, no_uniq_jitter):
"""Jitter (add noise).
After resampling some of the particles will be identical.
Therefore, if noise.is_deterministic: some noise must be added.
This is adjusted by the regularization 'reg' factor
    (so-named because Dirac-deltas are approximated by Gaussian kernels),
which controls the strength of the jitter.
This causes a bias. But, as N-->∞, the reg. bandwidth-->0, i.e. bias-->0.
Ref: `bib.doucet2001sequential`, section 12.2.2.
"""
E = E[idx]
if no_uniq_jitter:
duplicates = idx == np.roll(idx, 1)
duplicates |= idx == np.roll(idx, -1)
dups = duplicates
(N_, M) = C12.shape
        N = sum(dups)
        if N is None:
            N = N_
        if N_ > 2 * M:
            cholR = chol_reduce(C12)
            D = rnd.randn(N, cholR.shape[0])
            chi2 = np.sum(D ** 2, axis=1)
            sample = D @ cholR
        else:
            chi2_compensate_for_rank = min(M / N_, 1.0)
            D = rnd.randn(N, N_)
chi2 = np.sum(D ** 2, axis=1) * chi2_compensate_for_rank
sample = D @ C12
(sample, chi2) = (sample, chi2)
E[dups] += sample
else:
(N_, M) = C12.shape
        N = len(E)
        if N is None:
            N = N_
        if N_ > 2 * M:
            cholR = chol_reduce(C12)
            D = rnd.randn(N, cholR.shape[0])
            chi2 = np.sum(D ** 2, axis=1)
            sample = D @ cholR
        else:
            chi2_compensate_for_rank = min(M / N_, 1.0)
            D = rnd.randn(N, N_)
chi2 = np.sum(D ** 2, axis=1) * chi2_compensate_for_rank
sample = D @ C12
(sample, chi2) = (sample, chi2)
E += sample
return (E, chi2)
|
DAPPER
|
positive
|
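The DAPPER `regularize` pair above adds Gaussian jitter (through `C12`) to particles duplicated by resampling. A toy NumPy sketch of the same idea, with a made-up ensemble and bandwidth rather than DAPPER's own objects:

import numpy as np

rng = np.random.default_rng(0)
N, M = 8, 3
E = rng.normal(size=(N, M))       # toy ensemble of N particles in M dimensions
w = rng.random(N)
w /= w.sum()                      # normalized importance weights

idx = rng.choice(N, size=N, p=w)  # multinomial resampling (creates duplicates)
E = E[idx]

# Jitter only the duplicated members, as in the `no_uniq_jitter` branch above.
dups = (idx == np.roll(idx, 1)) | (idx == np.roll(idx, -1))
reg = 0.1                         # regularization bandwidth (tuning factor)
E[dups] += reg * rng.normal(size=(int(dups.sum()), M))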
def _update_chunk_length(self):
if self.chunk_left is not None:
return
line = self._fp.fp.readline()
line = line.split(b';', 1)[0]
try:
self.chunk_left = int(line, 16)
except ValueError:
<DeepExtract>
if not self.closed:
self._fp.close()
if self._connection:
self._connection.close()
</DeepExtract>
raise httplib.IncompleteRead(line)
|
def _update_chunk_length(self):
if self.chunk_left is not None:
return
line = self._fp.fp.readline()
line = line.split(b';', 1)[0]
try:
self.chunk_left = int(line, 16)
except ValueError:
if not self.closed:
self._fp.close()
if self._connection:
self._connection.close()
raise httplib.IncompleteRead(line)
|
aws-waf-security-automation
|
positive
|
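`_update_chunk_length` above reads one chunk header of an HTTP chunked-transfer body: the size is hexadecimal and any `;extension` suffix is ignored. A standalone sketch of just that parsing step:

def parse_chunk_size(header_line: bytes) -> int:
    # Chunk headers look like b'1a3\r\n' or b'10;name=value\r\n'.
    size_field = header_line.split(b';', 1)[0].strip()
    return int(size_field, 16)

print(parse_chunk_size(b'1a3\r\n'))            # 419
print(parse_chunk_size(b'10;name=value\r\n'))  # 16, extension ignored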
def confirm_yarn_queue_availability(self, **kwargs: dict[str, Any] | None) -> None:
"""
Submitting jobs to yarn queue and then checking till the jobs are in running state
will lead to orphan jobs being created in some scenarios.
We take kernel_launch_timeout time and divide this into two parts.
If the queue is unavailable we take max 20% of the time to poll the queue periodically
and if the queue becomes available the rest of timeout is met in 80% of the remaining
time.
This algorithm is subject to change. Please read the below cases to understand
when and how checks are applied.
Confirms if the yarn queue has capacity to handle the resource requests that
will be sent to it.
First check ensures the driver and executor memory request falls within
the container size of yarn configuration. This check requires executor and
driver memory to be available in the env.
    Second, the current version of the check takes into consideration node label partitioning
on given queues. Provided the queue name and node label this checks if
the given partition has capacity available for kernel startup.
All Checks are optional. If we have KERNEL_EXECUTOR_MEMORY and KERNEL_DRIVER_MEMORY
specified, first check is performed.
If we have KERNEL_QUEUE and KERNEL_NODE_LABEL specified, second check is performed.
Proper error messages are sent back for user experience
:param kwargs:
:return:
"""
env_dict = kwargs.get('env', {})
executor_memory = int(env_dict.get('KERNEL_EXECUTOR_MEMORY', 0))
driver_memory = int(env_dict.get('KERNEL_DRIVER_MEMORY', 0))
if executor_memory * driver_memory > 0:
container_memory = self.resource_mgr.cluster_node_container_memory()
if max(executor_memory, driver_memory) > container_memory:
self.log_and_raise(http_status_code=500, reason='Container Memory not sufficient for a executor/driver allocation')
candidate_queue_name = env_dict.get('KERNEL_QUEUE', None)
node_label = env_dict.get('KERNEL_NODE_LABEL', None)
partition_availability_threshold = float(env_dict.get('YARN_PARTITION_THRESHOLD', 95.0))
if candidate_queue_name is None or node_label is None:
return
self.start_time = RemoteProcessProxy.get_current_time()
self.candidate_queue = self.resource_mgr.cluster_scheduler_queue(candidate_queue_name)
if self.candidate_queue is None:
self.log.warning('Queue: {} not found in cluster.Availability check will not be performed'.format(candidate_queue_name))
return
self.candidate_partition = self.resource_mgr.cluster_queue_partition(self.candidate_queue, node_label)
if self.candidate_partition is None:
self.log.debug('Partition: {} not found in {} queue.Availability check will not be performed'.format(node_label, candidate_queue_name))
return
self.log.debug('Checking endpoint: {} if partition: {} has used capacity <= {}%'.format(self.yarn_endpoint, self.candidate_partition, partition_availability_threshold))
yarn_available = self.resource_mgr.cluster_scheduler_queue_availability(self.candidate_partition, partition_availability_threshold)
if not yarn_available:
self.log.debug('Retrying for {} ms since resources are not available'.format(self.yarn_resource_check_wait_time))
while not yarn_available:
<DeepExtract>
time.sleep(poll_interval)
time_interval = RemoteProcessProxy.get_time_diff(self.start_time, RemoteProcessProxy.get_current_time())
if time_interval > self.yarn_resource_check_wait_time:
error_http_code = 500
reason = 'Yarn Compute Resource is unavailable after {} seconds'.format(self.yarn_resource_check_wait_time)
self.log_and_raise(http_status_code=error_http_code, reason=reason)
</DeepExtract>
yarn_available = self.resource_mgr.cluster_scheduler_queue_availability(self.candidate_partition, partition_availability_threshold)
self.kernel_launch_timeout -= RemoteProcessProxy.get_time_diff(self.start_time, RemoteProcessProxy.get_current_time())
|
def confirm_yarn_queue_availability(self, **kwargs: dict[str, Any] | None) -> None:
"""
Submitting jobs to yarn queue and then checking till the jobs are in running state
will lead to orphan jobs being created in some scenarios.
We take kernel_launch_timeout time and divide this into two parts.
If the queue is unavailable we take max 20% of the time to poll the queue periodically
and if the queue becomes available the rest of timeout is met in 80% of the remaining
time.
This algorithm is subject to change. Please read the below cases to understand
when and how checks are applied.
Confirms if the yarn queue has capacity to handle the resource requests that
will be sent to it.
First check ensures the driver and executor memory request falls within
the container size of yarn configuration. This check requires executor and
driver memory to be available in the env.
    Second, the current version of the check takes into consideration node label partitioning
on given queues. Provided the queue name and node label this checks if
the given partition has capacity available for kernel startup.
All Checks are optional. If we have KERNEL_EXECUTOR_MEMORY and KERNEL_DRIVER_MEMORY
specified, first check is performed.
If we have KERNEL_QUEUE and KERNEL_NODE_LABEL specified, second check is performed.
Proper error messages are sent back for user experience
:param kwargs:
:return:
"""
env_dict = kwargs.get('env', {})
executor_memory = int(env_dict.get('KERNEL_EXECUTOR_MEMORY', 0))
driver_memory = int(env_dict.get('KERNEL_DRIVER_MEMORY', 0))
if executor_memory * driver_memory > 0:
container_memory = self.resource_mgr.cluster_node_container_memory()
if max(executor_memory, driver_memory) > container_memory:
self.log_and_raise(http_status_code=500, reason='Container Memory not sufficient for a executor/driver allocation')
candidate_queue_name = env_dict.get('KERNEL_QUEUE', None)
node_label = env_dict.get('KERNEL_NODE_LABEL', None)
partition_availability_threshold = float(env_dict.get('YARN_PARTITION_THRESHOLD', 95.0))
if candidate_queue_name is None or node_label is None:
return
self.start_time = RemoteProcessProxy.get_current_time()
self.candidate_queue = self.resource_mgr.cluster_scheduler_queue(candidate_queue_name)
if self.candidate_queue is None:
self.log.warning('Queue: {} not found in cluster.Availability check will not be performed'.format(candidate_queue_name))
return
self.candidate_partition = self.resource_mgr.cluster_queue_partition(self.candidate_queue, node_label)
if self.candidate_partition is None:
self.log.debug('Partition: {} not found in {} queue.Availability check will not be performed'.format(node_label, candidate_queue_name))
return
self.log.debug('Checking endpoint: {} if partition: {} has used capacity <= {}%'.format(self.yarn_endpoint, self.candidate_partition, partition_availability_threshold))
yarn_available = self.resource_mgr.cluster_scheduler_queue_availability(self.candidate_partition, partition_availability_threshold)
if not yarn_available:
self.log.debug('Retrying for {} ms since resources are not available'.format(self.yarn_resource_check_wait_time))
while not yarn_available:
time.sleep(poll_interval)
time_interval = RemoteProcessProxy.get_time_diff(self.start_time, RemoteProcessProxy.get_current_time())
if time_interval > self.yarn_resource_check_wait_time:
error_http_code = 500
reason = 'Yarn Compute Resource is unavailable after {} seconds'.format(self.yarn_resource_check_wait_time)
self.log_and_raise(http_status_code=error_http_code, reason=reason)
yarn_available = self.resource_mgr.cluster_scheduler_queue_availability(self.candidate_partition, partition_availability_threshold)
self.kernel_launch_timeout -= RemoteProcessProxy.get_time_diff(self.start_time, RemoteProcessProxy.get_current_time())
|
enterprise_gateway
|
positive
|
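The `confirm_yarn_queue_availability` pair above spends a bounded fraction of `kernel_launch_timeout` polling the YARN queue and hands the remaining time back for the launch itself. A simplified, hypothetical version of that budgeted polling loop (`wait_for_capacity` and its arguments are not part of enterprise_gateway):

import time

def wait_for_capacity(is_available, launch_timeout_s, poll_interval_s=5.0, wait_fraction=0.2):
    # Poll until `is_available()` is True, spending at most `wait_fraction` of the
    # launch timeout, then return the timeout reduced by the time spent waiting.
    start = time.monotonic()
    budget = launch_timeout_s * wait_fraction
    while not is_available():
        waited = time.monotonic() - start
        if waited > budget:
            raise TimeoutError(f'compute resource unavailable after {waited:.0f}s')
        time.sleep(poll_interval_s)
    return launch_timeout_s - (time.monotonic() - start)

remaining = wait_for_capacity(lambda: True, launch_timeout_s=60)  # ~60, nothing to wait for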
def reverse(self, x, adj, latent, mode, edge_index=None):
"""
Args:
x: generated subgraph node features so far with shape (1, N, 9), some part of the x is masked
        adj: generated subgraph adjacency features so far with shape (1, 4, N, N), some part of the adj is masked
latent: sample latent vector with shape (1, 9) (mode == 0) or (1, 4) (mode == 1)
mode: generation mode. if mode == 0, generate a new node, if mode == 1, generate a new edge
edge_index [1, 2]
Returns:
out: generated node/edge features with shape (1, 9) (mode == 0) or (1, 4) , (mode == 1)
"""
assert mode == 0 or edge_index is not None, 'if you want to generate edge, you must specify edge_index'
assert x.size(0) == 1
assert adj.size(0) == 1
assert edge_index is None or (edge_index.size(0) == 1 and edge_index.size(1) == 2)
if mode == 0:
st_net = self.node_st_net
<DeepExtract>
adj = adj[:, :3]
node_emb = self.rgcn(x, adj)
if hasattr(self, 'batchNorm'):
node_emb = self.batchNorm(node_emb.transpose(1, 2)).transpose(1, 2)
graph_emb = torch.sum(node_emb, dim=1, keepdim=False).contiguous()
emb = graph_emb
</DeepExtract>
else:
st_net = self.edge_st_net
<DeepExtract>
batch_size = x.size(0)
assert batch_size == edge_index.size(0)
adj = adj[:, :3]
node_emb = self.rgcn(x, adj)
if hasattr(self, 'batchNorm'):
node_emb = self.batchNorm(node_emb.transpose(1, 2)).transpose(1, 2)
graph_emb = torch.sum(node_emb, dim=1, keepdim=False).contiguous().view(batch_size, 1, -1)
edge_index = edge_index.view(batch_size, -1, 1).repeat(1, 1, self.emb_size)
graph_node_emb = torch.cat((torch.gather(node_emb, dim=1, index=edge_index), graph_emb), dim=1)
graph_node_emb = graph_node_emb.view(batch_size, -1)
emb = graph_node_emb
</DeepExtract>
for i in reversed(range(self.num_flow_layer)):
(s, t) = st_net[i](emb)
if self.st_type == 'sigmoid':
latent = (latent - t) / s
elif self.st_type == 'exp':
s = s.exp()
latent = latent / s - t
elif self.st_type == 'softplus':
latent = latent / s - t
else:
raise ValueError('unsupported st type')
return latent
|
def reverse(self, x, adj, latent, mode, edge_index=None):
"""
Args:
x: generated subgraph node features so far with shape (1, N, 9), some part of the x is masked
        adj: generated subgraph adjacency features so far with shape (1, 4, N, N), some part of the adj is masked
latent: sample latent vector with shape (1, 9) (mode == 0) or (1, 4) (mode == 1)
mode: generation mode. if mode == 0, generate a new node, if mode == 1, generate a new edge
edge_index [1, 2]
Returns:
out: generated node/edge features with shape (1, 9) (mode == 0) or (1, 4) , (mode == 1)
"""
assert mode == 0 or edge_index is not None, 'if you want to generate edge, you must specify edge_index'
assert x.size(0) == 1
assert adj.size(0) == 1
assert edge_index is None or (edge_index.size(0) == 1 and edge_index.size(1) == 2)
if mode == 0:
st_net = self.node_st_net
adj = adj[:, :3]
node_emb = self.rgcn(x, adj)
if hasattr(self, 'batchNorm'):
node_emb = self.batchNorm(node_emb.transpose(1, 2)).transpose(1, 2)
graph_emb = torch.sum(node_emb, dim=1, keepdim=False).contiguous()
emb = graph_emb
else:
st_net = self.edge_st_net
batch_size = x.size(0)
assert batch_size == edge_index.size(0)
adj = adj[:, :3]
node_emb = self.rgcn(x, adj)
if hasattr(self, 'batchNorm'):
node_emb = self.batchNorm(node_emb.transpose(1, 2)).transpose(1, 2)
graph_emb = torch.sum(node_emb, dim=1, keepdim=False).contiguous().view(batch_size, 1, -1)
edge_index = edge_index.view(batch_size, -1, 1).repeat(1, 1, self.emb_size)
graph_node_emb = torch.cat((torch.gather(node_emb, dim=1, index=edge_index), graph_emb), dim=1)
graph_node_emb = graph_node_emb.view(batch_size, -1)
emb = graph_node_emb
for i in reversed(range(self.num_flow_layer)):
(s, t) = st_net[i](emb)
if self.st_type == 'sigmoid':
latent = (latent - t) / s
elif self.st_type == 'exp':
s = s.exp()
latent = latent / s - t
elif self.st_type == 'softplus':
latent = latent / s - t
else:
raise ValueError('unsupported st type')
return latent
|
DIG
|
positive
|
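The DIG `reverse` pair above is the inverse pass of an affine coupling layer: given the conditioner's `(s, t)`, the latent is mapped back through `(z - t) / s` (or the `exp` variant). A toy sketch pairing that inverse with the forward map it implies; the forward form here is inferred for illustration, not taken from DIG:

import torch

def couple_forward(x, s, t, st_type='sigmoid'):
    # Forward affine coupling (training direction): x -> z.
    if st_type == 'sigmoid':
        return x * s + t
    if st_type == 'exp':
        return (x + t) * s.exp()
    raise ValueError('unsupported st type')

def couple_reverse(z, s, t, st_type='sigmoid'):
    # Inverse (generation direction), mirroring `reverse` above.
    if st_type == 'sigmoid':
        return (z - t) / s
    if st_type == 'exp':
        return z / s.exp() - t
    raise ValueError('unsupported st type')

x = torch.randn(1, 9)
s, t = torch.rand(1, 9) + 0.5, torch.randn(1, 9)
z = couple_forward(x, s, t)
assert torch.allclose(couple_reverse(z, s, t), x, atol=1e-5)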
def score(data, labels, batch_size=128):
self.model.training = False
y_preds = [[] for c in self.num_classes]
losses = []
for start in range(0, len(data), batch_size):
if start + batch_size < len(data):
stop = start + batch_size
else:
stop = len(data)
<DeepExtract>
predictions = self.model(data[start:stop], training=False)
loss = 0
for i in range(self.num_tasks):
loss += self.loss_object([lIndex[start:stop] for lIndex in labels][i], predictions[i]) / self.num_tasks
(predictions, loss) = (predictions, loss)
</DeepExtract>
for (i, p) in enumerate(predictions):
y_preds[i].extend(np.argmax(p, 1))
losses.append(loss)
sys.stdout.write('processed %i of %i records \r' % (stop, len(data)))
sys.stdout.flush()
scores = []
for i in range(self.num_tasks):
micro = f1_score(labels[i], y_preds[i], average='micro')
macro = f1_score(labels[i], y_preds[i], average='macro')
scores.append([micro, macro])
print()
return (scores, np.mean(losses))
|
def score(data, labels, batch_size=128):
self.model.training = False
y_preds = [[] for c in self.num_classes]
losses = []
for start in range(0, len(data), batch_size):
if start + batch_size < len(data):
stop = start + batch_size
else:
stop = len(data)
predictions = self.model(data[start:stop], training=False)
loss = 0
for i in range(self.num_tasks):
loss += self.loss_object([lIndex[start:stop] for lIndex in labels][i], predictions[i]) / self.num_tasks
(predictions, loss) = (predictions, loss)
for (i, p) in enumerate(predictions):
y_preds[i].extend(np.argmax(p, 1))
losses.append(loss)
sys.stdout.write('processed %i of %i records \r' % (stop, len(data)))
sys.stdout.flush()
scores = []
for i in range(self.num_tasks):
micro = f1_score(labels[i], y_preds[i], average='micro')
macro = f1_score(labels[i], y_preds[i], average='macro')
scores.append([micro, macro])
print()
return (scores, np.mean(losses))
|
Benchmarks
|
positive
|
def find_line(varname):
<DeepExtract>
ls = list(filter(lambda l: varname in l, list(open('python_lab.py'))))
</DeepExtract>
return ls[0] if len(ls) else None
|
def find_line(varname):
ls = list(filter(lambda l: varname in l, list(open('python_lab.py'))))
return ls[0] if len(ls) else None
|
coding-the-matrix
|
positive
|
def run(self, tasks: Union[str, List[str]]=None, folds: Union[int, List[int]]=None):
"""
:param tasks: a single task name [str] or a list of task names to run. If None, then the whole benchmark will be used.
:param folds: a fold [int] or a list of folds to run. If None, then the all folds from each task definition will be used.
"""
try:
assert not self.framework_install_required or self._is_setup_done(), f'Framework {self.framework_name} [{self.framework_def.version}] is not installed.'
<DeepExtract>
task_defs = self._benchmark_tasks() if tasks is None else [self._get_task_def(name) for name in tasks] if isinstance(tasks, list) else [self._get_task_def(tasks)]
if len(task_defs) == 0:
raise ValueError('No task available.')
task_defs = task_defs
</DeepExtract>
jobs = flatten([self._task_jobs(task_def, folds) for task_def in task_defs])
<DeepExtract>
if not jobs:
results = []
self.job_runner = self._create_job_runner(jobs)
def on_interrupt(*_):
log.warning('*** SESSION CANCELLED BY USER ***')
log.warning('*** Please wait for the application to terminate gracefully ***')
self.job_runner.stop()
self.cleanup()
try:
with signal_handler(signal.SIGINT, on_interrupt):
with OSMonitoring(name=jobs[0].name if len(jobs) == 1 else None, interval_seconds=rconfig().monitoring.interval_seconds, check_on_exit=True, statistics=rconfig().monitoring.statistics, verbosity=rconfig().monitoring.verbosity):
self.job_runner.start()
except (KeyboardInterrupt, InterruptedError):
pass
finally:
results = self.job_runner.results
for res in results:
if res.result is not None and math.isnan(res.result.duration):
res.result.duration = res.duration
results = results
</DeepExtract>
log.info(f'Processing results for {self.sid}')
log.debug(results)
if tasks is None:
<DeepExtract>
scores = list(filter(None, flatten([res.result for res in results])))
if len(scores) == 0:
scoreboard = None
board = Scoreboard(scores, framework_name=self.framework_name, task_name=task_name, scores_dir=self.output_dirs.scores) if task_name else Scoreboard(scores, framework_name=self.framework_name, benchmark_name=self.benchmark_name, scores_dir=self.output_dirs.scores)
if rconfig().results.save:
self._save(board)
log.info('Summing up scores for current run:\n%s', board.as_printable_data_frame(verbosity=2).dropna(how='all', axis='columns').to_string(index=False))
scoreboard = board.as_data_frame()
</DeepExtract>
else:
for task_def in task_defs:
task_results = filter(lambda res: res.result is not None and res.result.task == task_def.name, results)
<DeepExtract>
scores = list(filter(None, flatten([res.result for res in task_results])))
if len(scores) == 0:
scoreboard = None
board = Scoreboard(scores, framework_name=self.framework_name, task_name=task_def.name, scores_dir=self.output_dirs.scores) if task_def.name else Scoreboard(scores, framework_name=self.framework_name, benchmark_name=self.benchmark_name, scores_dir=self.output_dirs.scores)
if rconfig().results.save:
self._save(board)
log.info('Summing up scores for current run:\n%s', board.as_printable_data_frame(verbosity=2).dropna(how='all', axis='columns').to_string(index=False))
scoreboard = board.as_data_frame()
</DeepExtract>
return scoreboard
finally:
<DeepExtract>
pass
</DeepExtract>
|
def run(self, tasks: Union[str, List[str]]=None, folds: Union[int, List[int]]=None):
"""
:param tasks: a single task name [str] or a list of task names to run. If None, then the whole benchmark will be used.
:param folds: a fold [int] or a list of folds to run. If None, then the all folds from each task definition will be used.
"""
try:
assert not self.framework_install_required or self._is_setup_done(), f'Framework {self.framework_name} [{self.framework_def.version}] is not installed.'
task_defs = self._benchmark_tasks() if tasks is None else [self._get_task_def(name) for name in tasks] if isinstance(tasks, list) else [self._get_task_def(tasks)]
if len(task_defs) == 0:
raise ValueError('No task available.')
task_defs = task_defs
jobs = flatten([self._task_jobs(task_def, folds) for task_def in task_defs])
if not jobs:
results = []
self.job_runner = self._create_job_runner(jobs)
def on_interrupt(*_):
log.warning('*** SESSION CANCELLED BY USER ***')
log.warning('*** Please wait for the application to terminate gracefully ***')
self.job_runner.stop()
self.cleanup()
try:
with signal_handler(signal.SIGINT, on_interrupt):
with OSMonitoring(name=jobs[0].name if len(jobs) == 1 else None, interval_seconds=rconfig().monitoring.interval_seconds, check_on_exit=True, statistics=rconfig().monitoring.statistics, verbosity=rconfig().monitoring.verbosity):
self.job_runner.start()
except (KeyboardInterrupt, InterruptedError):
pass
finally:
results = self.job_runner.results
for res in results:
if res.result is not None and math.isnan(res.result.duration):
res.result.duration = res.duration
results = results
log.info(f'Processing results for {self.sid}')
log.debug(results)
if tasks is None:
scores = list(filter(None, flatten([res.result for res in results])))
if len(scores) == 0:
scoreboard = None
board = Scoreboard(scores, framework_name=self.framework_name, task_name=task_name, scores_dir=self.output_dirs.scores) if task_name else Scoreboard(scores, framework_name=self.framework_name, benchmark_name=self.benchmark_name, scores_dir=self.output_dirs.scores)
if rconfig().results.save:
self._save(board)
log.info('Summing up scores for current run:\n%s', board.as_printable_data_frame(verbosity=2).dropna(how='all', axis='columns').to_string(index=False))
scoreboard = board.as_data_frame()
else:
for task_def in task_defs:
task_results = filter(lambda res: res.result is not None and res.result.task == task_def.name, results)
scores = list(filter(None, flatten([res.result for res in task_results])))
if len(scores) == 0:
scoreboard = None
board = Scoreboard(scores, framework_name=self.framework_name, task_name=task_def.name, scores_dir=self.output_dirs.scores) if task_def.name else Scoreboard(scores, framework_name=self.framework_name, benchmark_name=self.benchmark_name, scores_dir=self.output_dirs.scores)
if rconfig().results.save:
self._save(board)
log.info('Summing up scores for current run:\n%s', board.as_printable_data_frame(verbosity=2).dropna(how='all', axis='columns').to_string(index=False))
scoreboard = board.as_data_frame()
return scoreboard
finally:
pass
|
automlbenchmark
|
positive
|
def test_convert(self):
M_hat = self.M.convert('poly').convert('mask')
P_hat = self.P.convert('mask').convert('poly')
<DeepExtract>
diff = self.M.get_mask_tensor() - M_hat.get_mask_tensor()
diff = torch.sum(torch.abs(diff.float())).item()
diff_mask = diff
</DeepExtract>
<DeepExtract>
diff = self.P.get_mask_tensor() - P_hat.get_mask_tensor()
diff = torch.sum(torch.abs(diff.float())).item()
diff_poly = diff
</DeepExtract>
self.assertTrue(diff_mask == diff_poly)
self.assertTrue(diff_mask <= 8169.0)
self.assertTrue(diff_poly <= 8169.0)
|
def test_convert(self):
M_hat = self.M.convert('poly').convert('mask')
P_hat = self.P.convert('mask').convert('poly')
diff = self.M.get_mask_tensor() - M_hat.get_mask_tensor()
diff = torch.sum(torch.abs(diff.float())).item()
diff_mask = diff
diff = self.P.get_mask_tensor() - P_hat.get_mask_tensor()
diff = torch.sum(torch.abs(diff.float())).item()
diff_poly = diff
self.assertTrue(diff_mask == diff_poly)
self.assertTrue(diff_mask <= 8169.0)
self.assertTrue(diff_poly <= 8169.0)
|
CenterMask
|
positive
|
def _do_refresh_request(http_request):
"""Refresh the access_token using the refresh_token.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
<DeepExtract>
body = urllib.parse.urlencode({'grant_type': 'refresh_token', 'client_id': self.client_id, 'client_secret': self.client_secret, 'refresh_token': self.refresh_token})
body = body
</DeepExtract>
<DeepExtract>
headers = {'content-type': 'application/x-www-form-urlencoded'}
if self.user_agent is not None:
headers['user-agent'] = self.user_agent
headers = headers
</DeepExtract>
logger.info('Refreshing access_token')
(resp, content) = http_request(self.token_uri, method='POST', body=body, headers=headers)
if six.PY3 and isinstance(content, bytes):
content = content.decode('utf-8')
if resp.status == 200:
d = json.loads(content)
self.token_response = d
self.access_token = d['access_token']
self.refresh_token = d.get('refresh_token', self.refresh_token)
if 'expires_in' in d:
self.token_expiry = datetime.timedelta(seconds=int(d['expires_in'])) + datetime.datetime.utcnow()
else:
self.token_expiry = None
self.invalid = False
if self.store:
self.store.locked_put(self)
else:
logger.info('Failed to retrieve access token: %s', content)
error_msg = 'Invalid response %s.' % resp['status']
try:
d = json.loads(content)
if 'error' in d:
error_msg = d['error']
if 'error_description' in d:
error_msg += ': ' + d['error_description']
self.invalid = True
if self.store:
self.store.locked_put(self)
except (TypeError, ValueError):
pass
raise AccessTokenRefreshError(error_msg)
|
def _do_refresh_request(http_request):
"""Refresh the access_token using the refresh_token.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
body = urllib.parse.urlencode({'grant_type': 'refresh_token', 'client_id': self.client_id, 'client_secret': self.client_secret, 'refresh_token': self.refresh_token})
body = body
headers = {'content-type': 'application/x-www-form-urlencoded'}
if self.user_agent is not None:
headers['user-agent'] = self.user_agent
headers = headers
logger.info('Refreshing access_token')
(resp, content) = http_request(self.token_uri, method='POST', body=body, headers=headers)
if six.PY3 and isinstance(content, bytes):
content = content.decode('utf-8')
if resp.status == 200:
d = json.loads(content)
self.token_response = d
self.access_token = d['access_token']
self.refresh_token = d.get('refresh_token', self.refresh_token)
if 'expires_in' in d:
self.token_expiry = datetime.timedelta(seconds=int(d['expires_in'])) + datetime.datetime.utcnow()
else:
self.token_expiry = None
self.invalid = False
if self.store:
self.store.locked_put(self)
else:
logger.info('Failed to retrieve access token: %s', content)
error_msg = 'Invalid response %s.' % resp['status']
try:
d = json.loads(content)
if 'error' in d:
error_msg = d['error']
if 'error_description' in d:
error_msg += ': ' + d['error_description']
self.invalid = True
if self.store:
self.store.locked_put(self)
except (TypeError, ValueError):
pass
raise AccessTokenRefreshError(error_msg)
|
CUPS-Cloud-Print
|
positive
|
@classmethod
def create(cls, opt_func, lr, layer_groups, **kwargs):
"""Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."""
<DeepExtract>
split_groups = []
for l in layer_groups:
(l1, l2) = ([], [])
for c in l.children():
if isinstance(c, bn_types):
l2.append(c)
else:
l1.append(c)
split_groups += [nn.Sequential(*l1), nn.Sequential(*l2)]
split_groups = split_groups
</DeepExtract>
opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups])
opt = cls(opt, **kwargs)
(opt.lr, opt.opt_func) = (listify(lr, layer_groups), opt_func)
return opt
|
@classmethod
def create(cls, opt_func, lr, layer_groups, **kwargs):
"""Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."""
split_groups = []
for l in layer_groups:
(l1, l2) = ([], [])
for c in l.children():
if isinstance(c, bn_types):
l2.append(c)
else:
l1.append(c)
split_groups += [nn.Sequential(*l1), nn.Sequential(*l2)]
split_groups = split_groups
opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups])
opt = cls(opt, **kwargs)
(opt.lr, opt.opt_func) = (listify(lr, layer_groups), opt_func)
return opt
|
alfred
|
positive
|
@pytest.mark.parametrize('knn_methods', knn_methods)
def test_knorau(knn_methods):
<DeepExtract>
rng = np.random.RandomState(123456)
(X_dsel, X_test, X_train, y_dsel, y_test, y_train) = load_dataset(encode_labels, rng)
model = LogisticRegression(C=1, random_state=rng)
pool_classifiers = BaggingClassifier(model, n_estimators=100, n_jobs=-1, random_state=rng)
pool_classifiers.fit(X_train, y_train)
(pool_classifiers, X_dsel, y_dsel, X_test, y_test) = (pool_classifiers, X_dsel, y_dsel, X_test, y_test)
</DeepExtract>
knorau = KNORAU(pool_classifiers, knn_classifier=knn_methods)
knorau.fit(X_dsel, y_dsel)
assert np.isclose(knorau.score(X_test, y_test), 0.9787234042553191)
|
@pytest.mark.parametrize('knn_methods', knn_methods)
def test_knorau(knn_methods):
rng = np.random.RandomState(123456)
(X_dsel, X_test, X_train, y_dsel, y_test, y_train) = load_dataset(encode_labels, rng)
model = LogisticRegression(C=1, random_state=rng)
pool_classifiers = BaggingClassifier(model, n_estimators=100, n_jobs=-1, random_state=rng)
pool_classifiers.fit(X_train, y_train)
(pool_classifiers, X_dsel, y_dsel, X_test, y_test) = (pool_classifiers, X_dsel, y_dsel, X_test, y_test)
knorau = KNORAU(pool_classifiers, knn_classifier=knn_methods)
knorau.fit(X_dsel, y_dsel)
assert np.isclose(knorau.score(X_test, y_test), 0.9787234042553191)
|
DESlib
|
positive
|
def to_dsl(parsed, optimize=True, schema=None):
"""Convert KQL to Elasticsearch Query DSL."""
if not isinstance(parsed, ast.KqlNode):
<DeepExtract>
if isinstance(parsed, bytes):
parsed = parsed.decode('utf-8')
lark_parsed = lark_parse(parsed)
converted = KqlParser(parsed, schema=schema).visit(lark_parsed)
parsed = converted.optimize(recursive=True) if optimize else converted
</DeepExtract>
return ToDsl.convert(parsed)
|
def to_dsl(parsed, optimize=True, schema=None):
"""Convert KQL to Elasticsearch Query DSL."""
if not isinstance(parsed, ast.KqlNode):
if isinstance(parsed, bytes):
parsed = parsed.decode('utf-8')
lark_parsed = lark_parse(parsed)
converted = KqlParser(parsed, schema=schema).visit(lark_parsed)
parsed = converted.optimize(recursive=True) if optimize else converted
return ToDsl.convert(parsed)
|
detection-rules
|
positive
|
def show_bboxes(axes, bboxes, labels=None, colors=None):
"""Show bounding boxes.
Defined in :numref:`sec_anchor`"""
def make_list(obj, default_values=None):
if obj is None:
obj = default_values
elif not isinstance(obj, (list, tuple)):
obj = [obj]
return obj
<DeepExtract>
if labels is None:
labels = default_values
elif not isinstance(labels, (list, tuple)):
labels = [labels]
labels = labels
</DeepExtract>
<DeepExtract>
if colors is None:
colors = ['b', 'g', 'r', 'm', 'c']
elif not isinstance(colors, (list, tuple)):
colors = [colors]
colors = colors
</DeepExtract>
for (i, bbox) in enumerate(bboxes):
color = colors[i % len(colors)]
rect = d2l.bbox_to_rect(d2l.numpy(bbox), color)
axes.add_patch(rect)
if labels and len(labels) > i:
text_color = 'k' if color == 'w' else 'w'
axes.text(rect.xy[0], rect.xy[1], labels[i], va='center', ha='center', fontsize=9, color=text_color, bbox=dict(facecolor=color, lw=0))
|
def show_bboxes(axes, bboxes, labels=None, colors=None):
"""Show bounding boxes.
Defined in :numref:`sec_anchor`"""
def make_list(obj, default_values=None):
if obj is None:
obj = default_values
elif not isinstance(obj, (list, tuple)):
obj = [obj]
return obj
if labels is None:
labels = default_values
elif not isinstance(labels, (list, tuple)):
labels = [labels]
labels = labels
if colors is None:
colors = ['b', 'g', 'r', 'm', 'c']
elif not isinstance(colors, (list, tuple)):
colors = [colors]
colors = colors
for (i, bbox) in enumerate(bboxes):
color = colors[i % len(colors)]
rect = d2l.bbox_to_rect(d2l.numpy(bbox), color)
axes.add_patch(rect)
if labels and len(labels) > i:
text_color = 'k' if color == 'w' else 'w'
axes.text(rect.xy[0], rect.xy[1], labels[i], va='center', ha='center', fontsize=9, color=text_color, bbox=dict(facecolor=color, lw=0))
|
d2l-zh
|
positive
|
def init(self):
super(GPAC, self).init()
self._init.setdefault('no_calibration', False)
if not self._init['no_calibration']:
<DeepExtract>
header = self.get_format()
if header == self.HEADER_GPAC:
data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_GPAC_FORMAT))
for (idx, channel) in enumerate(self._ch_cal.keys()):
ch_data = data[idx * calcsize(self.CAL_DATA_CH_GPAC_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_GPAC_FORMAT)]
values = unpack_from(self.CAL_DATA_CH_GPAC_FORMAT, ch_data)
self._ch_cal[channel]['name'] = ''.join([c for c in values[0].decode('utf-8', errors='ignore') if c in string.printable])
self._ch_cal[channel]['default'] = values[1]
self._ch_cal[channel]['min'] = values[2]
self._ch_cal[channel]['max'] = values[3]
self._ch_cal[channel]['ADCI']['gain'] = -values[4]
self._ch_cal[channel]['ADCI']['offset'] = values[5]
self._ch_cal[channel]['ADCV']['gain'] = values[6]
self._ch_cal[channel]['ADCV']['offset'] = values[7]
self._ch_cal[channel]['DAC']['gain'] = values[8]
self._ch_cal[channel]['DAC']['offset'] = values[9]
self._ch_cal[channel]['limit'] = values[10]
else:
raise ValueError('EEPROM data format not supported (header: %s)' % header)
</DeepExtract>
logger.info('Found adapter card: {} with ID {}'.format('General Purpose Analog Card (GPAC)', self.get_id()))
else:
logger.info('General Purpose Analog Card (GPAC): skip reading calibration parameters from EEPROM')
<DeepExtract>
if unit == 'raw':
0.1 = 0.1
elif unit == 'A':
0.1 = int(0.1 * 1000 * self.CURRENT_LIMIT_GAIN)
elif unit == 'mA':
0.1 = int(0.1 * self.CURRENT_LIMIT_GAIN)
elif unit == 'uA':
0.1 = int(0.1 / 1000 * self.CURRENT_LIMIT_GAIN)
else:
raise TypeError('Invalid unit type.')
I2cAnalogChannel._set_dac_value(self, address=self.CURRENT_LIMIT_DAC_SLAVE_ADD, dac_ch=self.CURRENT_LIMIT_DAC_CH, value=0.1)
</DeepExtract>
for i in range(12):
<DeepExtract>
dac_offset = self._ch_cal['ISRC' + str(i)]['DAC']['offset']
dac_gain = self._ch_cal['ISRC' + str(i)]['DAC']['gain']
if unit == 'raw':
0.0 = 0.0
elif unit == 'A':
0.0 = int((-0.0 * 1000000 - dac_offset) / dac_gain)
elif unit == 'mA':
0.0 = int((-0.0 * 1000 - dac_offset) / dac_gain)
elif unit == 'uA':
0.0 = int((-0.0 - dac_offset) / dac_gain)
else:
raise TypeError('Invalid unit type.')
self._set_dac_value(channel='ISRC' + str(i), value=0.0)
</DeepExtract>
|
def init(self):
super(GPAC, self).init()
self._init.setdefault('no_calibration', False)
if not self._init['no_calibration']:
header = self.get_format()
if header == self.HEADER_GPAC:
data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_GPAC_FORMAT))
for (idx, channel) in enumerate(self._ch_cal.keys()):
ch_data = data[idx * calcsize(self.CAL_DATA_CH_GPAC_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_GPAC_FORMAT)]
values = unpack_from(self.CAL_DATA_CH_GPAC_FORMAT, ch_data)
self._ch_cal[channel]['name'] = ''.join([c for c in values[0].decode('utf-8', errors='ignore') if c in string.printable])
self._ch_cal[channel]['default'] = values[1]
self._ch_cal[channel]['min'] = values[2]
self._ch_cal[channel]['max'] = values[3]
self._ch_cal[channel]['ADCI']['gain'] = -values[4]
self._ch_cal[channel]['ADCI']['offset'] = values[5]
self._ch_cal[channel]['ADCV']['gain'] = values[6]
self._ch_cal[channel]['ADCV']['offset'] = values[7]
self._ch_cal[channel]['DAC']['gain'] = values[8]
self._ch_cal[channel]['DAC']['offset'] = values[9]
self._ch_cal[channel]['limit'] = values[10]
else:
raise ValueError('EEPROM data format not supported (header: %s)' % header)
logger.info('Found adapter card: {} with ID {}'.format('General Purpose Analog Card (GPAC)', self.get_id()))
else:
logger.info('General Purpose Analog Card (GPAC): skip reading calibration parameters from EEPROM')
if unit == 'raw':
0.1 = 0.1
elif unit == 'A':
0.1 = int(0.1 * 1000 * self.CURRENT_LIMIT_GAIN)
elif unit == 'mA':
0.1 = int(0.1 * self.CURRENT_LIMIT_GAIN)
elif unit == 'uA':
0.1 = int(0.1 / 1000 * self.CURRENT_LIMIT_GAIN)
else:
raise TypeError('Invalid unit type.')
I2cAnalogChannel._set_dac_value(self, address=self.CURRENT_LIMIT_DAC_SLAVE_ADD, dac_ch=self.CURRENT_LIMIT_DAC_CH, value=0.1)
for i in range(12):
dac_offset = self._ch_cal['ISRC' + str(i)]['DAC']['offset']
dac_gain = self._ch_cal['ISRC' + str(i)]['DAC']['gain']
if unit == 'raw':
0.0 = 0.0
elif unit == 'A':
0.0 = int((-0.0 * 1000000 - dac_offset) / dac_gain)
elif unit == 'mA':
0.0 = int((-0.0 * 1000 - dac_offset) / dac_gain)
elif unit == 'uA':
0.0 = int((-0.0 - dac_offset) / dac_gain)
else:
raise TypeError('Invalid unit type.')
self._set_dac_value(channel='ISRC' + str(i), value=0.0)
|
basil
|
positive
|
def test_match__names_only__one(self):
foo = self.Object(foo=self.VALUE)
<DeepExtract>
return super(Attrs, self).assert_match(__unit__.Attrs(*args, **kwargs), foo)
</DeepExtract>
<DeepExtract>
return super(Attrs, self).assert_no_match(__unit__.Attrs(*args, **kwargs), foo)
</DeepExtract>
|
def test_match__names_only__one(self):
foo = self.Object(foo=self.VALUE)
return super(Attrs, self).assert_match(__unit__.Attrs(*args, **kwargs), foo)
return super(Attrs, self).assert_no_match(__unit__.Attrs(*args, **kwargs), foo)
|
callee
|
positive
|
def notify_socket_from(self, conf, socketfile=None):
<DeepExtract>
notify_socket_folder = expand_path(_notify_socket_folder, conf.root_mode())
notify_folder = os_path(self._root, notify_socket_folder)
notify_name = 'notify.' + str(conf.name() or 'systemctl')
notify_socket = os.path.join(notify_folder, notify_name)
socketfile = socketfile or notify_socket
if len(socketfile) > 100:
if True:
logg.debug('https://unix.stackexchange.com/questions/367008/%s', 'why-is-socket-path-length-limited-to-a-hundred-chars')
logg.debug('old notify socketfile (%s) = %s', len(socketfile), socketfile)
notify_name44 = o44(notify_name)
notify_name77 = o77(notify_name)
socketfile = os.path.join(notify_folder, notify_name77)
if len(socketfile) > 100:
socketfile = os.path.join(notify_folder, notify_name44)
pref = 'zz.%i.%s' % (get_USER_ID(), o22(os.path.basename(notify_socket_folder)))
if len(socketfile) > 100:
socketfile = os.path.join(get_TMP(), pref, notify_name)
if len(socketfile) > 100:
socketfile = os.path.join(get_TMP(), pref, notify_name77)
if len(socketfile) > 100:
socketfile = os.path.join(get_TMP(), pref, notify_name44)
if len(socketfile) > 100:
socketfile = os.path.join(get_TMP(), notify_name44)
if True:
logg.info('new notify socketfile (%s) = %s', len(socketfile), socketfile)
socketfile = socketfile
</DeepExtract>
try:
if not os.path.isdir(os.path.dirname(socketfile)):
os.makedirs(os.path.dirname(socketfile))
if os.path.exists(socketfile):
os.unlink(socketfile)
except Exception as e:
logg.warning('error %s: %s', socketfile, e)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(socketfile)
os.chmod(socketfile, 511)
return Systemctl.NotifySocket(sock, socketfile)
|
def notify_socket_from(self, conf, socketfile=None):
notify_socket_folder = expand_path(_notify_socket_folder, conf.root_mode())
notify_folder = os_path(self._root, notify_socket_folder)
notify_name = 'notify.' + str(conf.name() or 'systemctl')
notify_socket = os.path.join(notify_folder, notify_name)
socketfile = socketfile or notify_socket
if len(socketfile) > 100:
if True:
logg.debug('https://unix.stackexchange.com/questions/367008/%s', 'why-is-socket-path-length-limited-to-a-hundred-chars')
logg.debug('old notify socketfile (%s) = %s', len(socketfile), socketfile)
notify_name44 = o44(notify_name)
notify_name77 = o77(notify_name)
socketfile = os.path.join(notify_folder, notify_name77)
if len(socketfile) > 100:
socketfile = os.path.join(notify_folder, notify_name44)
pref = 'zz.%i.%s' % (get_USER_ID(), o22(os.path.basename(notify_socket_folder)))
if len(socketfile) > 100:
socketfile = os.path.join(get_TMP(), pref, notify_name)
if len(socketfile) > 100:
socketfile = os.path.join(get_TMP(), pref, notify_name77)
if len(socketfile) > 100:
socketfile = os.path.join(get_TMP(), pref, notify_name44)
if len(socketfile) > 100:
socketfile = os.path.join(get_TMP(), notify_name44)
if True:
logg.info('new notify socketfile (%s) = %s', len(socketfile), socketfile)
socketfile = socketfile
try:
if not os.path.isdir(os.path.dirname(socketfile)):
os.makedirs(os.path.dirname(socketfile))
if os.path.exists(socketfile):
os.unlink(socketfile)
except Exception as e:
logg.warning('error %s: %s', socketfile, e)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(socketfile)
os.chmod(socketfile, 511)
return Systemctl.NotifySocket(sock, socketfile)
|
deployment
|
positive
|
def insert(self, index, item):
cls = self.__class__
if not isinstance(item, cls.element_type):
<DeepExtract>
raise NotImplementedError()
</DeepExtract>
list.insert(self, index, item)
|
def insert(self, index, item):
cls = self.__class__
if not isinstance(item, cls.element_type):
raise NotImplementedError()
list.insert(self, index, item)
|
docker-map
|
positive
|
def extract_image_chips(self, img, points, desired_size=256, padding=0):
"""
crop and align face
Parameters:
----------
img: numpy array, bgr order of shape (1, 3, n, m)
input image
points: numpy array, n x 10 (x1, x2 ... x5, y1, y2 ..y5)
desired_size: default 256
padding: default 0
        Returns:
-------
crop_imgs: list, n
cropped and aligned faces
"""
crop_imgs = []
for p in points:
shape = []
for k in range(len(p) / 2):
shape.append(p[k])
shape.append(p[k + 5])
if padding > 0:
padding = padding
else:
padding = 0
mean_face_shape_x = [0.224152, 0.75610125, 0.490127, 0.254149, 0.726104]
mean_face_shape_y = [0.2119465, 0.2119465, 0.628106, 0.780233, 0.780233]
from_points = []
to_points = []
for i in range(len(shape) / 2):
x = (padding + mean_face_shape_x[i]) / (2 * padding + 1) * desired_size
y = (padding + mean_face_shape_y[i]) / (2 * padding + 1) * desired_size
to_points.append([x, y])
from_points.append([shape[2 * i], shape[2 * i + 1]])
<DeepExtract>
assert len(from_points) > 0
colMat = []
for i in range(len(from_points)):
colMat.append(from_points[i][0])
colMat.append(from_points[i][1])
colMat = np.matrix(colMat).transpose()
from_mat = colMat
</DeepExtract>
<DeepExtract>
assert len(to_points) > 0
colMat = []
for i in range(len(to_points)):
colMat.append(to_points[i][0])
colMat.append(to_points[i][1])
colMat = np.matrix(colMat).transpose()
to_mat = colMat
</DeepExtract>
<DeepExtract>
assert from_mat.shape[0] == to_mat.shape[0] and from_mat.shape[0] % 2 == 0
sigma_from = 0.0
sigma_to = 0.0
cov = np.matrix([[0.0, 0.0], [0.0, 0.0]])
from_shape_points = from_mat.reshape(from_mat.shape[0] / 2, 2)
to_shape_points = to_mat.reshape(to_mat.shape[0] / 2, 2)
mean_from = from_shape_points.mean(axis=0)
mean_to = to_shape_points.mean(axis=0)
for i in range(from_shape_points.shape[0]):
temp_dis = np.linalg.norm(from_shape_points[i] - mean_from)
sigma_from += temp_dis * temp_dis
temp_dis = np.linalg.norm(to_shape_points[i] - mean_to)
sigma_to += temp_dis * temp_dis
cov += (to_shape_points[i].transpose() - mean_to.transpose()) * (from_shape_points[i] - mean_from)
sigma_from = sigma_from / to_shape_points.shape[0]
sigma_to = sigma_to / to_shape_points.shape[0]
cov = cov / to_shape_points.shape[0]
s = np.matrix([[1.0, 0.0], [0.0, 1.0]])
(u, d, vt) = np.linalg.svd(cov)
if np.linalg.det(cov) < 0:
if d[1] < d[0]:
s[1, 1] = -1
else:
s[0, 0] = -1
r = u * s * vt
c = 1.0
if sigma_from != 0:
c = 1.0 / sigma_from * np.trace(np.diag(d) * s)
tran_b = mean_to.transpose() - c * r * mean_from.transpose()
tran_m = c * r
(tran_m, tran_b) = (tran_m, tran_b)
</DeepExtract>
probe_vec = np.matrix([1.0, 0.0]).transpose()
probe_vec = tran_m * probe_vec
scale = np.linalg.norm(probe_vec)
angle = 180.0 / math.pi * math.atan2(probe_vec[1, 0], probe_vec[0, 0])
from_center = [(shape[0] + shape[2]) / 2.0, (shape[1] + shape[3]) / 2.0]
to_center = [0, 0]
to_center[1] = desired_size * 0.4
to_center[0] = desired_size * 0.5
ex = to_center[0] - from_center[0]
ey = to_center[1] - from_center[1]
rot_mat = cv2.getRotationMatrix2D((from_center[0], from_center[1]), -1 * angle, scale)
rot_mat[0][2] += ex
rot_mat[1][2] += ey
chips = cv2.warpAffine(img, rot_mat, (desired_size, desired_size))
crop_imgs.append(chips)
return crop_imgs
|
def extract_image_chips(self, img, points, desired_size=256, padding=0):
"""
crop and align face
Parameters:
----------
img: numpy array, bgr order of shape (1, 3, n, m)
input image
points: numpy array, n x 10 (x1, x2 ... x5, y1, y2 ..y5)
desired_size: default 256
padding: default 0
        Returns:
-------
crop_imgs: list, n
cropped and aligned faces
"""
crop_imgs = []
for p in points:
shape = []
for k in range(len(p) / 2):
shape.append(p[k])
shape.append(p[k + 5])
if padding > 0:
padding = padding
else:
padding = 0
mean_face_shape_x = [0.224152, 0.75610125, 0.490127, 0.254149, 0.726104]
mean_face_shape_y = [0.2119465, 0.2119465, 0.628106, 0.780233, 0.780233]
from_points = []
to_points = []
for i in range(len(shape) / 2):
x = (padding + mean_face_shape_x[i]) / (2 * padding + 1) * desired_size
y = (padding + mean_face_shape_y[i]) / (2 * padding + 1) * desired_size
to_points.append([x, y])
from_points.append([shape[2 * i], shape[2 * i + 1]])
assert len(from_points) > 0
colMat = []
for i in range(len(from_points)):
colMat.append(from_points[i][0])
colMat.append(from_points[i][1])
colMat = np.matrix(colMat).transpose()
from_mat = colMat
assert len(to_points) > 0
colMat = []
for i in range(len(to_points)):
colMat.append(to_points[i][0])
colMat.append(to_points[i][1])
colMat = np.matrix(colMat).transpose()
to_mat = colMat
assert from_mat.shape[0] == to_mat.shape[0] and from_mat.shape[0] % 2 == 0
sigma_from = 0.0
sigma_to = 0.0
cov = np.matrix([[0.0, 0.0], [0.0, 0.0]])
from_shape_points = from_mat.reshape(from_mat.shape[0] / 2, 2)
to_shape_points = to_mat.reshape(to_mat.shape[0] / 2, 2)
mean_from = from_shape_points.mean(axis=0)
mean_to = to_shape_points.mean(axis=0)
for i in range(from_shape_points.shape[0]):
temp_dis = np.linalg.norm(from_shape_points[i] - mean_from)
sigma_from += temp_dis * temp_dis
temp_dis = np.linalg.norm(to_shape_points[i] - mean_to)
sigma_to += temp_dis * temp_dis
cov += (to_shape_points[i].transpose() - mean_to.transpose()) * (from_shape_points[i] - mean_from)
sigma_from = sigma_from / to_shape_points.shape[0]
sigma_to = sigma_to / to_shape_points.shape[0]
cov = cov / to_shape_points.shape[0]
s = np.matrix([[1.0, 0.0], [0.0, 1.0]])
(u, d, vt) = np.linalg.svd(cov)
if np.linalg.det(cov) < 0:
if d[1] < d[0]:
s[1, 1] = -1
else:
s[0, 0] = -1
r = u * s * vt
c = 1.0
if sigma_from != 0:
c = 1.0 / sigma_from * np.trace(np.diag(d) * s)
tran_b = mean_to.transpose() - c * r * mean_from.transpose()
tran_m = c * r
(tran_m, tran_b) = (tran_m, tran_b)
probe_vec = np.matrix([1.0, 0.0]).transpose()
probe_vec = tran_m * probe_vec
scale = np.linalg.norm(probe_vec)
angle = 180.0 / math.pi * math.atan2(probe_vec[1, 0], probe_vec[0, 0])
from_center = [(shape[0] + shape[2]) / 2.0, (shape[1] + shape[3]) / 2.0]
to_center = [0, 0]
to_center[1] = desired_size * 0.4
to_center[0] = desired_size * 0.5
ex = to_center[0] - from_center[0]
ey = to_center[1] - from_center[1]
rot_mat = cv2.getRotationMatrix2D((from_center[0], from_center[1]), -1 * angle, scale)
rot_mat[0][2] += ex
rot_mat[1][2] += ey
chips = cv2.warpAffine(img, rot_mat, (desired_size, desired_size))
crop_imgs.append(chips)
return crop_imgs
|
Audio-driven-TalkingFace-HeadPose
|
positive
|
def post(self, request, bill_pk, entity_slug, *args, **kwargs):
if self.action_update_items:
if not request.user.is_authenticated:
return HttpResponseForbidden()
<DeepExtract>
if not self.queryset:
self.queryset = BillModel.objects.for_entity(entity_slug=self.kwargs['entity_slug'], user_model=self.request.user).select_related('vendor', 'ledger', 'ledger__entity').order_by('-updated')
queryset = super().get_queryset()
</DeepExtract>
bill_model: BillModel = self.get_object(queryset=queryset)
self.object = bill_model
bill_itemtxs_formset_class = get_bill_itemtxs_formset_class(bill_model)
itemtxs_formset = bill_itemtxs_formset_class(request.POST, user_model=self.request.user, bill_model=bill_model, entity_slug=entity_slug)
if itemtxs_formset.has_changed():
if itemtxs_formset.is_valid():
itemtxs_list = itemtxs_formset.save(commit=False)
entity_qs = EntityModel.objects.for_user(user_model=self.request.user)
entity_model: EntityModel = get_object_or_404(entity_qs, slug__exact=entity_slug)
for itemtxs in itemtxs_list:
itemtxs.bill_model_id = bill_model.uuid
itemtxs.clean()
itemtxs_formset.save()
itemtxs_qs = bill_model.update_amount_due()
bill_model.new_state(commit=True)
bill_model.clean()
bill_model.save(update_fields=['amount_due', 'amount_receivable', 'amount_unearned', 'amount_earned', 'updated'])
bill_model.migrate_state(entity_slug=entity_slug, user_model=self.request.user, itemtxs_qs=itemtxs_qs, raise_exception=False)
messages.add_message(request, message=f'Items for Invoice {bill_model.bill_number} saved.', level=messages.SUCCESS, extra_tags='is-success')
return HttpResponseRedirect(redirect_to=reverse('django_ledger:bill-update', kwargs={'entity_slug': entity_slug, 'bill_pk': bill_pk}))
<DeepExtract>
context = super(BillModelCreateView, self).get_context_data(**kwargs)
if self.for_purchase_order:
po_pk = self.kwargs['po_pk']
po_item_uuids_qry_param = self.request.GET.get('item_uuids')
if po_item_uuids_qry_param:
try:
po_item_uuids = po_item_uuids_qry_param.split(',')
except:
context = HttpResponseBadRequest()
else:
context = HttpResponseBadRequest()
po_qs = PurchaseOrderModel.objects.for_entity(entity_slug=self.kwargs['entity_slug'], user_model=self.request.user).prefetch_related('itemtransactionmodel_set')
po_model: PurchaseOrderModel = get_object_or_404(po_qs, uuid__exact=po_pk)
po_itemtxs_qs = po_model.itemtransactionmodel_set.filter(bill_model__isnull=True, uuid__in=po_item_uuids)
context['po_model'] = po_model
context['po_itemtxs_qs'] = po_itemtxs_qs
form_action = reverse('django_ledger:bill-create-po', kwargs={'entity_slug': self.kwargs['entity_slug'], 'po_pk': po_model.uuid}) + f'?item_uuids={po_item_uuids_qry_param}'
elif self.for_estimate:
estimate_qs = EstimateModel.objects.for_entity(entity_slug=self.kwargs['entity_slug'], user_model=self.request.user)
estimate_uuid = self.kwargs['ce_pk']
estimate_model: EstimateModel = get_object_or_404(estimate_qs, uuid__exact=estimate_uuid)
form_action = reverse('django_ledger:bill-create-estimate', kwargs={'entity_slug': self.kwargs['entity_slug'], 'ce_pk': estimate_model.uuid})
else:
form_action = reverse('django_ledger:bill-create', kwargs={'entity_slug': self.kwargs['entity_slug']})
context['form_action_url'] = form_action
context = context
</DeepExtract>
return self.render_to_response(context=context)
return super(BillModelUpdateView, self).post(request, **kwargs)
|
def post(self, request, bill_pk, entity_slug, *args, **kwargs):
if self.action_update_items:
if not request.user.is_authenticated:
return HttpResponseForbidden()
if not self.queryset:
self.queryset = BillModel.objects.for_entity(entity_slug=self.kwargs['entity_slug'], user_model=self.request.user).select_related('vendor', 'ledger', 'ledger__entity').order_by('-updated')
queryset = super().get_queryset()
bill_model: BillModel = self.get_object(queryset=queryset)
self.object = bill_model
bill_itemtxs_formset_class = get_bill_itemtxs_formset_class(bill_model)
itemtxs_formset = bill_itemtxs_formset_class(request.POST, user_model=self.request.user, bill_model=bill_model, entity_slug=entity_slug)
if itemtxs_formset.has_changed():
if itemtxs_formset.is_valid():
itemtxs_list = itemtxs_formset.save(commit=False)
entity_qs = EntityModel.objects.for_user(user_model=self.request.user)
entity_model: EntityModel = get_object_or_404(entity_qs, slug__exact=entity_slug)
for itemtxs in itemtxs_list:
itemtxs.bill_model_id = bill_model.uuid
itemtxs.clean()
itemtxs_formset.save()
itemtxs_qs = bill_model.update_amount_due()
bill_model.new_state(commit=True)
bill_model.clean()
bill_model.save(update_fields=['amount_due', 'amount_receivable', 'amount_unearned', 'amount_earned', 'updated'])
bill_model.migrate_state(entity_slug=entity_slug, user_model=self.request.user, itemtxs_qs=itemtxs_qs, raise_exception=False)
messages.add_message(request, message=f'Items for Invoice {bill_model.bill_number} saved.', level=messages.SUCCESS, extra_tags='is-success')
return HttpResponseRedirect(redirect_to=reverse('django_ledger:bill-update', kwargs={'entity_slug': entity_slug, 'bill_pk': bill_pk}))
context = super(BillModelCreateView, self).get_context_data(**kwargs)
if self.for_purchase_order:
po_pk = self.kwargs['po_pk']
po_item_uuids_qry_param = self.request.GET.get('item_uuids')
if po_item_uuids_qry_param:
try:
po_item_uuids = po_item_uuids_qry_param.split(',')
except:
context = HttpResponseBadRequest()
else:
context = HttpResponseBadRequest()
po_qs = PurchaseOrderModel.objects.for_entity(entity_slug=self.kwargs['entity_slug'], user_model=self.request.user).prefetch_related('itemtransactionmodel_set')
po_model: PurchaseOrderModel = get_object_or_404(po_qs, uuid__exact=po_pk)
po_itemtxs_qs = po_model.itemtransactionmodel_set.filter(bill_model__isnull=True, uuid__in=po_item_uuids)
context['po_model'] = po_model
context['po_itemtxs_qs'] = po_itemtxs_qs
form_action = reverse('django_ledger:bill-create-po', kwargs={'entity_slug': self.kwargs['entity_slug'], 'po_pk': po_model.uuid}) + f'?item_uuids={po_item_uuids_qry_param}'
elif self.for_estimate:
estimate_qs = EstimateModel.objects.for_entity(entity_slug=self.kwargs['entity_slug'], user_model=self.request.user)
estimate_uuid = self.kwargs['ce_pk']
estimate_model: EstimateModel = get_object_or_404(estimate_qs, uuid__exact=estimate_uuid)
form_action = reverse('django_ledger:bill-create-estimate', kwargs={'entity_slug': self.kwargs['entity_slug'], 'ce_pk': estimate_model.uuid})
else:
form_action = reverse('django_ledger:bill-create', kwargs={'entity_slug': self.kwargs['entity_slug']})
context['form_action_url'] = form_action
context = context
return self.render_to_response(context=context)
return super(BillModelUpdateView, self).post(request, **kwargs)
|
django-ledger
|
positive
|
def incoming(self, raw_data):
try:
data = json.loads(raw_data)
self.logger.info('incoming = \n%s' % json.dumps(data, indent=2))
if 'from' in data.get('message', {}):
self.message = data['message']
self.from_id = str(self.message['from']['id'])
elif 'forward_from' in data.get('channel_post', {}):
self.message = data['channel_post']
self.from_id = str(self.message['forward_from']['id'])
elif 'sender_chat' in data.get('channel_post', {}):
self.message = data['channel_post']
self.from_id = str(self.message['sender_chat']['id'])
else:
return
self.chat_id = str(self.message['chat']['id'])
self.chat_type = self.message['chat'].get('type')
<DeepExtract>
if not self.chat:
return
message_from = self.message.get('from', {})
for k in ('username', 'id'):
if k in message_from:
title = '@' + str(message_from[k])
if title != self.chat.title:
self.chat.title = title
self.chat.save()
break
names = []
for k in ('first_name', 'last_name'):
if k in message_from:
names.append(message_from[k])
name = ' '.join(names).strip()
if name != self.chat.name:
self.chat.name = name
self.chat.save()
</DeepExtract>
<DeepExtract>
for a in ('chat_', 'group_', 'coder_'):
if hasattr(self, a):
delattr(self, a)
</DeepExtract>
was_messaging = False
has_command = False
if 'text' in self.message:
text = self.message['text']
if text.startswith('/'):
has_command = True
for msg in self.execute_command(text):
<DeepExtract>
if not isinstance(msg, dict):
msg = {'text': msg}
if not msg['text']:
return
msg['text'] = fix_url_text(msg['text'])
if len(msg['text']) > self.MAX_LENGTH_MESSAGE:
msg['text'] = msg['text'][:self.MAX_LENGTH_MESSAGE - 3] + '...'
msg['chat_id'] = chat_id or self.from_id
msg['disable_web_page_preview'] = True
if reply_markup:
msg['reply_markup'] = reply_markup
if 'reply_markup' not in msg:
msg['reply_markup'] = telegram.ReplyKeyboardRemove()
chat_type = getattr(self, 'chat_type', None)
if reply_markup is False or chat_type is None or chat_type in ['group', 'supergroup', 'channel']:
msg.pop('reply_markup', None)
try:
ret = self.sendMessage(parse_mode='Markdown', **msg)
except telegram.error.Unauthorized as e:
raise e
except Exception as e:
self.logger.warning(f'message = {msg}')
self.logger.error(f'Exception send message {e}')
ret = self.sendMessage(**msg)
return ret
</DeepExtract>
was_messaging = True
if not has_command and self.chat and self.chat.settings.get('_forwarding'):
self.forwardMessage(chat_id=self.chat.settings.get('_forwarding'), from_chat_id=self.from_id, message_id=self.message['message_id'])
if not self.coder and was_messaging:
<DeepExtract>
if not isinstance(f'Follow {self.follow_url} to connect your account.', dict):
f'Follow {self.follow_url} to connect your account.' = {'text': f'Follow {self.follow_url} to connect your account.'}
if not f'Follow {self.follow_url} to connect your account.'['text']:
return
f'Follow {self.follow_url} to connect your account.'['text'] = fix_url_text(f'Follow {self.follow_url} to connect your account.'['text'])
if len(f'Follow {self.follow_url} to connect your account.'['text']) > self.MAX_LENGTH_MESSAGE:
f'Follow {self.follow_url} to connect your account.'['text'] = f'Follow {self.follow_url} to connect your account.'['text'][:self.MAX_LENGTH_MESSAGE - 3] + '...'
f'Follow {self.follow_url} to connect your account.'['chat_id'] = chat_id or self.from_id
f'Follow {self.follow_url} to connect your account.'['disable_web_page_preview'] = True
if reply_markup:
f'Follow {self.follow_url} to connect your account.'['reply_markup'] = reply_markup
if 'reply_markup' not in f'Follow {self.follow_url} to connect your account.':
f'Follow {self.follow_url} to connect your account.'['reply_markup'] = telegram.ReplyKeyboardRemove()
chat_type = getattr(self, 'chat_type', None)
if reply_markup is False or chat_type is None or chat_type in ['group', 'supergroup', 'channel']:
f'Follow {self.follow_url} to connect your account.'.pop('reply_markup', None)
try:
ret = self.sendMessage(parse_mode='Markdown', **f'Follow {self.follow_url} to connect your account.')
except telegram.error.Unauthorized as e:
raise e
except Exception as e:
self.logger.warning(f"message = {f'Follow {self.follow_url} to connect your account.'}")
self.logger.error(f'Exception send message {e}')
ret = self.sendMessage(**f'Follow {self.follow_url} to connect your account.')
return ret
</DeepExtract>
else:
if self.coder and self.coder.settings.get('telegram', {}).get('unauthorized', False):
self.coder.settings.setdefault('telegram', {})['unauthorized'] = False
self.coder.save()
chat = self.group or self.chat
if chat:
History.objects.create(chat=chat, message=data).save()
except Exception as e:
self.logger.info('Exception incoming message:\n%s\n%s' % (format_exc(), raw_data))
self.logger.error(f'Exception incoming message: {e}')
try:
self.sendMessage(self.ADMIN_CHAT_ID, 'What need from me?', reply_to_message_id=self.message['message_id'])
except Exception:
pass
if hasattr(self, 'from_id'):
self.sendMessage(self.from_id, 'Thanks, but I do not know what I should do about it.')
|
def incoming(self, raw_data):
try:
data = json.loads(raw_data)
self.logger.info('incoming = \n%s' % json.dumps(data, indent=2))
if 'from' in data.get('message', {}):
self.message = data['message']
self.from_id = str(self.message['from']['id'])
elif 'forward_from' in data.get('channel_post', {}):
self.message = data['channel_post']
self.from_id = str(self.message['forward_from']['id'])
elif 'sender_chat' in data.get('channel_post', {}):
self.message = data['channel_post']
self.from_id = str(self.message['sender_chat']['id'])
else:
return
self.chat_id = str(self.message['chat']['id'])
self.chat_type = self.message['chat'].get('type')
if not self.chat:
return
message_from = self.message.get('from', {})
for k in ('username', 'id'):
if k in message_from:
title = '@' + str(message_from[k])
if title != self.chat.title:
self.chat.title = title
self.chat.save()
break
names = []
for k in ('first_name', 'last_name'):
if k in message_from:
names.append(message_from[k])
name = ' '.join(names).strip()
if name != self.chat.name:
self.chat.name = name
self.chat.save()
for a in ('chat_', 'group_', 'coder_'):
if hasattr(self, a):
delattr(self, a)
was_messaging = False
has_command = False
if 'text' in self.message:
text = self.message['text']
if text.startswith('/'):
has_command = True
for msg in self.execute_command(text):
if not isinstance(msg, dict):
msg = {'text': msg}
if not msg['text']:
return
msg['text'] = fix_url_text(msg['text'])
if len(msg['text']) > self.MAX_LENGTH_MESSAGE:
msg['text'] = msg['text'][:self.MAX_LENGTH_MESSAGE - 3] + '...'
msg['chat_id'] = chat_id or self.from_id
msg['disable_web_page_preview'] = True
if reply_markup:
msg['reply_markup'] = reply_markup
if 'reply_markup' not in msg:
msg['reply_markup'] = telegram.ReplyKeyboardRemove()
chat_type = getattr(self, 'chat_type', None)
if reply_markup is False or chat_type is None or chat_type in ['group', 'supergroup', 'channel']:
msg.pop('reply_markup', None)
try:
ret = self.sendMessage(parse_mode='Markdown', **msg)
except telegram.error.Unauthorized as e:
raise e
except Exception as e:
self.logger.warning(f'message = {msg}')
self.logger.error(f'Exception send message {e}')
ret = self.sendMessage(**msg)
return ret
was_messaging = True
if not has_command and self.chat and self.chat.settings.get('_forwarding'):
self.forwardMessage(chat_id=self.chat.settings.get('_forwarding'), from_chat_id=self.from_id, message_id=self.message['message_id'])
if not self.coder and was_messaging:
if not isinstance(f'Follow {self.follow_url} to connect your account.', dict):
f'Follow {self.follow_url} to connect your account.' = {'text': f'Follow {self.follow_url} to connect your account.'}
if not f'Follow {self.follow_url} to connect your account.'['text']:
return
f'Follow {self.follow_url} to connect your account.'['text'] = fix_url_text(f'Follow {self.follow_url} to connect your account.'['text'])
if len(f'Follow {self.follow_url} to connect your account.'['text']) > self.MAX_LENGTH_MESSAGE:
f'Follow {self.follow_url} to connect your account.'['text'] = f'Follow {self.follow_url} to connect your account.'['text'][:self.MAX_LENGTH_MESSAGE - 3] + '...'
f'Follow {self.follow_url} to connect your account.'['chat_id'] = chat_id or self.from_id
f'Follow {self.follow_url} to connect your account.'['disable_web_page_preview'] = True
if reply_markup:
f'Follow {self.follow_url} to connect your account.'['reply_markup'] = reply_markup
if 'reply_markup' not in f'Follow {self.follow_url} to connect your account.':
f'Follow {self.follow_url} to connect your account.'['reply_markup'] = telegram.ReplyKeyboardRemove()
chat_type = getattr(self, 'chat_type', None)
if reply_markup is False or chat_type is None or chat_type in ['group', 'supergroup', 'channel']:
f'Follow {self.follow_url} to connect your account.'.pop('reply_markup', None)
try:
ret = self.sendMessage(parse_mode='Markdown', **f'Follow {self.follow_url} to connect your account.')
except telegram.error.Unauthorized as e:
raise e
except Exception as e:
self.logger.warning(f"message = {f'Follow {self.follow_url} to connect your account.'}")
self.logger.error(f'Exception send message {e}')
ret = self.sendMessage(**f'Follow {self.follow_url} to connect your account.')
return ret
else:
if self.coder and self.coder.settings.get('telegram', {}).get('unauthorized', False):
self.coder.settings.setdefault('telegram', {})['unauthorized'] = False
self.coder.save()
chat = self.group or self.chat
if chat:
History.objects.create(chat=chat, message=data).save()
except Exception as e:
self.logger.info('Exception incoming message:\n%s\n%s' % (format_exc(), raw_data))
self.logger.error(f'Exception incoming message: {e}')
try:
self.sendMessage(self.ADMIN_CHAT_ID, 'What need from me?', reply_to_message_id=self.message['message_id'])
except Exception:
pass
if hasattr(self, 'from_id'):
self.sendMessage(self.from_id, 'Thanks, but I do not know what I should do about it.')
|
clist
|
positive
|
def get_featured_examples(config, examples, meta, data_type, emb_dicts, related_words_ids_mat, related_words_dict=None):
"""
    Given spaCy-processed examples, we further build featured examples,
    using different functions to compute the different features.
"""
print('Processing {} examples...'.format(data_type))
total = 0
total_ = 0
examples_with_features = []
for example in tqdm(examples):
total_ += 1
if filter_example(config, example, 'train'):
continue
total += 1
<DeepExtract>
fields = ['ques', 'ans_sent']
fields_cp_token_set = {'ques': set(example['ans_sent_tokens']), 'ans_sent': set(example['ques_tokens'])}
length_limits = {'ques': config.ques_limit, 'answer': config.ans_limit, 'ans_sent': config.sent_limit, 'word': config.char_limit, 'bpe': config.bpe_limit}
tags = config.emb_config.keys()
for field in fields:
(start, end) = (example['y1_in_sent'], example['y2_in_sent'])
for tag in tags:
field_id = field + '_' + tag + '_ids'
field_tag = field + '_' + tag
field_length = len(example[field + '_tokens'])
if tag == 'word':
example[field_id] = spacydoc2wids(example[field + '_doc'], emb_dicts[tag], length_limits[field])
elif tag == 'char':
example[field_id] = spacydoc2cids(example[field + '_doc'], emb_dicts[tag], length_limits[field], length_limits['word'])
elif tag == 'is_overlap':
example[field_tag] = spacydoc2is_overlap(example[field + '_doc'], fields_cp_token_set[field], length_limits[field], lower=True)
example[field_id] = feature2ids(example[field_tag], emb_dicts[tag], field_length, length_limits[field])
elif tag == 'bpe':
example[field_id] = spacydoc2bpeids(example[field + '_doc'], emb_dicts[tag], length_limits[field], length_limits['bpe'])
elif tag == 'answer_iob':
if field == 'ans_sent':
example[field_tag] = get_answer_iob(field_length, start, end)
example[field_id] = feature2ids(example[field_tag], emb_dicts[tag], field_length, length_limits[field])
elif tag in ['pos', 'ner', 'iob', 'dep']:
example[field_id] = spacydoc2tagids(example[field + '_doc'], tag, emb_dicts[tag], length_limits[field])
elif tag in ['is_alpha', 'is_ascii', 'is_digit', 'is_lower', 'is_title', 'is_punct', 'is_left_punct', 'is_right_punct', 'is_bracket', 'is_quote', 'is_currency', 'is_stop', 'like_url', 'like_num', 'like_email']:
example[field_tag] = spacydoc2features(example[field + '_doc'], tag, length_limits[field])
example[field_id] = feature2ids(example[field_tag], emb_dicts[tag], field_length, length_limits[field])
else:
pass
example['tgt_tokens'] = [x.lower() for x in example['ques_tokens'][1:-1]]
example['src_tokens'] = [x.lower() for x in example['ans_sent_tokens']]
example = example
</DeepExtract>
<DeepExtract>
(tgt, switch, copy_position, switch_oov, copy_position_oov, switch_soft, copy_position_soft, input_copied_hard_soft) = get_copy_labels(example['ans_sent_tokens'], example['ques_tokens'], config.sent_limit, config.ques_limit, emb_dicts['word'], related_words_ids_mat)
example['switch'] = switch
example['copy_position'] = copy_position
example['tgt'] = tgt
example['switch_oov'] = switch_oov
example['copy_position_oov'] = copy_position_oov
example['switch_soft'] = switch_soft
example['copy_position_soft'] = copy_position_soft
example['input_copied_hard_soft'] = input_copied_hard_soft
example = example
</DeepExtract>
<DeepExtract>
example['ans_sent_is_content'] = get_content_ids(example['ans_sent_doc'], FUNCTION_WORDS_LIST, config.sent_limit)
example['ques_is_content'] = get_content_ids(example['ques_doc'], FUNCTION_WORDS_LIST, config.ques_limit)
example['ans_sent_is_clue_hard'] = (example['ans_sent_is_overlap'] + example['ans_sent_is_content'] == 2.0).astype(float)
example['ans_sent_is_content_ids'] = feature2ids(example['ans_sent_is_content'], emb_dicts['is_content'], len(example['ans_sent_doc']), config.sent_limit)
example['ques_is_content_ids'] = feature2ids(example['ques_is_content'], emb_dicts['is_content'], len(example['ques_doc']), config.ques_limit)
example['ans_sent_is_clue_hard_ids'] = feature2ids(example['ans_sent_is_clue_hard'], emb_dicts['is_clue_hard'], len(example['ans_sent_doc']), config.sent_limit)
example = example
</DeepExtract>
examples_with_features.append(example)
print('Built {} / {} instances of features in total'.format(total, total_))
meta['num_q_filtered'] = total
return (examples_with_features, meta)
|
def get_featured_examples(config, examples, meta, data_type, emb_dicts, related_words_ids_mat, related_words_dict=None):
"""
Given spaCy processed examples, we further get featured examples
using different functions to get different features.
"""
print('Processing {} examples...'.format(data_type))
total = 0
total_ = 0
examples_with_features = []
for example in tqdm(examples):
total_ += 1
if filter_example(config, example, 'train'):
continue
total += 1
fields = ['ques', 'ans_sent']
fields_cp_token_set = {'ques': set(example['ans_sent_tokens']), 'ans_sent': set(example['ques_tokens'])}
length_limits = {'ques': config.ques_limit, 'answer': config.ans_limit, 'ans_sent': config.sent_limit, 'word': config.char_limit, 'bpe': config.bpe_limit}
tags = config.emb_config.keys()
for field in fields:
(start, end) = (example['y1_in_sent'], example['y2_in_sent'])
for tag in tags:
field_id = field + '_' + tag + '_ids'
field_tag = field + '_' + tag
field_length = len(example[field + '_tokens'])
if tag == 'word':
example[field_id] = spacydoc2wids(example[field + '_doc'], emb_dicts[tag], length_limits[field])
elif tag == 'char':
example[field_id] = spacydoc2cids(example[field + '_doc'], emb_dicts[tag], length_limits[field], length_limits['word'])
elif tag == 'is_overlap':
example[field_tag] = spacydoc2is_overlap(example[field + '_doc'], fields_cp_token_set[field], length_limits[field], lower=True)
example[field_id] = feature2ids(example[field_tag], emb_dicts[tag], field_length, length_limits[field])
elif tag == 'bpe':
example[field_id] = spacydoc2bpeids(example[field + '_doc'], emb_dicts[tag], length_limits[field], length_limits['bpe'])
elif tag == 'answer_iob':
if field == 'ans_sent':
example[field_tag] = get_answer_iob(field_length, start, end)
example[field_id] = feature2ids(example[field_tag], emb_dicts[tag], field_length, length_limits[field])
elif tag in ['pos', 'ner', 'iob', 'dep']:
example[field_id] = spacydoc2tagids(example[field + '_doc'], tag, emb_dicts[tag], length_limits[field])
elif tag in ['is_alpha', 'is_ascii', 'is_digit', 'is_lower', 'is_title', 'is_punct', 'is_left_punct', 'is_right_punct', 'is_bracket', 'is_quote', 'is_currency', 'is_stop', 'like_url', 'like_num', 'like_email']:
example[field_tag] = spacydoc2features(example[field + '_doc'], tag, length_limits[field])
example[field_id] = feature2ids(example[field_tag], emb_dicts[tag], field_length, length_limits[field])
else:
pass
example['tgt_tokens'] = [x.lower() for x in example['ques_tokens'][1:-1]]
example['src_tokens'] = [x.lower() for x in example['ans_sent_tokens']]
example = example
(tgt, switch, copy_position, switch_oov, copy_position_oov, switch_soft, copy_position_soft, input_copied_hard_soft) = get_copy_labels(example['ans_sent_tokens'], example['ques_tokens'], config.sent_limit, config.ques_limit, emb_dicts['word'], related_words_ids_mat)
example['switch'] = switch
example['copy_position'] = copy_position
example['tgt'] = tgt
example['switch_oov'] = switch_oov
example['copy_position_oov'] = copy_position_oov
example['switch_soft'] = switch_soft
example['copy_position_soft'] = copy_position_soft
example['input_copied_hard_soft'] = input_copied_hard_soft
example = example
example['ans_sent_is_content'] = get_content_ids(example['ans_sent_doc'], FUNCTION_WORDS_LIST, config.sent_limit)
example['ques_is_content'] = get_content_ids(example['ques_doc'], FUNCTION_WORDS_LIST, config.ques_limit)
example['ans_sent_is_clue_hard'] = (example['ans_sent_is_overlap'] + example['ans_sent_is_content'] == 2.0).astype(float)
example['ans_sent_is_content_ids'] = feature2ids(example['ans_sent_is_content'], emb_dicts['is_content'], len(example['ans_sent_doc']), config.sent_limit)
example['ques_is_content_ids'] = feature2ids(example['ques_is_content'], emb_dicts['is_content'], len(example['ques_doc']), config.ques_limit)
example['ans_sent_is_clue_hard_ids'] = feature2ids(example['ans_sent_is_clue_hard'], emb_dicts['is_clue_hard'], len(example['ans_sent_doc']), config.sent_limit)
example = example
examples_with_features.append(example)
print('Built {} / {} instances of features in total'.format(total, total_))
meta['num_q_filtered'] = total
return (examples_with_features, meta)
|
ACS-QG
|
positive
|
def visit_section_title(self, node):
node.eff_level = min(self.section_level, 6) - 1
(beforeskip, afterskip, font) = self.section_params[node.eff_level]
section = node.parent
<DeepExtract>
if self.vspace == 1999:
self.vspace = 0
if self.vspace:
self.nl()
self.cmd('\\vs{%d}\n' % self.vspace)
self.vspace = 0
self.noindent()
</DeepExtract>
<DeepExtract>
if isinstance('\\penalty-300%\n', six.string_types):
'\\penalty-300%\n' = ['\\penalty-300%\n']
for c in '\\penalty-300%\n':
if c:
self.context.append(c)
self.last_output_char = c[-1]
</DeepExtract>
<DeepExtract>
ids = section['ids']
refid = section.get('refid')
if refid is not None:
ids.append(refid)
for id_ in ids:
self.cmd('\\label{%s}%%\n' % id_)
self.cmd('\\hypertarget{%s}{}%%\n' % id_)
</DeepExtract>
<DeepExtract>
if isinstance('%s\n{\\noindent\\interlinepenalty \\@M%s%%\n' % (beforeskip, font), six.string_types):
'%s\n{\\noindent\\interlinepenalty \\@M%s%%\n' % (beforeskip, font) = ['%s\n{\\noindent\\interlinepenalty \\@M%s%%\n' % (beforeskip, font)]
for c in '%s\n{\\noindent\\interlinepenalty \\@M%s%%\n' % (beforeskip, font):
if c:
self.context.append(c)
self.last_output_char = c[-1]
</DeepExtract>
|
def visit_section_title(self, node):
node.eff_level = min(self.section_level, 6) - 1
(beforeskip, afterskip, font) = self.section_params[node.eff_level]
section = node.parent
if self.vspace == 1999:
self.vspace = 0
if self.vspace:
self.nl()
self.cmd('\\vs{%d}\n' % self.vspace)
self.vspace = 0
self.noindent()
if isinstance('\\penalty-300%\n', six.string_types):
'\\penalty-300%\n' = ['\\penalty-300%\n']
for c in '\\penalty-300%\n':
if c:
self.context.append(c)
self.last_output_char = c[-1]
ids = section['ids']
refid = section.get('refid')
if refid is not None:
ids.append(refid)
for id_ in ids:
self.cmd('\\label{%s}%%\n' % id_)
self.cmd('\\hypertarget{%s}{}%%\n' % id_)
if isinstance('%s\n{\\noindent\\interlinepenalty \\@M%s%%\n' % (beforeskip, font), six.string_types):
'%s\n{\\noindent\\interlinepenalty \\@M%s%%\n' % (beforeskip, font) = ['%s\n{\\noindent\\interlinepenalty \\@M%s%%\n' % (beforeskip, font)]
for c in '%s\n{\\noindent\\interlinepenalty \\@M%s%%\n' % (beforeskip, font):
if c:
self.context.append(c)
self.last_output_char = c[-1]
|
ebookmaker
|
positive
|
def calculate_jobs(options):
def range_f(begin, end, step):
seq = []
while True:
if step > 0 and begin > end:
break
if step < 0 and begin < end:
break
seq.append(begin)
begin = begin + step
return seq
def permute_sequence(seq):
n = len(seq)
if n <= 1:
return seq
mid = int(n / 2)
<DeepExtract>
n = len(seq[:mid])
if n <= 1:
left = seq[:mid]
mid = int(n / 2)
left = permute_sequence(seq[:mid][:mid])
right = permute_sequence(seq[:mid][mid + 1:])
ret = [seq[:mid][mid]]
while left or right:
if left:
ret.append(left.pop(0))
if right:
ret.append(right.pop(0))
left = ret
</DeepExtract>
<DeepExtract>
n = len(seq[mid + 1:])
if n <= 1:
right = seq[mid + 1:]
mid = int(n / 2)
left = permute_sequence(seq[mid + 1:][:mid])
right = permute_sequence(seq[mid + 1:][mid + 1:])
ret = [seq[mid + 1:][mid]]
while left or right:
if left:
ret.append(left.pop(0))
if right:
ret.append(right.pop(0))
right = ret
</DeepExtract>
ret = [seq[mid]]
while left or right:
if left:
ret.append(left.pop(0))
if right:
ret.append(right.pop(0))
return ret
<DeepExtract>
n = len(range_f(options.c_begin, options.c_end, options.c_step))
if n <= 1:
c_seq = range_f(options.c_begin, options.c_end, options.c_step)
mid = int(n / 2)
left = permute_sequence(range_f(options.c_begin, options.c_end, options.c_step)[:mid])
right = permute_sequence(range_f(options.c_begin, options.c_end, options.c_step)[mid + 1:])
ret = [range_f(options.c_begin, options.c_end, options.c_step)[mid]]
while left or right:
if left:
ret.append(left.pop(0))
if right:
ret.append(right.pop(0))
c_seq = ret
</DeepExtract>
<DeepExtract>
n = len(range_f(options.g_begin, options.g_end, options.g_step))
if n <= 1:
g_seq = range_f(options.g_begin, options.g_end, options.g_step)
mid = int(n / 2)
left = permute_sequence(range_f(options.g_begin, options.g_end, options.g_step)[:mid])
right = permute_sequence(range_f(options.g_begin, options.g_end, options.g_step)[mid + 1:])
ret = [range_f(options.g_begin, options.g_end, options.g_step)[mid]]
while left or right:
if left:
ret.append(left.pop(0))
if right:
ret.append(right.pop(0))
g_seq = ret
</DeepExtract>
if not options.grid_with_c:
c_seq = [None]
if not options.grid_with_g:
g_seq = [None]
nr_c = float(len(c_seq))
nr_g = float(len(g_seq))
(i, j) = (0, 0)
jobs = []
while i < nr_c or j < nr_g:
if i / nr_c < j / nr_g:
line = []
for k in range(0, j):
line.append((c_seq[i], g_seq[k]))
i = i + 1
jobs.append(line)
else:
line = []
for k in range(0, i):
line.append((c_seq[k], g_seq[j]))
j = j + 1
jobs.append(line)
resumed_jobs = {}
if options.resume_pathname is None:
return (jobs, resumed_jobs)
for line in open(options.resume_pathname, 'r'):
line = line.strip()
rst = re.findall('rate=([0-9.]+)', line)
if not rst:
continue
rate = float(rst[0])
(c, g) = (None, None)
rst = re.findall('log2c=([0-9.-]+)', line)
if rst:
c = float(rst[0])
rst = re.findall('log2g=([0-9.-]+)', line)
if rst:
g = float(rst[0])
resumed_jobs[c, g] = rate
return (jobs, resumed_jobs)
|
def calculate_jobs(options):
def range_f(begin, end, step):
seq = []
while True:
if step > 0 and begin > end:
break
if step < 0 and begin < end:
break
seq.append(begin)
begin = begin + step
return seq
def permute_sequence(seq):
n = len(seq)
if n <= 1:
return seq
mid = int(n / 2)
n = len(seq[:mid])
if n <= 1:
left = seq[:mid]
mid = int(n / 2)
left = permute_sequence(seq[:mid][:mid])
right = permute_sequence(seq[:mid][mid + 1:])
ret = [seq[:mid][mid]]
while left or right:
if left:
ret.append(left.pop(0))
if right:
ret.append(right.pop(0))
left = ret
n = len(seq[mid + 1:])
if n <= 1:
right = seq[mid + 1:]
mid = int(n / 2)
left = permute_sequence(seq[mid + 1:][:mid])
right = permute_sequence(seq[mid + 1:][mid + 1:])
ret = [seq[mid + 1:][mid]]
while left or right:
if left:
ret.append(left.pop(0))
if right:
ret.append(right.pop(0))
right = ret
ret = [seq[mid]]
while left or right:
if left:
ret.append(left.pop(0))
if right:
ret.append(right.pop(0))
return ret
n = len(range_f(options.c_begin, options.c_end, options.c_step))
if n <= 1:
c_seq = range_f(options.c_begin, options.c_end, options.c_step)
mid = int(n / 2)
left = permute_sequence(range_f(options.c_begin, options.c_end, options.c_step)[:mid])
right = permute_sequence(range_f(options.c_begin, options.c_end, options.c_step)[mid + 1:])
ret = [range_f(options.c_begin, options.c_end, options.c_step)[mid]]
while left or right:
if left:
ret.append(left.pop(0))
if right:
ret.append(right.pop(0))
c_seq = ret
n = len(range_f(options.g_begin, options.g_end, options.g_step))
if n <= 1:
g_seq = range_f(options.g_begin, options.g_end, options.g_step)
mid = int(n / 2)
left = permute_sequence(range_f(options.g_begin, options.g_end, options.g_step)[:mid])
right = permute_sequence(range_f(options.g_begin, options.g_end, options.g_step)[mid + 1:])
ret = [range_f(options.g_begin, options.g_end, options.g_step)[mid]]
while left or right:
if left:
ret.append(left.pop(0))
if right:
ret.append(right.pop(0))
g_seq = ret
if not options.grid_with_c:
c_seq = [None]
if not options.grid_with_g:
g_seq = [None]
nr_c = float(len(c_seq))
nr_g = float(len(g_seq))
(i, j) = (0, 0)
jobs = []
while i < nr_c or j < nr_g:
if i / nr_c < j / nr_g:
line = []
for k in range(0, j):
line.append((c_seq[i], g_seq[k]))
i = i + 1
jobs.append(line)
else:
line = []
for k in range(0, i):
line.append((c_seq[k], g_seq[j]))
j = j + 1
jobs.append(line)
resumed_jobs = {}
if options.resume_pathname is None:
return (jobs, resumed_jobs)
for line in open(options.resume_pathname, 'r'):
line = line.strip()
rst = re.findall('rate=([0-9.]+)', line)
if not rst:
continue
rate = float(rst[0])
(c, g) = (None, None)
rst = re.findall('log2c=([0-9.-]+)', line)
if rst:
c = float(rst[0])
rst = re.findall('log2g=([0-9.-]+)', line)
if rst:
g = float(rst[0])
resumed_jobs[c, g] = rate
return (jobs, resumed_jobs)
|
BeautifyBasedOnGAN
|
positive
|
def __init__(self):
np.set_printoptions(precision=2)
self.game_mode = 'no_bias'
self.name = 'Crossgap-v0'
self.sim_times = 0
self.rad2deg = 180.0 / math.pi
self.coommand_last_time = 0.5
self.if_debug = 1
try:
os.mkdir('./save_img/')
except Exception as e:
pass
<DeepExtract>
version = 'V1.0'
desc = 'Add crossgap_env'
print(colorize('===========================================================', 'red'))
print(colorize('This is %s' % self.name, 'white'))
print(colorize('===== Version %s=====' % version, 'yellow'))
print(colorize('===== Desc %s=====' % desc, 'yellow'))
print(colorize('Observation_29 = [current_pos_3, current_spd_3, current_acc_3, current_rot_3, t_1, cross_spd_1, pos_err_3, spd_start_3, acc_start_3, spd_end_3, acc_end_3]', 'blue'))
print(colorize('Action_4 = [roll, pitch, yaw, throttle]', 'magenta'))
print(colorize('===========================================================', 'red'))
</DeepExtract>
self.transform_R_world_to_ned = np.matrix([[1, 0, 0], [0, -1, 0], [0, 0, -1]]).T
self.control_amp = 1
self.Kp = self.control_amp * 6 * np.eye(3, 3)
self.Kv = self.control_amp * 6 * np.eye(3, 3)
self.start_pos = [-10, 10, 10]
self.pos_bias = np.matrix([[self.start_pos[0]], [self.start_pos[1]], [self.start_pos[2]]])
plane_size = -0.0
rad2angle = 180.0 / math.pi
self.narrow_gap = narrow_gap([10 - plane_size, 0, 5 - plane_size], [0 / rad2angle, 0 / rad2angle, -30 / rad2angle])
self.narrow_gap.para_cross_spd = 4.0
self.narrow_gap.cross_ballistic_trajectory(if_draw=0)
self.narrow_gap.approach_trajectory(self.start_pos, [0, 0, 0], [0, 0, 0], if_draw=0)
self.need_replan = 1
max_angle = 30.0
action_low_bound = np.array([-max_angle / self.rad2deg, -max_angle / self.rad2deg, -max_angle / self.rad2deg, 0])
action_up_bound = np.array([max_angle / self.rad2deg, max_angle / self.rad2deg, max_angle / self.rad2deg, 1])
max_time = 6
max_cross_spd = 10
max_obs_pos = 20.0
max_obs_spd = 20.0
max_obs_acc = 20.0
max_obs_euler = 180.0
obs_up_bound = np.array([max_obs_pos, max_obs_pos, max_obs_pos, max_obs_spd, max_obs_spd, max_obs_spd, max_obs_acc, max_obs_acc, max_obs_acc, max_obs_euler, max_obs_euler, max_obs_euler, max_time, max_cross_spd, max_obs_pos, max_obs_pos, max_obs_pos, max_obs_spd, max_obs_spd, max_obs_spd, max_obs_acc, max_obs_acc, max_obs_acc, max_obs_spd, max_obs_spd, max_obs_spd, max_obs_acc, max_obs_acc, max_obs_acc])
obs_low_bound = -obs_up_bound
self.sum_reward = 0.0
self.action_space = spaces.Box(low=action_low_bound, high=action_up_bound, dtype=np.float32)
self.observation_space = spaces.Box(low=obs_low_bound, high=obs_up_bound, dtype=np.float32)
self.observation = np.zeros(29, dtype=np.float32)
self.if_log = 0
self.log_str = ''
self.observation_log = np.zeros([10000, 29], dtype=np.float32)
self.action_log = np.zeros([10000, 4], dtype=np.float32)
self.reward_log = np.zeros([10000, 1], dtype=np.float32)
self.last_acc = np.matrix([0, 0, 0]).T
self.log_idx = 0
self.last_response_time = cv2.getTickCount()
self.last_ctrl = np.matrix([0, 0, 0, 0])
self.current_ctrl = np.matrix([0, 0, 0, 0])
try:
self.log_txt = open('c:/deep_drone_log.txt', 'w')
except:
self.log_txt = open('./deep_drone_log.txt', 'w')
self.first_run = 1
self.trajectory_start_t = 0
<DeepExtract>
self.client = airsim.MultirotorClient(ip='127.0.0.1', timeout_value=5)
self.initialized = 0
try:
self.client.enableApiControl(True)
self.client.armDisarm(True)
self.client.reset()
self.client.enableApiControl(True)
self.client.takeoffAsync(1)
self.client.hoverAsync()
self.initialized = 1
self.air_sim_vehicle_pose = self.client.simGetVehiclePose()
except Exception as e:
print(e)
print(colorize('===== Connect error ====', 'red'))
time.sleep(5)
self.initialized = 0
if self.initialized == 1:
self.get_state()
else:
self.init_airsim_client()
</DeepExtract>
<DeepExtract>
del self.client
self.init_airsim_client()
try:
self.client.enableApiControl(True)
self.client.reset()
if self.first_run == 1:
self.first_run = 0
self.client.enableApiControl(True)
self.client.armDisarm(True)
self.client.enableApiControl(True)
self.client.takeoffAsync(1)
self.client.hoverAsync()
self.get_state()
self.need_replan = 1
except Exception as e:
print(e)
print(colorize('===== Error in reset =====', 'red'))
self.init_airsim_client()
self.reset()
if self.if_log:
self.plot_log()
self.log_idx = 0
self.step_count = 0
self.sum_reward = 0.0
self.sim_times = self.sim_times + 1
self.client.moveByAngleThrottleAsync(0, 0, 0.6, 0, 300000000.0)
time.sleep(1)
return np.array(self.state)
</DeepExtract>
self.if_log = 1
|
def __init__(self):
np.set_printoptions(precision=2)
self.game_mode = 'no_bias'
self.name = 'Crossgap-v0'
self.sim_times = 0
self.rad2deg = 180.0 / math.pi
self.coommand_last_time = 0.5
self.if_debug = 1
try:
os.mkdir('./save_img/')
except Exception as e:
pass
version = 'V1.0'
desc = 'Add crossgap_env'
print(colorize('===========================================================', 'red'))
print(colorize('This is %s' % self.name, 'white'))
print(colorize('===== Version %s=====' % version, 'yellow'))
print(colorize('===== Desc %s=====' % desc, 'yellow'))
print(colorize('Observation_29 = [current_pos_3, current_spd_3, current_acc_3, current_rot_3, t_1, cross_spd_1, pos_err_3, spd_start_3, acc_start_3, spd_end_3, acc_end_3]', 'blue'))
print(colorize('Action_4 = [roll, pitch, yaw, throttle]', 'magenta'))
print(colorize('===========================================================', 'red'))
self.transform_R_world_to_ned = np.matrix([[1, 0, 0], [0, -1, 0], [0, 0, -1]]).T
self.control_amp = 1
self.Kp = self.control_amp * 6 * np.eye(3, 3)
self.Kv = self.control_amp * 6 * np.eye(3, 3)
self.start_pos = [-10, 10, 10]
self.pos_bias = np.matrix([[self.start_pos[0]], [self.start_pos[1]], [self.start_pos[2]]])
plane_size = -0.0
rad2angle = 180.0 / math.pi
self.narrow_gap = narrow_gap([10 - plane_size, 0, 5 - plane_size], [0 / rad2angle, 0 / rad2angle, -30 / rad2angle])
self.narrow_gap.para_cross_spd = 4.0
self.narrow_gap.cross_ballistic_trajectory(if_draw=0)
self.narrow_gap.approach_trajectory(self.start_pos, [0, 0, 0], [0, 0, 0], if_draw=0)
self.need_replan = 1
max_angle = 30.0
action_low_bound = np.array([-max_angle / self.rad2deg, -max_angle / self.rad2deg, -max_angle / self.rad2deg, 0])
action_up_bound = np.array([max_angle / self.rad2deg, max_angle / self.rad2deg, max_angle / self.rad2deg, 1])
max_time = 6
max_cross_spd = 10
max_obs_pos = 20.0
max_obs_spd = 20.0
max_obs_acc = 20.0
max_obs_euler = 180.0
obs_up_bound = np.array([max_obs_pos, max_obs_pos, max_obs_pos, max_obs_spd, max_obs_spd, max_obs_spd, max_obs_acc, max_obs_acc, max_obs_acc, max_obs_euler, max_obs_euler, max_obs_euler, max_time, max_cross_spd, max_obs_pos, max_obs_pos, max_obs_pos, max_obs_spd, max_obs_spd, max_obs_spd, max_obs_acc, max_obs_acc, max_obs_acc, max_obs_spd, max_obs_spd, max_obs_spd, max_obs_acc, max_obs_acc, max_obs_acc])
obs_low_bound = -obs_up_bound
self.sum_reward = 0.0
self.action_space = spaces.Box(low=action_low_bound, high=action_up_bound, dtype=np.float32)
self.observation_space = spaces.Box(low=obs_low_bound, high=obs_up_bound, dtype=np.float32)
self.observation = np.zeros(29, dtype=np.float32)
self.if_log = 0
self.log_str = ''
self.observation_log = np.zeros([10000, 29], dtype=np.float32)
self.action_log = np.zeros([10000, 4], dtype=np.float32)
self.reward_log = np.zeros([10000, 1], dtype=np.float32)
self.last_acc = np.matrix([0, 0, 0]).T
self.log_idx = 0
self.last_response_time = cv2.getTickCount()
self.last_ctrl = np.matrix([0, 0, 0, 0])
self.current_ctrl = np.matrix([0, 0, 0, 0])
try:
self.log_txt = open('c:/deep_drone_log.txt', 'w')
except:
self.log_txt = open('./deep_drone_log.txt', 'w')
self.first_run = 1
self.trajectory_start_t = 0
self.client = airsim.MultirotorClient(ip='127.0.0.1', timeout_value=5)
self.initialized = 0
try:
self.client.enableApiControl(True)
self.client.armDisarm(True)
self.client.reset()
self.client.enableApiControl(True)
self.client.takeoffAsync(1)
self.client.hoverAsync()
self.initialized = 1
self.air_sim_vehicle_pose = self.client.simGetVehiclePose()
except Exception as e:
print(e)
print(colorize('===== Connect error ====', 'red'))
time.sleep(5)
self.initialized = 0
if self.initialized == 1:
self.get_state()
else:
self.init_airsim_client()
del self.client
self.init_airsim_client()
try:
self.client.enableApiControl(True)
self.client.reset()
if self.first_run == 1:
self.first_run = 0
self.client.enableApiControl(True)
self.client.armDisarm(True)
self.client.enableApiControl(True)
self.client.takeoffAsync(1)
self.client.hoverAsync()
self.get_state()
self.need_replan = 1
except Exception as e:
print(e)
print(colorize('===== Error in reset =====', 'red'))
self.init_airsim_client()
self.reset()
if self.if_log:
self.plot_log()
self.log_idx = 0
self.step_count = 0
self.sum_reward = 0.0
self.sim_times = self.sim_times + 1
self.client.moveByAngleThrottleAsync(0, 0, 0.6, 0, 300000000.0)
time.sleep(1)
return np.array(self.state)
self.if_log = 1
|
crossgap_il_rl
|
positive
|
@transaction.commit_manually
def handle(self, csvpath, *args, **options):
loader = ContributionLoader(source=options.get('source'), description='load from denormalized CSVs', imported_by='loadcontributions.py (%s)' % os.getenv('LOGNAME', 'unknown'))
try:
input_iterator = VerifiedCSVSource(open(os.path.abspath(csvpath)), FIELDNAMES, skiprows=1 + int(options['skip']))
output_func = chain_filters(LoaderEmitter(loader), Every(self.COMMIT_FREQUENCY, progress_tick), Every(self.COMMIT_FREQUENCY, lambda i: reset_queries()))
<DeepExtract>
record_processor = chain_filters(CSVFieldVerifier(), FieldRemover('id'), FieldRemover('import_reference'), FieldAdder('import_reference', loader.import_session), FieldModifier('amount', lambda a: Decimal(str(a))), FieldModifier(['cycle'], parse_int), FieldModifier(['date'], parse_date), BooleanFilter('is_amendment'), UnicodeFilter(), StringLengthFilter(Contribution))
</DeepExtract>
load_data(input_iterator, record_processor, output_func)
transaction.commit()
except KeyboardInterrupt:
traceback.print_exception(*sys.exc_info())
transaction.rollback()
raise
except:
traceback.print_exception(*sys.exc_info())
transaction.rollback()
raise
finally:
sys.stdout.flush()
sys.stderr.flush()
|
@transaction.commit_manually
def handle(self, csvpath, *args, **options):
loader = ContributionLoader(source=options.get('source'), description='load from denormalized CSVs', imported_by='loadcontributions.py (%s)' % os.getenv('LOGNAME', 'unknown'))
try:
input_iterator = VerifiedCSVSource(open(os.path.abspath(csvpath)), FIELDNAMES, skiprows=1 + int(options['skip']))
output_func = chain_filters(LoaderEmitter(loader), Every(self.COMMIT_FREQUENCY, progress_tick), Every(self.COMMIT_FREQUENCY, lambda i: reset_queries()))
record_processor = chain_filters(CSVFieldVerifier(), FieldRemover('id'), FieldRemover('import_reference'), FieldAdder('import_reference', loader.import_session), FieldModifier('amount', lambda a: Decimal(str(a))), FieldModifier(['cycle'], parse_int), FieldModifier(['date'], parse_date), BooleanFilter('is_amendment'), UnicodeFilter(), StringLengthFilter(Contribution))
load_data(input_iterator, record_processor, output_func)
transaction.commit()
except KeyboardInterrupt:
traceback.print_exception(*sys.exc_info())
transaction.rollback()
raise
except:
traceback.print_exception(*sys.exc_info())
transaction.rollback()
raise
finally:
sys.stdout.flush()
sys.stderr.flush()
|
datacommons
|
positive
|
def test_execute_bp_cd_missing(tmp_path: pathlib.Path) -> None:
"""Test execute bp cd missing."""
<DeepExtract>
_test_init(tmp_path)
(config, section) = _get_config_section(tmp_path, 'test-csv-to-oscal-cd-bp.config')
</DeepExtract>
section['component-definition'] = 'tests/data/csv/component-definitions/foobar/component-definition.json'
tgt = csv_to_oscal_cd.CsvToOscalComponentDefinition(section)
retval = tgt.execute()
assert retval == TaskOutcome.FAILURE
|
def test_execute_bp_cd_missing(tmp_path: pathlib.Path) -> None:
"""Test execute bp cd missing."""
_test_init(tmp_path)
(config, section) = _get_config_section(tmp_path, 'test-csv-to-oscal-cd-bp.config')
section['component-definition'] = 'tests/data/csv/component-definitions/foobar/component-definition.json'
tgt = csv_to_oscal_cd.CsvToOscalComponentDefinition(section)
retval = tgt.execute()
assert retval == TaskOutcome.FAILURE
|
compliance-trestle
|
positive
|
def get_final_preds(config, batch_heatmaps, center, scale):
<DeepExtract>
assert isinstance(batch_heatmaps, np.ndarray), 'batch_heatmaps should be numpy.ndarray'
assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'
batch_size = batch_heatmaps.shape[0]
num_joints = batch_heatmaps.shape[1]
width = batch_heatmaps.shape[3]
heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))
idx = np.argmax(heatmaps_reshaped, 2)
maxvals = np.amax(heatmaps_reshaped, 2)
maxvals = maxvals.reshape((batch_size, num_joints, 1))
idx = idx.reshape((batch_size, num_joints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = preds[:, :, 0] % width
preds[:, :, 1] = np.floor(preds[:, :, 1] / width)
pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
pred_mask = pred_mask.astype(np.float32)
preds *= pred_mask
(coords, maxvals) = (preds, maxvals)
</DeepExtract>
heatmap_height = batch_heatmaps.shape[2]
heatmap_width = batch_heatmaps.shape[3]
if config.TEST.POST_PROCESS:
for n in range(coords.shape[0]):
for p in range(coords.shape[1]):
hm = batch_heatmaps[n][p]
px = int(math.floor(coords[n][p][0] + 0.5))
py = int(math.floor(coords[n][p][1] + 0.5))
if 1 < px < heatmap_width - 1 and 1 < py < heatmap_height - 1:
diff = np.array([hm[py][px + 1] - hm[py][px - 1], hm[py + 1][px] - hm[py - 1][px]])
coords[n][p] += np.sign(diff) * 0.25
preds = coords.copy()
for i in range(coords.shape[0]):
preds[i] = transform_preds(coords[i], center[i], scale[i], [heatmap_width, heatmap_height])
return (preds, maxvals)
|
def get_final_preds(config, batch_heatmaps, center, scale):
assert isinstance(batch_heatmaps, np.ndarray), 'batch_heatmaps should be numpy.ndarray'
assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'
batch_size = batch_heatmaps.shape[0]
num_joints = batch_heatmaps.shape[1]
width = batch_heatmaps.shape[3]
heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))
idx = np.argmax(heatmaps_reshaped, 2)
maxvals = np.amax(heatmaps_reshaped, 2)
maxvals = maxvals.reshape((batch_size, num_joints, 1))
idx = idx.reshape((batch_size, num_joints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = preds[:, :, 0] % width
preds[:, :, 1] = np.floor(preds[:, :, 1] / width)
pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
pred_mask = pred_mask.astype(np.float32)
preds *= pred_mask
(coords, maxvals) = (preds, maxvals)
heatmap_height = batch_heatmaps.shape[2]
heatmap_width = batch_heatmaps.shape[3]
if config.TEST.POST_PROCESS:
for n in range(coords.shape[0]):
for p in range(coords.shape[1]):
hm = batch_heatmaps[n][p]
px = int(math.floor(coords[n][p][0] + 0.5))
py = int(math.floor(coords[n][p][1] + 0.5))
if 1 < px < heatmap_width - 1 and 1 < py < heatmap_height - 1:
diff = np.array([hm[py][px + 1] - hm[py][px - 1], hm[py + 1][px] - hm[py - 1][px]])
coords[n][p] += np.sign(diff) * 0.25
preds = coords.copy()
for i in range(coords.shape[0]):
preds[i] = transform_preds(coords[i], center[i], scale[i], [heatmap_width, heatmap_height])
return (preds, maxvals)
|
AICity
|
positive
|
def test_on_message_invokes_on_dispatch_when_reply_to_not_set(self):
ret_val = 'fooo'
<DeepExtract>
with Connection('memory://') as conn:
ch = conn.channel()
body = {'method': 'foo', 'args': {'bar': 'foo_arg'}, 'class': A.__class__.__name__}
data = ch.prepare_message(body)
data['properties']['reply_to'] = reply_to
delivery_tag = delivery_tag or uuid()
data['properties']['delivery_tag'] = delivery_tag
(body, message) = (body, ch.message_to_python(data))
</DeepExtract>
a = A()
a.reply = Mock()
a._DISPATCH = Mock(return_value=ret_val)
result = a._on_message(body, message)
    a._DISPATCH.assert_called_once_with(message, body)
self.assertIsNone(result)
self.assertEqual(a.reply.call_count, 0)
|
def test_on_message_invokes_on_dispatch_when_reply_to_not_set(self):
ret_val = 'fooo'
with Connection('memory://') as conn:
ch = conn.channel()
body = {'method': 'foo', 'args': {'bar': 'foo_arg'}, 'class': A.__class__.__name__}
data = ch.prepare_message(body)
data['properties']['reply_to'] = reply_to
delivery_tag = delivery_tag or uuid()
data['properties']['delivery_tag'] = delivery_tag
(body, message) = (body, ch.message_to_python(data))
a = A()
a.reply = Mock()
a._DISPATCH = Mock(return_value=ret_val)
result = a._on_message(body, message)
    a._DISPATCH.assert_called_once_with(message, body)
self.assertIsNone(result)
self.assertEqual(a.reply.call_count, 0)
|
cell
|
positive
|
def free(self, block):
assert os.getpid() == self._lastpid
if not self._lock.acquire(False):
self._pending_free_blocks.append(block)
else:
try:
<DeepExtract>
while True:
try:
block = self._pending_free_blocks.pop()
except IndexError:
break
self._allocated_blocks.remove(block)
self._free(block)
</DeepExtract>
self._allocated_blocks.remove(block)
<DeepExtract>
(arena, start, stop) = block
try:
prev_block = self._stop_to_block[arena, start]
except KeyError:
pass
else:
(start, _) = self._absorb(prev_block)
try:
next_block = self._start_to_block[arena, stop]
except KeyError:
pass
else:
(_, stop) = self._absorb(next_block)
block = (arena, start, stop)
length = stop - start
try:
self._len_to_seq[length].append(block)
except KeyError:
self._len_to_seq[length] = [block]
bisect.insort(self._lengths, length)
self._start_to_block[arena, start] = block
self._stop_to_block[arena, stop] = block
</DeepExtract>
finally:
self._lock.release()
|
def free(self, block):
assert os.getpid() == self._lastpid
if not self._lock.acquire(False):
self._pending_free_blocks.append(block)
else:
try:
while True:
try:
block = self._pending_free_blocks.pop()
except IndexError:
break
self._allocated_blocks.remove(block)
self._free(block)
self._allocated_blocks.remove(block)
(arena, start, stop) = block
try:
prev_block = self._stop_to_block[arena, start]
except KeyError:
pass
else:
(start, _) = self._absorb(prev_block)
try:
next_block = self._start_to_block[arena, stop]
except KeyError:
pass
else:
(_, stop) = self._absorb(next_block)
block = (arena, start, stop)
length = stop - start
try:
self._len_to_seq[length].append(block)
except KeyError:
self._len_to_seq[length] = [block]
bisect.insort(self._lengths, length)
self._start_to_block[arena, start] = block
self._stop_to_block[arena, stop] = block
finally:
self._lock.release()
|
3DFasterRCNN_LungNoduleDetector
|
positive
|