| before (string, lengths 0-955k) | after (string, lengths 0-877k) | repo (string, lengths 1-74) | type (string, 1 class) |
|---|---|---|---|
def nearest_neighbor_features_per_object(reference_embeddings, query_embeddings, reference_labels, k_nearest_neighbors, gt_ids=None, n_chunks=100):
"""Calculates the distance to the nearest neighbor per object.
For every pixel of query_embeddings calculate the distance to the
nearest neighbor in the (possibly subsampled) reference_embeddings per object.
Args:
reference_embeddings: Tensor of shape [height, width, embedding_dim],
the embedding vectors for the reference frame.
query_embeddings: Tensor of shape [n_query_images, height, width,
embedding_dim], the embedding vectors for the query frames.
reference_labels: Tensor of shape [height, width, 1], the class labels of
the reference frame.
max_neighbors_per_object: Integer, the maximum number of candidates
for the nearest neighbor query per object after subsampling,
or 0 for no subsampling.
k_nearest_neighbors: Integer, the number of nearest neighbors to use.
gt_ids: Int tensor of shape [n_objs] of the sorted unique ground truth
ids in the first frame. If None, it will be derived from
reference_labels.
n_chunks: Integer, the number of chunks to use to save memory
(set to 1 for no chunking).
Returns:
nn_features: A float32 tensor of nearest neighbor features of shape
[n_query_images, height, width, n_objects, feature_dim].
gt_ids: An int32 tensor of the unique sorted object ids present
in the reference labels.
"""
assert reference_embeddings.size()[:2] == reference_labels.size()[:2]
(h, w, _) = query_embeddings.size()
reference_labels_flat = reference_labels.view(-1)
if gt_ids is None:
ref_obj_ids = torch.unique(reference_labels_flat)[-1]
ref_obj_ids = np.arange(0, ref_obj_ids.cpu() + 1)
gt_ids = torch.from_numpy(ref_obj_ids)
gt_ids = gt_ids.int()
if torch.cuda.is_available():
gt_ids = gt_ids.cuda()
else:
gt_ids = gt_ids.cpu()
gt_ids = np.arange(0, gt_ids + 1)
gt_ids = torch.from_numpy(gt_ids)
gt_ids = gt_ids.int()
if torch.cuda.is_available():
gt_ids = gt_ids.cuda()
embedding_dim = query_embeddings.size()[-1]
query_embeddings_flat = query_embeddings.view(-1, embedding_dim)
reference_embeddings_flat = reference_embeddings.view(-1, embedding_dim)
<DeepExtract>
chunk_size = int(np.ceil(float(query_embeddings_flat.size()[0]) / n_chunks))
wrong_label_mask = reference_labels_flat != torch.unsqueeze(gt_ids, 1)
all_features = []
for n in range(n_chunks):
if n_chunks == 1:
query_embeddings_flat_chunk = query_embeddings_flat
else:
chunk_start = n * chunk_size
chunk_end = (n + 1) * chunk_size
query_embeddings_flat_chunk = query_embeddings_flat[chunk_start:chunk_end]
features = _nn_features_per_object_for_chunk(reference_embeddings_flat, query_embeddings_flat_chunk, wrong_label_mask, k_nearest_neighbors)
all_features.append(features)
if n_chunks == 1:
nn_features = all_features[0]
else:
nn_features = torch.cat(all_features, dim=0)
nn_features = nn_features
</DeepExtract>
nn_features_dim = nn_features.size()[-1]
nn_features_reshape = nn_features.view(1, h, w, gt_ids.size(0), nn_features_dim)
return (nn_features_reshape, gt_ids)
|
def nearest_neighbor_features_per_object(reference_embeddings, query_embeddings, reference_labels, k_nearest_neighbors, gt_ids=None, n_chunks=100):
"""Calculates the distance to the nearest neighbor per object.
For every pixel of query_embeddings calculate the distance to the
nearest neighbor in the (possibly subsampled) reference_embeddings per object.
Args:
reference_embeddings: Tensor of shape [height, width, embedding_dim],
the embedding vectors for the reference frame.
query_embeddings: Tensor of shape [n_query_images, height, width,
embedding_dim], the embedding vectors for the query frames.
reference_labels: Tensor of shape [height, width, 1], the class labels of
the reference frame.
max_neighbors_per_object: Integer, the maximum number of candidates
for the nearest neighbor query per object after subsampling,
or 0 for no subsampling.
k_nearest_neighbors: Integer, the number of nearest neighbors to use.
gt_ids: Int tensor of shape [n_objs] of the sorted unique ground truth
ids in the first frame. If None, it will be derived from
reference_labels.
n_chunks: Integer, the number of chunks to use to save memory
(set to 1 for no chunking).
Returns:
nn_features: A float32 tensor of nearest neighbor features of shape
[n_query_images, height, width, n_objects, feature_dim].
gt_ids: An int32 tensor of the unique sorted object ids present
in the reference labels.
"""
assert reference_embeddings.size()[:2] == reference_labels.size()[:2]
(h, w, _) = query_embeddings.size()
reference_labels_flat = reference_labels.view(-1)
if gt_ids is None:
ref_obj_ids = torch.unique(reference_labels_flat)[-1]
ref_obj_ids = np.arange(0, ref_obj_ids.cpu() + 1)
gt_ids = torch.from_numpy(ref_obj_ids)
gt_ids = gt_ids.int()
if torch.cuda.is_available():
gt_ids = gt_ids.cuda()
else:
gt_ids = gt_ids.cpu()
gt_ids = np.arange(0, gt_ids + 1)
gt_ids = torch.from_numpy(gt_ids)
gt_ids = gt_ids.int()
if torch.cuda.is_available():
gt_ids = gt_ids.cuda()
embedding_dim = query_embeddings.size()[-1]
query_embeddings_flat = query_embeddings.view(-1, embedding_dim)
reference_embeddings_flat = reference_embeddings.view(-1, embedding_dim)
chunk_size = int(np.ceil(float(query_embeddings_flat.size()[0]) / n_chunks))
wrong_label_mask = reference_labels_flat != torch.unsqueeze(gt_ids, 1)
all_features = []
for n in range(n_chunks):
if n_chunks == 1:
query_embeddings_flat_chunk = query_embeddings_flat
else:
chunk_start = n * chunk_size
chunk_end = (n + 1) * chunk_size
query_embeddings_flat_chunk = query_embeddings_flat[chunk_start:chunk_end]
features = _nn_features_per_object_for_chunk(reference_embeddings_flat, query_embeddings_flat_chunk, wrong_label_mask, k_nearest_neighbors)
all_features.append(features)
if n_chunks == 1:
nn_features = all_features[0]
else:
nn_features = torch.cat(all_features, dim=0)
nn_features = nn_features
nn_features_dim = nn_features.size()[-1]
nn_features_reshape = nn_features.view(1, h, w, gt_ids.size(0), nn_features_dim)
return (nn_features_reshape, gt_ids)
|
CVPR2020_MANet
|
positive
|
@get('/test')
def test_method():
success = True
response = {'cleanup': {'success': False, 'data': []}, 'put': {'success': False, 'data': ['skipped']}, 'describe': {'success': False, 'data': ['skipped']}, 'list': {'success': False, 'data': ['skipped']}, 'idempotency': {'success': False, 'data': ['skipped']}, 'delete': {'success': False, 'data': ['skipped']}}
try:
response['cleanup']['success'] = True
<DeepExtract>
items = method_wrapper(get_list)
</DeepExtract>
assert items['success'], items['error']
for item in items['response']:
response['cleanup']['data'].append('cleaning up item %s' % item)
<DeepExtract>
return method_wrapper(delete_item, item_id=item)
</DeepExtract>
if len(items) == 0:
response['cleanup']['data'].append('No items to cleanup')
else:
response['cleanup']['data'].append('Successfully cleaned up all items')
except Exception as e:
traceback.print_exc()
success = False
response['cleanup']['success'] = False
response['cleanup']['data'].append('%s %s' % (str(e.__class__), str(e)))
if success:
try:
<DeepExtract>
if not '{"content": "test_content"}':
'{"content": "test_content"}' = request.body.read().decode('utf-8')
if not 'application/json':
'application/json' = request.content_type
response['put'] = method_wrapper(put_item, item_id='test', data='{"content": "test_content"}', content_type='application/json')
</DeepExtract>
assert response['put']['success'], response['put']['error']
response['put']['data'] = ['Successfully put item']
except Exception as e:
traceback.print_exc()
success = False
response['put']['success'] = False
response['put']['data'] = ['%s %s' % (str(e.__class__), str(e))]
if success:
try:
<DeepExtract>
response['describe'] = method_wrapper(get_item, item_id='test')
</DeepExtract>
assert response['describe']['success'], response['describe']['error']
expected = {'item_id': 'test', 'content': 'test_content'}
assert response['describe']['response'] == expected, 'unexpected response object, expecting: "%s" got: "%s" ' % (json.dumps(expected), json.dumps(response['describe']['response']))
response['describe']['data'] = ['Successfully described item']
except Exception as e:
traceback.print_exc()
success = False
response['describe']['success'] = False
response['describe']['data'] = ['%s %s' % (str(e.__class__), str(e))]
if success:
try:
<DeepExtract>
response['list'] = method_wrapper(get_list)
</DeepExtract>
assert response['list']['success'], response['list']['error']
expected = ['test']
assert response['list']['response'] == expected, 'unexpected response object, expecting: "%s" got: "%s" ' % (json.dumps(expected), json.dumps(response['list']['response']))
response['list']['data'] = ['Successfully listed items']
except Exception as e:
traceback.print_exc()
success = False
response['list']['success'] = False
response['list']['data'] = ['%s %s' % (str(e.__class__), str(e))]
if success:
try:
response['idempotency']['success'] = True
response['idempotency']['data'] = []
<DeepExtract>
if not '{"content": "test_content_idempotent"}':
'{"content": "test_content_idempotent"}' = request.body.read().decode('utf-8')
if not 'application/json':
'application/json' = request.content_type
return method_wrapper(put_item, item_id='test', data='{"content": "test_content_idempotent"}', content_type='application/json')
</DeepExtract>
response['idempotency']['data'].append('Put an additional item with duplicate item_id')
<DeepExtract>
describe = method_wrapper(get_item, item_id='test')
</DeepExtract>
assert describe['success'], describe['error']
expected = {'item_id': 'test', 'content': 'test_content_idempotent'}
assert describe['response'] == expected, 'unexpected describe response object, expecting: "%s" got: "%s" ' % (json.dumps(expected), json.dumps(describe['response']))
response['idempotency']['data'].append('Verified describe has updated content')
<DeepExtract>
list_items = method_wrapper(get_list)
</DeepExtract>
assert list_items['success'], list_items['error']
expected = ['test']
assert list_items['response'] == expected, 'unexpected list response object, expecting: "%s" got: "%s" ' % (json.dumps(expected), json.dumps(list_items['response']))
response['idempotency']['data'].append('Verified list does not contain duplicates')
response['idempotency']['data'].append('Successfully tested idempotency')
except Exception as e:
traceback.print_exc()
success = False
response['idempotency']['success'] = False
response['idempotency']['data'].append('%s %s' % (str(e.__class__), str(e)))
if success:
try:
<DeepExtract>
response['delete'] = method_wrapper(delete_item, item_id='test')
</DeepExtract>
assert response['delete']['success'], response['delete']['error']
response['delete']['data'] = ['Successfully deleted item']
except Exception as e:
traceback.print_exc()
success = False
response['delete']['success'] = False
response['delete']['data'] = ['%s %s' % (str(e.__class__), str(e))]
return {'success': success, 'data': response}
|
@get('/test')
def test_method():
success = True
response = {'cleanup': {'success': False, 'data': []}, 'put': {'success': False, 'data': ['skipped']}, 'describe': {'success': False, 'data': ['skipped']}, 'list': {'success': False, 'data': ['skipped']}, 'idempotency': {'success': False, 'data': ['skipped']}, 'delete': {'success': False, 'data': ['skipped']}}
try:
response['cleanup']['success'] = True
items = method_wrapper(get_list)
assert items['success'], items['error']
for item in items['response']:
response['cleanup']['data'].append('cleaning up item %s' % item)
return method_wrapper(delete_item, item_id=item)
if len(items) == 0:
response['cleanup']['data'].append('No items to cleanup')
else:
response['cleanup']['data'].append('Successfully cleaned up all items')
except Exception as e:
traceback.print_exc()
success = False
response['cleanup']['success'] = False
response['cleanup']['data'].append('%s %s' % (str(e.__class__), str(e)))
if success:
try:
if not '{"content": "test_content"}':
'{"content": "test_content"}' = request.body.read().decode('utf-8')
if not 'application/json':
'application/json' = request.content_type
response['put'] = method_wrapper(put_item, item_id='test', data='{"content": "test_content"}', content_type='application/json')
assert response['put']['success'], response['put']['error']
response['put']['data'] = ['Successfully put item']
except Exception as e:
traceback.print_exc()
success = False
response['put']['success'] = False
response['put']['data'] = ['%s %s' % (str(e.__class__), str(e))]
if success:
try:
response['describe'] = method_wrapper(get_item, item_id='test')
assert response['describe']['success'], response['describe']['error']
expected = {'item_id': 'test', 'content': 'test_content'}
assert response['describe']['response'] == expected, 'unexpected response object, expecting: "%s" got: "%s" ' % (json.dumps(expected), json.dumps(response['describe']['response']))
response['describe']['data'] = ['Successfully described item']
except Exception as e:
traceback.print_exc()
success = False
response['describe']['success'] = False
response['describe']['data'] = ['%s %s' % (str(e.__class__), str(e))]
if success:
try:
response['list'] = method_wrapper(get_list)
assert response['list']['success'], response['list']['error']
expected = ['test']
assert response['list']['response'] == expected, 'unexpected response object, expecting: "%s" got: "%s" ' % (json.dumps(expected), json.dumps(response['list']['response']))
response['list']['data'] = ['Successfully listed items']
except Exception as e:
traceback.print_exc()
success = False
response['list']['success'] = False
response['list']['data'] = ['%s %s' % (str(e.__class__), str(e))]
if success:
try:
response['idempotency']['success'] = True
response['idempotency']['data'] = []
if not '{"content": "test_content_idempotent"}':
'{"content": "test_content_idempotent"}' = request.body.read().decode('utf-8')
if not 'application/json':
'application/json' = request.content_type
return method_wrapper(put_item, item_id='test', data='{"content": "test_content_idempotent"}', content_type='application/json')
response['idempotency']['data'].append('Put an additional item with duplicate item_id')
describe = method_wrapper(get_item, item_id='test')
assert describe['success'], describe['error']
expected = {'item_id': 'test', 'content': 'test_content_idempotent'}
assert describe['response'] == expected, 'unexpected describe response object, expecting: "%s" got: "%s" ' % (json.dumps(expected), json.dumps(describe['response']))
response['idempotency']['data'].append('Verified describe has updated content')
list_items = method_wrapper(get_list)
assert list_items['success'], list_items['error']
expected = ['test']
assert list_items['response'] == expected, 'unexpected list response object, expecting: "%s" got: "%s" ' % (json.dumps(expected), json.dumps(list_items['response']))
response['idempotency']['data'].append('Verified list does not contain duplicates')
response['idempotency']['data'].append('Successfully tested idempotency')
except Exception as e:
traceback.print_exc()
success = False
response['idempotency']['success'] = False
response['idempotency']['data'].append('%s %s' % (str(e.__class__), str(e)))
if success:
try:
response['delete'] = method_wrapper(delete_item, item_id='test')
assert response['delete']['success'], response['delete']['error']
response['delete']['data'] = ['Successfully deleted item']
except Exception as e:
traceback.print_exc()
success = False
response['delete']['success'] = False
response['delete']['data'] = ['%s %s' % (str(e.__class__), str(e))]
return {'success': success, 'data': response}
|
aws-servicebroker
|
positive
|
def test_constructor(self):
"""
Tests assigning an image field through the model's constructor.
"""
p = self.PersonModel(name='Joe', mugshot=self.file1)
<DeepExtract>
field = getattr(p, field_name)
if 4 is None and 8 is None:
with self.assertRaises(ValueError):
getattr(field, 'width')
with self.assertRaises(ValueError):
getattr(field, 'height')
else:
self.assertEqual(field.width, 4)
self.assertEqual(field.height, 8)
width_field_name = field_name + '_width'
if hasattr(p, width_field_name):
self.assertEqual(getattr(p, width_field_name), 4)
height_field_name = field_name + '_height'
if hasattr(p, height_field_name):
self.assertEqual(getattr(p, height_field_name), 8)
</DeepExtract>
p.save()
<DeepExtract>
field = getattr(p, field_name)
if 4 is None and 8 is None:
with self.assertRaises(ValueError):
getattr(field, 'width')
with self.assertRaises(ValueError):
getattr(field, 'height')
else:
self.assertEqual(field.width, 4)
self.assertEqual(field.height, 8)
width_field_name = field_name + '_width'
if hasattr(p, width_field_name):
self.assertEqual(getattr(p, width_field_name), 4)
height_field_name = field_name + '_height'
if hasattr(p, height_field_name):
self.assertEqual(getattr(p, height_field_name), 8)
</DeepExtract>
|
def test_constructor(self):
"""
Tests assigning an image field through the model's constructor.
"""
p = self.PersonModel(name='Joe', mugshot=self.file1)
field = getattr(p, field_name)
if 4 is None and 8 is None:
with self.assertRaises(ValueError):
getattr(field, 'width')
with self.assertRaises(ValueError):
getattr(field, 'height')
else:
self.assertEqual(field.width, 4)
self.assertEqual(field.height, 8)
width_field_name = field_name + '_width'
if hasattr(p, width_field_name):
self.assertEqual(getattr(p, width_field_name), 4)
height_field_name = field_name + '_height'
if hasattr(p, height_field_name):
self.assertEqual(getattr(p, height_field_name), 8)
p.save()
field = getattr(p, field_name)
if 4 is None and 8 is None:
with self.assertRaises(ValueError):
getattr(field, 'width')
with self.assertRaises(ValueError):
getattr(field, 'height')
else:
self.assertEqual(field.width, 4)
self.assertEqual(field.height, 8)
width_field_name = field_name + '_width'
if hasattr(p, width_field_name):
self.assertEqual(getattr(p, width_field_name), 4)
height_field_name = field_name + '_height'
if hasattr(p, height_field_name):
self.assertEqual(getattr(p, height_field_name), 8)
|
django-firebird
|
positive
|
def _filter(self, input, target_genomes):
"""Add adapters to input probes.
"""
input = list(input)
logger.info('Computing adapter votes across all target genomes')
<DeepExtract>
logger.info('Building map from k-mers to probes')
kmer_probe_map = probe.SharedKmerProbeMap.construct(probe.construct_kmer_probe_map_to_find_probe_covers(input, self.mismatches, self.lcf_thres, min_k=self.kmer_probe_map_k, k=self.kmer_probe_map_k))
probe.open_probe_finding_pool(kmer_probe_map, self.cover_range_fn)
def iter_all_seqs():
for genomes_from_group in target_genomes:
for g in genomes_from_group:
for seq in g.seqs:
yield seq
cumulative_votes = [(0, 0) for _ in range(len(input))]
for sequence in iter_all_seqs():
votes = self._votes_in_sequence(input, sequence)
votes_flipped = self._flip_AB_votes(votes)
cumulative_votes_with_nonflipped = self._sum_votes_per_probe(cumulative_votes, votes)
sum_nonflipped = self._sum_plurality_vote_across_probes(cumulative_votes_with_nonflipped)
cumulative_votes_with_flipped = self._sum_votes_per_probe(cumulative_votes, votes_flipped)
sum_flipped = self._sum_plurality_vote_across_probes(cumulative_votes_with_flipped)
if sum_flipped > sum_nonflipped:
cumulative_votes = cumulative_votes_with_flipped
else:
cumulative_votes = cumulative_votes_with_nonflipped
probe.close_probe_finding_pool()
votes = cumulative_votes
</DeepExtract>
logger.info('Adding adapters to probes based on votes')
input_with_adapters = []
for i in range(len(input)):
p = input[i]
vote = votes[i]
assert len(vote) == 2
if vote[0] > vote[1]:
new_p = p.with_prepended_str(self.adapter_a_5end).with_appended_str(self.adapter_a_3end)
else:
new_p = p.with_prepended_str(self.adapter_b_5end).with_appended_str(self.adapter_b_3end)
input_with_adapters += [new_p]
return input_with_adapters
|
def _filter(self, input, target_genomes):
"""Add adapters to input probes.
"""
input = list(input)
logger.info('Computing adapter votes across all target genomes')
logger.info('Building map from k-mers to probes')
kmer_probe_map = probe.SharedKmerProbeMap.construct(probe.construct_kmer_probe_map_to_find_probe_covers(input, self.mismatches, self.lcf_thres, min_k=self.kmer_probe_map_k, k=self.kmer_probe_map_k))
probe.open_probe_finding_pool(kmer_probe_map, self.cover_range_fn)
def iter_all_seqs():
for genomes_from_group in target_genomes:
for g in genomes_from_group:
for seq in g.seqs:
yield seq
cumulative_votes = [(0, 0) for _ in range(len(input))]
for sequence in iter_all_seqs():
votes = self._votes_in_sequence(input, sequence)
votes_flipped = self._flip_AB_votes(votes)
cumulative_votes_with_nonflipped = self._sum_votes_per_probe(cumulative_votes, votes)
sum_nonflipped = self._sum_plurality_vote_across_probes(cumulative_votes_with_nonflipped)
cumulative_votes_with_flipped = self._sum_votes_per_probe(cumulative_votes, votes_flipped)
sum_flipped = self._sum_plurality_vote_across_probes(cumulative_votes_with_flipped)
if sum_flipped > sum_nonflipped:
cumulative_votes = cumulative_votes_with_flipped
else:
cumulative_votes = cumulative_votes_with_nonflipped
probe.close_probe_finding_pool()
votes = cumulative_votes
logger.info('Adding adapters to probes based on votes')
input_with_adapters = []
for i in range(len(input)):
p = input[i]
vote = votes[i]
assert len(vote) == 2
if vote[0] > vote[1]:
new_p = p.with_prepended_str(self.adapter_a_5end).with_appended_str(self.adapter_a_3end)
else:
new_p = p.with_prepended_str(self.adapter_b_5end).with_appended_str(self.adapter_b_3end)
input_with_adapters += [new_p]
return input_with_adapters
|
catch
|
positive
|
def create_resolves_to(self, source_id: str, target_id: str, start_date: datetime, end_date: datetime, description: Optional[str]=None) -> Relationship:
"""
Create the `resolves-to` relationship between the source and the target.
The relation is added to the bundle.
Parameters
----------
source_id : str
Id of the source, must be the id of a `domain-name`.
target_id : str
Id of the target, must be the id of a `domain-name` or an `ipv4-addr`.
start_date : datetime
Starting date for the relationship.
end_date : datetime
Ending date for the relationship.
description : str, optional
Description of the relationship (e.g. the type (name-server, redirect, etc.).
Returns
-------
Relationship
Created relationship.
"""
<DeepExtract>
kwargs = {'created_by_ref': self.author, 'confidence': self.helper.connect_confidence_level}
if description is not None:
kwargs['description'] = description
if start_date != '' and end_date != '':
kwargs |= {'start_time': start_date, 'stop_time': end_date}
rel = Relationship(id=StixCoreRelationship.generate_id('resolves-to', source_id, target_id, start_date, end_date), relationship_type='resolves-to', source_ref=source_id, target_ref=target_id, **kwargs)
</DeepExtract>
self.bundle.append(rel)
return rel
|
def create_resolves_to(self, source_id: str, target_id: str, start_date: datetime, end_date: datetime, description: Optional[str]=None) -> Relationship:
"""
Create the `resolves-to` relationship between the source and the target.
The relation is added to the bundle.
Parameters
----------
source_id : str
Id of the source, must be the id of a `domain-name`.
target_id : str
Id of the target, must be the id of a `domain-name` or an `ipv4-addr`.
start_date : datetime
Starting date for the relationship.
end_date : datetime
Ending date for the relationship.
description : str, optional
Description of the relationship (e.g. the type (name-server, redirect, etc.).
Returns
-------
Relationship
Created relationship.
"""
kwargs = {'created_by_ref': self.author, 'confidence': self.helper.connect_confidence_level}
if description is not None:
kwargs['description'] = description
if start_date != '' and end_date != '':
kwargs |= {'start_time': start_date, 'stop_time': end_date}
rel = Relationship(id=StixCoreRelationship.generate_id('resolves-to', source_id, target_id, start_date, end_date), relationship_type='resolves-to', source_ref=source_id, target_ref=target_id, **kwargs)
self.bundle.append(rel)
return rel
|
connectors
|
positive
|
def perform_unlock_bypass(ctx):
if is_unlocked(ctx):
log.info("Device is already using 'unlocked' command table.")
return True
if not is_vulnerable(ctx):
log.error('Device does not appear to be vulnerable.')
return False
ctx.patch_memory(_patch_list, impl='i2c')
<DeepExtract>
unlocked_cmds = ('editenv', 'dek_blob', 'fuse', 'loadb', 'mw', 'nand', 'ubi')
available = ctx.commands(cached=False, detailed=False)
for cmd in unlocked_cmds:
if cmd in available:
success = True
success = False
</DeepExtract>
if success:
log.info("Success! Device is now using 'unlocked' command table.")
else:
log.error("Failed to switch to 'unlocked' command table.")
return success
|
def perform_unlock_bypass(ctx):
if is_unlocked(ctx):
log.info("Device is already using 'unlocked' command table.")
return True
if not is_vulnerable(ctx):
log.error('Device does not appear to be vulnerable.')
return False
ctx.patch_memory(_patch_list, impl='i2c')
unlocked_cmds = ('editenv', 'dek_blob', 'fuse', 'loadb', 'mw', 'nand', 'ubi')
available = ctx.commands(cached=False, detailed=False)
for cmd in unlocked_cmds:
if cmd in available:
success = True
success = False
if success:
log.info("Success! Device is now using 'unlocked' command table.")
else:
log.error("Failed to switch to 'unlocked' command table.")
return success
|
depthcharge
|
positive
|
def __init__(self, block, layers, groups=1, bottleneck_width=32, num_classes=1000, dilated=False, dilation=1, deep_stem=False, stem_width=64, avg_down=False, avd=False, norm_layer=nn.BatchNorm2d):
self.cardinality = groups
self.bottleneck_width = bottleneck_width
self.inplanes = stem_width * 2 if deep_stem else 64
self.avg_down = avg_down
self.avd = avd
super(SCNet, self).__init__()
conv_layer = nn.Conv2d
if deep_stem:
self.conv1 = nn.Sequential(conv_layer(3, stem_width, kernel_size=3, stride=2, padding=1, bias=False), norm_layer(stem_width), nn.ReLU(inplace=True), conv_layer(stem_width, stem_width, kernel_size=3, stride=1, padding=1, bias=False), norm_layer(stem_width), nn.ReLU(inplace=True), conv_layer(stem_width, stem_width * 2, kernel_size=3, stride=1, padding=1, bias=False))
else:
self.conv1 = conv_layer(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
<DeepExtract>
downsample = None
if stride != 1 or self.inplanes != 64 * block.expansion:
down_layers = []
if self.avg_down:
if dilation == 1:
down_layers.append(nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 64 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 64 * block.expansion, kernel_size=1, stride=stride, bias=False))
down_layers.append(norm_layer(64 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if dilation == 1 or dilation == 2:
layers.append(block(self.inplanes, 64, stride, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=False, norm_layer=norm_layer))
elif dilation == 4:
layers.append(block(self.inplanes, 64, stride, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=False, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(dilation))
self.inplanes = 64 * block.expansion
for i in range(1, layers[0]):
layers.append(block(self.inplanes, 64, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=dilation, norm_layer=norm_layer))
self.layer1 = nn.Sequential(*layers)
</DeepExtract>
<DeepExtract>
downsample = None
if 2 != 1 or self.inplanes != 128 * block.expansion:
down_layers = []
if self.avg_down:
if dilation == 1:
down_layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 128 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 128 * block.expansion, kernel_size=1, stride=2, bias=False))
down_layers.append(norm_layer(128 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if dilation == 1 or dilation == 2:
layers.append(block(self.inplanes, 128, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=is_first, norm_layer=norm_layer))
elif dilation == 4:
layers.append(block(self.inplanes, 128, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=is_first, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(dilation))
self.inplanes = 128 * block.expansion
for i in range(1, layers[1]):
layers.append(block(self.inplanes, 128, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=dilation, norm_layer=norm_layer))
self.layer2 = nn.Sequential(*layers)
</DeepExtract>
if dilated or dilation == 4:
<DeepExtract>
downsample = None
if 1 != 1 or self.inplanes != 256 * block.expansion:
down_layers = []
if self.avg_down:
if 2 == 1:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=1, bias=False))
down_layers.append(norm_layer(256 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if 2 == 1 or 2 == 2:
layers.append(block(self.inplanes, 256, 1, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=is_first, norm_layer=norm_layer))
elif 2 == 4:
layers.append(block(self.inplanes, 256, 1, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=is_first, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(2))
self.inplanes = 256 * block.expansion
for i in range(1, layers[2]):
layers.append(block(self.inplanes, 256, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, norm_layer=norm_layer))
self.layer3 = nn.Sequential(*layers)
</DeepExtract>
<DeepExtract>
downsample = None
if 1 != 1 or self.inplanes != 512 * block.expansion:
down_layers = []
if self.avg_down:
if 4 == 1:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=1, bias=False))
down_layers.append(norm_layer(512 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if 4 == 1 or 4 == 2:
layers.append(block(self.inplanes, 512, 1, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=is_first, norm_layer=norm_layer))
elif 4 == 4:
layers.append(block(self.inplanes, 512, 1, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=is_first, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(4))
self.inplanes = 512 * block.expansion
for i in range(1, layers[3]):
layers.append(block(self.inplanes, 512, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=4, norm_layer=norm_layer))
self.layer4 = nn.Sequential(*layers)
</DeepExtract>
elif dilation == 2:
<DeepExtract>
downsample = None
if 2 != 1 or self.inplanes != 256 * block.expansion:
down_layers = []
if self.avg_down:
if 1 == 1:
down_layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=2, bias=False))
down_layers.append(norm_layer(256 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if 1 == 1 or 1 == 2:
layers.append(block(self.inplanes, 256, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=is_first, norm_layer=norm_layer))
elif 1 == 4:
layers.append(block(self.inplanes, 256, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=is_first, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(1))
self.inplanes = 256 * block.expansion
for i in range(1, layers[2]):
layers.append(block(self.inplanes, 256, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, norm_layer=norm_layer))
self.layer3 = nn.Sequential(*layers)
</DeepExtract>
<DeepExtract>
downsample = None
if 1 != 1 or self.inplanes != 512 * block.expansion:
down_layers = []
if self.avg_down:
if 2 == 1:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=1, bias=False))
down_layers.append(norm_layer(512 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if 2 == 1 or 2 == 2:
layers.append(block(self.inplanes, 512, 1, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=is_first, norm_layer=norm_layer))
elif 2 == 4:
layers.append(block(self.inplanes, 512, 1, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=is_first, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(2))
self.inplanes = 512 * block.expansion
for i in range(1, layers[3]):
layers.append(block(self.inplanes, 512, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, norm_layer=norm_layer))
self.layer4 = nn.Sequential(*layers)
</DeepExtract>
else:
<DeepExtract>
downsample = None
if 2 != 1 or self.inplanes != 256 * block.expansion:
down_layers = []
if self.avg_down:
if dilation == 1:
down_layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=2, bias=False))
down_layers.append(norm_layer(256 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if dilation == 1 or dilation == 2:
layers.append(block(self.inplanes, 256, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=is_first, norm_layer=norm_layer))
elif dilation == 4:
layers.append(block(self.inplanes, 256, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=is_first, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(dilation))
self.inplanes = 256 * block.expansion
for i in range(1, layers[2]):
layers.append(block(self.inplanes, 256, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=dilation, norm_layer=norm_layer))
self.layer3 = nn.Sequential(*layers)
</DeepExtract>
<DeepExtract>
downsample = None
if 2 != 1 or self.inplanes != 512 * block.expansion:
down_layers = []
if self.avg_down:
if dilation == 1:
down_layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=2, bias=False))
down_layers.append(norm_layer(512 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if dilation == 1 or dilation == 2:
layers.append(block(self.inplanes, 512, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=is_first, norm_layer=norm_layer))
elif dilation == 4:
layers.append(block(self.inplanes, 512, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=is_first, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(dilation))
self.inplanes = 512 * block.expansion
for i in range(1, layers[3]):
layers.append(block(self.inplanes, 512, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=dilation, norm_layer=norm_layer))
self.layer4 = nn.Sequential(*layers)
</DeepExtract>
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, norm_layer):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
|
def __init__(self, block, layers, groups=1, bottleneck_width=32, num_classes=1000, dilated=False, dilation=1, deep_stem=False, stem_width=64, avg_down=False, avd=False, norm_layer=nn.BatchNorm2d):
self.cardinality = groups
self.bottleneck_width = bottleneck_width
self.inplanes = stem_width * 2 if deep_stem else 64
self.avg_down = avg_down
self.avd = avd
super(SCNet, self).__init__()
conv_layer = nn.Conv2d
if deep_stem:
self.conv1 = nn.Sequential(conv_layer(3, stem_width, kernel_size=3, stride=2, padding=1, bias=False), norm_layer(stem_width), nn.ReLU(inplace=True), conv_layer(stem_width, stem_width, kernel_size=3, stride=1, padding=1, bias=False), norm_layer(stem_width), nn.ReLU(inplace=True), conv_layer(stem_width, stem_width * 2, kernel_size=3, stride=1, padding=1, bias=False))
else:
self.conv1 = conv_layer(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
downsample = None
if stride != 1 or self.inplanes != 64 * block.expansion:
down_layers = []
if self.avg_down:
if dilation == 1:
down_layers.append(nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 64 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 64 * block.expansion, kernel_size=1, stride=stride, bias=False))
down_layers.append(norm_layer(64 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if dilation == 1 or dilation == 2:
layers.append(block(self.inplanes, 64, stride, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=False, norm_layer=norm_layer))
elif dilation == 4:
layers.append(block(self.inplanes, 64, stride, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=False, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(dilation))
self.inplanes = 64 * block.expansion
for i in range(1, layers[0]):
layers.append(block(self.inplanes, 64, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=dilation, norm_layer=norm_layer))
self.layer1 = nn.Sequential(*layers)
downsample = None
if 2 != 1 or self.inplanes != 128 * block.expansion:
down_layers = []
if self.avg_down:
if dilation == 1:
down_layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 128 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 128 * block.expansion, kernel_size=1, stride=2, bias=False))
down_layers.append(norm_layer(128 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if dilation == 1 or dilation == 2:
layers.append(block(self.inplanes, 128, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=is_first, norm_layer=norm_layer))
elif dilation == 4:
layers.append(block(self.inplanes, 128, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=is_first, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(dilation))
self.inplanes = 128 * block.expansion
for i in range(1, layers[1]):
layers.append(block(self.inplanes, 128, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=dilation, norm_layer=norm_layer))
self.layer2 = nn.Sequential(*layers)
if dilated or dilation == 4:
downsample = None
if 1 != 1 or self.inplanes != 256 * block.expansion:
down_layers = []
if self.avg_down:
if 2 == 1:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=1, bias=False))
down_layers.append(norm_layer(256 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if 2 == 1 or 2 == 2:
layers.append(block(self.inplanes, 256, 1, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=is_first, norm_layer=norm_layer))
elif 2 == 4:
layers.append(block(self.inplanes, 256, 1, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=is_first, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(2))
self.inplanes = 256 * block.expansion
for i in range(1, layers[2]):
layers.append(block(self.inplanes, 256, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, norm_layer=norm_layer))
self.layer3 = nn.Sequential(*layers)
downsample = None
if 1 != 1 or self.inplanes != 512 * block.expansion:
down_layers = []
if self.avg_down:
if 4 == 1:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=1, bias=False))
down_layers.append(norm_layer(512 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if 4 == 1 or 4 == 2:
layers.append(block(self.inplanes, 512, 1, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=is_first, norm_layer=norm_layer))
elif 4 == 4:
layers.append(block(self.inplanes, 512, 1, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=is_first, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(4))
self.inplanes = 512 * block.expansion
for i in range(1, layers[3]):
layers.append(block(self.inplanes, 512, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=4, norm_layer=norm_layer))
self.layer4 = nn.Sequential(*layers)
elif dilation == 2:
downsample = None
if 2 != 1 or self.inplanes != 256 * block.expansion:
down_layers = []
if self.avg_down:
if 1 == 1:
down_layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=2, bias=False))
down_layers.append(norm_layer(256 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if 1 == 1 or 1 == 2:
layers.append(block(self.inplanes, 256, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=is_first, norm_layer=norm_layer))
elif 1 == 4:
layers.append(block(self.inplanes, 256, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=is_first, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(1))
self.inplanes = 256 * block.expansion
for i in range(1, layers[2]):
layers.append(block(self.inplanes, 256, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, norm_layer=norm_layer))
self.layer3 = nn.Sequential(*layers)
downsample = None
if 1 != 1 or self.inplanes != 512 * block.expansion:
down_layers = []
if self.avg_down:
if 2 == 1:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=1, bias=False))
down_layers.append(norm_layer(512 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if 2 == 1 or 2 == 2:
layers.append(block(self.inplanes, 512, 1, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=is_first, norm_layer=norm_layer))
elif 2 == 4:
layers.append(block(self.inplanes, 512, 1, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=is_first, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(2))
self.inplanes = 512 * block.expansion
for i in range(1, layers[3]):
layers.append(block(self.inplanes, 512, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, norm_layer=norm_layer))
self.layer4 = nn.Sequential(*layers)
else:
downsample = None
if 2 != 1 or self.inplanes != 256 * block.expansion:
down_layers = []
if self.avg_down:
if dilation == 1:
down_layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 256 * block.expansion, kernel_size=1, stride=2, bias=False))
down_layers.append(norm_layer(256 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if dilation == 1 or dilation == 2:
layers.append(block(self.inplanes, 256, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=is_first, norm_layer=norm_layer))
elif dilation == 4:
layers.append(block(self.inplanes, 256, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=is_first, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(dilation))
self.inplanes = 256 * block.expansion
for i in range(1, layers[2]):
layers.append(block(self.inplanes, 256, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=dilation, norm_layer=norm_layer))
self.layer3 = nn.Sequential(*layers)
downsample = None
if 2 != 1 or self.inplanes != 512 * block.expansion:
down_layers = []
if self.avg_down:
if dilation == 1:
down_layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, 512 * block.expansion, kernel_size=1, stride=2, bias=False))
down_layers.append(norm_layer(512 * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if dilation == 1 or dilation == 2:
layers.append(block(self.inplanes, 512, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=1, is_first=is_first, norm_layer=norm_layer))
elif dilation == 4:
layers.append(block(self.inplanes, 512, 2, downsample=downsample, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=2, is_first=is_first, norm_layer=norm_layer))
else:
raise RuntimeError('=> unknown dilation size: {}'.format(dilation))
self.inplanes = 512 * block.expansion
for i in range(1, layers[3]):
layers.append(block(self.inplanes, 512, cardinality=self.cardinality, bottleneck_width=self.bottleneck_width, avd=self.avd, dilation=dilation, norm_layer=norm_layer))
self.layer4 = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, norm_layer):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
|
awesome-attention-mechanism-in-cv
|
positive
|
def _retrieve_issue_activity(base_url, id):
<DeepExtract>
activity_url = base_url + 'show_activity.cgi?id=' + id
</DeepExtract>
printdbg('Retrieving activity of issue #%s from %s' % (id, activity_url))
data = self._urlopen_auth(activity_url).read()
parser = SoupHtmlParser(data, id)
changes = parser.parse_changes()
return changes
|
def _retrieve_issue_activity(base_url, id):
activity_url = base_url + 'show_activity.cgi?id=' + id
printdbg('Retrieving activity of issue #%s from %s' % (id, activity_url))
data = self._urlopen_auth(activity_url).read()
parser = SoupHtmlParser(data, id)
changes = parser.parse_changes()
return changes
|
Bicho
|
positive
|
def _set_frequency(frequency: float) -> float:
"""Sets the bus frequency.
Sets the FT232H clock divisor value, according to the desired bus
frequency. The actual bus frequency is then as close as possible to the
desired value, but may still be slightly different.
Args:
frequency (:obj:`float`): Desired bus frequency (in Hz)
Returns:
Actual bus frequency
"""
divcode = ft232h_cmds['enable_clk_div5']
divisor = int((ft232h_clock['base'] + frequency / 2) / frequency) - 1
divisor = max(0, min(65535, divisor))
actual_freq = ft232h_clock['base'] / (divisor + 1)
error = actual_freq / frequency - 1
divisor_hs = int((ft232h_clock['high'] + frequency / 2) / frequency) - 1
divisor_hs = max(0, min(65535, divisor_hs))
actual_freq_hs = ft232h_clock['high'] / (divisor_hs + 1)
error_hs = actual_freq_hs / frequency - 1
if abs(error_hs) <= abs(error):
divcode = ft232h_cmds['disable_clk_div5']
divisor = divisor_hs
actual_freq = actual_freq_hs
cmd = bytearray((divcode,))
cmd.extend((ft232h_cmds['set_tck_divisor'], divisor & 255, divisor >> 8 & 255))
cmd.extend((ft232h_cmds['send_immediate'],))
<DeepExtract>
offset = 0
size = len(cmd)
try:
while offset < size:
write_size = self._writebuffer_chunksize
if offset + write_size > size:
write_size = size - offset
try:
length = self._usb_dev.write(self._in_ep, cmd[offset:offset + write_size], self._usb_write_timeout)
except USBError:
raise
if length <= 0:
raise USBError('Usb bulk write error')
offset += length
return offset
except USBError:
print('An error occurred while writing to USB')
raise
</DeepExtract>
bytes_ = bytes(self._read_data_bytes(2))
if len(bytes_) >= 2 and bytes_[0] == '\xfa':
raise IOError('Invalid command @ %d' % bytes_[1])
<DeepExtract>
if self._ctrl_transfer_out(ft232h_sio_req['reset'], ft232h_sio_args['purge_RX']):
raise IOError('Unable to flush RX buffer')
self._readoffset = 0
self._readbuffer = bytearray()
</DeepExtract>
return actual_freq
|
def _set_frequency(frequency: float) -> float:
"""Sets the bus frequency.
Sets the FT232H clock divisor value, according to the desired bus
frequency. The actual bus frequency is then as close as possible to the
desired value, but may still be slightly different.
Args:
frequency (:obj:`float`): Desired bus frequency (in Hz)
Returns:
Actual bus frequency
"""
divcode = ft232h_cmds['enable_clk_div5']
divisor = int((ft232h_clock['base'] + frequency / 2) / frequency) - 1
divisor = max(0, min(65535, divisor))
actual_freq = ft232h_clock['base'] / (divisor + 1)
error = actual_freq / frequency - 1
divisor_hs = int((ft232h_clock['high'] + frequency / 2) / frequency) - 1
divisor_hs = max(0, min(65535, divisor_hs))
actual_freq_hs = ft232h_clock['high'] / (divisor_hs + 1)
error_hs = actual_freq_hs / frequency - 1
if abs(error_hs) <= abs(error):
divcode = ft232h_cmds['disable_clk_div5']
divisor = divisor_hs
actual_freq = actual_freq_hs
cmd = bytearray((divcode,))
cmd.extend((ft232h_cmds['set_tck_divisor'], divisor & 255, divisor >> 8 & 255))
cmd.extend((ft232h_cmds['send_immediate'],))
offset = 0
size = len(cmd)
try:
while offset < size:
write_size = self._writebuffer_chunksize
if offset + write_size > size:
write_size = size - offset
try:
length = self._usb_dev.write(self._in_ep, cmd[offset:offset + write_size], self._usb_write_timeout)
except USBError:
raise
if length <= 0:
raise USBError('Usb bulk write error')
offset += length
return offset
except USBError:
print('An error occurred while writing to USB')
raise
bytes_ = bytes(self._read_data_bytes(2))
if len(bytes_) >= 2 and bytes_[0] == '\xfa':
raise IOError('Invalid command @ %d' % bytes_[1])
if self._ctrl_transfer_out(ft232h_sio_req['reset'], ft232h_sio_args['purge_RX']):
raise IOError('Unable to flush RX buffer')
self._readoffset = 0
self._readbuffer = bytearray()
return actual_freq
|
crappy
|
positive
|
def test_try_catch_finally_statement(self):
unparser = Unparser()
<DeepExtract>
ast = es5.parse(textwrap.dedent('\n try {\n var x = 100;\n y = 111;\n }\n catch (e) {\n var x = 200;\n y = 222;\n }\n finally {\n z = 1;\n }\n ').strip())
</DeepExtract>
self.assertEqual(dict(unparser(ast)), {Try: [[{'x': 100, 'y': 111}, {Catch: [['e', {'x': 200, 'y': 222}]]}, {Finally: [[{'z': 1}]]}]]})
|
def test_try_catch_finally_statement(self):
unparser = Unparser()
ast = es5.parse(textwrap.dedent('\n try {\n var x = 100;\n y = 111;\n }\n catch (e) {\n var x = 200;\n y = 222;\n }\n finally {\n z = 1;\n }\n ').strip())
self.assertEqual(dict(unparser(ast)), {Try: [[{'x': 100, 'y': 111}, {Catch: [['e', {'x': 200, 'y': 222}]]}, {Finally: [[{'z': 1}]]}]]})
|
calmjs.parse
|
positive
|
def read_json(self):
"""read JSON
serialize the content of the file and transform it in
a json format type
Returns:
JSON -- serialized json data
"""
<DeepExtract>
encoding = 'utf-8'
text = ''
try:
with codecs.open(self.file_name, 'r', encoding) as file:
text = file.read()
except (IOError, UnicodeError):
pass
file = text
</DeepExtract>
if file:
return json.loads(file)
return []
|
def read_json(self):
"""read JSON
serialize the content of the file and transform it in
a json format type
Returns:
JSON -- serialized json data
"""
encoding = 'utf-8'
text = ''
try:
with codecs.open(self.file_name, 'r', encoding) as file:
text = file.read()
except (IOError, UnicodeError):
pass
file = text
if file:
return json.loads(file)
return []
|
Deviot
|
positive
|
def update_parametrization(obj):
<DeepExtract>
if obj.mbdyn.type == 'node':
node = bpy.context.scene.mbdyn.nodes[obj.mbdyn.dkey]
elif obj.mbdyn.type == 'element':
node = bpy.context.scene.mbdyn.elems[obj.mbdyn.dkey]
elif obj.mbdyn.type == 'reference':
node = bpy.context.scene.mbdyn.refs[obj.mbdyn.dkey]
else:
node = None
</DeepExtract>
if node:
return assign_parametrization(obj, node)
else:
return {'NOTFOUND_DICT'}
|
def update_parametrization(obj):
if obj.mbdyn.type == 'node':
node = bpy.context.scene.mbdyn.nodes[obj.mbdyn.dkey]
elif obj.mbdyn.type == 'element':
node = bpy.context.scene.mbdyn.elems[obj.mbdyn.dkey]
elif obj.mbdyn.type == 'reference':
node = bpy.context.scene.mbdyn.refs[obj.mbdyn.dkey]
else:
node = None
if node:
return assign_parametrization(obj, node)
else:
return {'NOTFOUND_DICT'}
|
blendyn
|
positive
|
@verbose(True, verbose_output=False, timeout=None, _str=None)
def analyze(self, data):
"""
start pattern analysis for words and wordsstripped
"""
data['CARDS'] = deepcopy(self.datastruct)
self.words = data['StringsRAW']['wordsinsensitive']
self.wordsstripped = data['StringsRAW']['wordsstripped']
<DeepExtract>
temp_list = []
temp_var = findall(self.detectionamericanexpress, self.wordsstripped)
if len(temp_var) > 0:
for _ in temp_var:
temp_list.append(_)
for temp_var in set(temp_list):
data['CARDS']['AMERICANEXPRESS'].append({'Count': temp_list.count(temp_var), 'AmericanExpress': temp_var})
</DeepExtract>
<DeepExtract>
temp_list = []
temp_var = findall(self.detectionvisa, self.wordsstripped)
if len(temp_var) > 0:
for _ in temp_var:
temp_list.append(_)
for temp_var in set(temp_list):
data['CARDS']['VISA'].append({'Count': temp_list.count(temp_var), 'Visa': temp_var})
</DeepExtract>
<DeepExtract>
temp_list = []
temp_var = findall(self.detectionmastercard, self.wordsstripped)
if len(temp_var) > 0:
for _ in temp_var:
temp_list.append(_)
for temp_var in set(temp_list):
data['CARDS']['MASTERCARD'].append({'Count': temp_list.count(temp_var), 'MasterCard': temp_var})
</DeepExtract>
<DeepExtract>
temp_list = []
temp_var = findall(self.detectiondiscover, self.wordsstripped)
if len(temp_var) > 0:
for _ in temp_var:
temp_list.append(_)
for temp_var in set(temp_list):
data['CARDS']['DISCOVER'].append({'Count': temp_list.count(temp_var), 'Discover': temp_var})
</DeepExtract>
<DeepExtract>
temp_list = []
temp_var = findall(self.detectionjcb, self.wordsstripped)
if len(temp_var) > 0:
for _ in temp_var:
temp_list.append(_)
for temp_var in set(temp_list):
data['CARDS']['JCB'].append({'Count': temp_list.count(temp_var), 'JCB': temp_var})
</DeepExtract>
<DeepExtract>
temp_list = []
temp_var = findall(self.detectiondinersclub, self.wordsstripped)
if len(temp_var) > 0:
for _ in temp_var:
temp_list.append(_)
for temp_var in set(temp_list):
data['CARDS']['DINERSCLUB'].append({'Count': temp_list.count(temp_var), 'DinersClub': temp_var})
</DeepExtract>
|
@verbose(True, verbose_output=False, timeout=None, _str=None)
def analyze(self, data):
"""
start pattern analysis for words and wordsstripped
"""
data['CARDS'] = deepcopy(self.datastruct)
self.words = data['StringsRAW']['wordsinsensitive']
self.wordsstripped = data['StringsRAW']['wordsstripped']
temp_list = []
temp_var = findall(self.detectionamericanexpress, self.wordsstripped)
if len(temp_var) > 0:
for _ in temp_var:
temp_list.append(_)
for temp_var in set(temp_list):
data['CARDS']['AMERICANEXPRESS'].append({'Count': temp_list.count(temp_var), 'AmericanExpress': temp_var})
temp_list = []
temp_var = findall(self.detectionvisa, self.wordsstripped)
if len(temp_var) > 0:
for _ in temp_var:
temp_list.append(_)
for temp_var in set(temp_list):
data['CARDS']['VISA'].append({'Count': temp_list.count(temp_var), 'Visa': temp_var})
temp_list = []
temp_var = findall(self.detectionmastercard, self.wordsstripped)
if len(temp_var) > 0:
for _ in temp_var:
temp_list.append(_)
for temp_var in set(temp_list):
data['CARDS']['MASTERCARD'].append({'Count': temp_list.count(temp_var), 'MasterCard': temp_var})
temp_list = []
temp_var = findall(self.detectiondiscover, self.wordsstripped)
if len(temp_var) > 0:
for _ in temp_var:
temp_list.append(_)
for temp_var in set(temp_list):
data['CARDS']['DISCOVER'].append({'Count': temp_list.count(temp_var), 'Discover': temp_var})
temp_list = []
temp_var = findall(self.detectionjcb, self.wordsstripped)
if len(temp_var) > 0:
for _ in temp_var:
temp_list.append(_)
for temp_var in set(temp_list):
data['CARDS']['JCB'].append({'Count': temp_list.count(temp_var), 'JCB': temp_var})
temp_list = []
temp_var = findall(self.detectiondinersclub, self.wordsstripped)
if len(temp_var) > 0:
for _ in temp_var:
temp_list.append(_)
for temp_var in set(temp_list):
data['CARDS']['DINERSCLUB'].append({'Count': temp_list.count(temp_var), 'DinersClub': temp_var})
|
analyzer
|
positive
|
def _createMo(node, parentMo):
(pkgName, className) = parseMoClassName(node.tag)
fqClassName = 'cobra.model.' + pkgName + '.' + className
pyClass = ClassLoader.loadClass(fqClassName)
parentDnStr = None
moProps = {}
for (attr, val) in list(node.attrib.items()):
if attr == 'dn':
parentDnStr = getParentDn(str(val))
elif attr == 'rn' or attr == 'instanceId':
pass
elif attr == 'status' and (not val):
pass
else:
moProps[attr] = str(val)
namingVals = []
for propMeta in pyClass.meta.namingProps:
propName = propMeta.moPropName
namingVals.append(moProps[propName])
del moProps[propName]
parentMoOrDn = parentMo if parentMo else parentDnStr
mo = pyClass(parentMoOrDn, *namingVals, markDirty=False, **moProps)
mo.resetProps()
for childNode in node:
<DeepExtract>
(pkgName, className) = parseMoClassName(childNode.tag)
fqClassName = 'cobra.model.' + pkgName + '.' + className
pyClass = ClassLoader.loadClass(fqClassName)
parentDnStr = None
moProps = {}
for (attr, val) in list(childNode.attrib.items()):
if attr == 'dn':
parentDnStr = getParentDn(str(val))
elif attr == 'rn' or attr == 'instanceId':
pass
elif attr == 'status' and (not val):
pass
else:
moProps[attr] = str(val)
namingVals = []
for propMeta in pyClass.meta.namingProps:
propName = propMeta.moPropName
namingVals.append(moProps[propName])
del moProps[propName]
parentMoOrDn = mo if mo else parentDnStr
mo = pyClass(parentMoOrDn, *namingVals, markDirty=False, **moProps)
mo.resetProps()
for childNode in childNode:
_createMo(childNode, mo)
return mo
</DeepExtract>
return mo
|
def _createMo(node, parentMo):
(pkgName, className) = parseMoClassName(node.tag)
fqClassName = 'cobra.model.' + pkgName + '.' + className
pyClass = ClassLoader.loadClass(fqClassName)
parentDnStr = None
moProps = {}
for (attr, val) in list(node.attrib.items()):
if attr == 'dn':
parentDnStr = getParentDn(str(val))
elif attr == 'rn' or attr == 'instanceId':
pass
elif attr == 'status' and (not val):
pass
else:
moProps[attr] = str(val)
namingVals = []
for propMeta in pyClass.meta.namingProps:
propName = propMeta.moPropName
namingVals.append(moProps[propName])
del moProps[propName]
parentMoOrDn = parentMo if parentMo else parentDnStr
mo = pyClass(parentMoOrDn, *namingVals, markDirty=False, **moProps)
mo.resetProps()
for childNode in node:
(pkgName, className) = parseMoClassName(childNode.tag)
fqClassName = 'cobra.model.' + pkgName + '.' + className
pyClass = ClassLoader.loadClass(fqClassName)
parentDnStr = None
moProps = {}
for (attr, val) in list(childNode.attrib.items()):
if attr == 'dn':
parentDnStr = getParentDn(str(val))
elif attr == 'rn' or attr == 'instanceId':
pass
elif attr == 'status' and (not val):
pass
else:
moProps[attr] = str(val)
namingVals = []
for propMeta in pyClass.meta.namingProps:
propName = propMeta.moPropName
namingVals.append(moProps[propName])
del moProps[propName]
parentMoOrDn = mo if mo else parentDnStr
mo = pyClass(parentMoOrDn, *namingVals, markDirty=False, **moProps)
mo.resetProps()
for childNode in childNode:
_createMo(childNode, mo)
return mo
return mo
|
cobra
|
positive
|
def __init__(self, factory, args, required, optional, resolve, is_context):
<DeepExtract>
if not callable(factory):
raise RuntimeError
</DeepExtract>
<DeepExtract>
if not (args.keys() == required ^ optional and (not required & optional)):
raise RuntimeError
</DeepExtract>
<DeepExtract>
if not callable(resolve):
raise RuntimeError
</DeepExtract>
self.factory = factory
self.args = args
self.required = required
self.optional = optional
self.resolve = resolve
self.is_context = is_context
|
def __init__(self, factory, args, required, optional, resolve, is_context):
if not callable(factory):
raise RuntimeError
if not (args.keys() == required ^ optional and (not required & optional)):
raise RuntimeError
if not callable(resolve):
raise RuntimeError
self.factory = factory
self.args = args
self.required = required
self.optional = optional
self.resolve = resolve
self.is_context = is_context
|
dependencies
|
positive
|
def test_any_static_before_dynamic(self):
""" Static ANY routes have higher priority than dynamic ANY routes. """
<DeepExtract>
self.r.add('/<:>', 'ANY', 'bar', **ka)
</DeepExtract>
self.assertEqual(self.match('/foo')[0], 'bar')
<DeepExtract>
self.r.add('/foo', 'ANY', 'foo', **ka)
</DeepExtract>
self.assertEqual(self.match('/foo')[0], 'foo')
|
def test_any_static_before_dynamic(self):
""" Static ANY routes have higher priority than dynamic ANY routes. """
self.r.add('/<:>', 'ANY', 'bar', **ka)
self.assertEqual(self.match('/foo')[0], 'bar')
self.r.add('/foo', 'ANY', 'foo', **ka)
self.assertEqual(self.match('/foo')[0], 'foo')
|
bottle
|
positive
|
def apply_along_axis(func, axis, x, *args, **kwargs):
""" Apply a function to slices along the given axis.
Execute func(a, \\*args, \\*\\*kwargs) where func operates on nd-arrays and a
is a slice of arr along axis. The size of the slices is determined
by the blocks shape of x.
func must meet the following conditions:
- Take an nd-array as argument
- Accept `axis` as a keyword argument
- Return an array-like structure
Parameters
----------
func : function
This function should accept nd-arrays and an axis argument. It is
applied to slices of arr along the specified axis.
axis : integer
Axis along which arr is sliced. Can be 0 or 1.
x : ds-array
Input distributed array.
args : any
Additional arguments to func.
kwargs : any
Additional named arguments to func.
Returns
-------
out : ds-array
The output array. The shape of out is identical to the shape of arr,
except along the axis dimension. The output ds-array is dense
regardless of the type of the input array.
Examples
--------
>>> import dislib as ds
>>> import numpy as np
>>>
>>>
>>> if __name__ == '__main__':
>>> x = ds.random_array((100, 100), block_size=(25, 25))
>>> mean = ds.apply_along_axis(np.mean, 0, x)
>>> print(mean.collect())
"""
if axis != 0 and axis != 1:
raise ValueError('Axis must be 0 or 1.')
tlshape = x._top_left_shape
bshape = x._reg_shape
shape = x.shape
out_blocks = list()
for block in x._iterator(axis=not axis):
<DeepExtract>
arr = Array._merge_blocks(block._blocks)
kwargs['axis'] = axis
out = func(arr, *args, **kwargs)
if not issparse(arr):
out = np.asarray(out)
else:
out = csr_matrix(out)
if axis == 0:
out = out.reshape(1, -1)
else:
out = out.reshape(-1, 1)
</DeepExtract>
out_blocks.append(out)
if axis == 0:
blocks = [out_blocks]
out_tlbshape = (1, tlshape[1])
out_bshape = (1, bshape[1])
out_shape = (1, shape[1])
else:
blocks = [[block] for block in out_blocks]
out_tlbshape = (tlshape[0], 1)
out_bshape = (bshape[0], 1)
out_shape = (shape[0], 1)
return Array(blocks, top_left_shape=out_tlbshape, reg_shape=out_bshape, shape=out_shape, sparse=x._sparse)
|
def apply_along_axis(func, axis, x, *args, **kwargs):
""" Apply a function to slices along the given axis.
Execute func(a, \\*args, \\*\\*kwargs) where func operates on nd-arrays and a
is a slice of arr along axis. The size of the slices is determined
by the blocks shape of x.
func must meet the following conditions:
- Take an nd-array as argument
- Accept `axis` as a keyword argument
- Return an array-like structure
Parameters
----------
func : function
This function should accept nd-arrays and an axis argument. It is
applied to slices of arr along the specified axis.
axis : integer
Axis along which arr is sliced. Can be 0 or 1.
x : ds-array
Input distributed array.
args : any
Additional arguments to func.
kwargs : any
Additional named arguments to func.
Returns
-------
out : ds-array
The output array. The shape of out is identical to the shape of arr,
except along the axis dimension. The output ds-array is dense
regardless of the type of the input array.
Examples
--------
>>> import dislib as ds
>>> import numpy as np
>>>
>>>
>>> if __name__ == '__main__':
>>> x = ds.random_array((100, 100), block_size=(25, 25))
>>> mean = ds.apply_along_axis(np.mean, 0, x)
>>> print(mean.collect())
"""
if axis != 0 and axis != 1:
raise ValueError('Axis must be 0 or 1.')
tlshape = x._top_left_shape
bshape = x._reg_shape
shape = x.shape
out_blocks = list()
for block in x._iterator(axis=not axis):
arr = Array._merge_blocks(block._blocks)
kwargs['axis'] = axis
out = func(arr, *args, **kwargs)
if not issparse(arr):
out = np.asarray(out)
else:
out = csr_matrix(out)
if axis == 0:
out = out.reshape(1, -1)
else:
out = out.reshape(-1, 1)
out_blocks.append(out)
if axis == 0:
blocks = [out_blocks]
out_tlbshape = (1, tlshape[1])
out_bshape = (1, bshape[1])
out_shape = (1, shape[1])
else:
blocks = [[block] for block in out_blocks]
out_tlbshape = (tlshape[0], 1)
out_bshape = (bshape[0], 1)
out_shape = (shape[0], 1)
return Array(blocks, top_left_shape=out_tlbshape, reg_shape=out_bshape, shape=out_shape, sparse=x._sparse)
|
dislib
|
positive
|
def proof_3(case):
print('Proof 3')
dots = case[1]
(d1, d2, d3) = dots
center = case[0]
theta = case[-2]
varphi = case[-1]
self.add_foreground_mobject(dots)
def fade_mobs(fade=0.9):
def update(mob):
mob.fade(fade)
return mob
return update
diameter_vector = Line(d3.get_center(), center.get_center()).get_vector()
diameter = Line(d3.get_center(), d3.get_center() + diameter_vector * 2, color=RED_A)
d4 = Dot(diameter.get_end())
arc_psi_1 = ArcBetweenVectors(0.6, d2, d4, d3, color=RED_A)
arc_psi_2 = ArcBetweenVectors(0.6, d2, d4, center, color=RED_A)
arc_alpha_1 = ArcBetweenVectors(0.7, d1, d4, d3, color=YELLOW_B, stroke_width=8)
arc_alpha_2 = ArcBetweenVectors(0.7, d1, d4, center, color=YELLOW_B, stroke_width=8)
psi_1 = LabelFromArc(arc_psi_1, theta.get_height() * 0.8, '\\psi_1', color=RED_A, distance_proportion=2.1)
psi_2 = LabelFromArc(arc_psi_2, theta.get_height() * 0.8, '\\psi_2', color=RED_A, distance_proportion=2.1)
alpha_1 = LabelFromArc(arc_alpha_1, theta.get_height() * 0.8, '\\alpha_1', color=YELLOW_B, distance_proportion=2.5)
back_1 = BackgroundRectangle(alpha_1)
alpha_2 = LabelFromArc(arc_alpha_2, theta.get_height() * 0.8, '\\alpha_2', color=YELLOW_B, distance_proportion=2.3)
back_2 = BackgroundRectangle(alpha_2)
line_1 = Line(center.get_center(), d1.get_center(), color=varphi.get_color())
line_2 = Line(d3.get_center(), d1.get_center(), color=varphi.get_color())
psi_g = VGroup(arc_psi_1, arc_psi_2, psi_1, psi_2)
alpha_g = VGroup(arc_alpha_1, arc_alpha_2, alpha_1, alpha_2)
theta_g = VGroup(theta, varphi, case[-3], case[-4], case[2], case[3])
self.wait()
self.play(GrowFromCenter(diameter))
self.wait()
self.play(LaggedStart(*[Write(arc) for arc in psi_g]))
self.wait(2)
self.play(*list(map(FadeIn, [back_1, back_2])), LaggedStart(*[Write(arc) for arc in alpha_g]))
self.wait(2)
self.add_foreground_mobjects(back_1, back_2, alpha_g)
self.cases_group_3.add(arc_psi_1, arc_psi_2, arc_alpha_1, arc_alpha_2, diameter, psi_1, psi_2, alpha_1, alpha_2)
self.add(line_1, line_2)
for i in [psi_g, theta_g]:
for j in i:
j.save_state()
self.play(*[ApplyFunction(fade_mobs(), i) for i in psi_g], *[ApplyFunction(fade_mobs(), i) for i in theta_g])
self.wait()
formulas = [['\\alpha_1', '=', '\\psi_1', '+', '\\theta'], ['\\alpha_2', '=', '\\psi_2', '+', '\\varphi'], ['\\alpha_2', '=', '2', '\\alpha_1'], ['\\psi_2', '+', '\\varphi', '=', '2', '(', '\\psi_1', '+', '\\theta', ')'], ['\\psi_2', '=', '2', '\\psi_1'], ['2', '\\psi_1', '+', '\\varphi', '=', '2', '\\psi_1', '+', '2', '\\theta'], ['\\varphi', '=', '2', '\\theta']]
tex_formulas_kwargs = {'tex_to_color_map': {'\\psi_1': psi_1.get_color(), '\\psi_2': psi_2.get_color(), '\\varphi': varphi.get_color(), '\\theta': theta.get_color(), '\\alpha_1': alpha_1.get_color(), '\\alpha_2': alpha_2.get_color()}}
f = VGroup(*[TexMobject(*formula, **tex_formulas_kwargs) for formula in formulas])
f.arrange(DOWN)
f.scale(1.3)
for (fi, i) in zip(f[1:], [1, 1, 3, 1, 4, 1]):
<DeepExtract>
c1 = fi[i].get_center()
c2 = f[0][1].get_center()
distance = c2 - c1
fi.shift(RIGHT * distance[0])
</DeepExtract>
f.to_edge(RIGHT, buff=1.8)
by_case_1 = TextMobject('By case 1')
by_case_1.next_to(f[2], RIGHT)
by_case_2 = by_case_1.copy()
by_case_2.next_to(f[4], RIGHT)
self.play(FadeOut(back_1), Restore(theta), Restore(psi_1), ReplacementTransform(alpha_1[0], f[0][0]), run_time=2)
self.play(TransformFromCopy(psi_1[0], f[0][2]), TransformFromCopy(theta[0], f[0][-1]), *[Write(f[0][i]) for i in [1, 3]], run_time=3)
self.wait()
self.play(FadeOut(back_2), Restore(varphi), Restore(psi_2), ReplacementTransform(alpha_2[0], f[1][0]), run_time=2)
self.play(TransformFromCopy(psi_2[0], f[1][2]), TransformFromCopy(varphi[0], f[1][-1]), *[Write(f[1][i]) for i in [1, 3]], run_time=3)
self.wait()
self.play(Write(by_case_1))
self.wait()
self.play(Write(f[2]))
self.wait()
self.play(TransformFromCopy(f[0][-3:], f[3][6:9]), TransformFromCopy(f[1][-3:], f[3][:3]), *[TransformFromCopy(f[2][i], f[3][j]) for (i, j) in zip([1, 2], [3, 4])], *[Write(f[3][i]) for i in [5, 9]], run_time=3)
self.wait()
self.wait()
line_3 = Line(d3.get_center(), d2.get_center(), color=TEAL_A)
line_4 = Line(center.get_center(), d2.get_center(), color=PURPLE_A)
save_grp = VGroup(arc_alpha_1, arc_alpha_2, varphi, theta)
for i in save_grp:
try:
i.save_state()
except:
pass
self.play(FadeOut(line_1), FadeOut(line_2), FadeIn(line_3), FadeIn(line_4), *[ApplyMethod(i.fade, 0.92) for i in save_grp], *[Restore(i) for i in [arc_psi_1, arc_psi_2]])
self.wait(3)
self.play(Write(by_case_2))
self.wait()
self.play(Write(f[4]))
self.wait(3)
self.play(*[Restore(i) for i in [*save_grp, *case[2:6]]])
self.wait()
self.play(TransformFromCopy(f[3][0], f[5][:2]), *[TransformFromCopy(f[3][i], f[5][j]) for (i, j) in zip([1, 2, 3, 4, 6, 7, 8, 4], [2, 3, 4, 5, 6, 7, 9, 8])], run_time=3)
self.wait()
self.play(*[ApplyMethod(f[5][i].fade, 0.8) for i in [0, 1, 5, 6]], run_time=2)
self.play(*[TransformFromCopy(f[5][i], f[6][j]) for (i, j) in zip([3, 4, 8, 9], [0, 1, 2, 3])], run_time=3)
self.play(Succession(FadeToColor(f[6], YELLOW), FadeToColor(f[6], PURPLE_A)), AnimationGroup(ShowCreationThenDestructionAround(f[6].deepcopy()), ShowCreationThenDestructionAround(f[6].deepcopy()), lag_ratio=1))
self.cases_group_3.add(line_1, line_2, line_3, line_4, by_case_1, by_case_2, f)
self.wait()
|
def proof_3(case):
print('Proof 3')
dots = case[1]
(d1, d2, d3) = dots
center = case[0]
theta = case[-2]
varphi = case[-1]
self.add_foreground_mobject(dots)
def fade_mobs(fade=0.9):
def update(mob):
mob.fade(fade)
return mob
return update
diameter_vector = Line(d3.get_center(), center.get_center()).get_vector()
diameter = Line(d3.get_center(), d3.get_center() + diameter_vector * 2, color=RED_A)
d4 = Dot(diameter.get_end())
arc_psi_1 = ArcBetweenVectors(0.6, d2, d4, d3, color=RED_A)
arc_psi_2 = ArcBetweenVectors(0.6, d2, d4, center, color=RED_A)
arc_alpha_1 = ArcBetweenVectors(0.7, d1, d4, d3, color=YELLOW_B, stroke_width=8)
arc_alpha_2 = ArcBetweenVectors(0.7, d1, d4, center, color=YELLOW_B, stroke_width=8)
psi_1 = LabelFromArc(arc_psi_1, theta.get_height() * 0.8, '\\psi_1', color=RED_A, distance_proportion=2.1)
psi_2 = LabelFromArc(arc_psi_2, theta.get_height() * 0.8, '\\psi_2', color=RED_A, distance_proportion=2.1)
alpha_1 = LabelFromArc(arc_alpha_1, theta.get_height() * 0.8, '\\alpha_1', color=YELLOW_B, distance_proportion=2.5)
back_1 = BackgroundRectangle(alpha_1)
alpha_2 = LabelFromArc(arc_alpha_2, theta.get_height() * 0.8, '\\alpha_2', color=YELLOW_B, distance_proportion=2.3)
back_2 = BackgroundRectangle(alpha_2)
line_1 = Line(center.get_center(), d1.get_center(), color=varphi.get_color())
line_2 = Line(d3.get_center(), d1.get_center(), color=varphi.get_color())
psi_g = VGroup(arc_psi_1, arc_psi_2, psi_1, psi_2)
alpha_g = VGroup(arc_alpha_1, arc_alpha_2, alpha_1, alpha_2)
theta_g = VGroup(theta, varphi, case[-3], case[-4], case[2], case[3])
self.wait()
self.play(GrowFromCenter(diameter))
self.wait()
self.play(LaggedStart(*[Write(arc) for arc in psi_g]))
self.wait(2)
self.play(*list(map(FadeIn, [back_1, back_2])), LaggedStart(*[Write(arc) for arc in alpha_g]))
self.wait(2)
self.add_foreground_mobjects(back_1, back_2, alpha_g)
self.cases_group_3.add(arc_psi_1, arc_psi_2, arc_alpha_1, arc_alpha_2, diameter, psi_1, psi_2, alpha_1, alpha_2)
self.add(line_1, line_2)
for i in [psi_g, theta_g]:
for j in i:
j.save_state()
self.play(*[ApplyFunction(fade_mobs(), i) for i in psi_g], *[ApplyFunction(fade_mobs(), i) for i in theta_g])
self.wait()
formulas = [['\\alpha_1', '=', '\\psi_1', '+', '\\theta'], ['\\alpha_2', '=', '\\psi_2', '+', '\\varphi'], ['\\alpha_2', '=', '2', '\\alpha_1'], ['\\psi_2', '+', '\\varphi', '=', '2', '(', '\\psi_1', '+', '\\theta', ')'], ['\\psi_2', '=', '2', '\\psi_1'], ['2', '\\psi_1', '+', '\\varphi', '=', '2', '\\psi_1', '+', '2', '\\theta'], ['\\varphi', '=', '2', '\\theta']]
tex_formulas_kwargs = {'tex_to_color_map': {'\\psi_1': psi_1.get_color(), '\\psi_2': psi_2.get_color(), '\\varphi': varphi.get_color(), '\\theta': theta.get_color(), '\\alpha_1': alpha_1.get_color(), '\\alpha_2': alpha_2.get_color()}}
f = VGroup(*[TexMobject(*formula, **tex_formulas_kwargs) for formula in formulas])
f.arrange(DOWN)
f.scale(1.3)
for (fi, i) in zip(f[1:], [1, 1, 3, 1, 4, 1]):
c1 = fi[i].get_center()
c2 = f[0][1].get_center()
distance = c2 - c1
fi.shift(RIGHT * distance[0])
f.to_edge(RIGHT, buff=1.8)
by_case_1 = TextMobject('By case 1')
by_case_1.next_to(f[2], RIGHT)
by_case_2 = by_case_1.copy()
by_case_2.next_to(f[4], RIGHT)
self.play(FadeOut(back_1), Restore(theta), Restore(psi_1), ReplacementTransform(alpha_1[0], f[0][0]), run_time=2)
self.play(TransformFromCopy(psi_1[0], f[0][2]), TransformFromCopy(theta[0], f[0][-1]), *[Write(f[0][i]) for i in [1, 3]], run_time=3)
self.wait()
self.play(FadeOut(back_2), Restore(varphi), Restore(psi_2), ReplacementTransform(alpha_2[0], f[1][0]), run_time=2)
self.play(TransformFromCopy(psi_2[0], f[1][2]), TransformFromCopy(varphi[0], f[1][-1]), *[Write(f[1][i]) for i in [1, 3]], run_time=3)
self.wait()
self.play(Write(by_case_1))
self.wait()
self.play(Write(f[2]))
self.wait()
self.play(TransformFromCopy(f[0][-3:], f[3][6:9]), TransformFromCopy(f[1][-3:], f[3][:3]), *[TransformFromCopy(f[2][i], f[3][j]) for (i, j) in zip([1, 2], [3, 4])], *[Write(f[3][i]) for i in [5, 9]], run_time=3)
self.wait()
self.wait()
line_3 = Line(d3.get_center(), d2.get_center(), color=TEAL_A)
line_4 = Line(center.get_center(), d2.get_center(), color=PURPLE_A)
save_grp = VGroup(arc_alpha_1, arc_alpha_2, varphi, theta)
for i in save_grp:
try:
i.save_state()
except:
pass
self.play(FadeOut(line_1), FadeOut(line_2), FadeIn(line_3), FadeIn(line_4), *[ApplyMethod(i.fade, 0.92) for i in save_grp], *[Restore(i) for i in [arc_psi_1, arc_psi_2]])
self.wait(3)
self.play(Write(by_case_2))
self.wait()
self.play(Write(f[4]))
self.wait(3)
self.play(*[Restore(i) for i in [*save_grp, *case[2:6]]])
self.wait()
self.play(TransformFromCopy(f[3][0], f[5][:2]), *[TransformFromCopy(f[3][i], f[5][j]) for (i, j) in zip([1, 2, 3, 4, 6, 7, 8, 4], [2, 3, 4, 5, 6, 7, 9, 8])], run_time=3)
self.wait()
self.play(*[ApplyMethod(f[5][i].fade, 0.8) for i in [0, 1, 5, 6]], run_time=2)
self.play(*[TransformFromCopy(f[5][i], f[6][j]) for (i, j) in zip([3, 4, 8, 9], [0, 1, 2, 3])], run_time=3)
self.play(Succession(FadeToColor(f[6], YELLOW), FadeToColor(f[6], PURPLE_A)), AnimationGroup(ShowCreationThenDestructionAround(f[6].deepcopy()), ShowCreationThenDestructionAround(f[6].deepcopy()), lag_ratio=1))
self.cases_group_3.add(line_1, line_2, line_3, line_4, by_case_1, by_case_2, f)
self.wait()
|
AnimationsWithManim
|
positive
|
def nuscenes_gt_to_kitti(self, lyft_dataroot: str='/home/yw763/driving/lyft/v1.02-train', table_folder: str='/home/yw763/driving/lyft/v1.02-train/v1.02-train', lidar_name: str='LIDAR_TOP', get_all_detections: bool=True, parallel_n_jobs: int=16, samples_count: Optional[int]=None) -> None:
"""Converts nuScenes GT formatted annotations to KITTI format.
Args:
lyft_dataroot: folder with tables (json files).
table_folder: folder with tables (json files).
lidar_name: Name of the lidar sensor.
Only one lidar allowed at this moment.
get_all_detections: If True, will write all
bboxes in PointCloud and use only FrontCamera.
parallel_n_jobs: Number of threads for parallel processing.
samples_count: Number of samples to convert.
"""
self.lyft_dataroot = lyft_dataroot
self.table_folder = table_folder
self.lidar_name = lidar_name
self.get_all_detections = get_all_detections
self.samples_count = samples_count
self.parallel_n_jobs = parallel_n_jobs
self.lyft_ds = LyftDataset(self.lyft_dataroot, self.table_folder)
self.kitti_to_nu_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi)
self.kitti_to_nu_lidar_inv = self.kitti_to_nu_lidar.inverse
split_logs = [self.lyft_ds.get('log', scene['log_token'])['logfile'] for scene in self.lyft_ds.scene]
if self.get_all_detections:
self.cams_to_see = ['CAM_FRONT']
else:
self.cams_to_see = ['CAM_FRONT', 'CAM_FRONT_LEFT', 'CAM_FRONT_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT']
self.label_folder = self.store_dir.joinpath('label_2')
self.calib_folder = self.store_dir.joinpath('calib')
self.image_folder = self.store_dir.joinpath('image_2')
self.lidar_folder = self.store_dir.joinpath('velodyne')
for folder in [self.label_folder, self.calib_folder, self.image_folder, self.lidar_folder]:
if not folder.is_dir():
folder.mkdir(parents=True)
<DeepExtract>
samples = []
str_to_write = ''
for sample in self.lyft_ds.sample:
scene = self.lyft_ds.get('scene', sample['scene_token'])
log = self.lyft_ds.get('log', scene['log_token'])
logfile = log['logfile']
if logfile in split_logs:
samples.append(sample['token'])
str_to_write += '{} {}\n'.format(sample['token'], sample['scene_token'])
with open(os.path.join(os.path.dirname(self.store_dir), 'lyft_scenes.txt'), 'w') as f:
f.write(str_to_write)
sample_tokens = samples
</DeepExtract>
if self.samples_count is not None:
sample_tokens = sample_tokens[:self.samples_count]
sample_tokens = sample_tokens
self.tokens = sample_tokens
with parallel_backend('threading', n_jobs=self.parallel_n_jobs):
Parallel()((delayed(self.process_token_to_kitti)(sample_token) for sample_token in tqdm(sample_tokens)))
|
def nuscenes_gt_to_kitti(self, lyft_dataroot: str='/home/yw763/driving/lyft/v1.02-train', table_folder: str='/home/yw763/driving/lyft/v1.02-train/v1.02-train', lidar_name: str='LIDAR_TOP', get_all_detections: bool=True, parallel_n_jobs: int=16, samples_count: Optional[int]=None) -> None:
"""Converts nuScenes GT formatted annotations to KITTI format.
Args:
lyft_dataroot: folder with tables (json files).
table_folder: folder with tables (json files).
lidar_name: Name of the lidar sensor.
Only one lidar allowed at this moment.
get_all_detections: If True, will write all
bboxes in PointCloud and use only FrontCamera.
parallel_n_jobs: Number of threads for parallel processing.
samples_count: Number of samples to convert.
"""
self.lyft_dataroot = lyft_dataroot
self.table_folder = table_folder
self.lidar_name = lidar_name
self.get_all_detections = get_all_detections
self.samples_count = samples_count
self.parallel_n_jobs = parallel_n_jobs
self.lyft_ds = LyftDataset(self.lyft_dataroot, self.table_folder)
self.kitti_to_nu_lidar = Quaternion(axis=(0, 0, 1), angle=np.pi)
self.kitti_to_nu_lidar_inv = self.kitti_to_nu_lidar.inverse
split_logs = [self.lyft_ds.get('log', scene['log_token'])['logfile'] for scene in self.lyft_ds.scene]
if self.get_all_detections:
self.cams_to_see = ['CAM_FRONT']
else:
self.cams_to_see = ['CAM_FRONT', 'CAM_FRONT_LEFT', 'CAM_FRONT_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT']
self.label_folder = self.store_dir.joinpath('label_2')
self.calib_folder = self.store_dir.joinpath('calib')
self.image_folder = self.store_dir.joinpath('image_2')
self.lidar_folder = self.store_dir.joinpath('velodyne')
for folder in [self.label_folder, self.calib_folder, self.image_folder, self.lidar_folder]:
if not folder.is_dir():
folder.mkdir(parents=True)
samples = []
str_to_write = ''
for sample in self.lyft_ds.sample:
scene = self.lyft_ds.get('scene', sample['scene_token'])
log = self.lyft_ds.get('log', scene['log_token'])
logfile = log['logfile']
if logfile in split_logs:
samples.append(sample['token'])
str_to_write += '{} {}\n'.format(sample['token'], sample['scene_token'])
with open(os.path.join(os.path.dirname(self.store_dir), 'lyft_scenes.txt'), 'w') as f:
f.write(str_to_write)
sample_tokens = samples
if self.samples_count is not None:
sample_tokens = sample_tokens[:self.samples_count]
sample_tokens = sample_tokens
self.tokens = sample_tokens
with parallel_backend('threading', n_jobs=self.parallel_n_jobs):
Parallel()((delayed(self.process_token_to_kitti)(sample_token) for sample_token in tqdm(sample_tokens)))
|
3D_adapt_auto_driving
|
positive
|
def _parse_kexecdh_init(self, m):
peer_key_bytes = m.get_string()
peer_key = X25519PublicKey.from_public_bytes(peer_key_bytes)
<DeepExtract>
secret = self.key.exchange(peer_key)
if constant_time.bytes_eq(secret, b'\x00' * 32):
raise SSHException("peer's curve25519 public value has wrong order")
K = secret
</DeepExtract>
K = long(binascii.hexlify(K), 16)
hm = Message()
hm.add(self.transport.remote_version, self.transport.local_version, self.transport.remote_kex_init, self.transport.local_kex_init)
server_key_bytes = self.transport.get_server_key().asbytes()
exchange_key_bytes = self.key.public_key().public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
hm.add_string(server_key_bytes)
hm.add_string(peer_key_bytes)
hm.add_string(exchange_key_bytes)
hm.add_mpint(K)
H = self.hash_algo(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
sig = self.transport.get_server_key().sign_ssh_data(H)
m = Message()
m.add_byte(c_MSG_KEXECDH_REPLY)
m.add_string(server_key_bytes)
m.add_string(exchange_key_bytes)
m.add_string(sig)
self.transport._send_message(m)
self.transport._activate_outbound()
|
def _parse_kexecdh_init(self, m):
peer_key_bytes = m.get_string()
peer_key = X25519PublicKey.from_public_bytes(peer_key_bytes)
secret = self.key.exchange(peer_key)
if constant_time.bytes_eq(secret, b'\x00' * 32):
raise SSHException("peer's curve25519 public value has wrong order")
K = secret
K = long(binascii.hexlify(K), 16)
hm = Message()
hm.add(self.transport.remote_version, self.transport.local_version, self.transport.remote_kex_init, self.transport.local_kex_init)
server_key_bytes = self.transport.get_server_key().asbytes()
exchange_key_bytes = self.key.public_key().public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
hm.add_string(server_key_bytes)
hm.add_string(peer_key_bytes)
hm.add_string(exchange_key_bytes)
hm.add_mpint(K)
H = self.hash_algo(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
sig = self.transport.get_server_key().sign_ssh_data(H)
m = Message()
m.add_byte(c_MSG_KEXECDH_REPLY)
m.add_string(server_key_bytes)
m.add_string(exchange_key_bytes)
m.add_string(sig)
self.transport._send_message(m)
self.transport._activate_outbound()
|
cerbrutus
|
positive
|
def __init__(self):
"""Inits the local environment with the initial os environment."""
self.logger = logging.getLogger(LOGGING_GROUP)
self.active_environ = None
self.active_path = None
self.active_pypath = None
self.active_buildvars = var_dict.VarDict()
self.checkpoints = []
<DeepExtract>
self.active_environ = dict()
for (key, value) in os.environ.items():
self.active_environ[key] = value
path = self.active_environ.get('PATH', '')
self.active_path = list(filter(None, path.split(os.pathsep)))
self.active_pypath = sys.path
self.active_environ.pop('PATH', None)
self.active_environ.pop('PYTHONPATH', None)
</DeepExtract>
<DeepExtract>
new_index = len(self.checkpoints)
self.checkpoints.append({'environ': copy.copy(self.active_environ), 'path': self.active_path, 'pypath': self.active_pypath, 'buildvars': copy.copy(self.active_buildvars)})
return new_index
</DeepExtract>
|
def __init__(self):
"""Inits the local environment with the initial os environment."""
self.logger = logging.getLogger(LOGGING_GROUP)
self.active_environ = None
self.active_path = None
self.active_pypath = None
self.active_buildvars = var_dict.VarDict()
self.checkpoints = []
self.active_environ = dict()
for (key, value) in os.environ.items():
self.active_environ[key] = value
path = self.active_environ.get('PATH', '')
self.active_path = list(filter(None, path.split(os.pathsep)))
self.active_pypath = sys.path
self.active_environ.pop('PATH', None)
self.active_environ.pop('PYTHONPATH', None)
new_index = len(self.checkpoints)
self.checkpoints.append({'environ': copy.copy(self.active_environ), 'path': self.active_path, 'pypath': self.active_pypath, 'buildvars': copy.copy(self.active_buildvars)})
return new_index
|
edk2-pytool-extensions
|
positive
|
def reset(self):
self.reset_callback(self.world)
<DeepExtract>
self.render_geoms = None
self.render_geoms_xform = None
</DeepExtract>
obs_n = []
self.agents = self.world.policy_agents
for agent in self.agents:
obs_n.append(self._get_obs(agent))
self.episode_memory = []
<DeepExtract>
import copy
self.episode_memory.append(copy.deepcopy(self.world))
</DeepExtract>
return obs_n
|
def reset(self):
self.reset_callback(self.world)
self.render_geoms = None
self.render_geoms_xform = None
obs_n = []
self.agents = self.world.policy_agents
for agent in self.agents:
obs_n.append(self._get_obs(agent))
self.episode_memory = []
import copy
self.episode_memory.append(copy.deepcopy(self.world))
return obs_n
|
epciclr2020
|
positive
|
def drill_hole(cx, cy, r):
global MOVE_HEIGHT, gTMP_DRILL_X, gTMP_DRILL_Y, gTMP_DRILL_Z, DRILL_SPEED, DRILL_DEPTH, Z_STEP_DRILL, XY_SPEED
out_data = ''
gcode_tmp_flag = 0
z_step_n = int(float(DRILL_DEPTH) / float(Z_STEP_DRILL)) + 1
z_step = float(DRILL_DEPTH) / z_step_n
if MOVE_HEIGHT != gTMP_DRILL_Z:
gTMP_DRILL_Z = MOVE_HEIGHT
out_data += 'G0 Z' + floats(gTMP_DRILL_Z) + ' F' + floats(DRILL_SPEED) + '\n'
out_data += 'G0 X' + floats(cx) + ' Y' + floats(cy) + '\n'
<DeepExtract>
int(100)
new_points_num = int(2.0 * pi * float(r) / float(MM_PER_ARC_SEGMENT))
if new_points_num < 4:
new_points_num = 4
elif new_points_num > 50:
new_points_num = 50
100 = new_points_num
points = []
i = 100
while i > 0:
cir_x = cx + r * cos(2.0 * pi * float(i) / float(100))
cir_y = cy + r * sin(2.0 * pi * float(i) / float(100))
points.extend([cir_x, cir_y])
i -= 1
cir_x = cx + r * cos(0.0)
cir_y = cy + r * sin(0.0)
points.extend([cir_x, cir_y])
points = points
</DeepExtract>
i = 1
while i <= z_step_n:
gTMP_DRILL_Z = i * z_step
j = 0
cricle_data = 'G1'
while j < len(points):
px = points[j]
py = points[j + 1]
if px != gTMP_DRILL_X:
gTMP_DRILL_X = px
cricle_data += ' X' + floats(px)
gcode_tmp_flag = 1
if py != gTMP_DRILL_Y:
gTMP_DRILL_Y = py
cricle_data += ' Y' + floats(py)
gcode_tmp_flag = 1
if gcode_tmp_flag:
cricle_data += ' F' + floats(XY_SPEED)
cricle_data = 'G1'
gcode_tmp_flag = 0
j += 2
i += 1
DRILL_DESIRED_DIAM = 2 * (r + DRILL_D / 2)
out_data += 'G0 Z' + floats(DRILL_DEPTH) + ' F' + floats(DRILL_SPEED) + ' D' + floats(DRILL_DESIRED_DIAM) + '\n'
gTMP_DRILL_X = cx + r
gTMP_DRILL_Y = cy
return out_data
|
def drill_hole(cx, cy, r):
global MOVE_HEIGHT, gTMP_DRILL_X, gTMP_DRILL_Y, gTMP_DRILL_Z, DRILL_SPEED, DRILL_DEPTH, Z_STEP_DRILL, XY_SPEED
out_data = ''
gcode_tmp_flag = 0
z_step_n = int(float(DRILL_DEPTH) / float(Z_STEP_DRILL)) + 1
z_step = float(DRILL_DEPTH) / z_step_n
if MOVE_HEIGHT != gTMP_DRILL_Z:
gTMP_DRILL_Z = MOVE_HEIGHT
out_data += 'G0 Z' + floats(gTMP_DRILL_Z) + ' F' + floats(DRILL_SPEED) + '\n'
out_data += 'G0 X' + floats(cx) + ' Y' + floats(cy) + '\n'
int(100)
new_points_num = int(2.0 * pi * float(r) / float(MM_PER_ARC_SEGMENT))
if new_points_num < 4:
new_points_num = 4
elif new_points_num > 50:
new_points_num = 50
100 = new_points_num
points = []
i = 100
while i > 0:
cir_x = cx + r * cos(2.0 * pi * float(i) / float(100))
cir_y = cy + r * sin(2.0 * pi * float(i) / float(100))
points.extend([cir_x, cir_y])
i -= 1
cir_x = cx + r * cos(0.0)
cir_y = cy + r * sin(0.0)
points.extend([cir_x, cir_y])
points = points
i = 1
while i <= z_step_n:
gTMP_DRILL_Z = i * z_step
j = 0
cricle_data = 'G1'
while j < len(points):
px = points[j]
py = points[j + 1]
if px != gTMP_DRILL_X:
gTMP_DRILL_X = px
cricle_data += ' X' + floats(px)
gcode_tmp_flag = 1
if py != gTMP_DRILL_Y:
gTMP_DRILL_Y = py
cricle_data += ' Y' + floats(py)
gcode_tmp_flag = 1
if gcode_tmp_flag:
cricle_data += ' F' + floats(XY_SPEED)
cricle_data = 'G1'
gcode_tmp_flag = 0
j += 2
i += 1
DRILL_DESIRED_DIAM = 2 * (r + DRILL_D / 2)
out_data += 'G0 Z' + floats(DRILL_DEPTH) + ' F' + floats(DRILL_SPEED) + ' D' + floats(DRILL_DESIRED_DIAM) + '\n'
gTMP_DRILL_X = cx + r
gTMP_DRILL_Y = cy
return out_data
|
Cyclone-PCB-Factory
|
positive
|
def _get_env_info(self, measurements, sensor_data):
env_info = {}
pm = measurements.player_measurements
env_info['pos_x'] = pm.transform.location.x
env_info['pos_y'] = pm.transform.location.y
env_info['pos_z'] = pm.transform.location.z
env_info['intersection_otherlane'] = pm.intersection_otherlane
env_info['intersection_offroad'] = pm.intersection_offroad
for camera_name in self._params['cameras']:
camera_params = self._params[camera_name]
<DeepExtract>
if len(sensor_data[camera_name].data.shape) == 2:
sensor_data[camera_name].data = np.expand_dims(sensor_data[camera_name].data, axis=2)
if {**camera_params, 'grayscale': False}.get('grayscale', False):
def rgb2gray(rgb):
env_info[camera_name] = np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
sensor_data[camera_name].data = np.expand_dims(rgb2gray(sensor_data[camera_name].data), axis=2)
if {**camera_params, 'grayscale': False}['postprocessing'] == 'Depth':
(min_range, max_range) = (0.0001, 1)
sensor_data[camera_name].data = np.clip(sensor_data[camera_name].data, min_range, max_range)
sensor_data[camera_name].data = np.log(sensor_data[camera_name].data)
sensor_data[camera_name].data = 255 * (sensor_data[camera_name].data - np.log(min_range)) / (np.log(max_range) - np.log(min_range))
assert sensor_data[camera_name].data.min() >= 0 and sensor_data[camera_name].data.max() <= 255
sensor_data[camera_name].data = sensor_data[camera_name].data.astype(np.uint8)
env_info[camera_name] = sensor_data[camera_name].data
</DeepExtract>
assert tuple(env_info[camera_name].shape[:2]) == (CarlaCollSpeedEnv.CAMERA_HEIGHT, CarlaCollSpeedEnv.CAMERA_HEIGHT * CarlaCollSpeedEnv.WIDTH_TO_HEIGHT_RATIO)
return env_info
|
def _get_env_info(self, measurements, sensor_data):
env_info = {}
pm = measurements.player_measurements
env_info['pos_x'] = pm.transform.location.x
env_info['pos_y'] = pm.transform.location.y
env_info['pos_z'] = pm.transform.location.z
env_info['intersection_otherlane'] = pm.intersection_otherlane
env_info['intersection_offroad'] = pm.intersection_offroad
for camera_name in self._params['cameras']:
camera_params = self._params[camera_name]
if len(sensor_data[camera_name].data.shape) == 2:
sensor_data[camera_name].data = np.expand_dims(sensor_data[camera_name].data, axis=2)
if {**camera_params, 'grayscale': False}.get('grayscale', False):
def rgb2gray(rgb):
env_info[camera_name] = np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
sensor_data[camera_name].data = np.expand_dims(rgb2gray(sensor_data[camera_name].data), axis=2)
if {**camera_params, 'grayscale': False}['postprocessing'] == 'Depth':
(min_range, max_range) = (0.0001, 1)
sensor_data[camera_name].data = np.clip(sensor_data[camera_name].data, min_range, max_range)
sensor_data[camera_name].data = np.log(sensor_data[camera_name].data)
sensor_data[camera_name].data = 255 * (sensor_data[camera_name].data - np.log(min_range)) / (np.log(max_range) - np.log(min_range))
assert sensor_data[camera_name].data.min() >= 0 and sensor_data[camera_name].data.max() <= 255
sensor_data[camera_name].data = sensor_data[camera_name].data.astype(np.uint8)
env_info[camera_name] = sensor_data[camera_name].data
assert tuple(env_info[camera_name].shape[:2]) == (CarlaCollSpeedEnv.CAMERA_HEIGHT, CarlaCollSpeedEnv.CAMERA_HEIGHT * CarlaCollSpeedEnv.WIDTH_TO_HEIGHT_RATIO)
return env_info
|
CAPs
|
positive
|
def __setitem__(self, name, msg):
"""Add a subexception related to a child node with the
message ``msg``. ``name`` must be present in the names of the
set of child nodes of this exception's node; if this is not
so, a :exc:`KeyError` is raised.
For example, if the exception upon which ``__setitem__`` is
called has a node attribute, and that node attribute has
children that have the names ``name`` and ``title``, you may
successfully call ``__setitem__('name', 'Bad name')`` or
``__setitem__('title', 'Bad title')``. But calling
``__setitem__('wrong', 'whoops')`` will result in a
:exc:`KeyError`.
This method is typically only useful if the ``node`` attribute
of the exception upon which it is called is a schema node
representing a mapping.
"""
for (num, child) in enumerate(self.node.children):
if child.name == name:
exc = Invalid(child, msg)
<DeepExtract>
if self.node and isinstance(self.node.typ, Positional):
exc.positional = True
if num is not None:
exc.pos = num
self.children.append(exc)
</DeepExtract>
return
raise KeyError(name)
|
def __setitem__(self, name, msg):
"""Add a subexception related to a child node with the
message ``msg``. ``name`` must be present in the names of the
set of child nodes of this exception's node; if this is not
so, a :exc:`KeyError` is raised.
For example, if the exception upon which ``__setitem__`` is
called has a node attribute, and that node attribute has
children that have the names ``name`` and ``title``, you may
successfully call ``__setitem__('name', 'Bad name')`` or
``__setitem__('title', 'Bad title')``. But calling
``__setitem__('wrong', 'whoops')`` will result in a
:exc:`KeyError`.
This method is typically only useful if the ``node`` attribute
of the exception upon which it is called is a schema node
representing a mapping.
"""
for (num, child) in enumerate(self.node.children):
if child.name == name:
exc = Invalid(child, msg)
if self.node and isinstance(self.node.typ, Positional):
exc.positional = True
if num is not None:
exc.pos = num
self.children.append(exc)
return
raise KeyError(name)
|
colander
|
positive
|
def getattr_recursive(self, name):
"""Recursively check wrappers to find attribute.
:param name (str) name of attribute to look for
:return: (object) attribute
"""
<DeepExtract>
all_attributes = self.__dict__.copy()
all_attributes.update(self.class_attributes)
all_attributes = all_attributes
</DeepExtract>
if name in all_attributes:
attr = getattr(self, name)
elif hasattr(self.venv, 'getattr_recursive'):
attr = self.venv.getattr_recursive(name)
else:
attr = getattr(self.venv, name)
return attr
|
def getattr_recursive(self, name):
"""Recursively check wrappers to find attribute.
:param name (str) name of attribute to look for
:return: (object) attribute
"""
all_attributes = self.__dict__.copy()
all_attributes.update(self.class_attributes)
all_attributes = all_attributes
if name in all_attributes:
attr = getattr(self, name)
elif hasattr(self.venv, 'getattr_recursive'):
attr = self.venv.getattr_recursive(name)
else:
attr = getattr(self.venv, name)
return attr
|
adversarially-guided-actor-critic
|
positive
|
def decode_arpl(state):
if state.using64:
state.result.operation = 'movsxd'
<DeepExtract>
if state.final_op_size == 1:
reg_list = get_byte_reg_list(state)
if state.final_op_size == 2:
reg_list = Reg16List
if state.final_op_size == 4:
reg_list = Reg32List
if state.final_op_size == 8:
reg_list = Reg64List
</DeepExtract>
<DeepExtract>
reg = decode_rm(state, state.operand1, Reg32List, 4)
if state.operand0 != None:
if state.rex_reg:
reg_offset = 8
else:
reg_offset = 0
state.operand0.size = state.final_op_size
state.operand0.operand = reg_list[reg + reg_offset]
</DeepExtract>
else:
state.final_op_size = 2
<DeepExtract>
if state.final_op_size == 1:
reg_list = get_byte_reg_list(state)
if state.final_op_size == 2:
reg_list = Reg16List
if state.final_op_size == 4:
reg_list = Reg32List
if state.final_op_size == 8:
reg_list = Reg64List
</DeepExtract>
<DeepExtract>
reg = decode_rm(state, state.operand0, reg_list, 2)
if state.operand1 != None:
if state.rex_reg:
reg_offset = 8
else:
reg_offset = 0
state.operand1.size = state.final_op_size
state.operand1.operand = reg_list[reg + reg_offset]
</DeepExtract>
|
def decode_arpl(state):
if state.using64:
state.result.operation = 'movsxd'
if state.final_op_size == 1:
reg_list = get_byte_reg_list(state)
if state.final_op_size == 2:
reg_list = Reg16List
if state.final_op_size == 4:
reg_list = Reg32List
if state.final_op_size == 8:
reg_list = Reg64List
reg = decode_rm(state, state.operand1, Reg32List, 4)
if state.operand0 != None:
if state.rex_reg:
reg_offset = 8
else:
reg_offset = 0
state.operand0.size = state.final_op_size
state.operand0.operand = reg_list[reg + reg_offset]
else:
state.final_op_size = 2
if state.final_op_size == 1:
reg_list = get_byte_reg_list(state)
if state.final_op_size == 2:
reg_list = Reg16List
if state.final_op_size == 4:
reg_list = Reg32List
if state.final_op_size == 8:
reg_list = Reg64List
reg = decode_rm(state, state.operand0, reg_list, 2)
if state.operand1 != None:
if state.rex_reg:
reg_offset = 8
else:
reg_offset = 0
state.operand1.size = state.final_op_size
state.operand1.operand = reg_list[reg + reg_offset]
|
deprecated-binaryninja-python
|
positive
|
def precision_recall_fscore_iou_support(raster_true, raster_pred, beta=1.0, smooth=1e-06, average=None, rtol=1e-08, envelope_true=0, binarization=None):
"""Computes precision, recall, F1 score, and Intersection over Union
metrics all in one pass.
:param raster_true: the input ground truth raster image
:type raster_true: numpy.ndarray
:param raster_pred: the input predicted raster image
:type raster_pred: numpy.ndarray
:param beta:
The strength of recall versus precision in the F-score.
:type beta: float
:param smooth: how much smoothing to apply when computing
possibly zero quantities
:type smooth: float
:param rtol: zeroes down values smaller than this
:type rtol: float
:param average: If ``None``, does nothing. Otherwise, determines
the kind of averaging to apply:
``'mean'``: computes mean values
:param envelope_true: how much is the spatial tolerance
to the inaccuracies in vectorization. The `raster_true`
image will be dilated (i.e., its black lines expanded)
by this many pixels in every direction.
:type envelope_true: int
:param binarization: If ``None``, no binarization is performed, and the images
are assumed to be already binary. Otherwise, determines the type
of binarization performed before computing metrics on 1-0 images:
``'maxink'``:
Binarize the image, converting all grayscale values to black
(thickening the lines, maximizing ink).
``'maxwhite'``:
Binarize the image, converting all grayscale values to white
(thinning the lines, maximizing whitespace).
``'median'``:
Binarize the image, converting all values below 128 to black.
:type binarization: str, optional
:return: precision, recall, F1 score, and IoU metrics.
Shape of all measures is determined by `average` variable.
If `average` is None, all values are arrays of floats.
If `average` is not None, all values are floating-point numbers.
"""
<DeepExtract>
raster_true = ensure_tensor(raster_true)
if binarization == 'maxink':
binarization_func = color_utils.img_8bit_to_binary_maxink
elif binarization == 'maxwhite':
binarization_func = color_utils.img_8bit_to_binary_maxwhite
elif binarization == 'median':
binarization_func = color_utils.img_8bit_to_binary_median
elif None is binarization:
binarization_func = lambda image: image
else:
raise NotImplementedError
raster_true = binarization_func(raster_true)
assert _is_binary_1bit(raster_true), 'images are not binary (only values 0 and 1 allowed in images)'
raster_true = 1 - raster_true
if envelope_true:
assert isinstance(envelope_true, int)
raster_true = np.array([binary_dilation(r, iterations=envelope_true) for r in raster_true]).astype(np.uint8)
raster_true = raster_true
</DeepExtract>
<DeepExtract>
raster_pred = ensure_tensor(raster_pred)
if binarization == 'maxink':
binarization_func = color_utils.img_8bit_to_binary_maxink
elif binarization == 'maxwhite':
binarization_func = color_utils.img_8bit_to_binary_maxwhite
elif binarization == 'median':
binarization_func = color_utils.img_8bit_to_binary_median
elif None is binarization:
binarization_func = lambda image: image
else:
raise NotImplementedError
raster_pred = binarization_func(raster_pred)
assert _is_binary_1bit(raster_pred), 'images are not binary (only values 0 and 1 allowed in images)'
raster_pred = 1 - raster_pred
if envelope:
assert isinstance(envelope, int)
raster_pred = np.array([binary_dilation(r, iterations=envelope) for r in raster_pred]).astype(np.uint8)
raster_pred = raster_pred
</DeepExtract>
axes = (1, 2)
tp = np.sum(raster_true * raster_pred, axis=axes)
fp = np.sum(raster_pred, axis=axes) - tp
fn = np.sum(raster_true, axis=axes) - tp
precision = (tp + smooth) / (tp + fp + smooth)
recall = (tp + smooth) / (tp + fn + smooth)
beta2 = beta ** 2
f_score = (1 + beta2) * (precision * recall + smooth) / (beta2 * precision + recall + smooth)
f_score[np.isclose(recall, 0, rtol=rtol) & np.isclose(precision, 0, rtol=rtol)] = 0
union = tp + fp + fn
iou_score = (tp + smooth) / (union + smooth)
if average == 'mean':
precision = np.mean(precision, axis=0)
recall = np.mean(recall, axis=0)
f_score = np.mean(f_score, axis=0)
iou_score = np.mean(iou_score, axis=0)
return (precision, recall, f_score, iou_score)
|
def precision_recall_fscore_iou_support(raster_true, raster_pred, beta=1.0, smooth=1e-06, average=None, rtol=1e-08, envelope_true=0, binarization=None):
"""Computes precision, recall, F1 score, and Intersection over Union
metrics all in one pass.
:param raster_true: the input ground truth raster image
:type raster_true: numpy.ndarray
:param raster_pred: the input predicted raster image
:type raster_pred: numpy.ndarray
:param beta:
The strength of recall versus precision in the F-score.
:type beta: float
:param smooth: how much smoothing to apply when computing
possibly zero quantities
:type smooth: float
:param rtol: zeroes down values smaller than this
:type rtol: float
:param average: If ``None``, does nothing. Otherwise, determines
the kind of averaging to apply:
``'mean'``: computes mean values
:param envelope_true: how much is the spatial tolerance
to the inaccuracies in vectorization. The `raster_true`
image will be dilated (i.e., its black lines expanded)
by this many pixels in every direction.
:type envelope_true: int
:param binarization: If ``None``, no binarization is performed, and the images
are assumed to be already binary. Otherwise, determines the type
of binarization performed before computing metrics on 1-0 images:
``'maxink'``:
Binarize the image, converting all grayscale values to black
(thickening the lines, maximizing ink).
``'maxwhite'``:
Binarize the image, converting all grayscale values to white
(thinning the lines, maximizing whitespace).
``'median'``:
Binarize the image, converting all values below 128 to black.
:type binarization: str, optional
:return: precision, recall, F1 score, and IoU metrics.
Shape of all measures is determined by `average` variable.
If `average` is None, all values are arrays of floats.
If `average` is not None, all values are floating-point numbers.
"""
raster_true = ensure_tensor(raster_true)
if binarization == 'maxink':
binarization_func = color_utils.img_8bit_to_binary_maxink
elif binarization == 'maxwhite':
binarization_func = color_utils.img_8bit_to_binary_maxwhite
elif binarization == 'median':
binarization_func = color_utils.img_8bit_to_binary_median
elif None is binarization:
binarization_func = lambda image: image
else:
raise NotImplementedError
raster_true = binarization_func(raster_true)
assert _is_binary_1bit(raster_true), 'images are not binary (only values 0 and 1 allowed in images)'
raster_true = 1 - raster_true
if envelope_true:
assert isinstance(envelope_true, int)
raster_true = np.array([binary_dilation(r, iterations=envelope_true) for r in raster_true]).astype(np.uint8)
raster_true = raster_true
raster_pred = ensure_tensor(raster_pred)
if binarization == 'maxink':
binarization_func = color_utils.img_8bit_to_binary_maxink
elif binarization == 'maxwhite':
binarization_func = color_utils.img_8bit_to_binary_maxwhite
elif binarization == 'median':
binarization_func = color_utils.img_8bit_to_binary_median
elif None is binarization:
binarization_func = lambda image: image
else:
raise NotImplementedError
raster_pred = binarization_func(raster_pred)
assert _is_binary_1bit(raster_pred), 'images are not binary (only values 0 and 1 allowed in images)'
raster_pred = 1 - raster_pred
if envelope:
assert isinstance(envelope, int)
raster_pred = np.array([binary_dilation(r, iterations=envelope) for r in raster_pred]).astype(np.uint8)
raster_pred = raster_pred
axes = (1, 2)
tp = np.sum(raster_true * raster_pred, axis=axes)
fp = np.sum(raster_pred, axis=axes) - tp
fn = np.sum(raster_true, axis=axes) - tp
precision = (tp + smooth) / (tp + fp + smooth)
recall = (tp + smooth) / (tp + fn + smooth)
beta2 = beta ** 2
f_score = (1 + beta2) * (precision * recall + smooth) / (beta2 * precision + recall + smooth)
f_score[np.isclose(recall, 0, rtol=rtol) & np.isclose(precision, 0, rtol=rtol)] = 0
union = tp + fp + fn
iou_score = (tp + smooth) / (union + smooth)
if average == 'mean':
precision = np.mean(precision, axis=0)
recall = np.mean(recall, axis=0)
f_score = np.mean(f_score, axis=0)
iou_score = np.mean(iou_score, axis=0)
return (precision, recall, f_score, iou_score)
|
Deep-Vectorization-of-Technical-Drawings
|
positive
|
def add_force_field_physics(ref=None):
<DeepExtract>
objref = None
if ref is None:
objref = ao()
elif is_string(ref):
if object_exists(ref):
objref = bpy.data.objects[ref]
else:
objref = ref
objref = objref
</DeepExtract>
bpy.context.view_layer.objects.active = objref
if objref.field.type == 'NONE':
bpy.ops.object.forcefield_toggle()
|
def add_force_field_physics(ref=None):
objref = None
if ref is None:
objref = ao()
elif is_string(ref):
if object_exists(ref):
objref = bpy.data.objects[ref]
else:
objref = ref
objref = objref
bpy.context.view_layer.objects.active = objref
if objref.field.type == 'NONE':
bpy.ops.object.forcefield_toggle()
|
BY-GEN-public
|
positive
|
def get_all(self):
if self._module.params['resource'] is None:
<DeepExtract>
self._module_result['failed'] = True
self._module_result['changed'] = False
self._module_result.update(kwargs)
self._module_result['msg'] = 'NITRO resource is undefined.'
self._module_result['headers'] = self._headers
self._module.fail_json(**self._module_result)
</DeepExtract>
url = '%s://%s/%s/%s' % (self._module.params['nitro_protocol'], self._module.params['nsip'], self.api_path, self._module.params['resource'])
self._module.debug('headers %s' % self._headers)
timeout = self._module.params['timeout']
(r, info) = fetch_url(self._module, url=url, headers=self._headers, method='GET', timeout=timeout)
result = {}
<DeepExtract>
if r is not None:
result['http_response_body'] = codecs.decode(r.read(), 'utf-8')
elif 'body' in info:
result['http_response_body'] = codecs.decode(info['body'], 'utf-8')
del info['body']
else:
result['http_response_body'] = ''
result['http_response_data'] = info
result['nitro_errorcode'] = None
result['nitro_message'] = None
result['nitro_severity'] = None
if result['http_response_body'] != '':
try:
data = self._module.from_json(result['http_response_body'])
except ValueError:
data = {}
result['nitro_errorcode'] = data.get('errorcode')
result['nitro_message'] = data.get('message')
result['nitro_severity'] = data.get('severity')
if result['nitro_errorcode'] is None:
if result['http_response_data'].get('status') != 200:
result['nitro_errorcode'] = -1
result['nitro_message'] = result['http_response_data'].get('msg', 'HTTP status %s' % result['http_response_data']['status'])
result['nitro_severity'] = 'ERROR'
else:
result['nitro_errorcode'] = 0
result['nitro_message'] = 'Success'
result['nitro_severity'] = 'NONE'
</DeepExtract>
<DeepExtract>
result['nitro_object'] = []
if result['nitro_errorcode'] == 0:
if result['http_response_body'] != '':
data = self._module.from_json(result['http_response_body'])
if self._module.params['resource'] in data:
result['nitro_object'] = data[self._module.params['resource']]
else:
del result['nitro_object']
</DeepExtract>
self._module_result['changed'] = False
return result
|
def get_all(self):
if self._module.params['resource'] is None:
self._module_result['failed'] = True
self._module_result['changed'] = False
self._module_result.update(kwargs)
self._module_result['msg'] = 'NITRO resource is undefined.'
self._module_result['headers'] = self._headers
self._module.fail_json(**self._module_result)
url = '%s://%s/%s/%s' % (self._module.params['nitro_protocol'], self._module.params['nsip'], self.api_path, self._module.params['resource'])
self._module.debug('headers %s' % self._headers)
timeout = self._module.params['timeout']
(r, info) = fetch_url(self._module, url=url, headers=self._headers, method='GET', timeout=timeout)
result = {}
if r is not None:
result['http_response_body'] = codecs.decode(r.read(), 'utf-8')
elif 'body' in info:
result['http_response_body'] = codecs.decode(info['body'], 'utf-8')
del info['body']
else:
result['http_response_body'] = ''
result['http_response_data'] = info
result['nitro_errorcode'] = None
result['nitro_message'] = None
result['nitro_severity'] = None
if result['http_response_body'] != '':
try:
data = self._module.from_json(result['http_response_body'])
except ValueError:
data = {}
result['nitro_errorcode'] = data.get('errorcode')
result['nitro_message'] = data.get('message')
result['nitro_severity'] = data.get('severity')
if result['nitro_errorcode'] is None:
if result['http_response_data'].get('status') != 200:
result['nitro_errorcode'] = -1
result['nitro_message'] = result['http_response_data'].get('msg', 'HTTP status %s' % result['http_response_data']['status'])
result['nitro_severity'] = 'ERROR'
else:
result['nitro_errorcode'] = 0
result['nitro_message'] = 'Success'
result['nitro_severity'] = 'NONE'
result['nitro_object'] = []
if result['nitro_errorcode'] == 0:
if result['http_response_body'] != '':
data = self._module.from_json(result['http_response_body'])
if self._module.params['resource'] in data:
result['nitro_object'] = data[self._module.params['resource']]
else:
del result['nitro_object']
self._module_result['changed'] = False
return result
|
citrix-adc-ansible-modules
|
positive
|
def next(self):
with self.lock:
<DeepExtract>
with self.lock:
index_array = next(self.index_generator)
index_array = self._get_batches_of_transformed_samples(index_array)
</DeepExtract>
return self._get_batches_of_transformed_samples(index_array)
|
def next(self):
with self.lock:
with self.lock:
index_array = next(self.index_generator)
index_array = self._get_batches_of_transformed_samples(index_array)
return self._get_batches_of_transformed_samples(index_array)
|
dsb2018_topcoders
|
positive
|
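The dsb2018_topcoders next() above illustrates the usual thread-safe iterator idiom: only drawing the next index array is serialized behind the lock, while the batch transformation itself can run concurrently in each worker thread (the inlined copy acquires the lock twice and applies the transformation twice, which appears to be an expansion artifact). A stripped-down sketch of the idiom; the class and helper names here are illustrative, not the repository's.

import threading

class BatchIterator:
    def __init__(self, n, batch_size):
        self.lock = threading.Lock()
        self.index_generator = self._flow(n, batch_size)

    def _flow(self, n, batch_size):
        # Cycle over the dataset forever, yielding index lists of size batch_size.
        while True:
            for start in range(0, n, batch_size):
                yield list(range(start, min(start + batch_size, n)))

    def _get_batches_of_transformed_samples(self, index_array):
        return [i * 10 for i in index_array]  # stand-in for real preprocessing

    def next(self):
        with self.lock:  # only the index draw needs to be serialized
            index_array = next(self.index_generator)
        return self._get_batches_of_transformed_samples(index_array)

it = BatchIterator(n=5, batch_size=2)
print(it.next(), it.next(), it.next())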
def test_blade_rotate_exceptions_no_transformation(self):
<DeepExtract>
sections = np.asarray([NacaProfile(digits='0012') for i in range(10)])
radii = np.arange(0.4, 1.31, 0.1)
chord_lengths = np.concatenate((np.arange(0.55, 1.1, 0.15), np.arange(1.03, 0.9, -0.03), np.array([0.3])))
pitch = np.append(np.arange(3.0, 4.0, 0.2), np.arange(4.1, 3.2, -0.2))
rake = np.append(np.arange(0.005, 0.08, 0.01), np.arange(0.075, 0.02, -0.03))
skew_angles = np.append(np.arange(-4.0, -9.0, -3.0), np.arange(-7.0, 15.0, 3.0))
blade = bl.Blade(sections=sections, radii=radii, chord_lengths=chord_lengths, pitch=pitch, rake=rake, skew_angles=skew_angles)
</DeepExtract>
with self.assertRaises(ValueError):
blade.rotate(rad_angle=80, deg_angle=None)
|
def test_blade_rotate_exceptions_no_transformation(self):
sections = np.asarray([NacaProfile(digits='0012') for i in range(10)])
radii = np.arange(0.4, 1.31, 0.1)
chord_lengths = np.concatenate((np.arange(0.55, 1.1, 0.15), np.arange(1.03, 0.9, -0.03), np.array([0.3])))
pitch = np.append(np.arange(3.0, 4.0, 0.2), np.arange(4.1, 3.2, -0.2))
rake = np.append(np.arange(0.005, 0.08, 0.01), np.arange(0.075, 0.02, -0.03))
skew_angles = np.append(np.arange(-4.0, -9.0, -3.0), np.arange(-7.0, 15.0, 3.0))
blade = bl.Blade(sections=sections, radii=radii, chord_lengths=chord_lengths, pitch=pitch, rake=rake, skew_angles=skew_angles)
with self.assertRaises(ValueError):
blade.rotate(rad_angle=80, deg_angle=None)
|
BladeX
|
positive
|
@mock.patch('search.services.index.Search')
@mock.patch('search.services.index.Elasticsearch')
def test_simple_query(self, mock_Elasticsearch, mock_Search):
""":class:`.index.search` supports :class:`SimpleQuery`."""
mock_results = mock.MagicMock()
mock_results.__getitem__.return_value = {'total': 53}
<DeepExtract>
rdata = {'authors': [{'full_name': 'N. Ame'}], 'owners': [{'full_name': 'N. Ame'}], 'submitter': {'full_name': 'N. Ame'}, 'paper_id': '1234.56789', 'title': 'some title', 'abstract': 'An abstract with math $/alpha * /alpha$ for you.'}
</DeepExtract>
mock_result = mock.MagicMock(_d_=rdata, **rdata)
mock_result.meta.score = 1
mock_results.__iter__.return_value = [mock_result]
mock_Search.execute.return_value = mock_results
mock_Search.return_value = mock_Search
mock_Search.filter.return_value = mock_Search
mock_Search.highlight.return_value = mock_Search
mock_Search.highlight_options.return_value = mock_Search
mock_Search.query.return_value = mock_Search
mock_Search.sort.return_value = mock_Search
mock_Search.__getitem__.return_value = mock_Search
query = SimpleQuery(order='relevance', size=10, search_field='title', value='foo title')
document_set = index.SearchSession.search(query, highlight=True)
self.assertEqual(document_set['metadata']['start'], 0)
self.assertEqual(document_set['metadata']['total_results'], 53)
self.assertEqual(document_set['metadata']['current_page'], 1)
self.assertEqual(document_set['metadata']['total_pages'], 6)
self.assertEqual(document_set['metadata']['size'], 10)
self.assertEqual(len(document_set['results']), 1)
|
@mock.patch('search.services.index.Search')
@mock.patch('search.services.index.Elasticsearch')
def test_simple_query(self, mock_Elasticsearch, mock_Search):
""":class:`.index.search` supports :class:`SimpleQuery`."""
mock_results = mock.MagicMock()
mock_results.__getitem__.return_value = {'total': 53}
rdata = {'authors': [{'full_name': 'N. Ame'}], 'owners': [{'full_name': 'N. Ame'}], 'submitter': {'full_name': 'N. Ame'}, 'paper_id': '1234.56789', 'title': 'some title', 'abstract': 'An abstract with math $/alpha * /alpha$ for you.'}
mock_result = mock.MagicMock(_d_=rdata, **rdata)
mock_result.meta.score = 1
mock_results.__iter__.return_value = [mock_result]
mock_Search.execute.return_value = mock_results
mock_Search.return_value = mock_Search
mock_Search.filter.return_value = mock_Search
mock_Search.highlight.return_value = mock_Search
mock_Search.highlight_options.return_value = mock_Search
mock_Search.query.return_value = mock_Search
mock_Search.sort.return_value = mock_Search
mock_Search.__getitem__.return_value = mock_Search
query = SimpleQuery(order='relevance', size=10, search_field='title', value='foo title')
document_set = index.SearchSession.search(query, highlight=True)
self.assertEqual(document_set['metadata']['start'], 0)
self.assertEqual(document_set['metadata']['total_results'], 53)
self.assertEqual(document_set['metadata']['current_page'], 1)
self.assertEqual(document_set['metadata']['total_pages'], 6)
self.assertEqual(document_set['metadata']['size'], 10)
self.assertEqual(len(document_set['results']), 1)
|
arxiv-search
|
positive
|
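The assertions in the arxiv-search test above imply simple pagination arithmetic: 53 total results at a page size of 10 give 6 pages, and a start offset of 0 maps to page 1. A small sketch of that arithmetic; the real index module's implementation is not shown in the snippet, so treat this as an assumed equivalent rather than the library's code.

import math

def paginate(total_results, size, start=0):
    total_pages = math.ceil(total_results / size)  # 53 results / 10 per page -> 6 pages
    current_page = start // size + 1               # start offset 0 -> page 1
    return {'start': start, 'total_results': total_results,
            'current_page': current_page, 'total_pages': total_pages, 'size': size}

assert paginate(53, 10)['total_pages'] == 6
assert paginate(53, 10)['current_page'] == 1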
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
(m, n) = shape(dataMatrix)
weights = ones(n)
for i in range(numIter):
dataIndex = len(list(range(m)))
for j in range(m):
alpha = 4 / (1.0 + i + j) + 0.01
randIndex = int(random.uniform(0, dataIndex))
<DeepExtract>
h = 1 / (1 + exp(-sum(dataMatrix[randIndex] * weights)))
</DeepExtract>
error = classLabels[randIndex] - h
weights = weights + alpha * error * dataMatrix[randIndex]
dataIndex -= 1
return weights
|
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
(m, n) = shape(dataMatrix)
weights = ones(n)
for i in range(numIter):
dataIndex = len(list(range(m)))
for j in range(m):
alpha = 4 / (1.0 + i + j) + 0.01
randIndex = int(random.uniform(0, dataIndex))
h = 1 / (1 + exp(-sum(dataMatrix[randIndex] * weights)))
error = classLabels[randIndex] - h
weights = weights + alpha * error * dataMatrix[randIndex]
dataIndex -= 1
return weights
|
AlgorithmsByPython
|
positive
|
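In the AlgorithmsByPython snippet above, the inlined expression h = 1 / (1 + exp(-sum(dataMatrix[randIndex] * weights))) is just the sigmoid of one sample's dot product with the weight vector, and the update is stochastic gradient ascent on the logistic log-likelihood with a decaying step size. A compact NumPy sketch of the same update rule; variable names follow the snippet, and the toy data at the end is purely illustrative.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def stoc_grad_ascent(data, labels, num_iter=150):
    data = np.asarray(data, dtype=float)
    labels = np.asarray(labels, dtype=float)
    m, n = data.shape
    weights = np.ones(n)
    rng = np.random.default_rng(0)
    for i in range(num_iter):
        for j in range(m):
            alpha = 4 / (1.0 + i + j) + 0.01       # decaying step size, as in the snippet
            k = rng.integers(m)                    # pick a random sample index
            h = sigmoid(np.dot(data[k], weights))  # predicted probability for that sample
            error = labels[k] - h                  # gradient of the log-likelihood is error * x
            weights = weights + alpha * error * data[k]
    return weights

# Toy usage: two separable points.
print(stoc_grad_ascent([[1.0, 0.0], [0.0, 1.0]], [1, 0], num_iter=20))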
def test_compact_output(self):
"""
In compact mode, the process reads JSON lines from stdin and writes out
a pretty-printed compact version.
"""
messages = [SIMPLE_MESSAGE, UNTYPED_MESSAGE, SIMPLE_MESSAGE]
<DeepExtract>
process = Popen([b'eliot-prettyprint'] + list([b'--compact']), stdin=PIPE, stdout=PIPE)
process.stdin.write(b''.join((line + b'\n' for line in map(dumps, messages))))
process.stdin.close()
result = process.stdout.read().decode('utf-8')
process.stdout.close()
stdout = result
</DeepExtract>
self.assertEqual(stdout, ''.join((compact_format(message) + '\n' for message in messages)))
|
def test_compact_output(self):
"""
In compact mode, the process reads JSON lines from stdin and writes out
a pretty-printed compact version.
"""
messages = [SIMPLE_MESSAGE, UNTYPED_MESSAGE, SIMPLE_MESSAGE]
process = Popen([b'eliot-prettyprint'] + list([b'--compact']), stdin=PIPE, stdout=PIPE)
process.stdin.write(b''.join((line + b'\n' for line in map(dumps, messages))))
process.stdin.close()
result = process.stdout.read().decode('utf-8')
process.stdout.close()
stdout = result
self.assertEqual(stdout, ''.join((compact_format(message) + '\n' for message in messages)))
|
eliot
|
positive
|
def file_count(path):
count = 0
for f in os.listdir(path):
child = os.path.join(path, f)
if os.path.isdir(child):
<DeepExtract>
count = 0
for f in os.listdir(child):
child = os.path.join(child, f)
if os.path.isdir(child):
child_count = file_count(child)
count += child_count
else:
count += 1
child_count = count
</DeepExtract>
count += child_count
else:
count += 1
return count
|
def file_count(path):
count = 0
for f in os.listdir(path):
child = os.path.join(path, f)
if os.path.isdir(child):
count = 0
for f in os.listdir(child):
child = os.path.join(child, f)
if os.path.isdir(child):
child_count = file_count(child)
count += child_count
else:
count += 1
child_count = count
count += child_count
else:
count += 1
return count
|
3d-dl
|
positive
|
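file_count above counts regular files by recursing into every subdirectory; in the inlined copy the reuse of count and child makes the control flow harder to follow. An equivalent, non-recursive sketch using os.walk, which visits every directory exactly once (symbolic links to directories are not followed by default):

import os

def file_count(path):
    # Walk the tree once and add up the regular-file entries in each directory.
    total = 0
    for _root, _dirs, files in os.walk(path):
        total += len(files)
    return total

# Example: count files under the current working directory.
print(file_count('.'))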
def start_plugin(module, conf, boat, waypoint_manager):
log.info("Starting plugin {} with config '{}'".format(color(module.plugin.__name__, 34), color(str(conf), 36)))
<DeepExtract>
global boatd_module
if boatd_module is None:
boatd_module = Boatd(boat, waypoint_manager)
boatd = boatd_module
</DeepExtract>
plugin = module.plugin(conf, boatd)
t = threading.Thread(target=plugin.start)
t.start()
return plugin
|
def start_plugin(module, conf, boat, waypoint_manager):
log.info("Starting plugin {} with config '{}'".format(color(module.plugin.__name__, 34), color(str(conf), 36)))
global boatd_module
if boatd_module is None:
boatd_module = Boatd(boat, waypoint_manager)
boatd = boatd_module
plugin = module.plugin(conf, boatd)
t = threading.Thread(target=plugin.start)
t.start()
return plugin
|
boatd
|
positive
|
def demo_pipeline_prepare_nyu(sem_cat, dset, baseline_uncertainty_name, reload_dset=True, roi_field=None, roi_transform=None):
<DeepExtract>
dir_base = pp('{dset.dir_out}', 'eval_' + sem_cat, '{dset.name}_demo_{dset.split}_' + sem_cat)
tmpl_part = pp(dir_base, 'parts', '{frame.fid_no_subdir}_{channel.suffix}{channel.img_ext}')
tmpl_fused = pp(dir_base, '{frame.fid_no_subdir}_{channel.suffix}{channel.img_ext}')
demo_chs = {'demo_image': ChannelLoaderImage(tmpl_part, suffix='image', img_ext='.jpg'), 'demo_gt_contour': ChannelLoaderImage(tmpl_part, suffix='gt_contour', img_ext='.jpg'), 'demo_pred_labels': ChannelLoaderImage(tmpl_part, suffix='pred_labels', img_ext='.png'), 'demo_gen_image': ChannelLoaderImage(tmpl_part, suffix='gen_image', img_ext='.jpg'), 'demo_anomaly_uncertainty': ChannelLoaderImage(tmpl_part, suffix='anomaly_uncertainty', img_ext='.jpg'), 'demo_anomaly_ours': ChannelLoaderImage(tmpl_part, suffix='anomaly_ours', img_ext='.jpg'), 'demo_pipeline': ChannelLoaderImage(tmpl_fused, suffix='pipeline', img_ext='.jpg'), 'demo_scores': ChannelLoaderImage(tmpl_fused, suffix='scores', img_ext='.jpg')}
</DeepExtract>
demo_chs_names = list(demo_chs.keys())
dset.add_channels(**demo_chs)
chan_anomaly_unc = f'anomaly_{baseline_uncertainty_name}'
chan_anomaly_ours = 'anomaly_lag_swap_gt'
dset.set_channels_enabled('image', 'labels_category40', 'pred_labels_trainIds', 'gen_image', chan_anomaly_ours, chan_anomaly_unc)
if reload_dset:
dset.discover()
unc_95_percentile = demo_get_uncertainty_bounds_for_plots(dset, baseline_uncertainty_name)
unc_scale = 1.0 / unc_95_percentile
print(baseline_uncertainty_name, '- uncertainty scale:', unc_scale)
roi_trs = []
if roi_field is not None:
if roi_transform is not None:
roi_trs.append(roi_transform)
roi_trs.append(partial(demo_tr_apply_roi, roi_field=roi_field, score_fields=['anomaly_uncertainty', 'anomaly_ours']))
demo_out_pipeline = TrsChain(*[TrRename({chan_anomaly_unc: 'anomaly_uncertainty', chan_anomaly_ours: 'anomaly_ours'}), partial(tr_rescale_uncertainty, unc_scale)] + roi_trs + [tr_demo_imgs_nyu, TrSaveChannelsAutoDset(demo_chs_names)])
return demo_out_pipeline
|
def demo_pipeline_prepare_nyu(sem_cat, dset, baseline_uncertainty_name, reload_dset=True, roi_field=None, roi_transform=None):
dir_base = pp('{dset.dir_out}', 'eval_' + sem_cat, '{dset.name}_demo_{dset.split}_' + sem_cat)
tmpl_part = pp(dir_base, 'parts', '{frame.fid_no_subdir}_{channel.suffix}{channel.img_ext}')
tmpl_fused = pp(dir_base, '{frame.fid_no_subdir}_{channel.suffix}{channel.img_ext}')
demo_chs = {'demo_image': ChannelLoaderImage(tmpl_part, suffix='image', img_ext='.jpg'), 'demo_gt_contour': ChannelLoaderImage(tmpl_part, suffix='gt_contour', img_ext='.jpg'), 'demo_pred_labels': ChannelLoaderImage(tmpl_part, suffix='pred_labels', img_ext='.png'), 'demo_gen_image': ChannelLoaderImage(tmpl_part, suffix='gen_image', img_ext='.jpg'), 'demo_anomaly_uncertainty': ChannelLoaderImage(tmpl_part, suffix='anomaly_uncertainty', img_ext='.jpg'), 'demo_anomaly_ours': ChannelLoaderImage(tmpl_part, suffix='anomaly_ours', img_ext='.jpg'), 'demo_pipeline': ChannelLoaderImage(tmpl_fused, suffix='pipeline', img_ext='.jpg'), 'demo_scores': ChannelLoaderImage(tmpl_fused, suffix='scores', img_ext='.jpg')}
demo_chs_names = list(demo_chs.keys())
dset.add_channels(**demo_chs)
chan_anomaly_unc = f'anomaly_{baseline_uncertainty_name}'
chan_anomaly_ours = 'anomaly_lag_swap_gt'
dset.set_channels_enabled('image', 'labels_category40', 'pred_labels_trainIds', 'gen_image', chan_anomaly_ours, chan_anomaly_unc)
if reload_dset:
dset.discover()
unc_95_percentile = demo_get_uncertainty_bounds_for_plots(dset, baseline_uncertainty_name)
unc_scale = 1.0 / unc_95_percentile
print(baseline_uncertainty_name, '- uncertainty scale:', unc_scale)
roi_trs = []
if roi_field is not None:
if roi_transform is not None:
roi_trs.append(roi_transform)
roi_trs.append(partial(demo_tr_apply_roi, roi_field=roi_field, score_fields=['anomaly_uncertainty', 'anomaly_ours']))
demo_out_pipeline = TrsChain(*[TrRename({chan_anomaly_unc: 'anomaly_uncertainty', chan_anomaly_ours: 'anomaly_ours'}), partial(tr_rescale_uncertainty, unc_scale)] + roi_trs + [tr_demo_imgs_nyu, TrSaveChannelsAutoDset(demo_chs_names)])
return demo_out_pipeline
|
detecting-the-unexpected
|
positive
|
def test_cli_version():
from elasticluster import __version__ as elasticluster_version
<DeepExtract>
with temporary_dir() as tmpdir:
with environment(HOME=os.getcwd(), PYTHONWARNINGS='ignore::DeprecationWarning,ignore::UserWarning' if sys.version_info < (3, 6) else '') as env:
proc = subprocess.Popen(['elasticluster'] + ['--version'], stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, shell=False)
(stdout, stderr) = proc.communicate()
(out, err, code) = (stdout, stderr, proc.returncode)
</DeepExtract>
assert not err
assert not code
assert out.rstrip() == 'elasticluster version {0}'.format(elasticluster_version).encode('ascii')
|
def test_cli_version():
from elasticluster import __version__ as elasticluster_version
with temporary_dir() as tmpdir:
with environment(HOME=os.getcwd(), PYTHONWARNINGS='ignore::DeprecationWarning,ignore::UserWarning' if sys.version_info < (3, 6) else '') as env:
proc = subprocess.Popen(['elasticluster'] + ['--version'], stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, shell=False)
(stdout, stderr) = proc.communicate()
(out, err, code) = (stdout, stderr, proc.returncode)
assert not err
assert not code
assert out.rstrip() == 'elasticluster version {0}'.format(elasticluster_version).encode('ascii')
|
elasticluster
|
positive
|
def run_analyze(conn, cluster_name, cw, schema_name='public', table_name=None, blacklisted_tables=None, ignore_errors=False, predicate_cols=False, stats_off_pct=10, **kwargs):
statements = []
if predicate_cols:
predicate_cols_option = ' PREDICATE COLUMNS '
else:
predicate_cols_option = ' ALL COLUMNS '
if table_name is not None:
get_analyze_statement_feedback = 'SELECT DISTINCT \'analyze \' + "schema" + \'."\' + "table" + \'"\' + \'%s ; \'\n + \'/* Stats_Off : \' + CAST("stats_off" AS VARCHAR(10)) + \' */ ;\'\n FROM svv_table_info\n WHERE stats_off::DECIMAL (32,4) > %s ::DECIMAL (32,4)\n AND trim("schema") ~ \'%s\'\n AND trim("table") = \'%s\';\n ' % (predicate_cols_option, stats_off_pct, schema_name, table_name)
elif blacklisted_tables is not None:
<DeepExtract>
datetime_str = str(datetime.datetime.now())
if 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...' is not None:
if re.match('.*\\n.*', 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...') is not None:
print('/* [%s]\n%s\n*/\n' % (str(os.getpid()), 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...'))
else:
print('-- %s [%s] %s' % (datetime_str, str(os.getpid()), 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...'))
</DeepExtract>
blacklisted_tables_array = blacklisted_tables.split(',')
get_analyze_statement_feedback = '\n SELECT DISTINCT \'analyze \' + feedback_tbl.schema_name + \'."\' + feedback_tbl.table_name + \'"\' + \'%s ; \' + \'/* Stats_Off : \' + CAST(info_tbl."stats_off" AS VARCHAR(10)) + \' */ ;\'\n FROM (/* Get top N rank tables based on the missing statistics alerts */\n SELECT TRIM(n.nspname)::VARCHAR schema_name,\n TRIM(c.relname)::VARCHAR table_name\n FROM (SELECT TRIM(SPLIT_PART(SPLIT_PART(a.plannode,\':\',2),\' \',2)) AS Table_Name,\n COUNT(a.query),\n DENSE_RANK() OVER (ORDER BY COUNT(a.query) DESC) AS qry_rnk\n FROM stl_explain a,\n stl_query b\n WHERE a.query = b.query\n AND CAST(b.starttime AS DATE) >= dateadd(DAY,%s,CURRENT_DATE)\n AND a.userid > 1\n AND regexp_instr(a.plannode,\'.*missing statistics.*\') > 0\n AND regexp_instr(a.plannode,\'.*_bkp_.*\') = 0\n GROUP BY Table_Name) miss_tbl\n LEFT JOIN pg_class c ON c.relname = TRIM(miss_tbl.table_name)\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE miss_tbl.qry_rnk <= %s\n /* Get the top N rank tables based on the stl_alert_event_log alerts */\n UNION\n SELECT schema_name,\n table_name\n FROM (SELECT TRIM(n.nspname)::VARCHAR schema_name,\n TRIM(c.relname)::VARCHAR table_name,\n DENSE_RANK() OVER (ORDER BY COUNT(*) DESC) AS qry_rnk,\n COUNT(*)\n FROM stl_alert_event_log AS l\n JOIN (SELECT query,\n tbl,\n perm_table_name\n FROM stl_scan\n WHERE perm_table_name <> \'Internal Worktable\'\n GROUP BY query,\n tbl,\n perm_table_name) AS s ON s.query = l.query\n JOIN pg_class c ON c.oid = s.tbl\n JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n WHERE l.userid > 1\n AND l.event_time >= dateadd(DAY,%s,CURRENT_DATE)\n AND regexp_instr(l.Solution,\'.*ANALYZE command.*\') > 0\n GROUP BY TRIM(n.nspname),\n TRIM(c.relname)) anlyz_tbl\n WHERE anlyz_tbl.qry_rnk < %s\n UNION\n /* just a base dump of svv_table_info to check the stats_off metric */\n SELECT "schema"::VARCHAR schema_name,\n "table"::VARCHAR table_name\n FROM svv_table_info) feedback_tbl\n JOIN svv_table_info info_tbl\n ON info_tbl.schema = feedback_tbl.schema_name\n AND info_tbl.table = feedback_tbl.table_name\n WHERE info_tbl.stats_off::DECIMAL(32,4) > %s::DECIMAL(32,4)\n AND TRIM(info_tbl.schema) ~ \'%s\'\n AND info_tbl.table NOT IN (%s)\n ORDER BY info_tbl.size ASC;\n ' % (predicate_cols_option, goback_no_of_days, query_rank, goback_no_of_days, query_rank, stats_off_pct, schema_name, str(blacklisted_tables_array)[1:-1])
else:
<DeepExtract>
datetime_str = str(datetime.datetime.now())
if 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...' is not None:
if re.match('.*\\n.*', 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...') is not None:
print('/* [%s]\n%s\n*/\n' % (str(os.getpid()), 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...'))
else:
print('-- %s [%s] %s' % (datetime_str, str(os.getpid()), 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...'))
</DeepExtract>
get_analyze_statement_feedback = '\n SELECT DISTINCT \'analyze \' + feedback_tbl.schema_name + \'."\' + feedback_tbl.table_name + \'"\' + \'%s ; \' + \'/* Stats_Off : \' + CAST(info_tbl."stats_off" AS VARCHAR(10)) + \' */ ;\'\n FROM (/* Get top N rank tables based on the missing statistics alerts */ \n SELECT TRIM(n.nspname)::VARCHAR schema_name,\n TRIM(c.relname)::VARCHAR table_name \n FROM (SELECT TRIM(SPLIT_PART(SPLIT_PART(a.plannode,\':\',2),\' \',2)) AS Table_Name,\n COUNT(a.query),\n DENSE_RANK() OVER (ORDER BY COUNT(a.query) DESC) AS qry_rnk\n FROM stl_explain a,\n stl_query b\n WHERE a.query = b.query\n AND CAST(b.starttime AS DATE) >= dateadd(DAY,%s,CURRENT_DATE)\n AND a.userid > 1\n AND regexp_instr(a.plannode,\'.*missing statistics.*\') > 0\n AND regexp_instr(a.plannode,\'.*_bkp_.*\') = 0\n GROUP BY Table_Name) miss_tbl \n LEFT JOIN pg_class c ON c.relname = TRIM(miss_tbl.table_name) \n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE miss_tbl.qry_rnk <= %s \n /* Get the top N rank tables based on the stl_alert_event_log alerts */\n UNION\n SELECT schema_name,\n table_name\n FROM (SELECT TRIM(n.nspname)::VARCHAR schema_name,\n TRIM(c.relname)::VARCHAR table_name,\n DENSE_RANK() OVER (ORDER BY COUNT(*) DESC) AS qry_rnk,\n COUNT(*)\n FROM stl_alert_event_log AS l\n JOIN (SELECT query,\n tbl,\n perm_table_name\n FROM stl_scan\n WHERE perm_table_name <> \'Internal Worktable\'\n GROUP BY query,\n tbl,\n perm_table_name) AS s ON s.query = l.query\n JOIN pg_class c ON c.oid = s.tbl\n JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n WHERE l.userid > 1\n AND l.event_time >= dateadd(DAY,%s,CURRENT_DATE)\n AND regexp_instr(l.Solution,\'.*ANALYZE command.*\') > 0\n GROUP BY TRIM(n.nspname),\n TRIM(c.relname)) anlyz_tbl\n WHERE anlyz_tbl.qry_rnk < %s \n UNION\n /* just a base dump of svv_table_info to check the stats_off metric */ \n SELECT "schema"::VARCHAR schema_name,\n "table"::VARCHAR table_name\n FROM svv_table_info) feedback_tbl\n JOIN svv_table_info info_tbl\n ON info_tbl.schema = feedback_tbl.schema_name\n AND info_tbl.table = feedback_tbl.table_name\n WHERE info_tbl.stats_off::DECIMAL(32,4) > %s::DECIMAL(32,4)\n AND TRIM(info_tbl.schema) ~ \'%s\' \n ORDER BY info_tbl.size ASC\n ' % (predicate_cols_option, goback_no_of_days, query_rank, goback_no_of_days, query_rank, stats_off_pct, schema_name)
if debug:
<DeepExtract>
datetime_str = str(datetime.datetime.now())
if get_analyze_statement_feedback is not None:
if re.match('.*\\n.*', get_analyze_statement_feedback) is not None:
print('/* [%s]\n%s\n*/\n' % (str(os.getpid()), get_analyze_statement_feedback))
else:
print('-- %s [%s] %s' % (datetime_str, str(os.getpid()), get_analyze_statement_feedback))
</DeepExtract>
<DeepExtract>
cursor = conn.cursor()
cursor.execute(get_analyze_statement_feedback)
try:
results = cursor.fetchall()
if debug:
comment('Query Execution returned %s Results' % len(results))
except Exception as e:
if 'no result set' in str(e):
analyze_statements = None
else:
raise e
analyze_statements = results
</DeepExtract>
for vs in analyze_statements:
statements.append(vs[0])
<DeepExtract>
datetime_str = str(datetime.datetime.now())
if 'Found %s Tables requiring Analysis' % len(statements) is not None:
if re.match('.*\\n.*', 'Found %s Tables requiring Analysis' % len(statements)) is not None:
print('/* [%s]\n%s\n*/\n' % (str(os.getpid()), 'Found %s Tables requiring Analysis' % len(statements)))
else:
print('-- %s [%s] %s' % (datetime_str, str(os.getpid()), 'Found %s Tables requiring Analysis' % len(statements)))
</DeepExtract>
if not run_commands(conn, statements, cw=cw, cluster_name=cluster_name, suppress_errors=ignore_errors):
if not ignore_errors:
if debug:
print('Error running statements: %s' % (str(statements),))
return ERROR
if table_name is None:
<DeepExtract>
datetime_str = str(datetime.datetime.now())
if 'Extracting Candidate Tables for analyze based on stats off from system table info ...' is not None:
if re.match('.*\\n.*', 'Extracting Candidate Tables for analyze based on stats off from system table info ...') is not None:
print('/* [%s]\n%s\n*/\n' % (str(os.getpid()), 'Extracting Candidate Tables for analyze based on stats off from system table info ...'))
else:
print('-- %s [%s] %s' % (datetime_str, str(os.getpid()), 'Extracting Candidate Tables for analyze based on stats off from system table info ...'))
</DeepExtract>
if blacklisted_tables is not None:
blacklisted_tables_array = blacklisted_tables.split(',')
get_analyze_statement = 'SELECT DISTINCT \'analyze \' + "schema" + \'."\' + "table" + \'" %s ; \'\n + \'/* Stats_Off : \' + CAST("stats_off" AS VARCHAR(10)) + \' */ ;\'\n FROM svv_table_info\n WHERE stats_off::DECIMAL (32,4) > %s::DECIMAL (32,4)\n AND trim("schema") ~ \'%s\'\n AND "table" NOT IN (%s)\n ORDER BY "size" ASC ;\n ' % (predicate_cols_option, stats_off_pct, schema_name, str(blacklisted_tables_array)[1:-1])
else:
get_analyze_statement = 'SELECT DISTINCT \'analyze \' + "schema" + \'."\' + "table" + \'" %s ; \'\n + \'/* Stats_Off : \' + CAST("stats_off" AS VARCHAR(10)) + \' */ ;\'\n FROM svv_table_info\n WHERE stats_off::DECIMAL (32,4) > %s::DECIMAL (32,4)\n AND trim("schema") ~ \'%s\'\n ORDER BY "size" ASC ;\n ' % (predicate_cols_option, stats_off_pct, schema_name)
if debug:
<DeepExtract>
datetime_str = str(datetime.datetime.now())
if get_analyze_statement is not None:
if re.match('.*\\n.*', get_analyze_statement) is not None:
print('/* [%s]\n%s\n*/\n' % (str(os.getpid()), get_analyze_statement))
else:
print('-- %s [%s] %s' % (datetime_str, str(os.getpid()), get_analyze_statement))
</DeepExtract>
<DeepExtract>
cursor = conn.cursor()
cursor.execute(get_analyze_statement)
try:
results = cursor.fetchall()
if debug:
comment('Query Execution returned %s Results' % len(results))
except Exception as e:
if 'no result set' in str(e):
analyze_statements = None
else:
raise e
analyze_statements = results
</DeepExtract>
statements = []
for vs in analyze_statements:
statements.append(vs[0])
if not run_commands(conn, statements, cw=cw, cluster_name=cluster_name, suppress_errors=ignore_errors):
if not ignore_errors:
if debug:
print('Error running statements: %s' % (str(statements),))
return ERROR
return True
|
def run_analyze(conn, cluster_name, cw, schema_name='public', table_name=None, blacklisted_tables=None, ignore_errors=False, predicate_cols=False, stats_off_pct=10, **kwargs):
statements = []
if predicate_cols:
predicate_cols_option = ' PREDICATE COLUMNS '
else:
predicate_cols_option = ' ALL COLUMNS '
if table_name is not None:
get_analyze_statement_feedback = 'SELECT DISTINCT \'analyze \' + "schema" + \'."\' + "table" + \'"\' + \'%s ; \'\n + \'/* Stats_Off : \' + CAST("stats_off" AS VARCHAR(10)) + \' */ ;\'\n FROM svv_table_info\n WHERE stats_off::DECIMAL (32,4) > %s ::DECIMAL (32,4)\n AND trim("schema") ~ \'%s\'\n AND trim("table") = \'%s\';\n ' % (predicate_cols_option, stats_off_pct, schema_name, table_name)
elif blacklisted_tables is not None:
datetime_str = str(datetime.datetime.now())
if 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...' is not None:
if re.match('.*\\n.*', 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...') is not None:
print('/* [%s]\n%s\n*/\n' % (str(os.getpid()), 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...'))
else:
print('-- %s [%s] %s' % (datetime_str, str(os.getpid()), 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...'))
blacklisted_tables_array = blacklisted_tables.split(',')
get_analyze_statement_feedback = '\n SELECT DISTINCT \'analyze \' + feedback_tbl.schema_name + \'."\' + feedback_tbl.table_name + \'"\' + \'%s ; \' + \'/* Stats_Off : \' + CAST(info_tbl."stats_off" AS VARCHAR(10)) + \' */ ;\'\n FROM (/* Get top N rank tables based on the missing statistics alerts */\n SELECT TRIM(n.nspname)::VARCHAR schema_name,\n TRIM(c.relname)::VARCHAR table_name\n FROM (SELECT TRIM(SPLIT_PART(SPLIT_PART(a.plannode,\':\',2),\' \',2)) AS Table_Name,\n COUNT(a.query),\n DENSE_RANK() OVER (ORDER BY COUNT(a.query) DESC) AS qry_rnk\n FROM stl_explain a,\n stl_query b\n WHERE a.query = b.query\n AND CAST(b.starttime AS DATE) >= dateadd(DAY,%s,CURRENT_DATE)\n AND a.userid > 1\n AND regexp_instr(a.plannode,\'.*missing statistics.*\') > 0\n AND regexp_instr(a.plannode,\'.*_bkp_.*\') = 0\n GROUP BY Table_Name) miss_tbl\n LEFT JOIN pg_class c ON c.relname = TRIM(miss_tbl.table_name)\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE miss_tbl.qry_rnk <= %s\n /* Get the top N rank tables based on the stl_alert_event_log alerts */\n UNION\n SELECT schema_name,\n table_name\n FROM (SELECT TRIM(n.nspname)::VARCHAR schema_name,\n TRIM(c.relname)::VARCHAR table_name,\n DENSE_RANK() OVER (ORDER BY COUNT(*) DESC) AS qry_rnk,\n COUNT(*)\n FROM stl_alert_event_log AS l\n JOIN (SELECT query,\n tbl,\n perm_table_name\n FROM stl_scan\n WHERE perm_table_name <> \'Internal Worktable\'\n GROUP BY query,\n tbl,\n perm_table_name) AS s ON s.query = l.query\n JOIN pg_class c ON c.oid = s.tbl\n JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n WHERE l.userid > 1\n AND l.event_time >= dateadd(DAY,%s,CURRENT_DATE)\n AND regexp_instr(l.Solution,\'.*ANALYZE command.*\') > 0\n GROUP BY TRIM(n.nspname),\n TRIM(c.relname)) anlyz_tbl\n WHERE anlyz_tbl.qry_rnk < %s\n UNION\n /* just a base dump of svv_table_info to check the stats_off metric */\n SELECT "schema"::VARCHAR schema_name,\n "table"::VARCHAR table_name\n FROM svv_table_info) feedback_tbl\n JOIN svv_table_info info_tbl\n ON info_tbl.schema = feedback_tbl.schema_name\n AND info_tbl.table = feedback_tbl.table_name\n WHERE info_tbl.stats_off::DECIMAL(32,4) > %s::DECIMAL(32,4)\n AND TRIM(info_tbl.schema) ~ \'%s\'\n AND info_tbl.table NOT IN (%s)\n ORDER BY info_tbl.size ASC;\n ' % (predicate_cols_option, goback_no_of_days, query_rank, goback_no_of_days, query_rank, stats_off_pct, schema_name, str(blacklisted_tables_array)[1:-1])
else:
datetime_str = str(datetime.datetime.now())
if 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...' is not None:
if re.match('.*\\n.*', 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...') is not None:
print('/* [%s]\n%s\n*/\n' % (str(os.getpid()), 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...'))
else:
print('-- %s [%s] %s' % (datetime_str, str(os.getpid()), 'Extracting Candidate Tables for analyze based on Query Optimizer Alerts...'))
get_analyze_statement_feedback = '\n SELECT DISTINCT \'analyze \' + feedback_tbl.schema_name + \'."\' + feedback_tbl.table_name + \'"\' + \'%s ; \' + \'/* Stats_Off : \' + CAST(info_tbl."stats_off" AS VARCHAR(10)) + \' */ ;\'\n FROM (/* Get top N rank tables based on the missing statistics alerts */ \n SELECT TRIM(n.nspname)::VARCHAR schema_name,\n TRIM(c.relname)::VARCHAR table_name \n FROM (SELECT TRIM(SPLIT_PART(SPLIT_PART(a.plannode,\':\',2),\' \',2)) AS Table_Name,\n COUNT(a.query),\n DENSE_RANK() OVER (ORDER BY COUNT(a.query) DESC) AS qry_rnk\n FROM stl_explain a,\n stl_query b\n WHERE a.query = b.query\n AND CAST(b.starttime AS DATE) >= dateadd(DAY,%s,CURRENT_DATE)\n AND a.userid > 1\n AND regexp_instr(a.plannode,\'.*missing statistics.*\') > 0\n AND regexp_instr(a.plannode,\'.*_bkp_.*\') = 0\n GROUP BY Table_Name) miss_tbl \n LEFT JOIN pg_class c ON c.relname = TRIM(miss_tbl.table_name) \n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE miss_tbl.qry_rnk <= %s \n /* Get the top N rank tables based on the stl_alert_event_log alerts */\n UNION\n SELECT schema_name,\n table_name\n FROM (SELECT TRIM(n.nspname)::VARCHAR schema_name,\n TRIM(c.relname)::VARCHAR table_name,\n DENSE_RANK() OVER (ORDER BY COUNT(*) DESC) AS qry_rnk,\n COUNT(*)\n FROM stl_alert_event_log AS l\n JOIN (SELECT query,\n tbl,\n perm_table_name\n FROM stl_scan\n WHERE perm_table_name <> \'Internal Worktable\'\n GROUP BY query,\n tbl,\n perm_table_name) AS s ON s.query = l.query\n JOIN pg_class c ON c.oid = s.tbl\n JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n WHERE l.userid > 1\n AND l.event_time >= dateadd(DAY,%s,CURRENT_DATE)\n AND regexp_instr(l.Solution,\'.*ANALYZE command.*\') > 0\n GROUP BY TRIM(n.nspname),\n TRIM(c.relname)) anlyz_tbl\n WHERE anlyz_tbl.qry_rnk < %s \n UNION\n /* just a base dump of svv_table_info to check the stats_off metric */ \n SELECT "schema"::VARCHAR schema_name,\n "table"::VARCHAR table_name\n FROM svv_table_info) feedback_tbl\n JOIN svv_table_info info_tbl\n ON info_tbl.schema = feedback_tbl.schema_name\n AND info_tbl.table = feedback_tbl.table_name\n WHERE info_tbl.stats_off::DECIMAL(32,4) > %s::DECIMAL(32,4)\n AND TRIM(info_tbl.schema) ~ \'%s\' \n ORDER BY info_tbl.size ASC\n ' % (predicate_cols_option, goback_no_of_days, query_rank, goback_no_of_days, query_rank, stats_off_pct, schema_name)
if debug:
datetime_str = str(datetime.datetime.now())
if get_analyze_statement_feedback is not None:
if re.match('.*\\n.*', get_analyze_statement_feedback) is not None:
print('/* [%s]\n%s\n*/\n' % (str(os.getpid()), get_analyze_statement_feedback))
else:
print('-- %s [%s] %s' % (datetime_str, str(os.getpid()), get_analyze_statement_feedback))
cursor = conn.cursor()
cursor.execute(get_analyze_statement_feedback)
try:
results = cursor.fetchall()
if debug:
comment('Query Execution returned %s Results' % len(results))
except Exception as e:
if 'no result set' in str(e):
analyze_statements = None
else:
raise e
analyze_statements = results
for vs in analyze_statements:
statements.append(vs[0])
datetime_str = str(datetime.datetime.now())
if 'Found %s Tables requiring Analysis' % len(statements) is not None:
if re.match('.*\\n.*', 'Found %s Tables requiring Analysis' % len(statements)) is not None:
print('/* [%s]\n%s\n*/\n' % (str(os.getpid()), 'Found %s Tables requiring Analysis' % len(statements)))
else:
print('-- %s [%s] %s' % (datetime_str, str(os.getpid()), 'Found %s Tables requiring Analysis' % len(statements)))
if not run_commands(conn, statements, cw=cw, cluster_name=cluster_name, suppress_errors=ignore_errors):
if not ignore_errors:
if debug:
print('Error running statements: %s' % (str(statements),))
return ERROR
if table_name is None:
datetime_str = str(datetime.datetime.now())
if 'Extracting Candidate Tables for analyze based on stats off from system table info ...' is not None:
if re.match('.*\\n.*', 'Extracting Candidate Tables for analyze based on stats off from system table info ...') is not None:
print('/* [%s]\n%s\n*/\n' % (str(os.getpid()), 'Extracting Candidate Tables for analyze based on stats off from system table info ...'))
else:
print('-- %s [%s] %s' % (datetime_str, str(os.getpid()), 'Extracting Candidate Tables for analyze based on stats off from system table info ...'))
if blacklisted_tables is not None:
blacklisted_tables_array = blacklisted_tables.split(',')
get_analyze_statement = 'SELECT DISTINCT \'analyze \' + "schema" + \'."\' + "table" + \'" %s ; \'\n + \'/* Stats_Off : \' + CAST("stats_off" AS VARCHAR(10)) + \' */ ;\'\n FROM svv_table_info\n WHERE stats_off::DECIMAL (32,4) > %s::DECIMAL (32,4)\n AND trim("schema") ~ \'%s\'\n AND "table" NOT IN (%s)\n ORDER BY "size" ASC ;\n ' % (predicate_cols_option, stats_off_pct, schema_name, str(blacklisted_tables_array)[1:-1])
else:
get_analyze_statement = 'SELECT DISTINCT \'analyze \' + "schema" + \'."\' + "table" + \'" %s ; \'\n + \'/* Stats_Off : \' + CAST("stats_off" AS VARCHAR(10)) + \' */ ;\'\n FROM svv_table_info\n WHERE stats_off::DECIMAL (32,4) > %s::DECIMAL (32,4)\n AND trim("schema") ~ \'%s\'\n ORDER BY "size" ASC ;\n ' % (predicate_cols_option, stats_off_pct, schema_name)
if debug:
datetime_str = str(datetime.datetime.now())
if get_analyze_statement is not None:
if re.match('.*\\n.*', get_analyze_statement) is not None:
print('/* [%s]\n%s\n*/\n' % (str(os.getpid()), get_analyze_statement))
else:
print('-- %s [%s] %s' % (datetime_str, str(os.getpid()), get_analyze_statement))
cursor = conn.cursor()
cursor.execute(get_analyze_statement)
try:
results = cursor.fetchall()
if debug:
comment('Query Execution returned %s Results' % len(results))
except Exception as e:
if 'no result set' in str(e):
analyze_statements = None
else:
raise e
analyze_statements = results
statements = []
for vs in analyze_statements:
statements.append(vs[0])
if not run_commands(conn, statements, cw=cw, cluster_name=cluster_name, suppress_errors=ignore_errors):
if not ignore_errors:
if debug:
print('Error running statements: %s' % (str(statements),))
return ERROR
return True
|
amazon-redshift-utils
|
positive
|
def test_deactivated_callable(self):
"""
Disabled enforcers should be returning just Callable
"""
settings = Settings(enabled=False)
func = no_type_check(self.func_int___none())
wrapped = apply_enforcer(func)
enforcer = wrapped.__enforcer__
func_type = enforcer.callable_signature
self.assertEqual(func_type, Callable)
<DeepExtract>
def func_int___none(a: int) -> None:
pass
func = func_int___none
</DeepExtract>
self.assertFalse(hasattr(func, '__enforcer__'))
wrapped = apply_enforcer(func, settings=settings)
enforcer = wrapped.__enforcer__
func_type = enforcer.callable_signature
self.assertIsNotNone(enforcer.settings)
self.assertFalse(enforcer.settings)
self.assertFalse(enforcer.settings.enabled)
self.assertEqual(func_type, Callable)
|
def test_deactivated_callable(self):
"""
Disabled enforcers should be returning just Callable
"""
settings = Settings(enabled=False)
func = no_type_check(self.func_int___none())
wrapped = apply_enforcer(func)
enforcer = wrapped.__enforcer__
func_type = enforcer.callable_signature
self.assertEqual(func_type, Callable)
def func_int___none(a: int) -> None:
pass
func = func_int___none
self.assertFalse(hasattr(func, '__enforcer__'))
wrapped = apply_enforcer(func, settings=settings)
enforcer = wrapped.__enforcer__
func_type = enforcer.callable_signature
self.assertIsNotNone(enforcer.settings)
self.assertFalse(enforcer.settings)
self.assertFalse(enforcer.settings.enabled)
self.assertEqual(func_type, Callable)
|
enforce
|
positive
|
def run_on_container(self, command, target_container=None):
if target_container is None:
target_container = self._container_id
<DeepExtract>
try:
result = (subprocess.check_output(('docker', 'exec', target_container, 'bash', '-c', command), cwd=self._workdir.name, stderr=subprocess.STDOUT).decode('utf-8').strip(), True)
except subprocess.CalledProcessError as e:
result = (e.output.decode('utf-8').strip(), False)
</DeepExtract>
if not result[1]:
raise RuntimeError('Error running command on container: {}'.format(result[0]))
return result[0]
|
def run_on_container(self, command, target_container=None):
if target_container is None:
target_container = self._container_id
try:
result = (subprocess.check_output(('docker', 'exec', target_container, 'bash', '-c', command), cwd=self._workdir.name, stderr=subprocess.STDOUT).decode('utf-8').strip(), True)
except subprocess.CalledProcessError as e:
result = (e.output.decode('utf-8').strip(), False)
if not result[1]:
raise RuntimeError('Error running command on container: {}'.format(result[0]))
return result[0]
|
cf-mendix-buildpack
|
positive
|
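run_on_container above shells out to docker exec <container> bash -c <command>, capturing combined stdout/stderr and converting a non-zero exit status into a RuntimeError. A minimal standalone sketch of the same wrapper; the container id in the commented example is a placeholder, and running it requires a Docker daemon.

import subprocess

def run_on_container(command, target_container):
    # Execute a shell command inside a running container and return its combined output.
    try:
        output = subprocess.check_output(
            ('docker', 'exec', target_container, 'bash', '-c', command),
            stderr=subprocess.STDOUT,
        )
        return output.decode('utf-8').strip()
    except subprocess.CalledProcessError as e:
        raise RuntimeError('Error running command on container: {}'.format(
            e.output.decode('utf-8').strip()))

# Example (placeholder container id):
# print(run_on_container('echo hello', 'my-container'))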
def _stop(self, bp, ret):
<DeepExtract>
cont = self.controller
cont.disable_breakpoint(self)
if not gdb.current_progspace().filename:
elf = self.stage.elf
gdb.execute('file %s' % elf)
cont.gdb_print('loaded file %s\n' % elf)
cont.gdb_print('Inserting breakpoints for %s %s ...\n' % (self.controller.name, self.stage.stagename))
cont.current_substage = 0
cont.insert_breakpoints(self.stage)
cont.gdb_print('Done setting breakpoints\n')
for s in self.controller.stage_hooks:
s(self, self.stage)
</DeepExtract>
return False
|
def _stop(self, bp, ret):
cont = self.controller
cont.disable_breakpoint(self)
if not gdb.current_progspace().filename:
elf = self.stage.elf
gdb.execute('file %s' % elf)
cont.gdb_print('loaded file %s\n' % elf)
cont.gdb_print('Inserting breakpoints for %s %s ...\n' % (self.controller.name, self.stage.stagename))
cont.current_substage = 0
cont.insert_breakpoints(self.stage)
cont.gdb_print('Done setting breakpoints\n')
for s in self.controller.stage_hooks:
s(self, self.stage)
return False
|
bootloader_instrumentation_suite
|
positive
|
@pytest.mark.test_version_older_with_envar
@pytest.mark.parametrize('version', ['0.01.0'])
def test_c2r_latest_older_unsupported_version(convert2rhel, c2r_version, version, older_version_envar):
"""
Verify that running an older version of Convert2RHEL with the environment
variable "CONVERT2RHEL_ALLOW_OLDER_VERSION" set continues the conversion.
Running an older version of convert2rhel on OS major version 6 or older should inhibit the conversion either way.
"""
<DeepExtract>
path_to_version = subprocess.check_output(['find', '/usr/lib/', '-path', '*/convert2rhel/__init__.py', '-printf', '%p']).decode('utf-8')
with open(path_to_version, 'r') as version_file:
old_version_content = version_file.read()
def _update_c2r_version(version):
"""
Modify the Convert2RHEL version value in the __init__.py file.
We want to simulate that the running version is older/newer than the one in the repositories.
"""
with open(path_to_version, 'w') as version_file:
version_pattern = '__version__ = "(\\d+\\.\\d+\\.\\d+)"'
updated_version_content = re.sub(version_pattern, '__version__ = "{}"'.format(version), old_version_content)
version_file.write(updated_version_content)
yield _update_c2r_version
def _restore_c2r_version():
with open(path_to_version, 'w') as version_file:
version_file.write(old_version_content)
_restore_c2r_version()
</DeepExtract>
with convert2rhel('--no-rpm-va --debug') as c2r:
c2r.expect('Continue with the system conversion?')
c2r.sendline('y')
assert c2r.expect('You are currently running 0.01', timeout=300) == 0
assert c2r.expect("'CONVERT2RHEL_ALLOW_OLDER_VERSION' environment variable detected, continuing conversion", timeout=300) == 0
c2r.expect('Continue with the system conversion?')
c2r.sendline('n')
assert c2r.exitstatus != 0
|
@pytest.mark.test_version_older_with_envar
@pytest.mark.parametrize('version', ['0.01.0'])
def test_c2r_latest_older_unsupported_version(convert2rhel, c2r_version, version, older_version_envar):
"""
Verify that running an older version of Convert2RHEL with the environment
variable "CONVERT2RHEL_ALLOW_OLDER_VERSION" set continues the conversion.
Running an older version of convert2rhel on OS major version 6 or older should inhibit the conversion either way.
"""
path_to_version = subprocess.check_output(['find', '/usr/lib/', '-path', '*/convert2rhel/__init__.py', '-printf', '%p']).decode('utf-8')
with open(path_to_version, 'r') as version_file:
old_version_content = version_file.read()
def _update_c2r_version(version):
"""
Modify the Convert2RHEL version value in the __init__.py file.
We want to simulate that the running version is older/newer than the one in the repositories.
"""
with open(path_to_version, 'w') as version_file:
version_pattern = '__version__ = "(\\d+\\.\\d+\\.\\d+)"'
updated_version_content = re.sub(version_pattern, '__version__ = "{}"'.format(version), old_version_content)
version_file.write(updated_version_content)
yield _update_c2r_version
def _restore_c2r_version():
with open(path_to_version, 'w') as version_file:
version_file.write(old_version_content)
_restore_c2r_version()
with convert2rhel('--no-rpm-va --debug') as c2r:
c2r.expect('Continue with the system conversion?')
c2r.sendline('y')
assert c2r.expect('You are currently running 0.01', timeout=300) == 0
assert c2r.expect("'CONVERT2RHEL_ALLOW_OLDER_VERSION' environment variable detected, continuing conversion", timeout=300) == 0
c2r.expect('Continue with the system conversion?')
c2r.sendline('n')
assert c2r.exitstatus != 0
|
convert2rhel
|
positive
|
def killScript(reason=None):
if reason is None:
print(readMe)
sys.exit()
else:
<DeepExtract>
logString = '%s -- %s' % (datetime.datetime.now(), 'ERROR: %s' % reason)
print(logString)
if not filePath is None:
try:
with open(filePath, 'a') as logFile:
logFile.write('%s\n' % logString)
except:
log('ERROR: Unable to append to log file')
</DeepExtract>
sys.exit()
|
def killScript(reason=None):
if reason is None:
print(readMe)
sys.exit()
else:
logString = '%s -- %s' % (datetime.datetime.now(), 'ERROR: %s' % reason)
print(logString)
if not filePath is None:
try:
with open(filePath, 'a') as logFile:
logFile.write('%s\n' % logString)
except:
log('ERROR: Unable to append to log file')
sys.exit()
|
automation-scripts
|
positive
|
def reset(self):
self.phase = random.randint(0, self.phaselen)
<DeepExtract>
if self.phase is None:
self.phase = self.phase
if self.phase > self.phaselen:
self.phase = 0
pos = np.copy(self.trajectory.qpos[self.phase * self.simrate])
pos[1] = 0
vel = np.copy(self.trajectory.qvel[self.phase * self.simrate])
(qpos0, qvel0) = (pos, vel)
</DeepExtract>
self.sim.set_qpos(np.ndarray.flatten(qpos0))
self.sim.set_qvel(np.ndarray.flatten(qvel0))
self.goal_qpos = np.ndarray.flatten(self.init_qpos)
self.goal_qvel = np.ndarray.flatten(self.init_qvel)
return self.get_full_state()
|
def reset(self):
self.phase = random.randint(0, self.phaselen)
if self.phase is None:
self.phase = self.phase
if self.phase > self.phaselen:
self.phase = 0
pos = np.copy(self.trajectory.qpos[self.phase * self.simrate])
pos[1] = 0
vel = np.copy(self.trajectory.qvel[self.phase * self.simrate])
(qpos0, qvel0) = (pos, vel)
self.sim.set_qpos(np.ndarray.flatten(qpos0))
self.sim.set_qvel(np.ndarray.flatten(qvel0))
self.goal_qpos = np.ndarray.flatten(self.init_qpos)
self.goal_qvel = np.ndarray.flatten(self.init_qvel)
return self.get_full_state()
|
apex
|
positive
|
def laplace_approximation(trial_model, dtype, data, uv, sigma, gains_t1, gains_t2):
global globdict
if globdict['marginalize_gains'] == True and dtype == 'amp':
gain_hess = np.zeros((len(globdict['gain_list']), len(globdict['gain_list'])))
<DeepExtract>
global globdict
if not True:
if globdict['gain_init'] == None:
gain = 1
else:
gains = globdict['gain_init']
if globdict['marginalize_gains']:
gains = globdict['gain_init']
if dtype in ['amp', 'vis']:
gains_wzero = np.append(gains, 0.0)
gain = (1.0 + gains_wzero[gains_t1]) * (1.0 + gains_wzero[gains_t2])
else:
gain = 1
</DeepExtract>
amp_model = np.abs(trial_model.sample_uv(uv[:, 0], uv[:, 1]))
amp_bar = gain * data
sigma_bar = gain * sigma
<DeepExtract>
global globdict
if not True:
if globdict['gain_init'] == None:
(g1, g2) = (0.0, 0.0)
else:
gains = globdict['gain_init']
if globdict['marginalize_gains']:
gains = globdict['gain_init']
if dtype in ['amp', 'vis']:
gains_wzero = np.append(gains, 0.0)
(g1, g2) = (gains_wzero[gains_t1], gains_wzero[gains_t2])
else:
(g1, g2) = (0, 0)
</DeepExtract>
for j in range(len(gain)):
gain_hess[gains_t1[j], gains_t1[j]] += amp_model[j] * (3.0 * amp_model[j] - 2.0 * amp_bar[j]) / ((1.0 + g1[j]) ** 2 * sigma_bar[j] ** 2)
gain_hess[gains_t2[j], gains_t2[j]] += amp_model[j] * (3.0 * amp_model[j] - 2.0 * amp_bar[j]) / ((1.0 + g2[j]) ** 2 * sigma_bar[j] ** 2)
gain_hess[gains_t1[j], gains_t2[j]] += amp_model[j] * (2.0 * amp_model[j] - amp_bar[j]) / ((1.0 + g1[j]) * (1.0 + g2[j]) * sigma_bar[j] ** 2)
gain_hess[gains_t2[j], gains_t1[j]] += amp_model[j] * (2.0 * amp_model[j] - amp_bar[j]) / ((1.0 + g1[j]) * (1.0 + g2[j]) * sigma_bar[j] ** 2)
for j in range(len(globdict['gain_list'])):
t = globdict['gain_list'][j][1]
if globdict['gain_prior'][t]['prior_type'] == 'gauss':
gain_hess[j, j] += 1.0 / globdict['gain_prior'][t]['std']
elif globdict['gain_prior'][t]['prior_type'] == 'flat':
gain_hess[j, j] += 0.0
elif globdict['gain_prior'][t]['prior_type'] == 'exponential':
gain_hess[j, j] += 0.0
elif globdict['gain_prior'][t]['prior_type'] == 'fixed':
gain_hess[j, j] += 0.0
else:
raise Exception('Gain prior not implemented!')
return np.log((2.0 * np.pi) ** (len(gain) / 2.0) * np.abs(np.linalg.det(gain_hess)) ** (-0.5))
else:
return 0.0
|
def laplace_approximation(trial_model, dtype, data, uv, sigma, gains_t1, gains_t2):
global globdict
if globdict['marginalize_gains'] == True and dtype == 'amp':
gain_hess = np.zeros((len(globdict['gain_list']), len(globdict['gain_list'])))
global globdict
if not True:
if globdict['gain_init'] == None:
gain = 1
else:
gains = globdict['gain_init']
if globdict['marginalize_gains']:
gains = globdict['gain_init']
if dtype in ['amp', 'vis']:
gains_wzero = np.append(gains, 0.0)
gain = (1.0 + gains_wzero[gains_t1]) * (1.0 + gains_wzero[gains_t2])
else:
gain = 1
amp_model = np.abs(trial_model.sample_uv(uv[:, 0], uv[:, 1]))
amp_bar = gain * data
sigma_bar = gain * sigma
global globdict
if not True:
if globdict['gain_init'] == None:
(g1, g2) = (0.0, 0.0)
else:
gains = globdict['gain_init']
if globdict['marginalize_gains']:
gains = globdict['gain_init']
if dtype in ['amp', 'vis']:
gains_wzero = np.append(gains, 0.0)
(g1, g2) = (gains_wzero[gains_t1], gains_wzero[gains_t2])
else:
(g1, g2) = (0, 0)
for j in range(len(gain)):
gain_hess[gains_t1[j], gains_t1[j]] += amp_model[j] * (3.0 * amp_model[j] - 2.0 * amp_bar[j]) / ((1.0 + g1[j]) ** 2 * sigma_bar[j] ** 2)
gain_hess[gains_t2[j], gains_t2[j]] += amp_model[j] * (3.0 * amp_model[j] - 2.0 * amp_bar[j]) / ((1.0 + g2[j]) ** 2 * sigma_bar[j] ** 2)
gain_hess[gains_t1[j], gains_t2[j]] += amp_model[j] * (2.0 * amp_model[j] - amp_bar[j]) / ((1.0 + g1[j]) * (1.0 + g2[j]) * sigma_bar[j] ** 2)
gain_hess[gains_t2[j], gains_t1[j]] += amp_model[j] * (2.0 * amp_model[j] - amp_bar[j]) / ((1.0 + g1[j]) * (1.0 + g2[j]) * sigma_bar[j] ** 2)
for j in range(len(globdict['gain_list'])):
t = globdict['gain_list'][j][1]
if globdict['gain_prior'][t]['prior_type'] == 'gauss':
gain_hess[j, j] += 1.0 / globdict['gain_prior'][t]['std']
elif globdict['gain_prior'][t]['prior_type'] == 'flat':
gain_hess[j, j] += 0.0
elif globdict['gain_prior'][t]['prior_type'] == 'exponential':
gain_hess[j, j] += 0.0
elif globdict['gain_prior'][t]['prior_type'] == 'fixed':
gain_hess[j, j] += 0.0
else:
raise Exception('Gain prior not implemented!')
return np.log((2.0 * np.pi) ** (len(gain) / 2.0) * np.abs(np.linalg.det(gain_hess)) ** (-0.5))
else:
return 0.0
|
eht-imaging
|
positive
|
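The value returned by laplace_approximation above is the log of the standard Laplace-approximation volume factor: for a positive-definite Hessian H of dimension k, the Gaussian integral of exp(-0.5 * x^T H x) over R^k equals (2*pi)^(k/2) * |det H|^(-1/2). A small sketch of just that final step, with k taken as the dimension of the Hessian and slogdet used for numerical stability (the snippet itself uses len(gain) in the exponent and the raw determinant, and its Hessian assembly from amplitudes and gain priors is not reproduced here).

import numpy as np

def log_laplace_volume(gain_hess):
    # log[(2*pi)^(k/2) * |det H|^(-1/2)] = 0.5 * (k * log(2*pi) - log|det H|)
    k = gain_hess.shape[0]
    sign, logdet = np.linalg.slogdet(gain_hess)  # assumes H is positive definite, so sign == 1
    return 0.5 * (k * np.log(2.0 * np.pi) - logdet)

# Example with a diagonal 2x2 Hessian.
print(log_laplace_volume(np.diag([4.0, 9.0])))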
def main(source=None, num_epochs=None, method=None, batch_size=None, learning_rate=None, beta=None, n_samples=None, image_dir=None, binary_dir=None, dim_z=None, prior=None):
<DeepExtract>
if source is None:
raise ValueError('source not provided.')
logger.info('Reading MNIST ({}), from {}'.format('train', source))
with gzip.open(source, 'rb') as f:
x = cPickle.load(f)
if 'train' == 'train':
data = np.float32(x[0][0])
elif 'train' == 'valid':
data = np.float32(x[1][0])
elif 'train' == 'test':
data = np.float32(x[2][0])
else:
raise ValueError()
data = np.reshape(data, (-1, 1, 28, 28))
data = data
</DeepExtract>
train_samples = data.shape[0]
noise_var = T.matrix('noise')
input_var = T.tensor4('inputs')
logger.info('Building model and graph')
<DeepExtract>
layer = InputLayer(shape=(None, dim_z), input_var=noise_var)
layer = batch_norm(DenseLayer(layer, 1024))
layer = batch_norm(DenseLayer(layer, dim_h * 2 * 7 * 7))
layer = ReshapeLayer(layer, ([0], dim_h * 2, 7, 7))
layer = batch_norm(Deconv2DLayer(layer, dim_h, 5, stride=2, pad=2))
layer = Deconv2DLayer(layer, 1, 5, stride=2, pad=2, nonlinearity=None)
logger.debug('Generator output: {}'.format(layer.output_shape))
generator = layer
</DeepExtract>
<DeepExtract>
layer = InputLayer(shape=(None, 1, DIM_X, DIM_Y), input_var=input_var)
layer = Conv2DLayer(layer, dim_h, 5, stride=2, pad=2, nonlinearity=lrelu)
layer = Conv2DLayer(layer, dim_h * 2, 5, stride=2, pad=2, nonlinearity=lrelu)
layer = DenseLayer(layer, 1024, nonlinearity=lrelu)
layer = DenseLayer(layer, 1, nonlinearity=None)
logger.debug('Discriminator output: {}'.format(layer.output_shape))
discriminator = layer
</DeepExtract>
<DeepExtract>
layer = InputLayer(shape=(None, dim_z), input_var=noise_var)
layer = DenseLayer(layer, dim_h)
layer = batch_norm(DenseLayer(layer, 1))
baseline = layer
</DeepExtract>
trng = RandomStreams(random.randint(1, 1000000))
g_output_logit = lasagne.layers.get_output(generator)
log_Z = lasagne.layers.get_output(baseline)
<DeepExtract>
R = trng.uniform(size=(n_samples, batch_size, DIM_C, DIM_X, DIM_Y), dtype=floatX_)
g_output = T.nnet.sigmoid(g_output_logit)
samples = (R <= T.shape_padleft(g_output)).astype(floatX_)
D_r = lasagne.layers.get_output(discriminator)
D_f = lasagne.layers.get_output(discriminator, samples.reshape((-1, DIM_C, DIM_X, DIM_Y)))
D_f_ = D_f.reshape((n_samples, batch_size))
log_w = D_f_
log_g = -((1.0 - samples) * T.shape_padleft(g_output_logit) + T.shape_padleft(T.nnet.softplus(-g_output_logit))).sum(axis=(2, 3, 4))
log_N = T.log(log_w.shape[0]).astype(floatX_)
log_Z_est = log_sum_exp(log_w - log_N, axis=0)
log_w_tilde = log_w - T.shape_padleft(log_Z_est) - log_N
w_tilde = T.exp(log_w_tilde)
r = theano.gradient.disconnected_grad(log_w - log_Z[None, :] - 1)
generator_loss = -(r * log_g).mean()
discriminator_loss = T.nnet.softplus(-D_r).mean() + T.nnet.softplus(-D_f).mean() + D_f.mean()
(generator_loss, discriminator_loss, D_r, D_f, log_Z_est, log_w, w_tilde, d) = (generator_loss, discriminator_loss, D_r, D_f, log_Z_est, log_w, w_tilde, {})
</DeepExtract>
baseline_loss = ((log_Z - log_Z_est) ** 2).mean()
generator_params = lasagne.layers.get_all_params(generator, trainable=True)
discriminator_params = lasagne.layers.get_all_params(discriminator, trainable=True)
baseline_params = lasagne.layers.get_all_params(baseline, trainable=True)
eta = theano.shared(floatX(learning_rate))
updates = lasagne.updates.adam(generator_loss, generator_params, learning_rate=eta, beta1=beta)
updates.update(lasagne.updates.adam(discriminator_loss, discriminator_params, learning_rate=eta, beta1=beta))
updates.update(lasagne.updates.adam(baseline_loss, baseline_params, learning_rate=eta, beta1=beta))
results = {'p(real)': (T.nnet.sigmoid(D_r) > 0.5).mean(), 'p(fake': (T.nnet.sigmoid(D_f) < 0.5).mean(), 'G loss': generator_loss, 'D loss': discriminator_loss, 'log Z': log_Z, 'log Z est': log_Z_est.mean(), 'log_Z est var': log_Z_est.std() ** 2, 'log w': log_w.mean(), 'log w var': log_w.std() ** 2, 'norm w': w_tilde.mean(), 'norm w var': w_tilde.std() ** 2, 'ESS': (1.0 / (w_tilde ** 2).sum(0)).mean()}
train_fn = theano.function([noise_var, input_var], results, updates=updates)
gen_fn = theano.function([noise_var], T.nnet.sigmoid(lasagne.layers.get_output(generator, deterministic=True)))
logger.info('Training...')
results = {}
for epoch in range(num_epochs):
u = 0
prefix = '{}_{}'.format(method, epoch)
e_results = {}
widgets = ['Epoch {}, '.format(epoch), Timer(), Bar()]
pbar = ProgressBar(widgets=widgets, maxval=train_samples // batch_size).start()
prefix = str(epoch)
start_time = time.time()
batch0 = None
for batch in iterate_minibatches(data, batch_size, shuffle=True):
if batch0 is None:
batch0 = batch
if batch.shape[0] == batch_size:
if prior == 'uniform':
noise = floatX(np.random.rand(batch_size, dim_z))
elif prior == 'gaussian':
noise = floatX(np.random.normal(size=(batch_size, dim_z)))
outs = train_fn(noise, batch)
outs = dict(((k, np.asarray(v)) for (k, v) in outs.items()))
<DeepExtract>
for (k, v) in d.iteritems():
if k in e_results.keys():
e_results[k].append(v)
else:
e_results[k] = [v]
</DeepExtract>
u += 1
pbar.update(u)
else:
logger.error('Skipped batch of size {}'.format(batch.shape))
<DeepExtract>
for (k, v) in d.iteritems():
if k in results.keys():
results[k].append(v)
else:
results[k] = [v]
</DeepExtract>
np.savez(path.join(binary_dir, '{}_results.npz'.format(prefix)), **results)
try:
if prior == 'uniform':
noise = floatX(np.random.rand(100, dim_z))
elif prior == 'gaussian':
noise = floatX(np.random.normal(size=(100, dim_z)))
samples = gen_fn(noise)
<DeepExtract>
results = dict(((k, np.mean(v)) for (k, v) in results.items()))
logger.info(results)
if image_dir is not None:
plt.imsave(path.join(image_dir, '{}.png'.format(prefix)), samples.reshape(10, 10, 28, 28).transpose(0, 2, 1, 3).reshape(10 * 28, 10 * 28), cmap='gray')
</DeepExtract>
except Exception as e:
print(e)
pass
logger.info('Epoch {} of {} took {:.3f}s'.format(epoch + 1, num_epochs, time.time() - start_time))
np.savez(path.join(binary_dir, '{}_generator_params.npz'.format(prefix)), *lasagne.layers.get_all_param_values(generator))
np.savez(path.join(binary_dir, '{}_discriminator_params.npz'.format(prefix)), *lasagne.layers.get_all_param_values(discriminator))
|
def main(source=None, num_epochs=None, method=None, batch_size=None, learning_rate=None, beta=None, n_samples=None, image_dir=None, binary_dir=None, dim_z=None, prior=None):
if source is None:
raise ValueError('source not provided.')
logger.info('Reading MNIST ({}), from {}'.format('train', source))
with gzip.open(source, 'rb') as f:
x = cPickle.load(f)
if 'train' == 'train':
data = np.float32(x[0][0])
elif 'train' == 'valid':
data = np.float32(x[1][0])
elif 'train' == 'test':
data = np.float32(x[2][0])
else:
raise ValueError()
data = np.reshape(data, (-1, 1, 28, 28))
data = data
train_samples = data.shape[0]
noise_var = T.matrix('noise')
input_var = T.tensor4('inputs')
logger.info('Building model and graph')
layer = InputLayer(shape=(None, dim_z), input_var=noise_var)
layer = batch_norm(DenseLayer(layer, 1024))
layer = batch_norm(DenseLayer(layer, dim_h * 2 * 7 * 7))
layer = ReshapeLayer(layer, ([0], dim_h * 2, 7, 7))
layer = batch_norm(Deconv2DLayer(layer, dim_h, 5, stride=2, pad=2))
layer = Deconv2DLayer(layer, 1, 5, stride=2, pad=2, nonlinearity=None)
logger.debug('Generator output: {}'.format(layer.output_shape))
generator = layer
layer = InputLayer(shape=(None, 1, DIM_X, DIM_Y), input_var=input_var)
layer = Conv2DLayer(layer, dim_h, 5, stride=2, pad=2, nonlinearity=lrelu)
layer = Conv2DLayer(layer, dim_h * 2, 5, stride=2, pad=2, nonlinearity=lrelu)
layer = DenseLayer(layer, 1024, nonlinearity=lrelu)
layer = DenseLayer(layer, 1, nonlinearity=None)
logger.debug('Discriminator output: {}'.format(layer.output_shape))
discriminator = layer
layer = InputLayer(shape=(None, dim_z), input_var=noise_var)
layer = DenseLayer(layer, dim_h)
layer = batch_norm(DenseLayer(layer, 1))
baseline = layer
trng = RandomStreams(random.randint(1, 1000000))
g_output_logit = lasagne.layers.get_output(generator)
log_Z = lasagne.layers.get_output(baseline)
R = trng.uniform(size=(n_samples, batch_size, DIM_C, DIM_X, DIM_Y), dtype=floatX_)
g_output = T.nnet.sigmoid(g_output_logit)
samples = (R <= T.shape_padleft(g_output)).astype(floatX_)
D_r = lasagne.layers.get_output(discriminator)
D_f = lasagne.layers.get_output(discriminator, samples.reshape((-1, DIM_C, DIM_X, DIM_Y)))
D_f_ = D_f.reshape((n_samples, batch_size))
log_w = D_f_
log_g = -((1.0 - samples) * T.shape_padleft(g_output_logit) + T.shape_padleft(T.nnet.softplus(-g_output_logit))).sum(axis=(2, 3, 4))
log_N = T.log(log_w.shape[0]).astype(floatX_)
log_Z_est = log_sum_exp(log_w - log_N, axis=0)
log_w_tilde = log_w - T.shape_padleft(log_Z_est) - log_N
w_tilde = T.exp(log_w_tilde)
r = theano.gradient.disconnected_grad(log_w - log_Z[None, :] - 1)
generator_loss = -(r * log_g).mean()
discriminator_loss = T.nnet.softplus(-D_r).mean() + T.nnet.softplus(-D_f).mean() + D_f.mean()
(generator_loss, discriminator_loss, D_r, D_f, log_Z_est, log_w, w_tilde, d) = (generator_loss, discriminator_loss, D_r, D_f, log_Z_est, log_w, w_tilde, {})
baseline_loss = ((log_Z - log_Z_est) ** 2).mean()
generator_params = lasagne.layers.get_all_params(generator, trainable=True)
discriminator_params = lasagne.layers.get_all_params(discriminator, trainable=True)
baseline_params = lasagne.layers.get_all_params(baseline, trainable=True)
eta = theano.shared(floatX(learning_rate))
updates = lasagne.updates.adam(generator_loss, generator_params, learning_rate=eta, beta1=beta)
updates.update(lasagne.updates.adam(discriminator_loss, discriminator_params, learning_rate=eta, beta1=beta))
updates.update(lasagne.updates.adam(baseline_loss, baseline_params, learning_rate=eta, beta1=beta))
results = {'p(real)': (T.nnet.sigmoid(D_r) > 0.5).mean(), 'p(fake': (T.nnet.sigmoid(D_f) < 0.5).mean(), 'G loss': generator_loss, 'D loss': discriminator_loss, 'log Z': log_Z, 'log Z est': log_Z_est.mean(), 'log_Z est var': log_Z_est.std() ** 2, 'log w': log_w.mean(), 'log w var': log_w.std() ** 2, 'norm w': w_tilde.mean(), 'norm w var': w_tilde.std() ** 2, 'ESS': (1.0 / (w_tilde ** 2).sum(0)).mean()}
train_fn = theano.function([noise_var, input_var], results, updates=updates)
gen_fn = theano.function([noise_var], T.nnet.sigmoid(lasagne.layers.get_output(generator, deterministic=True)))
logger.info('Training...')
results = {}
for epoch in range(num_epochs):
u = 0
prefix = '{}_{}'.format(method, epoch)
e_results = {}
widgets = ['Epoch {}, '.format(epoch), Timer(), Bar()]
pbar = ProgressBar(widgets=widgets, maxval=train_samples // batch_size).start()
prefix = str(epoch)
start_time = time.time()
batch0 = None
for batch in iterate_minibatches(data, batch_size, shuffle=True):
if batch0 is None:
batch0 = batch
if batch.shape[0] == batch_size:
if prior == 'uniform':
noise = floatX(np.random.rand(batch_size, dim_z))
elif prior == 'gaussian':
noise = floatX(np.random.normal(size=(batch_size, dim_z)))
outs = train_fn(noise, batch)
outs = dict(((k, np.asarray(v)) for (k, v) in outs.items()))
for (k, v) in d.iteritems():
if k in e_results.keys():
e_results[k].append(v)
else:
e_results[k] = [v]
u += 1
pbar.update(u)
else:
logger.error('Skipped batch of size {}'.format(batch.shape))
for (k, v) in d.iteritems():
if k in results.keys():
results[k].append(v)
else:
results[k] = [v]
np.savez(path.join(binary_dir, '{}_results.npz'.format(prefix)), **results)
try:
if prior == 'uniform':
noise = floatX(np.random.rand(100, dim_z))
elif prior == 'gaussian':
noise = floatX(np.random.normal(size=(100, dim_z)))
samples = gen_fn(noise)
results = dict(((k, np.mean(v)) for (k, v) in results.items()))
logger.info(results)
if image_dir is not None:
plt.imsave(path.join(image_dir, '{}.png'.format(prefix)), samples.reshape(10, 10, 28, 28).transpose(0, 2, 1, 3).reshape(10 * 28, 10 * 28), cmap='gray')
except Exception as e:
print(e)
pass
logger.info('Epoch {} of {} took {:.3f}s'.format(epoch + 1, num_epochs, time.time() - start_time))
np.savez(path.join(binary_dir, '{}_generator_params.npz'.format(prefix)), *lasagne.layers.get_all_param_values(generator))
np.savez(path.join(binary_dir, '{}_discriminator_params.npz'.format(prefix)), *lasagne.layers.get_all_param_values(discriminator))
|
BGAN
|
positive
|
def _create_layers(self):
self.max_num_nodes = self.dataset_class.max_num_nodes()
self.num_node_types = self.dataset_class.num_node_types()
self.num_edge_types = self.dataset_class.num_edge_types()
self.num_max_neighbours = self.dataset_class.num_max_neighbours()
prior_config = get_param_val(self.model_params, 'prior_distribution', default_val=dict())
self.prior_distribution = create_prior_distribution(prior_config)
<DeepExtract>
self.node_encoding = create_encoding(self.model_params['categ_encoding_nodes'], dataset_class=self.dataset_class, vocab_size=self.num_node_types, category_prior=self.dataset_class.get_node_prior(data_root='data/'))
self.edge_attr_encoding = create_encoding(self.model_params['categ_encoding_edges'], dataset_class=self.dataset_class, vocab_size=self.num_edge_types, category_prior=self.dataset_class.get_edge_prior(data_root='data/'))
self.encoding_dim_nodes = self.node_encoding.D
self.encoding_dim_edges = self.edge_attr_encoding.D
self.edge_virtual_encoding = LinearCategoricalEncoding(num_dimensions=self.encoding_dim_edges, flow_config={'num_flows': self.model_params['encoding_virtual_num_flows'], 'hidden_layers': 2, 'hidden_size': 128}, dataset_class=self.dataset_class, vocab_size=1)
self.edge_virtual_decoder = DecoderLinear(num_categories=2, embed_dim=self.encoding_dim_edges, hidden_size=128, num_layers=2, class_prior_log=np.log(np.array([0.9, 0.1])))
</DeepExtract>
<DeepExtract>
hidden_size_nodes = get_param_val(self.model_params, 'coupling_hidden_size_nodes', default_val=256)
hidden_size_edges = get_param_val(self.model_params, 'coupling_hidden_size_edges', default_val=128)
num_flows = get_param_val(self.model_params, 'coupling_num_flows', default_val='4,6,6')
num_flows = [int(k) for k in num_flows.split(',')]
hidden_layers = get_param_val(self.model_params, 'coupling_hidden_layers', default_val=4)
if isinstance(hidden_layers, str):
if ',' in hidden_layers:
hidden_layers = [int(l) for l in hidden_layers.split(',')]
else:
hidden_layers = [int(hidden_layers)] * 3
else:
hidden_layers = [hidden_layers] * 3
num_mixtures_nodes = get_param_val(self.model_params, 'coupling_num_mixtures_nodes', default_val=16)
num_mixtures_edges = get_param_val(self.model_params, 'coupling_num_mixtures_edges', default_val=16)
mask_ratio = get_param_val(self.model_params, 'coupling_mask_ratio', default_val=0.5)
dropout = get_param_val(self.model_params, 'coupling_dropout', default_val=0.0)
coupling_mask_nodes = CouplingLayer.create_channel_mask(self.encoding_dim_nodes, ratio=mask_ratio)
step1_model_func = lambda c_out: RGCNNet(c_in=self.encoding_dim_nodes, c_out=c_out, num_edges=self.num_edge_types, num_layers=hidden_layers[0], hidden_size=hidden_size_nodes, max_neighbours=self.dataset_class.num_max_neighbours(), dp_rate=dropout, rgc_layer_fun=RelationGraphConv)
step1_flows = []
for _ in range(num_flows[0]):
step1_flows += [ActNormFlow(self.encoding_dim_nodes), InvertibleConv(self.encoding_dim_nodes), MixtureCDFCoupling(c_in=self.encoding_dim_nodes, mask=coupling_mask_nodes, model_func=step1_model_func, block_type='RelationGraphConv', num_mixtures=num_mixtures_nodes, regularizer_max=3.5, regularizer_factor=2)]
self.step1_flows = nn.ModuleList(step1_flows)
coupling_mask_edges = CouplingLayer.create_channel_mask(self.encoding_dim_edges, ratio=mask_ratio)
def edge2node_layer_func(step_idx):
if step_idx == 1:
return lambda : Edge2NodeAttnLayer(hidden_size_nodes=hidden_size_nodes, hidden_size_edges=hidden_size_edges, skip_config=2)
else:
return lambda : Edge2NodeQKVAttnLayer(hidden_size_nodes=hidden_size_nodes, hidden_size_edges=hidden_size_edges, skip_config=2)
node2edge_layer_func = lambda : Node2EdgePlainLayer(hidden_size_nodes=hidden_size_nodes, hidden_size_edges=hidden_size_edges, skip_config=2)
def edge_gnn_layer_func(step_idx):
return lambda : EdgeGNNLayer(edge2node_layer_func=edge2node_layer_func(step_idx), node2edge_layer_func=node2edge_layer_func)
def get_model_func(step_idx):
return lambda c_out_nodes, c_out_edges: EdgeGNN(c_in_nodes=self.encoding_dim_nodes, c_in_edges=self.encoding_dim_edges, c_out_nodes=c_out_nodes, c_out_edges=c_out_edges, edge_gnn_layer_func=edge_gnn_layer_func(step_idx), max_neighbours=self.dataset_class.num_max_neighbours(), num_layers=hidden_layers[step_idx])
actnorm_layer = lambda : NodeEdgeFlowWrapper(node_flow=ActNormFlow(c_in=self.encoding_dim_nodes), edge_flow=ActNormFlow(c_in=self.encoding_dim_edges))
permut_layer = lambda : NodeEdgeFlowWrapper(node_flow=InvertibleConv(c_in=self.encoding_dim_nodes), edge_flow=InvertibleConv(c_in=self.encoding_dim_edges))
coupling_layer = lambda step_idx: NodeEdgeCoupling(c_in_nodes=self.encoding_dim_nodes, c_in_edges=self.encoding_dim_edges, mask_nodes=coupling_mask_nodes, mask_edges=coupling_mask_edges, num_mixtures_nodes=num_mixtures_nodes, num_mixtures_edges=num_mixtures_edges, model_func=get_model_func(step_idx), regularizer_max=3.5, regularizer_factor=2)
step2_flows = []
for _ in range(num_flows[1]):
step2_flows += [actnorm_layer(), permut_layer(), coupling_layer(step_idx=1)]
self.step2_flows = nn.ModuleList(step2_flows)
step3_flows = []
for _ in range(num_flows[2]):
step3_flows += [actnorm_layer(), permut_layer(), coupling_layer(step_idx=2)]
self.step3_flows = nn.ModuleList(step3_flows)
</DeepExtract>
|
def _create_layers(self):
self.max_num_nodes = self.dataset_class.max_num_nodes()
self.num_node_types = self.dataset_class.num_node_types()
self.num_edge_types = self.dataset_class.num_edge_types()
self.num_max_neighbours = self.dataset_class.num_max_neighbours()
prior_config = get_param_val(self.model_params, 'prior_distribution', default_val=dict())
self.prior_distribution = create_prior_distribution(prior_config)
self.node_encoding = create_encoding(self.model_params['categ_encoding_nodes'], dataset_class=self.dataset_class, vocab_size=self.num_node_types, category_prior=self.dataset_class.get_node_prior(data_root='data/'))
self.edge_attr_encoding = create_encoding(self.model_params['categ_encoding_edges'], dataset_class=self.dataset_class, vocab_size=self.num_edge_types, category_prior=self.dataset_class.get_edge_prior(data_root='data/'))
self.encoding_dim_nodes = self.node_encoding.D
self.encoding_dim_edges = self.edge_attr_encoding.D
self.edge_virtual_encoding = LinearCategoricalEncoding(num_dimensions=self.encoding_dim_edges, flow_config={'num_flows': self.model_params['encoding_virtual_num_flows'], 'hidden_layers': 2, 'hidden_size': 128}, dataset_class=self.dataset_class, vocab_size=1)
self.edge_virtual_decoder = DecoderLinear(num_categories=2, embed_dim=self.encoding_dim_edges, hidden_size=128, num_layers=2, class_prior_log=np.log(np.array([0.9, 0.1])))
hidden_size_nodes = get_param_val(self.model_params, 'coupling_hidden_size_nodes', default_val=256)
hidden_size_edges = get_param_val(self.model_params, 'coupling_hidden_size_edges', default_val=128)
num_flows = get_param_val(self.model_params, 'coupling_num_flows', default_val='4,6,6')
num_flows = [int(k) for k in num_flows.split(',')]
hidden_layers = get_param_val(self.model_params, 'coupling_hidden_layers', default_val=4)
if isinstance(hidden_layers, str):
if ',' in hidden_layers:
hidden_layers = [int(l) for l in hidden_layers.split(',')]
else:
hidden_layers = [int(hidden_layers)] * 3
else:
hidden_layers = [hidden_layers] * 3
num_mixtures_nodes = get_param_val(self.model_params, 'coupling_num_mixtures_nodes', default_val=16)
num_mixtures_edges = get_param_val(self.model_params, 'coupling_num_mixtures_edges', default_val=16)
mask_ratio = get_param_val(self.model_params, 'coupling_mask_ratio', default_val=0.5)
dropout = get_param_val(self.model_params, 'coupling_dropout', default_val=0.0)
coupling_mask_nodes = CouplingLayer.create_channel_mask(self.encoding_dim_nodes, ratio=mask_ratio)
step1_model_func = lambda c_out: RGCNNet(c_in=self.encoding_dim_nodes, c_out=c_out, num_edges=self.num_edge_types, num_layers=hidden_layers[0], hidden_size=hidden_size_nodes, max_neighbours=self.dataset_class.num_max_neighbours(), dp_rate=dropout, rgc_layer_fun=RelationGraphConv)
step1_flows = []
for _ in range(num_flows[0]):
step1_flows += [ActNormFlow(self.encoding_dim_nodes), InvertibleConv(self.encoding_dim_nodes), MixtureCDFCoupling(c_in=self.encoding_dim_nodes, mask=coupling_mask_nodes, model_func=step1_model_func, block_type='RelationGraphConv', num_mixtures=num_mixtures_nodes, regularizer_max=3.5, regularizer_factor=2)]
self.step1_flows = nn.ModuleList(step1_flows)
coupling_mask_edges = CouplingLayer.create_channel_mask(self.encoding_dim_edges, ratio=mask_ratio)
def edge2node_layer_func(step_idx):
if step_idx == 1:
return lambda : Edge2NodeAttnLayer(hidden_size_nodes=hidden_size_nodes, hidden_size_edges=hidden_size_edges, skip_config=2)
else:
return lambda : Edge2NodeQKVAttnLayer(hidden_size_nodes=hidden_size_nodes, hidden_size_edges=hidden_size_edges, skip_config=2)
node2edge_layer_func = lambda : Node2EdgePlainLayer(hidden_size_nodes=hidden_size_nodes, hidden_size_edges=hidden_size_edges, skip_config=2)
def edge_gnn_layer_func(step_idx):
return lambda : EdgeGNNLayer(edge2node_layer_func=edge2node_layer_func(step_idx), node2edge_layer_func=node2edge_layer_func)
def get_model_func(step_idx):
return lambda c_out_nodes, c_out_edges: EdgeGNN(c_in_nodes=self.encoding_dim_nodes, c_in_edges=self.encoding_dim_edges, c_out_nodes=c_out_nodes, c_out_edges=c_out_edges, edge_gnn_layer_func=edge_gnn_layer_func(step_idx), max_neighbours=self.dataset_class.num_max_neighbours(), num_layers=hidden_layers[step_idx])
actnorm_layer = lambda : NodeEdgeFlowWrapper(node_flow=ActNormFlow(c_in=self.encoding_dim_nodes), edge_flow=ActNormFlow(c_in=self.encoding_dim_edges))
permut_layer = lambda : NodeEdgeFlowWrapper(node_flow=InvertibleConv(c_in=self.encoding_dim_nodes), edge_flow=InvertibleConv(c_in=self.encoding_dim_edges))
coupling_layer = lambda step_idx: NodeEdgeCoupling(c_in_nodes=self.encoding_dim_nodes, c_in_edges=self.encoding_dim_edges, mask_nodes=coupling_mask_nodes, mask_edges=coupling_mask_edges, num_mixtures_nodes=num_mixtures_nodes, num_mixtures_edges=num_mixtures_edges, model_func=get_model_func(step_idx), regularizer_max=3.5, regularizer_factor=2)
step2_flows = []
for _ in range(num_flows[1]):
step2_flows += [actnorm_layer(), permut_layer(), coupling_layer(step_idx=1)]
self.step2_flows = nn.ModuleList(step2_flows)
step3_flows = []
for _ in range(num_flows[2]):
step3_flows += [actnorm_layer(), permut_layer(), coupling_layer(step_idx=2)]
self.step3_flows = nn.ModuleList(step3_flows)
|
CategoricalNF
|
positive
|
def verify_signature(self, value, sig):
"""Verifies the signature for the given value."""
<DeepExtract>
salt = want_bytes(self.salt)
if self.key_derivation == 'concat':
key = self.digest_method(salt + self.secret_key).digest()
elif self.key_derivation == 'django-concat':
key = self.digest_method(salt + b'signer' + self.secret_key).digest()
elif self.key_derivation == 'hmac':
mac = hmac.new(self.secret_key, digestmod=self.digest_method)
mac.update(salt)
key = mac.digest()
elif self.key_derivation == 'none':
key = self.secret_key
else:
raise TypeError('Unknown key derivation method')
</DeepExtract>
try:
<DeepExtract>
sig = want_bytes(sig, encoding='ascii', errors='ignore')
sig = base64.urlsafe_b64decode(sig + b'=' * (-len(sig) % 4))
</DeepExtract>
except Exception:
return False
return self.algorithm.verify_signature(key, value, sig)
|
def verify_signature(self, value, sig):
"""Verifies the signature for the given value."""
salt = want_bytes(self.salt)
if self.key_derivation == 'concat':
key = self.digest_method(salt + self.secret_key).digest()
elif self.key_derivation == 'django-concat':
key = self.digest_method(salt + b'signer' + self.secret_key).digest()
elif self.key_derivation == 'hmac':
mac = hmac.new(self.secret_key, digestmod=self.digest_method)
mac.update(salt)
key = mac.digest()
elif self.key_derivation == 'none':
key = self.secret_key
else:
raise TypeError('Unknown key derivation method')
try:
sig = want_bytes(sig, encoding='ascii', errors='ignore')
sig = base64.urlsafe_b64decode(sig + b'=' * (-len(sig) % 4))
except Exception:
return False
return self.algorithm.verify_signature(key, value, sig)
|
appengine-toolkit
|
positive
|
def run_n_episodes(self):
"""Runs game to completion n times and then summarises results and saves model (if asked to)"""
start = time.time()
results_queue = Queue()
gradient_updates_queue = Queue()
episode_number = multiprocessing.Value('i', 0)
self.optimizer_lock = multiprocessing.Lock()
episodes_per_process = int(self.config.num_episodes_to_run / self.worker_processes) + 1
processes = []
self.actor_critic.share_memory()
self.actor_critic_optimizer.share_memory()
optimizer_worker = multiprocessing.Process(target=self.update_shared_model, args=(gradient_updates_queue,))
optimizer_worker.start()
for process_num in range(self.worker_processes):
worker = Actor_Critic_Worker(process_num, copy.deepcopy(self.environment), self.actor_critic, episode_number, self.optimizer_lock, self.actor_critic_optimizer, self.config, episodes_per_process, self.hyperparameters['epsilon_decay_rate_denominator'], self.action_size, self.action_types, results_queue, copy.deepcopy(self.actor_critic), gradient_updates_queue)
worker.start()
processes.append(worker)
<DeepExtract>
while True:
with episode_number.get_lock():
carry_on = episode_number.value < self.config.num_episodes_to_run
if carry_on:
if not results_queue.empty():
self.total_episode_score_so_far = results_queue.get()
self.save_and_print_result()
else:
break
</DeepExtract>
for worker in processes:
worker.join()
optimizer_worker.kill()
time_taken = time.time() - start
return (self.game_full_episode_scores, self.rolling_results, time_taken)
|
def run_n_episodes(self):
"""Runs game to completion n times and then summarises results and saves model (if asked to)"""
start = time.time()
results_queue = Queue()
gradient_updates_queue = Queue()
episode_number = multiprocessing.Value('i', 0)
self.optimizer_lock = multiprocessing.Lock()
episodes_per_process = int(self.config.num_episodes_to_run / self.worker_processes) + 1
processes = []
self.actor_critic.share_memory()
self.actor_critic_optimizer.share_memory()
optimizer_worker = multiprocessing.Process(target=self.update_shared_model, args=(gradient_updates_queue,))
optimizer_worker.start()
for process_num in range(self.worker_processes):
worker = Actor_Critic_Worker(process_num, copy.deepcopy(self.environment), self.actor_critic, episode_number, self.optimizer_lock, self.actor_critic_optimizer, self.config, episodes_per_process, self.hyperparameters['epsilon_decay_rate_denominator'], self.action_size, self.action_types, results_queue, copy.deepcopy(self.actor_critic), gradient_updates_queue)
worker.start()
processes.append(worker)
while True:
with episode_number.get_lock():
carry_on = episode_number.value < self.config.num_episodes_to_run
if carry_on:
if not results_queue.empty():
self.total_episode_score_so_far = results_queue.get()
self.save_and_print_result()
else:
break
for worker in processes:
worker.join()
optimizer_worker.kill()
time_taken = time.time() - start
return (self.game_full_episode_scores, self.rolling_results, time_taken)
|
Deep-Reinforcement-Learning-Algorithms-with-PyTorch
|
positive
|
def Process(self) -> None:
"""Copies the list of paths to or from the remote host."""
if not self._paths:
fspaths: Sequence[Union[containers.File, containers.RemoteFSPath]]
if self._upload:
fspaths = self.GetContainers(containers.File)
else:
fspaths = self.GetContainers(containers.RemoteFSPath)
self._paths = [fspath.path for fspath in fspaths]
if not self._paths:
self.ModuleError('No files found for copying with SCP module.', critical=True)
<DeepExtract>
mkdir_command = ['mkdir', '-m', 'g+w', '-p', self._destination]
if self._upload:
cmd = ['ssh']
if self._multiplexing:
cmd.extend(['-o', 'ControlPath=~/.ssh/ctrl-%C'])
cmd.extend([self._GenerateRemotePrefix()])
cmd.extend(mkdir_command)
self.logger.info('Creating destination directory {0:s} on host {1:s}'.format(self._destination, self._hostname))
else:
cmd = mkdir_command
self.logger.info('Shelling out: {0:s}'.format(' '.join(cmd)))
ret = subprocess.call(cmd)
if ret != 0:
self.ModuleError('Failed creating destination directory, bailing.', critical=True)
</DeepExtract>
cmd = ['scp']
if self._multiplexing:
cmd.extend(['-o', 'ControlMaster=auto', '-o', 'ControlPath=~/.ssh/ctrl-%C'])
if self._extra_ssh_options:
cmd.extend(self._extra_ssh_options)
if self._id_file:
cmd.extend(['-i', self._id_file])
if self._upload:
cmd.extend(self._paths)
cmd.extend(self._PrefixRemotePaths([self._destination]))
else:
cmd.extend(self._PrefixRemotePaths(self._paths))
cmd.extend([self._destination])
self.logger.debug('Executing SCP command: {0:s}'.format(' '.join(cmd)))
ret = subprocess.call(cmd)
if ret != 0:
self.ModuleError('Failed copying {0!s}'.format(self._paths), critical=True)
fspath: Union[containers.File, containers.RemoteFSPath]
for path_ in self._paths:
file_name = os.path.basename(path_)
full_path = os.path.join(self._destination, file_name)
if self._upload:
self.PublishMessage(f'Remote filesystem path {full_path}')
fspath = containers.RemoteFSPath(path=full_path, hostname=self._hostname)
else:
self.PublishMessage(f'Local filesystem path {full_path}')
fspath = containers.File(name=file_name, path=full_path)
self.StoreContainer(fspath)
|
def Process(self) -> None:
"""Copies the list of paths to or from the remote host."""
if not self._paths:
fspaths: Sequence[Union[containers.File, containers.RemoteFSPath]]
if self._upload:
fspaths = self.GetContainers(containers.File)
else:
fspaths = self.GetContainers(containers.RemoteFSPath)
self._paths = [fspath.path for fspath in fspaths]
if not self._paths:
self.ModuleError('No files found for copying with SCP module.', critical=True)
mkdir_command = ['mkdir', '-m', 'g+w', '-p', self._destination]
if self._upload:
cmd = ['ssh']
if self._multiplexing:
cmd.extend(['-o', 'ControlPath=~/.ssh/ctrl-%C'])
cmd.extend([self._GenerateRemotePrefix()])
cmd.extend(mkdir_command)
self.logger.info('Creating destination directory {0:s} on host {1:s}'.format(self._destination, self._hostname))
else:
cmd = mkdir_command
self.logger.info('Shelling out: {0:s}'.format(' '.join(cmd)))
ret = subprocess.call(cmd)
if ret != 0:
self.ModuleError('Failed creating destination directory, bailing.', critical=True)
cmd = ['scp']
if self._multiplexing:
cmd.extend(['-o', 'ControlMaster=auto', '-o', 'ControlPath=~/.ssh/ctrl-%C'])
if self._extra_ssh_options:
cmd.extend(self._extra_ssh_options)
if self._id_file:
cmd.extend(['-i', self._id_file])
if self._upload:
cmd.extend(self._paths)
cmd.extend(self._PrefixRemotePaths([self._destination]))
else:
cmd.extend(self._PrefixRemotePaths(self._paths))
cmd.extend([self._destination])
self.logger.debug('Executing SCP command: {0:s}'.format(' '.join(cmd)))
ret = subprocess.call(cmd)
if ret != 0:
self.ModuleError('Failed copying {0!s}'.format(self._paths), critical=True)
fspath: Union[containers.File, containers.RemoteFSPath]
for path_ in self._paths:
file_name = os.path.basename(path_)
full_path = os.path.join(self._destination, file_name)
if self._upload:
self.PublishMessage(f'Remote filesystem path {full_path}')
fspath = containers.RemoteFSPath(path=full_path, hostname=self._hostname)
else:
self.PublishMessage(f'Local filesystem path {full_path}')
fspath = containers.File(name=file_name, path=full_path)
self.StoreContainer(fspath)
|
dftimewolf
|
positive
|
def prolong(self, n):
"""Return a Chebtech of length n, obtained either by truncating
if n < self.size or zero-padding if n > self.size. In all cases a
deep copy is returned.
"""
m = self.size
ak = self.coeffs
cls = self.__class__
if n - m < 0:
out = cls(ak[:n].copy(), interval=self.interval)
elif n - m > 0:
out = cls(np.append(ak, np.zeros(n - m)), interval=self.interval)
else:
<DeepExtract>
out = self.__class__(self.coeffs.copy(), interval=self.interval.copy())
</DeepExtract>
return out
|
def prolong(self, n):
"""Return a Chebtech of length n, obtained either by truncating
if n < self.size or zero-padding if n > self.size. In all cases a
deep copy is returned.
"""
m = self.size
ak = self.coeffs
cls = self.__class__
if n - m < 0:
out = cls(ak[:n].copy(), interval=self.interval)
elif n - m > 0:
out = cls(np.append(ak, np.zeros(n - m)), interval=self.interval)
else:
out = self.__class__(self.coeffs.copy(), interval=self.interval.copy())
return out
|
chebpy
|
positive
|
def _create_indicator_pattern(object_type: str, property_path: List[str], value: str) -> IndicatorPattern:
<DeepExtract>
object_path = _create_object_path(object_type, property_path)
pattern = _create_equality_observation_expression_str(object_path, value)
</DeepExtract>
main_observable_type = _OBJECT_TYPE_TO_OBSERVABLE_TYPE_MAP[object_type]
return IndicatorPattern(pattern=pattern, main_observable_type=main_observable_type)
|
def _create_indicator_pattern(object_type: str, property_path: List[str], value: str) -> IndicatorPattern:
object_path = _create_object_path(object_type, property_path)
pattern = _create_equality_observation_expression_str(object_path, value)
main_observable_type = _OBJECT_TYPE_TO_OBSERVABLE_TYPE_MAP[object_type]
return IndicatorPattern(pattern=pattern, main_observable_type=main_observable_type)
|
connectors
|
positive
|
@pytest.mark.host_test
def test_load_config_file(self):
config_file_path = os.path.join(os.getcwd(), 'esptool.cfg')
with self.ConfigFile(config_file_path, self.dummy_config):
<DeepExtract>
def run_esptool_process(cmd):
print('Executing {}...'.format(' '.join(cmd)))
try:
output = subprocess.check_output([str(s) for s in cmd], cwd=TEST_DIR, stderr=subprocess.STDOUT)
output = output.decode('utf-8')
except subprocess.CalledProcessError as e:
print(e.output.decode('utf-8'))
raise e
try:
esptool = [os.environ['ESPTOOL_PY']]
except KeyError:
esptool = ['-m', 'esptool']
trace_arg = ['--trace'] if arg_trace else []
base_cmd = [sys.executable] + esptool + trace_arg
if chip or (arg_chip is not None and chip != 'auto'):
base_cmd += ['--chip', chip or arg_chip]
if port or arg_port is not None:
base_cmd += ['--port', port or arg_port]
if baud or arg_baud is not None:
base_cmd += ['--baud', str(baud or arg_baud)]
usb_jtag_serial_reset = ['--before', 'usb_reset'] if arg_preload_port else []
full_cmd = base_cmd + usb_jtag_serial_reset + 'version'.split(' ')
if preload and arg_preload_port and (arg_chip in ['esp32c3', 'esp32s3', 'esp32c6', 'esp32h2']):
port_index = base_cmd.index('--port') + 1
base_cmd[port_index] = arg_preload_port
preload_cmd = base_cmd + ['--no-stub', 'load_ram', f'{TEST_DIR}/images/ram_helloworld/helloworld-{arg_chip}.bin']
print('\nPreloading dummy binary to disable RTC watchdog...')
run_esptool_process(preload_cmd)
print('Dummy binary preloaded successfully.')
time.sleep(0.3)
print(f'''\nRunning the "{'version'}" command...''')
output = run_esptool_process(full_cmd)
print(output)
output = output
</DeepExtract>
assert f'Loaded custom configuration from {config_file_path}' in output
assert 'Ignoring unknown config file option' not in output
assert 'Ignoring invalid config file' not in output
with self.ConfigFile(config_file_path, '[wrong section name]'):
<DeepExtract>
def run_esptool_process(cmd):
print('Executing {}...'.format(' '.join(cmd)))
try:
output = subprocess.check_output([str(s) for s in cmd], cwd=TEST_DIR, stderr=subprocess.STDOUT)
output = output.decode('utf-8')
except subprocess.CalledProcessError as e:
print(e.output.decode('utf-8'))
raise e
try:
esptool = [os.environ['ESPTOOL_PY']]
except KeyError:
esptool = ['-m', 'esptool']
trace_arg = ['--trace'] if arg_trace else []
base_cmd = [sys.executable] + esptool + trace_arg
if chip or (arg_chip is not None and chip != 'auto'):
base_cmd += ['--chip', chip or arg_chip]
if port or arg_port is not None:
base_cmd += ['--port', port or arg_port]
if baud or arg_baud is not None:
base_cmd += ['--baud', str(baud or arg_baud)]
usb_jtag_serial_reset = ['--before', 'usb_reset'] if arg_preload_port else []
full_cmd = base_cmd + usb_jtag_serial_reset + 'version'.split(' ')
if preload and arg_preload_port and (arg_chip in ['esp32c3', 'esp32s3', 'esp32c6', 'esp32h2']):
port_index = base_cmd.index('--port') + 1
base_cmd[port_index] = arg_preload_port
preload_cmd = base_cmd + ['--no-stub', 'load_ram', f'{TEST_DIR}/images/ram_helloworld/helloworld-{arg_chip}.bin']
print('\nPreloading dummy binary to disable RTC watchdog...')
run_esptool_process(preload_cmd)
print('Dummy binary preloaded successfully.')
time.sleep(0.3)
print(f'''\nRunning the "{'version'}" command...''')
output = run_esptool_process(full_cmd)
print(output)
output = output
</DeepExtract>
assert f'Loaded custom configuration from {config_file_path}' not in output
faulty_config = '[esptool]\nconnect_attempts = 5\nconnect_attempts = 9\n'
with self.ConfigFile(config_file_path, faulty_config):
<DeepExtract>
def run_esptool_process(cmd):
print('Executing {}...'.format(' '.join(cmd)))
try:
output = subprocess.check_output([str(s) for s in cmd], cwd=TEST_DIR, stderr=subprocess.STDOUT)
output = output.decode('utf-8')
except subprocess.CalledProcessError as e:
print(e.output.decode('utf-8'))
raise e
try:
esptool = [os.environ['ESPTOOL_PY']]
except KeyError:
esptool = ['-m', 'esptool']
trace_arg = ['--trace'] if arg_trace else []
base_cmd = [sys.executable] + esptool + trace_arg
if chip or (arg_chip is not None and chip != 'auto'):
base_cmd += ['--chip', chip or arg_chip]
if port or arg_port is not None:
base_cmd += ['--port', port or arg_port]
if baud or arg_baud is not None:
base_cmd += ['--baud', str(baud or arg_baud)]
usb_jtag_serial_reset = ['--before', 'usb_reset'] if arg_preload_port else []
full_cmd = base_cmd + usb_jtag_serial_reset + 'version'.split(' ')
if preload and arg_preload_port and (arg_chip in ['esp32c3', 'esp32s3', 'esp32c6', 'esp32h2']):
port_index = base_cmd.index('--port') + 1
base_cmd[port_index] = arg_preload_port
preload_cmd = base_cmd + ['--no-stub', 'load_ram', f'{TEST_DIR}/images/ram_helloworld/helloworld-{arg_chip}.bin']
print('\nPreloading dummy binary to disable RTC watchdog...')
run_esptool_process(preload_cmd)
print('Dummy binary preloaded successfully.')
time.sleep(0.3)
print(f'''\nRunning the "{'version'}" command...''')
output = run_esptool_process(full_cmd)
print(output)
output = output
</DeepExtract>
assert f'Ignoring invalid config file {config_file_path}' in output
assert "option 'connect_attempts' in section 'esptool' already exists" in output
faulty_config = '[esptool]\nconnect_attempts = 9\ntimout = 2\nbits = 2'
with self.ConfigFile(config_file_path, faulty_config):
<DeepExtract>
def run_esptool_process(cmd):
print('Executing {}...'.format(' '.join(cmd)))
try:
output = subprocess.check_output([str(s) for s in cmd], cwd=TEST_DIR, stderr=subprocess.STDOUT)
output = output.decode('utf-8')
except subprocess.CalledProcessError as e:
print(e.output.decode('utf-8'))
raise e
try:
esptool = [os.environ['ESPTOOL_PY']]
except KeyError:
esptool = ['-m', 'esptool']
trace_arg = ['--trace'] if arg_trace else []
base_cmd = [sys.executable] + esptool + trace_arg
if chip or (arg_chip is not None and chip != 'auto'):
base_cmd += ['--chip', chip or arg_chip]
if port or arg_port is not None:
base_cmd += ['--port', port or arg_port]
if baud or arg_baud is not None:
base_cmd += ['--baud', str(baud or arg_baud)]
usb_jtag_serial_reset = ['--before', 'usb_reset'] if arg_preload_port else []
full_cmd = base_cmd + usb_jtag_serial_reset + 'version'.split(' ')
if preload and arg_preload_port and (arg_chip in ['esp32c3', 'esp32s3', 'esp32c6', 'esp32h2']):
port_index = base_cmd.index('--port') + 1
base_cmd[port_index] = arg_preload_port
preload_cmd = base_cmd + ['--no-stub', 'load_ram', f'{TEST_DIR}/images/ram_helloworld/helloworld-{arg_chip}.bin']
print('\nPreloading dummy binary to disable RTC watchdog...')
run_esptool_process(preload_cmd)
print('Dummy binary preloaded successfully.')
time.sleep(0.3)
print(f'''\nRunning the "{'version'}" command...''')
output = run_esptool_process(full_cmd)
print(output)
output = output
</DeepExtract>
assert 'Ignoring unknown config file options: bits, timout' in output
config_file_path = os.path.join(os.getcwd(), 'tox.ini')
with self.ConfigFile(config_file_path, self.dummy_config):
<DeepExtract>
def run_esptool_process(cmd):
print('Executing {}...'.format(' '.join(cmd)))
try:
output = subprocess.check_output([str(s) for s in cmd], cwd=TEST_DIR, stderr=subprocess.STDOUT)
output = output.decode('utf-8')
except subprocess.CalledProcessError as e:
print(e.output.decode('utf-8'))
raise e
try:
esptool = [os.environ['ESPTOOL_PY']]
except KeyError:
esptool = ['-m', 'esptool']
trace_arg = ['--trace'] if arg_trace else []
base_cmd = [sys.executable] + esptool + trace_arg
if chip or (arg_chip is not None and chip != 'auto'):
base_cmd += ['--chip', chip or arg_chip]
if port or arg_port is not None:
base_cmd += ['--port', port or arg_port]
if baud or arg_baud is not None:
base_cmd += ['--baud', str(baud or arg_baud)]
usb_jtag_serial_reset = ['--before', 'usb_reset'] if arg_preload_port else []
full_cmd = base_cmd + usb_jtag_serial_reset + 'version'.split(' ')
if preload and arg_preload_port and (arg_chip in ['esp32c3', 'esp32s3', 'esp32c6', 'esp32h2']):
port_index = base_cmd.index('--port') + 1
base_cmd[port_index] = arg_preload_port
preload_cmd = base_cmd + ['--no-stub', 'load_ram', f'{TEST_DIR}/images/ram_helloworld/helloworld-{arg_chip}.bin']
print('\nPreloading dummy binary to disable RTC watchdog...')
run_esptool_process(preload_cmd)
print('Dummy binary preloaded successfully.')
time.sleep(0.3)
print(f'''\nRunning the "{'version'}" command...''')
output = run_esptool_process(full_cmd)
print(output)
output = output
</DeepExtract>
assert f'Loaded custom configuration from {config_file_path}' in output
|
@pytest.mark.host_test
def test_load_config_file(self):
config_file_path = os.path.join(os.getcwd(), 'esptool.cfg')
with self.ConfigFile(config_file_path, self.dummy_config):
def run_esptool_process(cmd):
print('Executing {}...'.format(' '.join(cmd)))
try:
output = subprocess.check_output([str(s) for s in cmd], cwd=TEST_DIR, stderr=subprocess.STDOUT)
output = output.decode('utf-8')
except subprocess.CalledProcessError as e:
print(e.output.decode('utf-8'))
raise e
try:
esptool = [os.environ['ESPTOOL_PY']]
except KeyError:
esptool = ['-m', 'esptool']
trace_arg = ['--trace'] if arg_trace else []
base_cmd = [sys.executable] + esptool + trace_arg
if chip or (arg_chip is not None and chip != 'auto'):
base_cmd += ['--chip', chip or arg_chip]
if port or arg_port is not None:
base_cmd += ['--port', port or arg_port]
if baud or arg_baud is not None:
base_cmd += ['--baud', str(baud or arg_baud)]
usb_jtag_serial_reset = ['--before', 'usb_reset'] if arg_preload_port else []
full_cmd = base_cmd + usb_jtag_serial_reset + 'version'.split(' ')
if preload and arg_preload_port and (arg_chip in ['esp32c3', 'esp32s3', 'esp32c6', 'esp32h2']):
port_index = base_cmd.index('--port') + 1
base_cmd[port_index] = arg_preload_port
preload_cmd = base_cmd + ['--no-stub', 'load_ram', f'{TEST_DIR}/images/ram_helloworld/helloworld-{arg_chip}.bin']
print('\nPreloading dummy binary to disable RTC watchdog...')
run_esptool_process(preload_cmd)
print('Dummy binary preloaded successfully.')
time.sleep(0.3)
print(f'''\nRunning the "{'version'}" command...''')
output = run_esptool_process(full_cmd)
print(output)
output = output
assert f'Loaded custom configuration from {config_file_path}' in output
assert 'Ignoring unknown config file option' not in output
assert 'Ignoring invalid config file' not in output
with self.ConfigFile(config_file_path, '[wrong section name]'):
def run_esptool_process(cmd):
print('Executing {}...'.format(' '.join(cmd)))
try:
output = subprocess.check_output([str(s) for s in cmd], cwd=TEST_DIR, stderr=subprocess.STDOUT)
output = output.decode('utf-8')
except subprocess.CalledProcessError as e:
print(e.output.decode('utf-8'))
raise e
try:
esptool = [os.environ['ESPTOOL_PY']]
except KeyError:
esptool = ['-m', 'esptool']
trace_arg = ['--trace'] if arg_trace else []
base_cmd = [sys.executable] + esptool + trace_arg
if chip or (arg_chip is not None and chip != 'auto'):
base_cmd += ['--chip', chip or arg_chip]
if port or arg_port is not None:
base_cmd += ['--port', port or arg_port]
if baud or arg_baud is not None:
base_cmd += ['--baud', str(baud or arg_baud)]
usb_jtag_serial_reset = ['--before', 'usb_reset'] if arg_preload_port else []
full_cmd = base_cmd + usb_jtag_serial_reset + 'version'.split(' ')
if preload and arg_preload_port and (arg_chip in ['esp32c3', 'esp32s3', 'esp32c6', 'esp32h2']):
port_index = base_cmd.index('--port') + 1
base_cmd[port_index] = arg_preload_port
preload_cmd = base_cmd + ['--no-stub', 'load_ram', f'{TEST_DIR}/images/ram_helloworld/helloworld-{arg_chip}.bin']
print('\nPreloading dummy binary to disable RTC watchdog...')
run_esptool_process(preload_cmd)
print('Dummy binary preloaded successfully.')
time.sleep(0.3)
print(f'''\nRunning the "{'version'}" command...''')
output = run_esptool_process(full_cmd)
print(output)
output = output
assert f'Loaded custom configuration from {config_file_path}' not in output
faulty_config = '[esptool]\nconnect_attempts = 5\nconnect_attempts = 9\n'
with self.ConfigFile(config_file_path, faulty_config):
def run_esptool_process(cmd):
print('Executing {}...'.format(' '.join(cmd)))
try:
output = subprocess.check_output([str(s) for s in cmd], cwd=TEST_DIR, stderr=subprocess.STDOUT)
output = output.decode('utf-8')
except subprocess.CalledProcessError as e:
print(e.output.decode('utf-8'))
raise e
try:
esptool = [os.environ['ESPTOOL_PY']]
except KeyError:
esptool = ['-m', 'esptool']
trace_arg = ['--trace'] if arg_trace else []
base_cmd = [sys.executable] + esptool + trace_arg
if chip or (arg_chip is not None and chip != 'auto'):
base_cmd += ['--chip', chip or arg_chip]
if port or arg_port is not None:
base_cmd += ['--port', port or arg_port]
if baud or arg_baud is not None:
base_cmd += ['--baud', str(baud or arg_baud)]
usb_jtag_serial_reset = ['--before', 'usb_reset'] if arg_preload_port else []
full_cmd = base_cmd + usb_jtag_serial_reset + 'version'.split(' ')
if preload and arg_preload_port and (arg_chip in ['esp32c3', 'esp32s3', 'esp32c6', 'esp32h2']):
port_index = base_cmd.index('--port') + 1
base_cmd[port_index] = arg_preload_port
preload_cmd = base_cmd + ['--no-stub', 'load_ram', f'{TEST_DIR}/images/ram_helloworld/helloworld-{arg_chip}.bin']
print('\nPreloading dummy binary to disable RTC watchdog...')
run_esptool_process(preload_cmd)
print('Dummy binary preloaded successfully.')
time.sleep(0.3)
print(f'''\nRunning the "{'version'}" command...''')
output = run_esptool_process(full_cmd)
print(output)
output = output
assert f'Ignoring invalid config file {config_file_path}' in output
assert "option 'connect_attempts' in section 'esptool' already exists" in output
faulty_config = '[esptool]\nconnect_attempts = 9\ntimout = 2\nbits = 2'
with self.ConfigFile(config_file_path, faulty_config):
def run_esptool_process(cmd):
print('Executing {}...'.format(' '.join(cmd)))
try:
output = subprocess.check_output([str(s) for s in cmd], cwd=TEST_DIR, stderr=subprocess.STDOUT)
output = output.decode('utf-8')
except subprocess.CalledProcessError as e:
print(e.output.decode('utf-8'))
raise e
try:
esptool = [os.environ['ESPTOOL_PY']]
except KeyError:
esptool = ['-m', 'esptool']
trace_arg = ['--trace'] if arg_trace else []
base_cmd = [sys.executable] + esptool + trace_arg
if chip or (arg_chip is not None and chip != 'auto'):
base_cmd += ['--chip', chip or arg_chip]
if port or arg_port is not None:
base_cmd += ['--port', port or arg_port]
if baud or arg_baud is not None:
base_cmd += ['--baud', str(baud or arg_baud)]
usb_jtag_serial_reset = ['--before', 'usb_reset'] if arg_preload_port else []
full_cmd = base_cmd + usb_jtag_serial_reset + 'version'.split(' ')
if preload and arg_preload_port and (arg_chip in ['esp32c3', 'esp32s3', 'esp32c6', 'esp32h2']):
port_index = base_cmd.index('--port') + 1
base_cmd[port_index] = arg_preload_port
preload_cmd = base_cmd + ['--no-stub', 'load_ram', f'{TEST_DIR}/images/ram_helloworld/helloworld-{arg_chip}.bin']
print('\nPreloading dummy binary to disable RTC watchdog...')
run_esptool_process(preload_cmd)
print('Dummy binary preloaded successfully.')
time.sleep(0.3)
print(f'''\nRunning the "{'version'}" command...''')
output = run_esptool_process(full_cmd)
print(output)
output = output
assert 'Ignoring unknown config file options: bits, timout' in output
config_file_path = os.path.join(os.getcwd(), 'tox.ini')
with self.ConfigFile(config_file_path, self.dummy_config):
def run_esptool_process(cmd):
print('Executing {}...'.format(' '.join(cmd)))
try:
output = subprocess.check_output([str(s) for s in cmd], cwd=TEST_DIR, stderr=subprocess.STDOUT)
output = output.decode('utf-8')
except subprocess.CalledProcessError as e:
print(e.output.decode('utf-8'))
raise e
try:
esptool = [os.environ['ESPTOOL_PY']]
except KeyError:
esptool = ['-m', 'esptool']
trace_arg = ['--trace'] if arg_trace else []
base_cmd = [sys.executable] + esptool + trace_arg
if chip or (arg_chip is not None and chip != 'auto'):
base_cmd += ['--chip', chip or arg_chip]
if port or arg_port is not None:
base_cmd += ['--port', port or arg_port]
if baud or arg_baud is not None:
base_cmd += ['--baud', str(baud or arg_baud)]
usb_jtag_serial_reset = ['--before', 'usb_reset'] if arg_preload_port else []
full_cmd = base_cmd + usb_jtag_serial_reset + 'version'.split(' ')
if preload and arg_preload_port and (arg_chip in ['esp32c3', 'esp32s3', 'esp32c6', 'esp32h2']):
port_index = base_cmd.index('--port') + 1
base_cmd[port_index] = arg_preload_port
preload_cmd = base_cmd + ['--no-stub', 'load_ram', f'{TEST_DIR}/images/ram_helloworld/helloworld-{arg_chip}.bin']
print('\nPreloading dummy binary to disable RTC watchdog...')
run_esptool_process(preload_cmd)
print('Dummy binary preloaded successfully.')
time.sleep(0.3)
print(f'''\nRunning the "{'version'}" command...''')
output = run_esptool_process(full_cmd)
print(output)
output = output
assert f'Loaded custom configuration from {config_file_path}' in output
|
esptool
|
positive
|
def test_app_delete(self):
app_data = {}
<DeepExtract>
app_data['name'] = 'abc' + str(randint(0, 5000))
</DeepExtract>
app_data['location'] = 'abc'
app_data['version'] = 'version'
app_data['dep_target'] = 'local'
app_data['env_id'] = 1
new_app_id = app.App().insert(app_data)
app.App().delete(new_app_id)
deleted_app = app.App().get(new_app_id)
self.assertIsNone(deleted_app)
|
def test_app_delete(self):
app_data = {}
app_data['name'] = 'abc' + str(randint(0, 5000))
app_data['location'] = 'abc'
app_data['version'] = 'version'
app_data['dep_target'] = 'local'
app_data['env_id'] = 1
new_app_id = app.App().insert(app_data)
app.App().delete(new_app_id)
deleted_app = app.App().get(new_app_id)
self.assertIsNone(deleted_app)
|
caastle
|
positive
|
def __init__(self, param_decls: t.Optional[t.Sequence[str]]=None, type: t.Optional[t.Union[types.ParamType, t.Any]]=None, required: bool=False, default: t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]=None, callback: t.Optional[t.Callable[[Context, 'Parameter', t.Any], t.Any]]=None, nargs: t.Optional[int]=None, multiple: bool=False, metavar: t.Optional[str]=None, expose_value: bool=True, is_eager: bool=False, envvar: t.Optional[t.Union[str, t.Sequence[str]]]=None, shell_complete: t.Optional[t.Callable[[Context, 'Parameter', str], t.Union[t.List['CompletionItem'], t.List[str]]]]=None) -> None:
<DeepExtract>
raise NotImplementedError()
</DeepExtract>
self.type = types.convert_type(type, default)
if nargs is None:
if self.type.is_composite:
nargs = self.type.arity
else:
nargs = 1
self.required = required
self.callback = callback
self.nargs = nargs
self.multiple = multiple
self.expose_value = expose_value
self.default = default
self.is_eager = is_eager
self.metavar = metavar
self.envvar = envvar
self._custom_shell_complete = shell_complete
if __debug__:
if self.type.is_composite and nargs != self.type.arity:
raise ValueError(f"'nargs' must be {self.type.arity} (or None) for type {self.type!r}, but it was {nargs}.")
check_default = default if not callable(default) else None
if check_default is not None:
if multiple:
try:
check_default = next(_check_iter(check_default), None)
except TypeError:
raise ValueError("'default' must be a list when 'multiple' is true.") from None
if nargs != 1 and check_default is not None:
try:
<DeepExtract>
if isinstance(check_default, str):
raise TypeError
return iter(check_default)
</DeepExtract>
except TypeError:
if multiple:
message = "'default' must be a list of lists when 'multiple' is true and 'nargs' != 1."
else:
message = "'default' must be a list when 'nargs' != 1."
raise ValueError(message) from None
if nargs > 1 and len(check_default) != nargs:
subject = 'item length' if multiple else 'length'
raise ValueError(f"'default' {subject} must match nargs={nargs}.")
|
def __init__(self, param_decls: t.Optional[t.Sequence[str]]=None, type: t.Optional[t.Union[types.ParamType, t.Any]]=None, required: bool=False, default: t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]=None, callback: t.Optional[t.Callable[[Context, 'Parameter', t.Any], t.Any]]=None, nargs: t.Optional[int]=None, multiple: bool=False, metavar: t.Optional[str]=None, expose_value: bool=True, is_eager: bool=False, envvar: t.Optional[t.Union[str, t.Sequence[str]]]=None, shell_complete: t.Optional[t.Callable[[Context, 'Parameter', str], t.Union[t.List['CompletionItem'], t.List[str]]]]=None) -> None:
raise NotImplementedError()
self.type = types.convert_type(type, default)
if nargs is None:
if self.type.is_composite:
nargs = self.type.arity
else:
nargs = 1
self.required = required
self.callback = callback
self.nargs = nargs
self.multiple = multiple
self.expose_value = expose_value
self.default = default
self.is_eager = is_eager
self.metavar = metavar
self.envvar = envvar
self._custom_shell_complete = shell_complete
if __debug__:
if self.type.is_composite and nargs != self.type.arity:
raise ValueError(f"'nargs' must be {self.type.arity} (or None) for type {self.type!r}, but it was {nargs}.")
check_default = default if not callable(default) else None
if check_default is not None:
if multiple:
try:
check_default = next(_check_iter(check_default), None)
except TypeError:
raise ValueError("'default' must be a list when 'multiple' is true.") from None
if nargs != 1 and check_default is not None:
try:
if isinstance(check_default, str):
raise TypeError
return iter(check_default)
except TypeError:
if multiple:
message = "'default' must be a list of lists when 'multiple' is true and 'nargs' != 1."
else:
message = "'default' must be a list when 'nargs' != 1."
raise ValueError(message) from None
if nargs > 1 and len(check_default) != nargs:
subject = 'item length' if multiple else 'length'
raise ValueError(f"'default' {subject} must match nargs={nargs}.")
|
click
|
positive
|
def findFactors(num, result, prefix):
for i in range(1, int(num / 2) + 1):
if num % i == 0:
tmp = prefix + [i, num // i]
tmp.sort()
<DeepExtract>
for a in result:
if tmp == a:
return
result.append(tmp)
</DeepExtract>
if i != 1:
<DeepExtract>
for i in range(1, int(i / 2) + 1):
if i % i == 0:
tmp = prefix + [num // i] + [i, i // i]
tmp.sort()
lookup(result, tmp)
if i != 1:
findFactors(i, result, prefix + [num // i] + [i // i])
findFactors(i // i, result, prefix + [num // i] + [i])
</DeepExtract>
<DeepExtract>
for i in range(1, int(num // i / 2) + 1):
if num // i % i == 0:
tmp = prefix + [i] + [i, num // i // i]
tmp.sort()
lookup(result, tmp)
if i != 1:
findFactors(i, result, prefix + [i] + [num // i // i])
findFactors(num // i // i, result, prefix + [i] + [i])
</DeepExtract>
|
def findFactors(num, result, prefix):
for i in range(1, int(num / 2) + 1):
if num % i == 0:
tmp = prefix + [i, num // i]
tmp.sort()
for a in result:
if tmp == a:
return
result.append(tmp)
if i != 1:
for i in range(1, int(i / 2) + 1):
if i % i == 0:
tmp = prefix + [num // i] + [i, i // i]
tmp.sort()
lookup(result, tmp)
if i != 1:
findFactors(i, result, prefix + [num // i] + [i // i])
findFactors(i // i, result, prefix + [num // i] + [i])
for i in range(1, int(num // i / 2) + 1):
if num // i % i == 0:
tmp = prefix + [i] + [i, num // i // i]
tmp.sort()
lookup(result, tmp)
if i != 1:
findFactors(i, result, prefix + [i] + [num // i // i])
findFactors(num // i // i, result, prefix + [i] + [i])
|
challenges
|
positive
|
def add_row(self, row):
for key in row.keys():
<DeepExtract>
if key not in self.columns:
self.columns[key] = {'name': key, 'type': column_type, 'friendly_name': key}
</DeepExtract>
self.rows.append(row)
|
def add_row(self, row):
for key in row.keys():
if key not in self.columns:
self.columns[key] = {'name': key, 'type': column_type, 'friendly_name': key}
self.rows.append(row)
|
docker-redash
|
positive
|
def test_data_nao_pode_ser_1969_por_padrao_de_itens_criados(self):
with api.env.adopt_roles(['Manager']):
obj = api.content.create(type='collective.nitf.content', container=self.portal['folder'], id='noticia', title='noticia')
transaction.commit()
item_date = obj.modified() if obj.EffectiveDate() == 'None' else obj.effective()
date = item_date.strftime(LOCAL_TIME_FORMAT)
time = item_date.strftime(TIME_FORMAT)
<DeepExtract>
setRoles(self.portal, TEST_USER_ID, ['Site Administrator'])
self.browser.handleErrors = False
basic_auth = 'Basic {0}'.format('{0}:{1}'.format(TEST_USER_NAME, TEST_USER_PASSWORD))
self.browser.addHeader('Authorization', basic_auth)
</DeepExtract>
self.browser.open('{0}/{1}'.format(self.folder.absolute_url(), 'summary_view'))
contents_no_spaces = ''.join(self.browser.contents.split())
self.assertIn('<iclass="icon-day"></i>{0}</span><spanclass="summary-view-icon"><iclass="icon-hour"></i>{1}'.format(date, time), contents_no_spaces)
self.assertNotIn('<iclass="icon-day"></i>31/12/1969', contents_no_spaces)
|
def test_data_nao_pode_ser_1969_por_padrao_de_itens_criados(self):
with api.env.adopt_roles(['Manager']):
obj = api.content.create(type='collective.nitf.content', container=self.portal['folder'], id='noticia', title='noticia')
transaction.commit()
item_date = obj.modified() if obj.EffectiveDate() == 'None' else obj.effective()
date = item_date.strftime(LOCAL_TIME_FORMAT)
time = item_date.strftime(TIME_FORMAT)
setRoles(self.portal, TEST_USER_ID, ['Site Administrator'])
self.browser.handleErrors = False
basic_auth = 'Basic {0}'.format('{0}:{1}'.format(TEST_USER_NAME, TEST_USER_PASSWORD))
self.browser.addHeader('Authorization', basic_auth)
self.browser.open('{0}/{1}'.format(self.folder.absolute_url(), 'summary_view'))
contents_no_spaces = ''.join(self.browser.contents.split())
self.assertIn('<iclass="icon-day"></i>{0}</span><spanclass="summary-view-icon"><iclass="icon-hour"></i>{1}'.format(date, time), contents_no_spaces)
self.assertNotIn('<iclass="icon-day"></i>31/12/1969', contents_no_spaces)
|
brasil.gov.portal
|
positive
|
def mon(args):
if args.subcommand == 'create':
<DeepExtract>
cfg = conf.ceph.load(args)
if not args.mon:
args.mon = get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)
if args.keyrings:
monitor_keyring = concatenate_keyrings(args)
else:
keyring_path = '{cluster}.mon.keyring'.format(cluster=args.cluster)
try:
monitor_keyring = files.read_file(keyring_path)
except IOError:
LOG.warning('keyring (%s) not found, creating a new one' % keyring_path)
new_mon_keyring(args)
monitor_keyring = files.read_file(keyring_path)
LOG.debug('Deploying mon, cluster %s hosts %s', args.cluster, ' '.join(args.mon))
errors = 0
for (name, host) in mon_hosts(args.mon):
try:
LOG.debug('detecting platform for host %s ...', name)
distro = hosts.get(host, username=args.username, callbacks=[packages.ceph_is_installed])
LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
rlogger = logging.getLogger(name)
hostname_is_compatible(distro.conn, rlogger, name)
rlogger.debug('deploying mon to %s', name)
distro.mon.create(distro, args, monitor_keyring)
time.sleep(2)
mon_status(distro.conn, rlogger, name, args)
catch_mon_errors(distro.conn, rlogger, name, cfg, args)
distro.conn.exit()
except RuntimeError as e:
LOG.error(e)
errors += 1
if errors:
raise exc.GenericError('Failed to create %d monitors' % errors)
</DeepExtract>
elif args.subcommand == 'add':
<DeepExtract>
cfg = conf.ceph.load(args)
mon_host = args.mon[0]
try:
with open('{cluster}.mon.keyring'.format(cluster=args.cluster), 'rb') as f:
monitor_keyring = f.read()
except IOError:
raise RuntimeError("mon keyring not found; run 'new' to create a new cluster")
LOG.info('ensuring configuration of new mon host: %s', mon_host)
args.client = args.mon
admin.admin(args)
LOG.debug('Adding mon to cluster %s, host %s', args.cluster, mon_host)
mon_section = 'mon.%s' % mon_host
cfg_mon_addr = cfg.safe_get(mon_section, 'mon addr')
if args.address:
LOG.debug('using mon address via --address %s' % args.address)
mon_ip = args.address
elif cfg_mon_addr:
LOG.debug('using mon address via configuration: %s' % cfg_mon_addr)
mon_ip = cfg_mon_addr
else:
mon_ip = net.get_nonlocal_ip(mon_host)
LOG.debug('using mon address by resolving host: %s' % mon_ip)
try:
LOG.debug('detecting platform for host %s ...', mon_host)
distro = hosts.get(mon_host, username=args.username, callbacks=[packages.ceph_is_installed])
LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
rlogger = logging.getLogger(mon_host)
hostname_is_compatible(distro.conn, rlogger, mon_host)
rlogger.debug('adding mon to %s', mon_host)
args.address = mon_ip
distro.mon.add(distro, args, monitor_keyring)
time.sleep(2)
catch_mon_errors(distro.conn, rlogger, mon_host, cfg, args)
mon_status(distro.conn, rlogger, mon_host, args)
distro.conn.exit()
except RuntimeError as e:
LOG.error(e)
raise exc.GenericError('Failed to add monitor to host: %s' % mon_host)
</DeepExtract>
elif args.subcommand == 'destroy':
<DeepExtract>
errors = 0
for (name, host) in mon_hosts(args.mon):
try:
LOG.debug('Removing mon from %s', name)
distro = hosts.get(host, username=args.username, callbacks=[packages.ceph_is_installed])
hostname = distro.conn.remote_module.shortname()
destroy_mon(distro.conn, args.cluster, hostname)
distro.conn.exit()
except RuntimeError as e:
LOG.error(e)
errors += 1
if errors:
raise exc.GenericError('Failed to destroy %d monitors' % errors)
</DeepExtract>
elif args.subcommand == 'create-initial':
<DeepExtract>
mon_initial_members = get_mon_initial_members(args, error_on_empty=True)
args.mon = mon_initial_members
mon_create(args)
mon_in_quorum = set([])
mon_members = set([host for host in mon_initial_members])
for host in mon_initial_members:
mon_name = 'mon.%s' % host
LOG.info('processing monitor %s', mon_name)
sleeps = [20, 20, 15, 10, 10, 5]
tries = 5
rlogger = logging.getLogger(host)
distro = hosts.get(host, username=args.username, callbacks=[packages.ceph_is_installed])
while tries:
status = mon_status_check(distro.conn, rlogger, host, args)
has_reached_quorum = status.get('state', '') in ['peon', 'leader']
if not has_reached_quorum:
LOG.warning('%s monitor is not yet in quorum, tries left: %s' % (mon_name, tries))
tries -= 1
sleep_seconds = sleeps.pop()
LOG.warning('waiting %s seconds before retrying', sleep_seconds)
time.sleep(sleep_seconds)
else:
mon_in_quorum.add(host)
LOG.info('%s monitor has reached quorum!', mon_name)
break
distro.conn.exit()
if mon_in_quorum == mon_members:
LOG.info('all initial monitors are running and have formed quorum')
LOG.info('Running gatherkeys...')
gatherkeys.gatherkeys(args)
else:
LOG.error('Some monitors have still not reached quorum:')
for host in mon_members - mon_in_quorum:
LOG.error('%s', host)
raise SystemExit('cluster may not be in a healthy state')
</DeepExtract>
else:
LOG.error('subcommand %s not implemented', args.subcommand)
|
def mon(args):
if args.subcommand == 'create':
cfg = conf.ceph.load(args)
if not args.mon:
args.mon = get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)
if args.keyrings:
monitor_keyring = concatenate_keyrings(args)
else:
keyring_path = '{cluster}.mon.keyring'.format(cluster=args.cluster)
try:
monitor_keyring = files.read_file(keyring_path)
except IOError:
LOG.warning('keyring (%s) not found, creating a new one' % keyring_path)
new_mon_keyring(args)
monitor_keyring = files.read_file(keyring_path)
LOG.debug('Deploying mon, cluster %s hosts %s', args.cluster, ' '.join(args.mon))
errors = 0
for (name, host) in mon_hosts(args.mon):
try:
LOG.debug('detecting platform for host %s ...', name)
distro = hosts.get(host, username=args.username, callbacks=[packages.ceph_is_installed])
LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
rlogger = logging.getLogger(name)
hostname_is_compatible(distro.conn, rlogger, name)
rlogger.debug('deploying mon to %s', name)
distro.mon.create(distro, args, monitor_keyring)
time.sleep(2)
mon_status(distro.conn, rlogger, name, args)
catch_mon_errors(distro.conn, rlogger, name, cfg, args)
distro.conn.exit()
except RuntimeError as e:
LOG.error(e)
errors += 1
if errors:
raise exc.GenericError('Failed to create %d monitors' % errors)
elif args.subcommand == 'add':
cfg = conf.ceph.load(args)
mon_host = args.mon[0]
try:
with open('{cluster}.mon.keyring'.format(cluster=args.cluster), 'rb') as f:
monitor_keyring = f.read()
except IOError:
raise RuntimeError("mon keyring not found; run 'new' to create a new cluster")
LOG.info('ensuring configuration of new mon host: %s', mon_host)
args.client = args.mon
admin.admin(args)
LOG.debug('Adding mon to cluster %s, host %s', args.cluster, mon_host)
mon_section = 'mon.%s' % mon_host
cfg_mon_addr = cfg.safe_get(mon_section, 'mon addr')
if args.address:
LOG.debug('using mon address via --address %s' % args.address)
mon_ip = args.address
elif cfg_mon_addr:
LOG.debug('using mon address via configuration: %s' % cfg_mon_addr)
mon_ip = cfg_mon_addr
else:
mon_ip = net.get_nonlocal_ip(mon_host)
LOG.debug('using mon address by resolving host: %s' % mon_ip)
try:
LOG.debug('detecting platform for host %s ...', mon_host)
distro = hosts.get(mon_host, username=args.username, callbacks=[packages.ceph_is_installed])
LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
rlogger = logging.getLogger(mon_host)
hostname_is_compatible(distro.conn, rlogger, mon_host)
rlogger.debug('adding mon to %s', mon_host)
args.address = mon_ip
distro.mon.add(distro, args, monitor_keyring)
time.sleep(2)
catch_mon_errors(distro.conn, rlogger, mon_host, cfg, args)
mon_status(distro.conn, rlogger, mon_host, args)
distro.conn.exit()
except RuntimeError as e:
LOG.error(e)
raise exc.GenericError('Failed to add monitor to host: %s' % mon_host)
elif args.subcommand == 'destroy':
errors = 0
for (name, host) in mon_hosts(args.mon):
try:
LOG.debug('Removing mon from %s', name)
distro = hosts.get(host, username=args.username, callbacks=[packages.ceph_is_installed])
hostname = distro.conn.remote_module.shortname()
destroy_mon(distro.conn, args.cluster, hostname)
distro.conn.exit()
except RuntimeError as e:
LOG.error(e)
errors += 1
if errors:
raise exc.GenericError('Failed to destroy %d monitors' % errors)
elif args.subcommand == 'create-initial':
mon_initial_members = get_mon_initial_members(args, error_on_empty=True)
args.mon = mon_initial_members
mon_create(args)
mon_in_quorum = set([])
mon_members = set([host for host in mon_initial_members])
for host in mon_initial_members:
mon_name = 'mon.%s' % host
LOG.info('processing monitor %s', mon_name)
sleeps = [20, 20, 15, 10, 10, 5]
tries = 5
rlogger = logging.getLogger(host)
distro = hosts.get(host, username=args.username, callbacks=[packages.ceph_is_installed])
while tries:
status = mon_status_check(distro.conn, rlogger, host, args)
has_reached_quorum = status.get('state', '') in ['peon', 'leader']
if not has_reached_quorum:
LOG.warning('%s monitor is not yet in quorum, tries left: %s' % (mon_name, tries))
tries -= 1
sleep_seconds = sleeps.pop()
LOG.warning('waiting %s seconds before retrying', sleep_seconds)
time.sleep(sleep_seconds)
else:
mon_in_quorum.add(host)
LOG.info('%s monitor has reached quorum!', mon_name)
break
distro.conn.exit()
if mon_in_quorum == mon_members:
LOG.info('all initial monitors are running and have formed quorum')
LOG.info('Running gatherkeys...')
gatherkeys.gatherkeys(args)
else:
LOG.error('Some monitors have still not reached quorum:')
for host in mon_members - mon_in_quorum:
LOG.error('%s', host)
raise SystemExit('cluster may not be in a healthy state')
else:
LOG.error('subcommand %s not implemented', args.subcommand)
|
ceph-deploy
|
positive
|
@mock.patch('alot.db.utils.settings.get', mock.Mock(return_value=True))
def test_prefer_plaintext_mixed(self):
expected = 'text/plain'
<DeepExtract>
mail = EmailMessage()
set_basic_headers(mail)
mail.set_content('This is an email')
mail.add_alternative('<!DOCTYPE html><html><body>This is an html email</body></html>', subtype='html')
mail = mail
</DeepExtract>
actual = utils.get_body_part(mail).get_content_type()
self.assertEqual(actual, expected)
|
@mock.patch('alot.db.utils.settings.get', mock.Mock(return_value=True))
def test_prefer_plaintext_mixed(self):
expected = 'text/plain'
mail = EmailMessage()
set_basic_headers(mail)
mail.set_content('This is an email')
mail.add_alternative('<!DOCTYPE html><html><body>This is an html email</body></html>', subtype='html')
mail = mail
actual = utils.get_body_part(mail).get_content_type()
self.assertEqual(actual, expected)
|
alot
|
positive
|
def get(params, optimizer, learning_rate=None, decay=None, weight_decay=0):
"""Retrieves an Optimizer instance."""
if isinstance(optimizer, torch.optim.Optimizer):
optim = optimizer
elif optimizer in ['L-BFGS', 'L-BFGS-B']:
if weight_decay > 0:
raise ValueError("L-BFGS optimizer doesn't support weight_decay > 0")
if learning_rate is not None or decay is not None:
print('Warning: learning rate is ignored for {}'.format(optimizer))
optim = torch.optim.LBFGS(params, lr=1, max_iter=LBFGS_options['iter_per_step'], max_eval=LBFGS_options['fun_per_step'], tolerance_grad=LBFGS_options['gtol'], tolerance_change=LBFGS_options['ftol'], history_size=LBFGS_options['maxcor'], line_search_fn=None)
else:
if learning_rate is None:
raise ValueError('No learning rate for {}.'.format(optimizer))
if optimizer == 'sgd':
optim = torch.optim.SGD(params, lr=learning_rate, weight_decay=weight_decay)
elif optimizer == 'rmsprop':
optim = torch.optim.RMSprop(params, lr=learning_rate, weight_decay=weight_decay)
elif optimizer == 'adam':
optim = torch.optim.Adam(params, lr=learning_rate, weight_decay=weight_decay)
elif optimizer == 'adamw':
if weight_decay == 0:
raise ValueError('AdamW optimizer requires non-zero weight decay')
optim = torch.optim.AdamW(params, lr=learning_rate, weight_decay=weight_decay)
else:
raise NotImplementedError(f'{optimizer} to be implemented for backend pytorch.')
<DeepExtract>
    if decay is None:
        lr_scheduler = None
    elif decay[0] == 'step':
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=decay[1], gamma=decay[2])
    else:
        raise NotImplementedError(f'{decay[0]} learning rate scheduler to be implemented for backend pytorch.')
</DeepExtract>
return (optim, lr_scheduler)
|
def get(params, optimizer, learning_rate=None, decay=None, weight_decay=0):
"""Retrieves an Optimizer instance."""
if isinstance(optimizer, torch.optim.Optimizer):
optim = optimizer
elif optimizer in ['L-BFGS', 'L-BFGS-B']:
if weight_decay > 0:
raise ValueError("L-BFGS optimizer doesn't support weight_decay > 0")
if learning_rate is not None or decay is not None:
print('Warning: learning rate is ignored for {}'.format(optimizer))
optim = torch.optim.LBFGS(params, lr=1, max_iter=LBFGS_options['iter_per_step'], max_eval=LBFGS_options['fun_per_step'], tolerance_grad=LBFGS_options['gtol'], tolerance_change=LBFGS_options['ftol'], history_size=LBFGS_options['maxcor'], line_search_fn=None)
else:
if learning_rate is None:
raise ValueError('No learning rate for {}.'.format(optimizer))
if optimizer == 'sgd':
optim = torch.optim.SGD(params, lr=learning_rate, weight_decay=weight_decay)
elif optimizer == 'rmsprop':
optim = torch.optim.RMSprop(params, lr=learning_rate, weight_decay=weight_decay)
elif optimizer == 'adam':
optim = torch.optim.Adam(params, lr=learning_rate, weight_decay=weight_decay)
elif optimizer == 'adamw':
if weight_decay == 0:
raise ValueError('AdamW optimizer requires non-zero weight decay')
optim = torch.optim.AdamW(params, lr=learning_rate, weight_decay=weight_decay)
else:
raise NotImplementedError(f'{optimizer} to be implemented for backend pytorch.')
    if decay is None:
        lr_scheduler = None
    elif decay[0] == 'step':
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=decay[1], gamma=decay[2])
    else:
        raise NotImplementedError(f'{decay[0]} learning rate scheduler to be implemented for backend pytorch.')
return (optim, lr_scheduler)
|
deepxde
|
positive
|
def _send_initially(message, messageerror_monitor=None):
"""Put the message on the wire for the first time, starting retransmission timeouts"""
self.log.debug('Sending message %r', message)
if message.mtype is CON:
assert messageerror_monitor is not None, 'messageerror_monitor needs to be set for CONs'
<DeepExtract>
key = (message.remote, message.mid)
if message.remote not in self._backlogs:
self._backlogs[message.remote] = []
timeout = random.uniform(message.transport_tuning.ACK_TIMEOUT, message.transport_tuning.ACK_TIMEOUT * message.transport_tuning.ACK_RANDOM_FACTOR)
next_retransmission = self._schedule_retransmit(message, timeout, 0)
self._active_exchanges[key] = (messageerror_monitor, next_retransmission)
self.log.debug('Exchange added, message ID: %d.', message.mid)
</DeepExtract>
<DeepExtract>
key = (message.remote, message.mid)
if key in self._recent_messages:
self._recent_messages[key] = message
</DeepExtract>
<DeepExtract>
self.message_interface.send(message)
</DeepExtract>
|
def _send_initially(message, messageerror_monitor=None):
"""Put the message on the wire for the first time, starting retransmission timeouts"""
self.log.debug('Sending message %r', message)
if message.mtype is CON:
assert messageerror_monitor is not None, 'messageerror_monitor needs to be set for CONs'
key = (message.remote, message.mid)
if message.remote not in self._backlogs:
self._backlogs[message.remote] = []
timeout = random.uniform(message.transport_tuning.ACK_TIMEOUT, message.transport_tuning.ACK_TIMEOUT * message.transport_tuning.ACK_RANDOM_FACTOR)
next_retransmission = self._schedule_retransmit(message, timeout, 0)
self._active_exchanges[key] = (messageerror_monitor, next_retransmission)
self.log.debug('Exchange added, message ID: %d.', message.mid)
key = (message.remote, message.mid)
if key in self._recent_messages:
self._recent_messages[key] = message
self.message_interface.send(message)
|
aiocoap
|
positive
|
def validate(self):
url_config = self._meta.get('url_config')
if url_config:
<DeepExtract>
if 'auth_type' not in url_config:
self._error("'auth_type' is required in lambda's url config")
auth_types = [IAM_AUTH_TYPE, NONE_AUTH_TYPE]
if url_config['auth_type'] not in auth_types:
self._error(f"'auth_type' must be equal to one of these: {', '.join(auth_types)}")
cors = url_config.get('cors')
if cors and (not isinstance(cors, dict)):
self._error("'cors' parameter in lambda config must be a dict")
if cors:
allowed_parameters = {'allow_credentials', 'allow_headers', 'allow_methods', 'allow_origins', 'expose_headers', 'max_age'}
impostors = set(cors.keys()) - allowed_parameters
if impostors:
self._error(f'Only these parameters are allowed: {allowed_parameters}')
</DeepExtract>
ephemeral_storage = self._meta.get('ephemeral_storage')
if ephemeral_storage:
<DeepExtract>
if not isinstance(ephemeral_storage, int):
self._error(f"Ephemeral storage size must an integer but not '{type(ephemeral_storage).__name__}'")
if not 512 <= ephemeral_storage <= 10240:
self._error('Ephemeral storage size must be between 512 and 10240 MB')
</DeepExtract>
|
def validate(self):
url_config = self._meta.get('url_config')
if url_config:
if 'auth_type' not in url_config:
self._error("'auth_type' is required in lambda's url config")
auth_types = [IAM_AUTH_TYPE, NONE_AUTH_TYPE]
if url_config['auth_type'] not in auth_types:
self._error(f"'auth_type' must be equal to one of these: {', '.join(auth_types)}")
cors = url_config.get('cors')
if cors and (not isinstance(cors, dict)):
self._error("'cors' parameter in lambda config must be a dict")
if cors:
allowed_parameters = {'allow_credentials', 'allow_headers', 'allow_methods', 'allow_origins', 'expose_headers', 'max_age'}
impostors = set(cors.keys()) - allowed_parameters
if impostors:
self._error(f'Only these parameters are allowed: {allowed_parameters}')
ephemeral_storage = self._meta.get('ephemeral_storage')
if ephemeral_storage:
if not isinstance(ephemeral_storage, int):
self._error(f"Ephemeral storage size must an integer but not '{type(ephemeral_storage).__name__}'")
if not 512 <= ephemeral_storage <= 10240:
self._error('Ephemeral storage size must be between 512 and 10240 MB')
|
aws-syndicate
|
positive
|
def attach_subscription():
"""Attach a specific subscription to the registered OS. If no
subscription ID has been provided through command line, let the user
interactively choose one.
"""
if tool_opts.activation_key:
loggerinst.info('Using the activation key provided through the command line...')
return True
pool = ['subscription-manager', 'attach']
if tool_opts.auto_attach:
pool.append('--auto')
loggerinst.info('Auto-attaching compatible subscriptions to the system ...')
elif tool_opts.pool:
pool.extend(['--pool', tool_opts.pool])
loggerinst.info('Attaching provided subscription pool ID to the system ...')
else:
<DeepExtract>
releaver_created = False
if system_info.version.major >= 8:
if not os.path.exists(DNF_RELEASEVER_FILE):
with open(DNF_RELEASEVER_FILE, 'w') as handler:
handler.write(system_info.original_releasever)
releaver_created = True
(subs_raw, ret_code) = utils.run_subprocess(['subscription-manager', 'list', '--available'], print_output=False)
if releaver_created:
os.remove(DNF_RELEASEVER_FILE)
if ret_code != 0:
loggerinst.critical('Unable to get list of available subscriptions:\n%s' % subs_raw)
subs_list = list(get_sub(subs_raw))
</DeepExtract>
if len(subs_list) == 0:
loggerinst.warning('No subscription available for the conversion.')
return False
elif len(subs_list) == 1:
sub_num = 0
loggerinst.info(' %s is the only subscription available, it will automatically be selected for the conversion.' % subs_list[0].pool_id)
else:
loggerinst.info('Manually select subscription appropriate for the conversion')
<DeepExtract>
loggerinst.info('Choose one of your subscriptions that is to be used for converting this system to RHEL:')
for (index, sub) in enumerate(subs_list):
index += 1
loggerinst.info('\n======= Subscription number %d =======\n\n%s' % (index, sub.sub_raw))
</DeepExtract>
sub_num = utils.let_user_choose_item(len(subs_list), 'subscription')
loggerinst.info('Attaching subscription with pool ID %s to the system ...' % subs_list[sub_num].pool_id)
pool.extend(['--pool', subs_list[sub_num].pool_id])
(_, ret_code) = utils.run_subprocess(pool)
if ret_code != 0:
loggerinst.critical('Unsuccessful attachment of a subscription.')
return True
|
def attach_subscription():
"""Attach a specific subscription to the registered OS. If no
subscription ID has been provided through command line, let the user
interactively choose one.
"""
if tool_opts.activation_key:
loggerinst.info('Using the activation key provided through the command line...')
return True
pool = ['subscription-manager', 'attach']
if tool_opts.auto_attach:
pool.append('--auto')
loggerinst.info('Auto-attaching compatible subscriptions to the system ...')
elif tool_opts.pool:
pool.extend(['--pool', tool_opts.pool])
loggerinst.info('Attaching provided subscription pool ID to the system ...')
else:
releaver_created = False
if system_info.version.major >= 8:
if not os.path.exists(DNF_RELEASEVER_FILE):
with open(DNF_RELEASEVER_FILE, 'w') as handler:
handler.write(system_info.original_releasever)
releaver_created = True
(subs_raw, ret_code) = utils.run_subprocess(['subscription-manager', 'list', '--available'], print_output=False)
if releaver_created:
os.remove(DNF_RELEASEVER_FILE)
if ret_code != 0:
loggerinst.critical('Unable to get list of available subscriptions:\n%s' % subs_raw)
subs_list = list(get_sub(subs_raw))
if len(subs_list) == 0:
loggerinst.warning('No subscription available for the conversion.')
return False
elif len(subs_list) == 1:
sub_num = 0
loggerinst.info(' %s is the only subscription available, it will automatically be selected for the conversion.' % subs_list[0].pool_id)
else:
loggerinst.info('Manually select subscription appropriate for the conversion')
loggerinst.info('Choose one of your subscriptions that is to be used for converting this system to RHEL:')
for (index, sub) in enumerate(subs_list):
index += 1
loggerinst.info('\n======= Subscription number %d =======\n\n%s' % (index, sub.sub_raw))
sub_num = utils.let_user_choose_item(len(subs_list), 'subscription')
loggerinst.info('Attaching subscription with pool ID %s to the system ...' % subs_list[sub_num].pool_id)
pool.extend(['--pool', subs_list[sub_num].pool_id])
(_, ret_code) = utils.run_subprocess(pool)
if ret_code != 0:
loggerinst.critical('Unsuccessful attachment of a subscription.')
return True
|
convert2rhel
|
positive
|
def test_tokenize_roundtrip(self):
def T(serialized_script, expected_tokens, test_roundtrip=True):
serialized_script = x(serialized_script)
script_obj = CScript(serialized_script)
actual_tokens = list(script_obj)
self.assertEqual(actual_tokens, expected_tokens)
if test_roundtrip:
recreated_script = CScript(actual_tokens)
self.assertEqual(recreated_script, serialized_script)
<DeepExtract>
'' = x('')
[] = x([])
serialized_data = CScriptOp.encode_op_pushdata('')
self.assertEqual(serialized_data, [])
</DeepExtract>
<DeepExtract>
'00' = x('00')
[b''] = x([b''])
serialized_data = CScriptOp.encode_op_pushdata('00')
self.assertEqual(serialized_data, [b''])
</DeepExtract>
<DeepExtract>
'0100' = x('0100')
[b'\x00'] = x([b'\x00'])
serialized_data = CScriptOp.encode_op_pushdata('0100')
self.assertEqual(serialized_data, [b'\x00'])
</DeepExtract>
<DeepExtract>
'4b' + 'ff' * 75 = x('4b' + 'ff' * 75)
[b'\xff' * 75] = x([b'\xff' * 75])
serialized_data = CScriptOp.encode_op_pushdata('4b' + 'ff' * 75)
self.assertEqual(serialized_data, [b'\xff' * 75])
</DeepExtract>
<DeepExtract>
'4c00' = x('4c00')
[b''] = x([b''])
serialized_data = CScriptOp.encode_op_pushdata('4c00')
self.assertEqual(serialized_data, [b''])
</DeepExtract>
<DeepExtract>
'4c04deadbeef' = x('4c04deadbeef')
[x('deadbeef')] = x([x('deadbeef')])
serialized_data = CScriptOp.encode_op_pushdata('4c04deadbeef')
self.assertEqual(serialized_data, [x('deadbeef')])
</DeepExtract>
<DeepExtract>
'4d0000' = x('4d0000')
[b''] = x([b''])
serialized_data = CScriptOp.encode_op_pushdata('4d0000')
self.assertEqual(serialized_data, [b''])
</DeepExtract>
<DeepExtract>
'4d0400deadbeef' = x('4d0400deadbeef')
[x('deadbeef')] = x([x('deadbeef')])
serialized_data = CScriptOp.encode_op_pushdata('4d0400deadbeef')
self.assertEqual(serialized_data, [x('deadbeef')])
</DeepExtract>
<DeepExtract>
'4e00000000' = x('4e00000000')
[b''] = x([b''])
serialized_data = CScriptOp.encode_op_pushdata('4e00000000')
self.assertEqual(serialized_data, [b''])
</DeepExtract>
<DeepExtract>
'4e04000000deadbeef' = x('4e04000000deadbeef')
[x('deadbeef')] = x([x('deadbeef')])
serialized_data = CScriptOp.encode_op_pushdata('4e04000000deadbeef')
self.assertEqual(serialized_data, [x('deadbeef')])
</DeepExtract>
<DeepExtract>
'4f' = x('4f')
[OP_1NEGATE] = x([OP_1NEGATE])
serialized_data = CScriptOp.encode_op_pushdata('4f')
self.assertEqual(serialized_data, [OP_1NEGATE])
</DeepExtract>
<DeepExtract>
'51' = x('51')
[1] = x([1])
serialized_data = CScriptOp.encode_op_pushdata('51')
self.assertEqual(serialized_data, [1])
</DeepExtract>
<DeepExtract>
'52' = x('52')
[2] = x([2])
serialized_data = CScriptOp.encode_op_pushdata('52')
self.assertEqual(serialized_data, [2])
</DeepExtract>
<DeepExtract>
'53' = x('53')
[3] = x([3])
serialized_data = CScriptOp.encode_op_pushdata('53')
self.assertEqual(serialized_data, [3])
</DeepExtract>
<DeepExtract>
'54' = x('54')
[4] = x([4])
serialized_data = CScriptOp.encode_op_pushdata('54')
self.assertEqual(serialized_data, [4])
</DeepExtract>
<DeepExtract>
'55' = x('55')
[5] = x([5])
serialized_data = CScriptOp.encode_op_pushdata('55')
self.assertEqual(serialized_data, [5])
</DeepExtract>
<DeepExtract>
'56' = x('56')
[6] = x([6])
serialized_data = CScriptOp.encode_op_pushdata('56')
self.assertEqual(serialized_data, [6])
</DeepExtract>
<DeepExtract>
'57' = x('57')
[7] = x([7])
serialized_data = CScriptOp.encode_op_pushdata('57')
self.assertEqual(serialized_data, [7])
</DeepExtract>
<DeepExtract>
'58' = x('58')
[8] = x([8])
serialized_data = CScriptOp.encode_op_pushdata('58')
self.assertEqual(serialized_data, [8])
</DeepExtract>
<DeepExtract>
'59' = x('59')
[9] = x([9])
serialized_data = CScriptOp.encode_op_pushdata('59')
self.assertEqual(serialized_data, [9])
</DeepExtract>
<DeepExtract>
'5a' = x('5a')
[10] = x([10])
serialized_data = CScriptOp.encode_op_pushdata('5a')
self.assertEqual(serialized_data, [10])
</DeepExtract>
<DeepExtract>
'5b' = x('5b')
[11] = x([11])
serialized_data = CScriptOp.encode_op_pushdata('5b')
self.assertEqual(serialized_data, [11])
</DeepExtract>
<DeepExtract>
'5c' = x('5c')
[12] = x([12])
serialized_data = CScriptOp.encode_op_pushdata('5c')
self.assertEqual(serialized_data, [12])
</DeepExtract>
<DeepExtract>
'5d' = x('5d')
[13] = x([13])
serialized_data = CScriptOp.encode_op_pushdata('5d')
self.assertEqual(serialized_data, [13])
</DeepExtract>
<DeepExtract>
'5e' = x('5e')
[14] = x([14])
serialized_data = CScriptOp.encode_op_pushdata('5e')
self.assertEqual(serialized_data, [14])
</DeepExtract>
<DeepExtract>
'5f' = x('5f')
[15] = x([15])
serialized_data = CScriptOp.encode_op_pushdata('5f')
self.assertEqual(serialized_data, [15])
</DeepExtract>
<DeepExtract>
'9b' = x('9b')
[OP_BOOLOR] = x([OP_BOOLOR])
serialized_data = CScriptOp.encode_op_pushdata('9b')
self.assertEqual(serialized_data, [OP_BOOLOR])
</DeepExtract>
<DeepExtract>
'9a9b' = x('9a9b')
[OP_BOOLAND, OP_BOOLOR] = x([OP_BOOLAND, OP_BOOLOR])
serialized_data = CScriptOp.encode_op_pushdata('9a9b')
self.assertEqual(serialized_data, [OP_BOOLAND, OP_BOOLOR])
</DeepExtract>
<DeepExtract>
'ff' = x('ff')
[OP_INVALIDOPCODE] = x([OP_INVALIDOPCODE])
serialized_data = CScriptOp.encode_op_pushdata('ff')
self.assertEqual(serialized_data, [OP_INVALIDOPCODE])
</DeepExtract>
<DeepExtract>
'fafbfcfd' = x('fafbfcfd')
[CScriptOp(250), CScriptOp(251), CScriptOp(252), CScriptOp(253)] = x([CScriptOp(250), CScriptOp(251), CScriptOp(252), CScriptOp(253)])
serialized_data = CScriptOp.encode_op_pushdata('fafbfcfd')
self.assertEqual(serialized_data, [CScriptOp(250), CScriptOp(251), CScriptOp(252), CScriptOp(253)])
</DeepExtract>
<DeepExtract>
'512103e2a0e6a91fa985ce4dda7f048fca5ec8264292aed9290594321aa53d37fdea32410478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc345552ae' = x('512103e2a0e6a91fa985ce4dda7f048fca5ec8264292aed9290594321aa53d37fdea32410478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc345552ae')
[1, x('03e2a0e6a91fa985ce4dda7f048fca5ec8264292aed9290594321aa53d37fdea32'), x('0478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc3455'), 2, OP_CHECKMULTISIG] = x([1, x('03e2a0e6a91fa985ce4dda7f048fca5ec8264292aed9290594321aa53d37fdea32'), x('0478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc3455'), 2, OP_CHECKMULTISIG])
serialized_data = CScriptOp.encode_op_pushdata('512103e2a0e6a91fa985ce4dda7f048fca5ec8264292aed9290594321aa53d37fdea32410478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc345552ae')
self.assertEqual(serialized_data, [1, x('03e2a0e6a91fa985ce4dda7f048fca5ec8264292aed9290594321aa53d37fdea32'), x('0478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc3455'), 2, OP_CHECKMULTISIG])
</DeepExtract>
|
def test_tokenize_roundtrip(self):
def T(serialized_script, expected_tokens, test_roundtrip=True):
serialized_script = x(serialized_script)
script_obj = CScript(serialized_script)
actual_tokens = list(script_obj)
self.assertEqual(actual_tokens, expected_tokens)
if test_roundtrip:
recreated_script = CScript(actual_tokens)
self.assertEqual(recreated_script, serialized_script)
'' = x('')
[] = x([])
serialized_data = CScriptOp.encode_op_pushdata('')
self.assertEqual(serialized_data, [])
'00' = x('00')
[b''] = x([b''])
serialized_data = CScriptOp.encode_op_pushdata('00')
self.assertEqual(serialized_data, [b''])
'0100' = x('0100')
[b'\x00'] = x([b'\x00'])
serialized_data = CScriptOp.encode_op_pushdata('0100')
self.assertEqual(serialized_data, [b'\x00'])
'4b' + 'ff' * 75 = x('4b' + 'ff' * 75)
[b'\xff' * 75] = x([b'\xff' * 75])
serialized_data = CScriptOp.encode_op_pushdata('4b' + 'ff' * 75)
self.assertEqual(serialized_data, [b'\xff' * 75])
'4c00' = x('4c00')
[b''] = x([b''])
serialized_data = CScriptOp.encode_op_pushdata('4c00')
self.assertEqual(serialized_data, [b''])
'4c04deadbeef' = x('4c04deadbeef')
[x('deadbeef')] = x([x('deadbeef')])
serialized_data = CScriptOp.encode_op_pushdata('4c04deadbeef')
self.assertEqual(serialized_data, [x('deadbeef')])
'4d0000' = x('4d0000')
[b''] = x([b''])
serialized_data = CScriptOp.encode_op_pushdata('4d0000')
self.assertEqual(serialized_data, [b''])
'4d0400deadbeef' = x('4d0400deadbeef')
[x('deadbeef')] = x([x('deadbeef')])
serialized_data = CScriptOp.encode_op_pushdata('4d0400deadbeef')
self.assertEqual(serialized_data, [x('deadbeef')])
'4e00000000' = x('4e00000000')
[b''] = x([b''])
serialized_data = CScriptOp.encode_op_pushdata('4e00000000')
self.assertEqual(serialized_data, [b''])
'4e04000000deadbeef' = x('4e04000000deadbeef')
[x('deadbeef')] = x([x('deadbeef')])
serialized_data = CScriptOp.encode_op_pushdata('4e04000000deadbeef')
self.assertEqual(serialized_data, [x('deadbeef')])
'4f' = x('4f')
[OP_1NEGATE] = x([OP_1NEGATE])
serialized_data = CScriptOp.encode_op_pushdata('4f')
self.assertEqual(serialized_data, [OP_1NEGATE])
'51' = x('51')
[1] = x([1])
serialized_data = CScriptOp.encode_op_pushdata('51')
self.assertEqual(serialized_data, [1])
'52' = x('52')
[2] = x([2])
serialized_data = CScriptOp.encode_op_pushdata('52')
self.assertEqual(serialized_data, [2])
'53' = x('53')
[3] = x([3])
serialized_data = CScriptOp.encode_op_pushdata('53')
self.assertEqual(serialized_data, [3])
'54' = x('54')
[4] = x([4])
serialized_data = CScriptOp.encode_op_pushdata('54')
self.assertEqual(serialized_data, [4])
'55' = x('55')
[5] = x([5])
serialized_data = CScriptOp.encode_op_pushdata('55')
self.assertEqual(serialized_data, [5])
'56' = x('56')
[6] = x([6])
serialized_data = CScriptOp.encode_op_pushdata('56')
self.assertEqual(serialized_data, [6])
'57' = x('57')
[7] = x([7])
serialized_data = CScriptOp.encode_op_pushdata('57')
self.assertEqual(serialized_data, [7])
'58' = x('58')
[8] = x([8])
serialized_data = CScriptOp.encode_op_pushdata('58')
self.assertEqual(serialized_data, [8])
'59' = x('59')
[9] = x([9])
serialized_data = CScriptOp.encode_op_pushdata('59')
self.assertEqual(serialized_data, [9])
'5a' = x('5a')
[10] = x([10])
serialized_data = CScriptOp.encode_op_pushdata('5a')
self.assertEqual(serialized_data, [10])
'5b' = x('5b')
[11] = x([11])
serialized_data = CScriptOp.encode_op_pushdata('5b')
self.assertEqual(serialized_data, [11])
'5c' = x('5c')
[12] = x([12])
serialized_data = CScriptOp.encode_op_pushdata('5c')
self.assertEqual(serialized_data, [12])
'5d' = x('5d')
[13] = x([13])
serialized_data = CScriptOp.encode_op_pushdata('5d')
self.assertEqual(serialized_data, [13])
'5e' = x('5e')
[14] = x([14])
serialized_data = CScriptOp.encode_op_pushdata('5e')
self.assertEqual(serialized_data, [14])
'5f' = x('5f')
[15] = x([15])
serialized_data = CScriptOp.encode_op_pushdata('5f')
self.assertEqual(serialized_data, [15])
'9b' = x('9b')
[OP_BOOLOR] = x([OP_BOOLOR])
serialized_data = CScriptOp.encode_op_pushdata('9b')
self.assertEqual(serialized_data, [OP_BOOLOR])
'9a9b' = x('9a9b')
[OP_BOOLAND, OP_BOOLOR] = x([OP_BOOLAND, OP_BOOLOR])
serialized_data = CScriptOp.encode_op_pushdata('9a9b')
self.assertEqual(serialized_data, [OP_BOOLAND, OP_BOOLOR])
'ff' = x('ff')
[OP_INVALIDOPCODE] = x([OP_INVALIDOPCODE])
serialized_data = CScriptOp.encode_op_pushdata('ff')
self.assertEqual(serialized_data, [OP_INVALIDOPCODE])
'fafbfcfd' = x('fafbfcfd')
[CScriptOp(250), CScriptOp(251), CScriptOp(252), CScriptOp(253)] = x([CScriptOp(250), CScriptOp(251), CScriptOp(252), CScriptOp(253)])
serialized_data = CScriptOp.encode_op_pushdata('fafbfcfd')
self.assertEqual(serialized_data, [CScriptOp(250), CScriptOp(251), CScriptOp(252), CScriptOp(253)])
'512103e2a0e6a91fa985ce4dda7f048fca5ec8264292aed9290594321aa53d37fdea32410478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc345552ae' = x('512103e2a0e6a91fa985ce4dda7f048fca5ec8264292aed9290594321aa53d37fdea32410478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc345552ae')
[1, x('03e2a0e6a91fa985ce4dda7f048fca5ec8264292aed9290594321aa53d37fdea32'), x('0478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc3455'), 2, OP_CHECKMULTISIG] = x([1, x('03e2a0e6a91fa985ce4dda7f048fca5ec8264292aed9290594321aa53d37fdea32'), x('0478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc3455'), 2, OP_CHECKMULTISIG])
serialized_data = CScriptOp.encode_op_pushdata('512103e2a0e6a91fa985ce4dda7f048fca5ec8264292aed9290594321aa53d37fdea32410478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc345552ae')
self.assertEqual(serialized_data, [1, x('03e2a0e6a91fa985ce4dda7f048fca5ec8264292aed9290594321aa53d37fdea32'), x('0478d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71a1518063243acd4dfe96b66e3f2ec8013c8e072cd09b3834a19f81f659cc3455'), 2, OP_CHECKMULTISIG])
|
checklocktimeverify-demos
|
positive
|
def __init__(self, parent_widget, obj, allow_obj_sel, allow_face_sel, allow_solid_sel, allow_point_sel=False, allow_edge_sel=False):
ui_path = os.path.join(CfdTools.getModulePath(), 'Gui', 'TaskPanelCfdListOfFaces.ui')
self.parent_widget = parent_widget
self.form = FreeCADGui.PySideUic.loadUi(ui_path, self.parent_widget)
self.parent_widget.layout().addWidget(self.form)
self.selecting_references = False
self.recompute_timer = QTimer()
self.recompute_timer.setSingleShot(True)
self.recompute_timer.timeout.connect(self.recomputeDocument)
self.obj = obj
self.ShapeRefs = self.obj.ShapeRefs
self.doc_name = self.obj.Document.Name
self.view_object = self.obj.ViewObject
self.allow_obj_sel = allow_obj_sel
self.allow_face_sel = allow_face_sel
self.allow_solid_sel = allow_solid_sel
self.allow_point_sel = allow_point_sel
self.allow_edge_sel = allow_edge_sel
self.selection_mode_solid = not allow_face_sel and allow_solid_sel
sel_list = []
sel_rb_list = []
if allow_face_sel:
sel_list.append('faces')
sel_rb_list.append('Face')
if allow_edge_sel:
sel_list.append('edges')
sel_rb_list.append('Edge')
if allow_point_sel:
sel_list.append('vertices')
sel_rb_list.append('Vertex')
sel_rb_text = ' / '.join(sel_rb_list)
sel_msg = ''
if len(sel_list) > 0:
sel_msg = sel_list[0]
if len(sel_list) > 1:
for i in range(len(sel_list) - 2):
sel_msg += ', ' + sel_list[i + 1]
sel_msg += ' and ' + sel_list[-1]
self.form.rb_standard.setText(sel_rb_text)
self.selection_mode_std_print_message = 'Select {} by single-clicking on them'.format(sel_msg)
self.selection_mode_solid_print_message = 'Select solids by single-clicking on a face or edge which belongs to the solid'
if self.allow_obj_sel:
self.selection_mode_std_print_message += ', or entire object by double-clicking on it.'
self.selection_mode_solid_print_message += ', or entire object by double-clicking on it.'
else:
self.selection_mode_std_print_message += '.'
self.selection_mode_solid_print_message += '.'
exclusive_sel = not allow_solid_sel or not (allow_face_sel or allow_edge_sel or allow_point_sel)
self.form.labelSelection.setVisible(not exclusive_sel)
self.form.rb_standard.setVisible(not exclusive_sel)
self.form.rb_solid.setVisible(not exclusive_sel)
self.form.faceSelectPushButton.setVisible(allow_obj_sel)
self.form.rb_standard.toggled.connect(self.choose_selection_mode_standard)
self.form.rb_solid.toggled.connect(self.choose_selection_mode_solid)
self.form.listReferences.currentRowChanged.connect(self.setReferenceListSelection)
self.form.buttonAddFace.clicked.connect(self.buttonAddFaceClicked)
self.form.buttonAddFace.setCheckable(True)
self.form.buttonRemoveFace.clicked.connect(self.buttonRemoveFaceClicked)
self.form.individualFacesFrame.setVisible(not allow_obj_sel)
self.form.faceSelectPushButton.setChecked(not allow_obj_sel)
self.shapeNames = []
self.shapeLabels = []
for i in FreeCADGui.ActiveDocument.Document.Objects:
if 'Shape' in i.PropertiesList:
if not i.Shape.isNull() and (not (hasattr(i, 'Proxy') and isinstance(i.Proxy, CfdFluidBoundary.CfdFluidBoundary))) and (not (hasattr(i, 'Proxy') and isinstance(i.Proxy, CfdMeshRefinement.CfdMeshRefinement))) and (not (hasattr(i, 'Proxy') and isinstance(i.Proxy, CfdZone.CfdZone))):
self.shapeNames.append(i.Name)
self.shapeLabels.append(i.Label)
for (i, label) in enumerate(self.shapeLabels):
item = QtGui.QListWidgetItem(label)
if allow_obj_sel:
item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
item.setCheckState(QtCore.Qt.Unchecked)
item.setData(QtCore.Qt.UserRole, i)
self.form.objectListWidget.addItem(item)
self.form.objectListWidget.itemSelectionChanged.connect(self.faceListShapeChosen)
self.form.objectListWidget.itemChanged.connect(self.objectListItemChanged)
self.form.faceSelectPushButton.toggled.connect(self.faceSelectPushButtonChanged)
self.form.faceListWidget.itemSelectionChanged.connect(self.faceHighlightChange)
self.form.faceListWidget.itemChanged.connect(self.faceListItemChanged)
self.form.selectAllButton.clicked.connect(self.selectAllButtonClicked)
self.form.selectNoneButton.clicked.connect(self.selectNoneButtonClicked)
self.form.objectListWidget.setToolTip('Choose solid objects from the list and optionally select one or more of the sub-components associated with the currently selected shape.')
self.form.tabWidget.currentChanged.connect(self.tabChanged)
<DeepExtract>
self.form.objectListWidget.itemChanged.disconnect(self.objectListItemChanged)
if self.allow_obj_sel:
for i in range(self.form.objectListWidget.count()):
listItem = self.form.objectListWidget.item(i)
listItem.setCheckState(QtCore.Qt.Unchecked)
self.form.listReferences.clear()
items = []
remove_refs = []
for ref in self.ShapeRefs:
try:
idx = self.shapeNames.index(ref[0].Name)
except ValueError:
remove_refs.append(ref)
else:
listItem = self.form.objectListWidget.item(idx)
for rr in ref[1]:
if rr:
item_label = self.shapeLabels[idx] + ':' + rr
if self.allow_obj_sel:
if listItem.checkState() == QtCore.Qt.Unchecked:
listItem.setCheckState(QtCore.Qt.PartiallyChecked)
else:
item_label = self.shapeLabels[idx]
if self.allow_obj_sel:
listItem.setCheckState(QtCore.Qt.Checked)
items.append((item_label, (ref[0], rr)))
for ref in remove_refs:
self.ShapeRefs.remove(ref)
if remove_refs:
self.scheduleRecompute()
for listItem in items:
item = QtGui.QListWidgetItem(listItem[0])
item.setData(QtCore.Qt.UserRole, listItem[1])
self.form.listReferences.addItem(item)
self.form.listReferences.setSortingEnabled(False)
self.form.objectListWidget.itemChanged.connect(self.objectListItemChanged)
</DeepExtract>
if len(self.ShapeRefs) == 0:
<DeepExtract>
for sel in FreeCADGui.Selection.getSelectionEx():
if sel.HasSubObjects:
for sub in sel.SubElementNames:
print('Adding selection {}:{}'.format(sel.ObjectName, sub))
self.addSelection(sel.DocumentName, sel.ObjectName, sub)
elif self.allow_obj_sel:
print('Adding selection {}'.format(sel.ObjectName))
self.addSelection(sel.DocumentName, sel.ObjectName, None)
self.scheduleRecompute()
</DeepExtract>
<DeepExtract>
self.recompute_timer.start()
</DeepExtract>
FreeCADGui.Selection.clearSelection()
<DeepExtract>
self.form.buttonAddFace.setChecked(self.selecting_references)
if self.selection_mode_solid:
print_message = self.selection_mode_solid_print_message
else:
print_message = self.selection_mode_std_print_message
self.form.labelHelpText.setText(print_message)
</DeepExtract>
|
def __init__(self, parent_widget, obj, allow_obj_sel, allow_face_sel, allow_solid_sel, allow_point_sel=False, allow_edge_sel=False):
ui_path = os.path.join(CfdTools.getModulePath(), 'Gui', 'TaskPanelCfdListOfFaces.ui')
self.parent_widget = parent_widget
self.form = FreeCADGui.PySideUic.loadUi(ui_path, self.parent_widget)
self.parent_widget.layout().addWidget(self.form)
self.selecting_references = False
self.recompute_timer = QTimer()
self.recompute_timer.setSingleShot(True)
self.recompute_timer.timeout.connect(self.recomputeDocument)
self.obj = obj
self.ShapeRefs = self.obj.ShapeRefs
self.doc_name = self.obj.Document.Name
self.view_object = self.obj.ViewObject
self.allow_obj_sel = allow_obj_sel
self.allow_face_sel = allow_face_sel
self.allow_solid_sel = allow_solid_sel
self.allow_point_sel = allow_point_sel
self.allow_edge_sel = allow_edge_sel
self.selection_mode_solid = not allow_face_sel and allow_solid_sel
sel_list = []
sel_rb_list = []
if allow_face_sel:
sel_list.append('faces')
sel_rb_list.append('Face')
if allow_edge_sel:
sel_list.append('edges')
sel_rb_list.append('Edge')
if allow_point_sel:
sel_list.append('vertices')
sel_rb_list.append('Vertex')
sel_rb_text = ' / '.join(sel_rb_list)
sel_msg = ''
if len(sel_list) > 0:
sel_msg = sel_list[0]
if len(sel_list) > 1:
for i in range(len(sel_list) - 2):
sel_msg += ', ' + sel_list[i + 1]
sel_msg += ' and ' + sel_list[-1]
self.form.rb_standard.setText(sel_rb_text)
self.selection_mode_std_print_message = 'Select {} by single-clicking on them'.format(sel_msg)
self.selection_mode_solid_print_message = 'Select solids by single-clicking on a face or edge which belongs to the solid'
if self.allow_obj_sel:
self.selection_mode_std_print_message += ', or entire object by double-clicking on it.'
self.selection_mode_solid_print_message += ', or entire object by double-clicking on it.'
else:
self.selection_mode_std_print_message += '.'
self.selection_mode_solid_print_message += '.'
exclusive_sel = not allow_solid_sel or not (allow_face_sel or allow_edge_sel or allow_point_sel)
self.form.labelSelection.setVisible(not exclusive_sel)
self.form.rb_standard.setVisible(not exclusive_sel)
self.form.rb_solid.setVisible(not exclusive_sel)
self.form.faceSelectPushButton.setVisible(allow_obj_sel)
self.form.rb_standard.toggled.connect(self.choose_selection_mode_standard)
self.form.rb_solid.toggled.connect(self.choose_selection_mode_solid)
self.form.listReferences.currentRowChanged.connect(self.setReferenceListSelection)
self.form.buttonAddFace.clicked.connect(self.buttonAddFaceClicked)
self.form.buttonAddFace.setCheckable(True)
self.form.buttonRemoveFace.clicked.connect(self.buttonRemoveFaceClicked)
self.form.individualFacesFrame.setVisible(not allow_obj_sel)
self.form.faceSelectPushButton.setChecked(not allow_obj_sel)
self.shapeNames = []
self.shapeLabels = []
for i in FreeCADGui.ActiveDocument.Document.Objects:
if 'Shape' in i.PropertiesList:
if not i.Shape.isNull() and (not (hasattr(i, 'Proxy') and isinstance(i.Proxy, CfdFluidBoundary.CfdFluidBoundary))) and (not (hasattr(i, 'Proxy') and isinstance(i.Proxy, CfdMeshRefinement.CfdMeshRefinement))) and (not (hasattr(i, 'Proxy') and isinstance(i.Proxy, CfdZone.CfdZone))):
self.shapeNames.append(i.Name)
self.shapeLabels.append(i.Label)
for (i, label) in enumerate(self.shapeLabels):
item = QtGui.QListWidgetItem(label)
if allow_obj_sel:
item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
item.setCheckState(QtCore.Qt.Unchecked)
item.setData(QtCore.Qt.UserRole, i)
self.form.objectListWidget.addItem(item)
self.form.objectListWidget.itemSelectionChanged.connect(self.faceListShapeChosen)
self.form.objectListWidget.itemChanged.connect(self.objectListItemChanged)
self.form.faceSelectPushButton.toggled.connect(self.faceSelectPushButtonChanged)
self.form.faceListWidget.itemSelectionChanged.connect(self.faceHighlightChange)
self.form.faceListWidget.itemChanged.connect(self.faceListItemChanged)
self.form.selectAllButton.clicked.connect(self.selectAllButtonClicked)
self.form.selectNoneButton.clicked.connect(self.selectNoneButtonClicked)
self.form.objectListWidget.setToolTip('Choose solid objects from the list and optionally select one or more of the sub-components associated with the currently selected shape.')
self.form.tabWidget.currentChanged.connect(self.tabChanged)
self.form.objectListWidget.itemChanged.disconnect(self.objectListItemChanged)
if self.allow_obj_sel:
for i in range(self.form.objectListWidget.count()):
listItem = self.form.objectListWidget.item(i)
listItem.setCheckState(QtCore.Qt.Unchecked)
self.form.listReferences.clear()
items = []
remove_refs = []
for ref in self.ShapeRefs:
try:
idx = self.shapeNames.index(ref[0].Name)
except ValueError:
remove_refs.append(ref)
else:
listItem = self.form.objectListWidget.item(idx)
for rr in ref[1]:
if rr:
item_label = self.shapeLabels[idx] + ':' + rr
if self.allow_obj_sel:
if listItem.checkState() == QtCore.Qt.Unchecked:
listItem.setCheckState(QtCore.Qt.PartiallyChecked)
else:
item_label = self.shapeLabels[idx]
if self.allow_obj_sel:
listItem.setCheckState(QtCore.Qt.Checked)
items.append((item_label, (ref[0], rr)))
for ref in remove_refs:
self.ShapeRefs.remove(ref)
if remove_refs:
self.scheduleRecompute()
for listItem in items:
item = QtGui.QListWidgetItem(listItem[0])
item.setData(QtCore.Qt.UserRole, listItem[1])
self.form.listReferences.addItem(item)
self.form.listReferences.setSortingEnabled(False)
self.form.objectListWidget.itemChanged.connect(self.objectListItemChanged)
if len(self.ShapeRefs) == 0:
for sel in FreeCADGui.Selection.getSelectionEx():
if sel.HasSubObjects:
for sub in sel.SubElementNames:
print('Adding selection {}:{}'.format(sel.ObjectName, sub))
self.addSelection(sel.DocumentName, sel.ObjectName, sub)
elif self.allow_obj_sel:
print('Adding selection {}'.format(sel.ObjectName))
self.addSelection(sel.DocumentName, sel.ObjectName, None)
self.scheduleRecompute()
self.recompute_timer.start()
FreeCADGui.Selection.clearSelection()
self.form.buttonAddFace.setChecked(self.selecting_references)
if self.selection_mode_solid:
print_message = self.selection_mode_solid_print_message
else:
print_message = self.selection_mode_std_print_message
self.form.labelHelpText.setText(print_message)
|
CfdOF
|
positive
|
def forward(self, enc_pad: th.Tensor, enc_len: Optional[th.Tensor], dec_prev: th.Tensor, ali_prev: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:
"""
Args
enc_pad: N x Ti x D_enc
enc_len: N
dec_prev: N x D_dec
ali_prev: N x Ti (do not use here)
Return
ali: N x H x Ti
ctx: N x D_enc
"""
(N, T, _) = enc_pad.shape
if self.enc_part is None:
ep = self.enc_proj(enc_pad)
ep = ep.view(N, T, self.att_head, self.att_dim)
self.enc_part = ep.transpose(1, 2)
kp = self.key_proj(enc_pad)
kp = kp.transpose(1, 2)
self.key_part = kp.view(N, self.att_head, self.att_dim, T)
if enc_len is not None:
self.pad_mask = padding_mask(enc_len)[:, None]
dec_part = self.dec_proj(dec_prev)
dec_part = dec_part.view(-1, self.att_head, self.att_dim)
sum_part = th.tanh(self.key_part + dec_part[..., None])
score = self.w(sum_part.view(N, -1, T))
<DeepExtract>
if enc_len is None:
ali = tf.softmax(score, dim=-1)
else:
if self.pad_mask is None:
raise RuntimeError('Attention: pad_mask should not be None when enc_len is not None')
score = score.masked_fill(self.pad_mask, NEG_INF)
ali = tf.softmax(score, dim=-1)
</DeepExtract>
ctx = th.sum(ali[..., None] * self.enc_part, -2)
ctx = self.ctx_proj(ctx.view(N, -1))
return (ali, ctx)
|
def forward(self, enc_pad: th.Tensor, enc_len: Optional[th.Tensor], dec_prev: th.Tensor, ali_prev: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:
"""
Args
enc_pad: N x Ti x D_enc
enc_len: N
dec_prev: N x D_dec
ali_prev: N x Ti (do not use here)
Return
ali: N x H x Ti
ctx: N x D_enc
"""
(N, T, _) = enc_pad.shape
if self.enc_part is None:
ep = self.enc_proj(enc_pad)
ep = ep.view(N, T, self.att_head, self.att_dim)
self.enc_part = ep.transpose(1, 2)
kp = self.key_proj(enc_pad)
kp = kp.transpose(1, 2)
self.key_part = kp.view(N, self.att_head, self.att_dim, T)
if enc_len is not None:
self.pad_mask = padding_mask(enc_len)[:, None]
dec_part = self.dec_proj(dec_prev)
dec_part = dec_part.view(-1, self.att_head, self.att_dim)
sum_part = th.tanh(self.key_part + dec_part[..., None])
score = self.w(sum_part.view(N, -1, T))
if enc_len is None:
ali = tf.softmax(score, dim=-1)
else:
if self.pad_mask is None:
raise RuntimeError('Attention: pad_mask should not be None when enc_len is not None')
score = score.masked_fill(self.pad_mask, NEG_INF)
ali = tf.softmax(score, dim=-1)
ctx = th.sum(ali[..., None] * self.enc_part, -2)
ctx = self.ctx_proj(ctx.view(N, -1))
return (ali, ctx)
|
aps
|
positive
|
def conv1d(inputs, num_output_channels, kernel_size, scope, stride=1, padding='SAME', data_format='NHWC', use_xavier=True, stddev=0.001, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None):
""" 1D convolution with non-linear operation.
Args:
inputs: 3-D tensor variable BxLxC
num_output_channels: int
kernel_size: int
scope: string
stride: int
padding: 'SAME' or 'VALID'
data_format: 'NHWC' or 'NCHW'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
assert data_format == 'NHWC' or data_format == 'NCHW'
if data_format == 'NHWC':
num_in_channels = inputs.get_shape()[-1].value
elif data_format == 'NCHW':
num_in_channels = inputs.get_shape()[1].value
kernel_shape = [kernel_size, num_in_channels, num_output_channels]
<DeepExtract>
if use_xavier:
initializer = tf.contrib.layers.xavier_initializer()
else:
initializer = tf.truncated_normal_initializer(stddev=stddev)
var = _variable_on_cpu('weights', kernel_shape, initializer)
if weight_decay is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), weight_decay, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
kernel = var
</DeepExtract>
outputs = tf.nn.conv1d(inputs, kernel, stride=stride, padding=padding, data_format=data_format)
<DeepExtract>
with tf.device('/cpu:0'):
dtype = tf.float16 if use_fp16 else tf.float32
var = tf.get_variable('biases', [num_output_channels], initializer=tf.constant_initializer(0.0), dtype=dtype)
biases = var
</DeepExtract>
outputs = tf.nn.bias_add(outputs, biases, data_format=data_format)
if bn:
<DeepExtract>
outputs = batch_norm_template(outputs, is_training, 'bn', [0, 1], bn_decay, data_format)
</DeepExtract>
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
|
def conv1d(inputs, num_output_channels, kernel_size, scope, stride=1, padding='SAME', data_format='NHWC', use_xavier=True, stddev=0.001, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None):
""" 1D convolution with non-linear operation.
Args:
inputs: 3-D tensor variable BxLxC
num_output_channels: int
kernel_size: int
scope: string
stride: int
padding: 'SAME' or 'VALID'
data_format: 'NHWC' or 'NCHW'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
assert data_format == 'NHWC' or data_format == 'NCHW'
if data_format == 'NHWC':
num_in_channels = inputs.get_shape()[-1].value
elif data_format == 'NCHW':
num_in_channels = inputs.get_shape()[1].value
kernel_shape = [kernel_size, num_in_channels, num_output_channels]
if use_xavier:
initializer = tf.contrib.layers.xavier_initializer()
else:
initializer = tf.truncated_normal_initializer(stddev=stddev)
var = _variable_on_cpu('weights', kernel_shape, initializer)
if weight_decay is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), weight_decay, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
kernel = var
outputs = tf.nn.conv1d(inputs, kernel, stride=stride, padding=padding, data_format=data_format)
with tf.device('/cpu:0'):
dtype = tf.float16 if use_fp16 else tf.float32
var = tf.get_variable('biases', [num_output_channels], initializer=tf.constant_initializer(0.0), dtype=dtype)
biases = var
outputs = tf.nn.bias_add(outputs, biases, data_format=data_format)
if bn:
outputs = batch_norm_template(outputs, is_training, 'bn', [0, 1], bn_decay, data_format)
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
|
aiimooc_lesson
|
positive
|
def __getitem__(self, index):
file_name = self.filenames[index]
whole_image_path = os.path.join(self.split_dir, file_name)
whole_image = Image.open(whole_image_path)
image_array = np.asarray(whole_image)
label_array = image_array[:, :256, :]
pixel_array = image_array[:, 256:, :]
label_img = Image.fromarray(label_array)
pix_img = Image.fromarray(pixel_array)
<DeepExtract>
params = {}
params['flip'] = np.random.rand(1)[0] > 0.5
if not self.config.NO_CROP:
(diff_length, diff_width) = (self.config.LOAD_SIZE - self.config.IMSIZE, self.config.IM_RATIO * self.config.LOAD_SIZE - self.config.IM_RATIO * self.config.IMSIZE)
pos_x = np.random.randint(0, diff_width)
pos_y = np.random.randint(0, diff_length)
params['crop_pos'] = (pos_x, pos_y)
params['load_size'] = self.config.LOAD_SIZE
params['img_size'] = self.config.IMSIZE
trans_params = params
</DeepExtract>
<DeepExtract>
if cfg.NO_FLIP:
trans_params['flip'] = False
if self.split == 'val':
trans_list = [transforms.Scale([trans_params['img_size'], cfg.IM_RATIO * trans_params['img_size']], Image.BILINEAR), transforms.ToTensor()]
elif cfg.NO_CROP:
trans_list = [transforms.Scale([trans_params['img_size'], cfg.IM_RATIO * trans_params['img_size']], Image.BILINEAR), transforms.Lambda(lambda img: __flip(img, trans_params['flip'])), transforms.ToTensor()]
else:
trans_list = [transforms.Scale([trans_params['load_size'], cfg.IM_RATIO * trans_params['load_size']], Image.BILINEAR), transforms.Lambda(lambda img: __flip(img, trans_params['flip'])), transforms.Lambda(lambda img: __crop(img, trans_params['crop_pos'], trans_params['img_size'])), transforms.ToTensor()]
if True:
trans_list.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
trans = transforms.Compose(trans_list)
img_transform = trans
</DeepExtract>
img_tensor = img_transform(pix_img)
label_map_tensor = img_transform(label_img)
return (img_tensor, label_map_tensor, torch.FloatTensor([0]), file_name)
|
def __getitem__(self, index):
file_name = self.filenames[index]
whole_image_path = os.path.join(self.split_dir, file_name)
whole_image = Image.open(whole_image_path)
image_array = np.asarray(whole_image)
label_array = image_array[:, :256, :]
pixel_array = image_array[:, 256:, :]
label_img = Image.fromarray(label_array)
pix_img = Image.fromarray(pixel_array)
params = {}
params['flip'] = np.random.rand(1)[0] > 0.5
if not self.config.NO_CROP:
(diff_length, diff_width) = (self.config.LOAD_SIZE - self.config.IMSIZE, self.config.IM_RATIO * self.config.LOAD_SIZE - self.config.IM_RATIO * self.config.IMSIZE)
pos_x = np.random.randint(0, diff_width)
pos_y = np.random.randint(0, diff_length)
params['crop_pos'] = (pos_x, pos_y)
params['load_size'] = self.config.LOAD_SIZE
params['img_size'] = self.config.IMSIZE
trans_params = params
if cfg.NO_FLIP:
trans_params['flip'] = False
if self.split == 'val':
trans_list = [transforms.Scale([trans_params['img_size'], cfg.IM_RATIO * trans_params['img_size']], Image.BILINEAR), transforms.ToTensor()]
elif cfg.NO_CROP:
trans_list = [transforms.Scale([trans_params['img_size'], cfg.IM_RATIO * trans_params['img_size']], Image.BILINEAR), transforms.Lambda(lambda img: __flip(img, trans_params['flip'])), transforms.ToTensor()]
else:
trans_list = [transforms.Scale([trans_params['load_size'], cfg.IM_RATIO * trans_params['load_size']], Image.BILINEAR), transforms.Lambda(lambda img: __flip(img, trans_params['flip'])), transforms.Lambda(lambda img: __crop(img, trans_params['crop_pos'], trans_params['img_size'])), transforms.ToTensor()]
if True:
trans_list.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
trans = transforms.Compose(trans_list)
img_transform = trans
img_tensor = img_transform(pix_img)
label_map_tensor = img_transform(label_img)
return (img_tensor, label_map_tensor, torch.FloatTensor([0]), file_name)
|
DSGAN
|
positive
|
def test_forward_and_gradient(self):
A = np.random.randn(2, 3, 5, 7).astype(np.float32)
I = np.array([0, 1], dtype=np.int32)
<DeepExtract>
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
op = core.CreateOperator('BatchPermutation', ['X', 'I'], ['Y'])
workspace.FeedBlob('X', A)
workspace.FeedBlob('I', I)
workspace.RunOperatorOnce(op)
Y = workspace.FetchBlob('Y')
if True:
gc = gradient_checker.GradientChecker(stepsize=0.1, threshold=0.001, device_option=core.DeviceOption(caffe2_pb2.CUDA, 0))
(res, grad, grad_estimated) = gc.CheckSimple(op, [A, I], 0, [0])
self.assertTrue(res, 'Grad check failed')
Y_ref = A[I]
np.testing.assert_allclose(Y, Y_ref, rtol=1e-05, atol=1e-08)
</DeepExtract>
A = np.random.randn(2, 3, 5, 7).astype(np.float32)
I = np.array([1, 0], dtype=np.int32)
<DeepExtract>
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
op = core.CreateOperator('BatchPermutation', ['X', 'I'], ['Y'])
workspace.FeedBlob('X', A)
workspace.FeedBlob('I', I)
workspace.RunOperatorOnce(op)
Y = workspace.FetchBlob('Y')
if True:
gc = gradient_checker.GradientChecker(stepsize=0.1, threshold=0.001, device_option=core.DeviceOption(caffe2_pb2.CUDA, 0))
(res, grad, grad_estimated) = gc.CheckSimple(op, [A, I], 0, [0])
self.assertTrue(res, 'Grad check failed')
Y_ref = A[I]
np.testing.assert_allclose(Y, Y_ref, rtol=1e-05, atol=1e-08)
</DeepExtract>
A = np.random.randn(10, 3, 5, 7).astype(np.float32)
I = np.array(np.random.permutation(10), dtype=np.int32)
<DeepExtract>
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
op = core.CreateOperator('BatchPermutation', ['X', 'I'], ['Y'])
workspace.FeedBlob('X', A)
workspace.FeedBlob('I', I)
workspace.RunOperatorOnce(op)
Y = workspace.FetchBlob('Y')
if True:
gc = gradient_checker.GradientChecker(stepsize=0.1, threshold=0.001, device_option=core.DeviceOption(caffe2_pb2.CUDA, 0))
(res, grad, grad_estimated) = gc.CheckSimple(op, [A, I], 0, [0])
self.assertTrue(res, 'Grad check failed')
Y_ref = A[I]
np.testing.assert_allclose(Y, Y_ref, rtol=1e-05, atol=1e-08)
</DeepExtract>
|
def test_forward_and_gradient(self):
A = np.random.randn(2, 3, 5, 7).astype(np.float32)
I = np.array([0, 1], dtype=np.int32)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
op = core.CreateOperator('BatchPermutation', ['X', 'I'], ['Y'])
workspace.FeedBlob('X', A)
workspace.FeedBlob('I', I)
workspace.RunOperatorOnce(op)
Y = workspace.FetchBlob('Y')
if True:
gc = gradient_checker.GradientChecker(stepsize=0.1, threshold=0.001, device_option=core.DeviceOption(caffe2_pb2.CUDA, 0))
(res, grad, grad_estimated) = gc.CheckSimple(op, [A, I], 0, [0])
self.assertTrue(res, 'Grad check failed')
Y_ref = A[I]
np.testing.assert_allclose(Y, Y_ref, rtol=1e-05, atol=1e-08)
A = np.random.randn(2, 3, 5, 7).astype(np.float32)
I = np.array([1, 0], dtype=np.int32)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
op = core.CreateOperator('BatchPermutation', ['X', 'I'], ['Y'])
workspace.FeedBlob('X', A)
workspace.FeedBlob('I', I)
workspace.RunOperatorOnce(op)
Y = workspace.FetchBlob('Y')
if True:
gc = gradient_checker.GradientChecker(stepsize=0.1, threshold=0.001, device_option=core.DeviceOption(caffe2_pb2.CUDA, 0))
(res, grad, grad_estimated) = gc.CheckSimple(op, [A, I], 0, [0])
self.assertTrue(res, 'Grad check failed')
Y_ref = A[I]
np.testing.assert_allclose(Y, Y_ref, rtol=1e-05, atol=1e-08)
A = np.random.randn(10, 3, 5, 7).astype(np.float32)
I = np.array(np.random.permutation(10), dtype=np.int32)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
op = core.CreateOperator('BatchPermutation', ['X', 'I'], ['Y'])
workspace.FeedBlob('X', A)
workspace.FeedBlob('I', I)
workspace.RunOperatorOnce(op)
Y = workspace.FetchBlob('Y')
if True:
gc = gradient_checker.GradientChecker(stepsize=0.1, threshold=0.001, device_option=core.DeviceOption(caffe2_pb2.CUDA, 0))
(res, grad, grad_estimated) = gc.CheckSimple(op, [A, I], 0, [0])
self.assertTrue(res, 'Grad check failed')
Y_ref = A[I]
np.testing.assert_allclose(Y, Y_ref, rtol=1e-05, atol=1e-08)
|
Clustered-Object-Detection-in-Aerial-Image
|
positive
|
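The test above checks the BatchPermutation operator against the reference Y_ref = A[I], i.e. row i of the output is row I[i] of the input. A tiny NumPy sketch of that reference semantics (example arrays are my own, not from the test):

import numpy as np

def batch_permutation_ref(X, I):
    # Reference behaviour checked by the test: output row i equals X[I[i]].
    return X[I]

X = np.random.randn(4, 3).astype(np.float32)
I = np.array([2, 0, 3, 1], dtype=np.int32)
Y = batch_permutation_ref(X, I)
assert np.allclose(Y[0], X[2]) and np.allclose(Y[3], X[1])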
def getPage(self, pageNumber):
"""
Retrieves a page by number from this PDF file.
:param int pageNumber: The page number to retrieve
(pages begin at zero)
:return: a :class:`PageObject<pdf.PageObject>` instance.
:rtype: :class:`PageObject<pdf.PageObject>`
"""
if self.flattenedPages == None:
<DeepExtract>
inheritablePageAttributes = (NameObject('/Resources'), NameObject('/MediaBox'), NameObject('/CropBox'), NameObject('/Rotate'))
if inherit == None:
inherit = dict()
if pages == None:
self.flattenedPages = []
catalog = self.trailer['/Root'].getObject()
pages = catalog['/Pages'].getObject()
t = '/Pages'
if '/Type' in pages:
t = pages['/Type']
if t == '/Pages':
for attr in inheritablePageAttributes:
if attr in pages:
inherit[attr] = pages[attr]
for page in pages['/Kids']:
addt = {}
if isinstance(page, IndirectObject):
addt['indirectRef'] = page
self._flatten(page.getObject(), inherit, **addt)
elif t == '/Page':
for (attr, value) in list(inherit.items()):
if attr not in pages:
pages[attr] = value
pageObj = PageObject(self, indirectRef)
pageObj.update(pages)
self.flattenedPages.append(pageObj)
</DeepExtract>
return self.flattenedPages[pageNumber]
|
def getPage(self, pageNumber):
"""
Retrieves a page by number from this PDF file.
:param int pageNumber: The page number to retrieve
(pages begin at zero)
:return: a :class:`PageObject<pdf.PageObject>` instance.
:rtype: :class:`PageObject<pdf.PageObject>`
"""
if self.flattenedPages == None:
inheritablePageAttributes = (NameObject('/Resources'), NameObject('/MediaBox'), NameObject('/CropBox'), NameObject('/Rotate'))
if inherit == None:
inherit = dict()
if pages == None:
self.flattenedPages = []
catalog = self.trailer['/Root'].getObject()
pages = catalog['/Pages'].getObject()
t = '/Pages'
if '/Type' in pages:
t = pages['/Type']
if t == '/Pages':
for attr in inheritablePageAttributes:
if attr in pages:
inherit[attr] = pages[attr]
for page in pages['/Kids']:
addt = {}
if isinstance(page, IndirectObject):
addt['indirectRef'] = page
self._flatten(page.getObject(), inherit, **addt)
elif t == '/Page':
for (attr, value) in list(inherit.items()):
if attr not in pages:
pages[attr] = value
pageObj = PageObject(self, indirectRef)
pageObj.update(pages)
self.flattenedPages.append(pageObj)
return self.flattenedPages[pageNumber]
|
endesive
|
positive
|
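The getPage/_flatten pair above walks the PDF /Pages tree, pushing inheritable attributes down to each leaf page. A toy sketch of the same walk using plain dicts instead of PDF objects; all names and the tree layout here are illustrative only.

# Hypothetical stand-in for the /Pages tree walk performed by _flatten above.
def flatten(node, inherited, out):
    if node['type'] == 'pages':
        inherited = {**inherited, **node.get('attrs', {})}
        for kid in node['kids']:
            flatten(kid, inherited, out)
    else:
        # A leaf page inherits any attribute it does not define itself.
        out.append({**inherited, **node.get('attrs', {})})

pages = []
tree = {'type': 'pages', 'attrs': {'rotate': 0},
        'kids': [{'type': 'page', 'attrs': {'media': 'A4'}},
                 {'type': 'page'}]}
flatten(tree, {}, pages)
print(pages)  # both pages carry rotate=0; the first also has media='A4'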
def step(self, actions):
self.terminal = self.day >= 685
if self.terminal:
plt.plot(self.asset_memory, 'r')
plt.plot(account_growth)
plt.savefig('result_test.png')
plt.close()
print('total_reward:{}'.format(self.state[0] + sum(np.array(self.state[1:29]) * np.array(self.state[29:])) - 10000))
return (self.state, self.reward, self.terminal, {})
else:
begin_total_asset = self.state[0] + sum(np.array(self.state[1:29]) * np.array(self.state[29:]))
argsort_actions = np.argsort(actions)
sell_index = argsort_actions[:np.where(actions < 0)[0].shape[0]]
buy_index = argsort_actions[::-1][:np.where(actions > 0)[0].shape[0]]
for index in sell_index:
<DeepExtract>
if self.state[index + 29] > 0:
self.state[0] += self.state[index + 1] * min(abs(actions[index]), self.state[index + 29])
self.state[index + 29] -= min(abs(actions[index]), self.state[index + 29])
else:
pass
</DeepExtract>
for index in buy_index:
<DeepExtract>
available_amount = self.state[0] // self.state[index + 1]
self.state[0] -= self.state[index + 1] * min(available_amount, actions[index])
self.state[index + 29] += min(available_amount, actions[index])
</DeepExtract>
self.day += 1
self.data = test_daily_data[self.day]
self.state = [self.state[0]] + self.data.adjcp.values.tolist() + list(self.state[29:])
end_total_asset = self.state[0] + sum(np.array(self.state[1:29]) * np.array(self.state[29:]))
self.reward = end_total_asset - begin_total_asset
self.asset_memory.append(end_total_asset)
return (self.state, self.reward, self.terminal, {})
|
def step(self, actions):
self.terminal = self.day >= 685
if self.terminal:
plt.plot(self.asset_memory, 'r')
plt.plot(account_growth)
plt.savefig('result_test.png')
plt.close()
print('total_reward:{}'.format(self.state[0] + sum(np.array(self.state[1:29]) * np.array(self.state[29:])) - 10000))
return (self.state, self.reward, self.terminal, {})
else:
begin_total_asset = self.state[0] + sum(np.array(self.state[1:29]) * np.array(self.state[29:]))
argsort_actions = np.argsort(actions)
sell_index = argsort_actions[:np.where(actions < 0)[0].shape[0]]
buy_index = argsort_actions[::-1][:np.where(actions > 0)[0].shape[0]]
for index in sell_index:
if self.state[index + 29] > 0:
self.state[0] += self.state[index + 1] * min(abs(actions[index]), self.state[index + 29])
self.state[index + 29] -= min(abs(actions[index]), self.state[index + 29])
else:
pass
for index in buy_index:
available_amount = self.state[0] // self.state[index + 1]
self.state[0] -= self.state[index + 1] * min(available_amount, actions[index])
self.state[index + 29] += min(available_amount, actions[index])
self.day += 1
self.data = test_daily_data[self.day]
self.state = [self.state[0]] + self.data.adjcp.values.tolist() + list(self.state[29:])
end_total_asset = self.state[0] + sum(np.array(self.state[1:29]) * np.array(self.state[29:]))
self.reward = end_total_asset - begin_total_asset
self.asset_memory.append(end_total_asset)
return (self.state, self.reward, self.terminal, {})
|
DQN-DDPG_Stock_Trading
|
positive
|
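The step() entry above turns the continuous action vector into sell and buy orders by argsorting it: the most negative entries become sells, the most positive become buys. A small standalone illustration of that indexing (toy numbers, not from the environment):

import numpy as np

actions = np.array([0.4, -1.2, 0.0, 2.1, -0.3])
order = np.argsort(actions)
sell_index = order[:np.where(actions < 0)[0].shape[0]]        # most negative first
buy_index = order[::-1][:np.where(actions > 0)[0].shape[0]]   # most positive first
print(sell_index, buy_index)  # [1 4] and [3 0]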
def insertBlankPage(self, width=None, height=None, index=0):
"""
Inserts a blank page to this PDF file and returns it. If no page size
is specified, use the size of the last page.
:param float width: The width of the new page expressed in default user
space units.
:param float height: The height of the new page expressed in default
user space units.
:param int index: Position to add the page.
:return: the newly appended page
:rtype: :class:`PageObject<PyPDF2.pdf.PageObject>`
:raises PageSizeNotDefinedError: if width and height are not defined
and previous page does not exist.
"""
if width is None or (height is None and self.getNumPages() - 1 >= index):
<DeepExtract>
pages = self.getObject(self._pages)
oldpage = pages['/Kids'][index].getObject()
</DeepExtract>
width = oldpage.mediaBox.getWidth()
height = oldpage.mediaBox.getHeight()
page = PageObject.createBlankPage(self, width, height)
<DeepExtract>
self._addPage(page, lambda l, p: l.insert(index, p))
</DeepExtract>
return page
|
def insertBlankPage(self, width=None, height=None, index=0):
"""
Inserts a blank page to this PDF file and returns it. If no page size
is specified, use the size of the last page.
:param float width: The width of the new page expressed in default user
space units.
:param float height: The height of the new page expressed in default
user space units.
:param int index: Position to add the page.
:return: the newly appended page
:rtype: :class:`PageObject<PyPDF2.pdf.PageObject>`
:raises PageSizeNotDefinedError: if width and height are not defined
and previous page does not exist.
"""
if width is None or (height is None and self.getNumPages() - 1 >= index):
pages = self.getObject(self._pages)
oldpage = pages['/Kids'][index].getObject()
width = oldpage.mediaBox.getWidth()
height = oldpage.mediaBox.getHeight()
page = PageObject.createBlankPage(self, width, height)
self._addPage(page, lambda l, p: l.insert(index, p))
return page
|
endesive
|
positive
|
def init_request(self, *args, **kwargs):
if not self.has_view_permission():
raise PermissionDenied
request = self.request
request.session['LIST_QUERY'] = (self.model_info, self.request.META['QUERY_STRING'])
self.pk_attname = self.opts.pk.attname
self.lookup_opts = self.opts
<DeepExtract>
self.base_list_display = COL_LIST_VAR in self.request.GET and self.request.GET[COL_LIST_VAR] != '' and self.request.GET[COL_LIST_VAR].split('.') or self.list_display
self.list_display = list(self.base_list_display)
</DeepExtract>
<DeepExtract>
if self.list_display_links or not self.list_display:
self.list_display_links = self.list_display_links
else:
self.list_display_links = list(self.list_display)[:1]
</DeepExtract>
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.show_all = ALL_VAR in request.GET
self.to_field = request.GET.get(TO_FIELD_VAR)
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
|
def init_request(self, *args, **kwargs):
if not self.has_view_permission():
raise PermissionDenied
request = self.request
request.session['LIST_QUERY'] = (self.model_info, self.request.META['QUERY_STRING'])
self.pk_attname = self.opts.pk.attname
self.lookup_opts = self.opts
self.base_list_display = COL_LIST_VAR in self.request.GET and self.request.GET[COL_LIST_VAR] != '' and self.request.GET[COL_LIST_VAR].split('.') or self.list_display
self.list_display = list(self.base_list_display)
if self.list_display_links or not self.list_display:
self.list_display_links = self.list_display_links
else:
self.list_display_links = list(self.list_display)[:1]
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.show_all = ALL_VAR in request.GET
self.to_field = request.GET.get(TO_FIELD_VAR)
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
|
devops
|
positive
|
def _swap_fds(self, in_variant, out_variant, fds=None):
"""Swap Unix file descriptors in the input."""
<DeepExtract>
(in_variant, fd_list) = acquire_fds(in_variant)
if fds is None:
self.assertIsNone(fd_list)
else:
self.assertIsInstance(fd_list, Gio.UnixFDList)
self.assertEqual(fd_list.peek_fds(), fds)
if out_variant is None:
self.assertIsNone(in_variant)
else:
self.assertIsInstance(in_variant, Variant)
self.assertEqual(in_variant.unpack(), out_variant.unpack())
self.assertTrue(in_variant.equal(out_variant))
(in_variant, fd_list) = (in_variant, fd_list)
</DeepExtract>
<DeepExtract>
variant = restore_fds(variant, fd_list)
if in_variant is None:
self.assertIsNone(variant)
else:
self.assertIsInstance(variant, Variant)
self.assertEqual(variant.unpack(), in_variant.unpack())
self.assertTrue(variant.equal(in_variant))
</DeepExtract>
|
def _swap_fds(self, in_variant, out_variant, fds=None):
"""Swap Unix file descriptors in the input."""
(in_variant, fd_list) = acquire_fds(in_variant)
if fds is None:
self.assertIsNone(fd_list)
else:
self.assertIsInstance(fd_list, Gio.UnixFDList)
self.assertEqual(fd_list.peek_fds(), fds)
if out_variant is None:
self.assertIsNone(in_variant)
else:
self.assertIsInstance(in_variant, Variant)
self.assertEqual(in_variant.unpack(), out_variant.unpack())
self.assertTrue(in_variant.equal(out_variant))
(in_variant, fd_list) = (in_variant, fd_list)
variant = restore_fds(variant, fd_list)
if in_variant is None:
self.assertIsNone(variant)
else:
self.assertIsInstance(variant, Variant)
self.assertEqual(variant.unpack(), in_variant.unpack())
self.assertTrue(variant.equal(in_variant))
|
dasbus
|
positive
|
def _get_rois_blob(im_rois, im_scale):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid with columns
[level, x1, y1, x2, y2]
"""
<DeepExtract>
rois = im_rois.astype(np.float, copy=False) * im_scale
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
(rois, levels) = (rois, levels)
</DeepExtract>
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
|
def _get_rois_blob(im_rois, im_scale):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid with columns
[level, x1, y1, x2, y2]
"""
rois = im_rois.astype(np.float, copy=False) * im_scale
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
(rois, levels) = (rois, levels)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
|
Detectron-DA-Faster-RCNN
|
positive
|
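The _get_rois_blob entry above scales the boxes by the image scale and prepends a pyramid-level column, giving rows of [level, x1, y1, x2, y2]. A minimal sketch of that blob layout with made-up numbers:

import numpy as np

im_rois = np.array([[10, 20, 30, 40]], dtype=np.float64)
im_scale = 2.0
rois = im_rois * im_scale
levels = np.zeros((rois.shape[0], 1))
blob = np.hstack((levels, rois)).astype(np.float32)
print(blob)  # [[ 0. 20. 40. 60. 80.]]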
def compute_n(self, book, series_ref):
"""
Computes a score for the given SeriesRef, which describes how closely
that ref matches the given ComicBook. The higher the score, the closer
the match. Scores can be negative.
"""
def split(name_s):
if name_s is None:
name_s = ''
name_s = re.sub("'", '', name_s).lower()
name_s = re.sub('\\W+', ' ', name_s)
name_s = re.sub('giant[- ]*sized?', 'giant size', name_s)
name_s = re.sub('king[- ]*sized?', 'king size', name_s)
name_s = re.sub('one[- ]*shot', 'one shot', name_s)
return name_s.split()
bookname_s = '' if not book.series_s else book.series_s
if bookname_s and book.format_s:
bookname_s += ' ' + book.format_s
<DeepExtract>
if bookname_s is None:
bookname_s = ''
bookname_s = re.sub("'", '', bookname_s).lower()
bookname_s = re.sub('\\W+', ' ', bookname_s)
bookname_s = re.sub('giant[- ]*sized?', 'giant size', bookname_s)
bookname_s = re.sub('king[- ]*sized?', 'king size', bookname_s)
bookname_s = re.sub('one[- ]*shot', 'one shot', bookname_s)
bookwords = bookname_s.split()
</DeepExtract>
<DeepExtract>
if series_ref.series_name_s is None:
series_ref.series_name_s = ''
series_ref.series_name_s = re.sub("'", '', series_ref.series_name_s).lower()
series_ref.series_name_s = re.sub('\\W+', ' ', series_ref.series_name_s)
series_ref.series_name_s = re.sub('giant[- ]*sized?', 'giant size', series_ref.series_name_s)
series_ref.series_name_s = re.sub('king[- ]*sized?', 'king size', series_ref.series_name_s)
series_ref.series_name_s = re.sub('one[- ]*shot', 'one shot', series_ref.series_name_s)
serieswords = series_ref.series_name_s.split()
</DeepExtract>
namescore_n = 0
for word in bookwords:
if word in serieswords:
namescore_n += 5
serieswords.remove(word)
else:
namescore_n -= 1
namescore_n -= len(serieswords)
priorscore_n = 7 if sstr(series_ref.series_key) in self.__prior_series_sl else 0
pub_s = series_ref.publisher_s.lower()
publisherscore_n = -6 if 'panini' in pub_s or 'deagostina' in pub_s or pub_s == 'marvel italia' or (pub_s == 'marvel uk') or (pub_s == 'semic_as') or (pub_s == 'abril') else 0
booknumber_n = book.issue_num_s if book.issue_num_s else '-1000'
booknumber_n = re.sub('[^\\d.-]+', '', booknumber_n)
try:
booknumber_n = float(booknumber_n)
except:
booknumber_n = -999
series_count_n = series_ref.issue_count_n
if series_count_n > 100:
bookscore_n = 100
else:
bookscore_n = 100 if booknumber_n - 1 <= series_count_n else -100
current_year_n = datetime.datetime.now().year
is_valid_year_b = lambda y: y > 1900 and y <= current_year_n + 1
series_year_n = series_ref.volume_year_n
book_year_n = book.pub_year_n if is_valid_year_b(book.pub_year_n) else book.rel_year_n
yearscore_n = 0
if is_valid_year_b(book_year_n):
if not is_valid_year_b(series_year_n):
yearscore_n = -100
elif series_year_n > book_year_n:
yearscore_n = -500
if is_valid_year_b(series_year_n):
recency_score_n = -(current_year_n - series_year_n) / 100.0
else:
recency_score_n = -1.0
return bookscore_n + namescore_n + publisherscore_n + priorscore_n + yearscore_n + recency_score_n
|
def compute_n(self, book, series_ref):
"""
Computes a score for the given SeriesRef, which describes how closely
that ref matches the given ComicBook. The higher the score, the closer
the match. Scores can be negative.
"""
def split(name_s):
if name_s is None:
name_s = ''
name_s = re.sub("'", '', name_s).lower()
name_s = re.sub('\\W+', ' ', name_s)
name_s = re.sub('giant[- ]*sized?', 'giant size', name_s)
name_s = re.sub('king[- ]*sized?', 'king size', name_s)
name_s = re.sub('one[- ]*shot', 'one shot', name_s)
return name_s.split()
bookname_s = '' if not book.series_s else book.series_s
if bookname_s and book.format_s:
bookname_s += ' ' + book.format_s
if bookname_s is None:
bookname_s = ''
bookname_s = re.sub("'", '', bookname_s).lower()
bookname_s = re.sub('\\W+', ' ', bookname_s)
bookname_s = re.sub('giant[- ]*sized?', 'giant size', bookname_s)
bookname_s = re.sub('king[- ]*sized?', 'king size', bookname_s)
bookname_s = re.sub('one[- ]*shot', 'one shot', bookname_s)
bookwords = bookname_s.split()
if series_ref.series_name_s is None:
series_ref.series_name_s = ''
series_ref.series_name_s = re.sub("'", '', series_ref.series_name_s).lower()
series_ref.series_name_s = re.sub('\\W+', ' ', series_ref.series_name_s)
series_ref.series_name_s = re.sub('giant[- ]*sized?', 'giant size', series_ref.series_name_s)
series_ref.series_name_s = re.sub('king[- ]*sized?', 'king size', series_ref.series_name_s)
series_ref.series_name_s = re.sub('one[- ]*shot', 'one shot', series_ref.series_name_s)
serieswords = series_ref.series_name_s.split()
namescore_n = 0
for word in bookwords:
if word in serieswords:
namescore_n += 5
serieswords.remove(word)
else:
namescore_n -= 1
namescore_n -= len(serieswords)
priorscore_n = 7 if sstr(series_ref.series_key) in self.__prior_series_sl else 0
pub_s = series_ref.publisher_s.lower()
publisherscore_n = -6 if 'panini' in pub_s or 'deagostina' in pub_s or pub_s == 'marvel italia' or (pub_s == 'marvel uk') or (pub_s == 'semic_as') or (pub_s == 'abril') else 0
booknumber_n = book.issue_num_s if book.issue_num_s else '-1000'
booknumber_n = re.sub('[^\\d.-]+', '', booknumber_n)
try:
booknumber_n = float(booknumber_n)
except:
booknumber_n = -999
series_count_n = series_ref.issue_count_n
if series_count_n > 100:
bookscore_n = 100
else:
bookscore_n = 100 if booknumber_n - 1 <= series_count_n else -100
current_year_n = datetime.datetime.now().year
is_valid_year_b = lambda y: y > 1900 and y <= current_year_n + 1
series_year_n = series_ref.volume_year_n
book_year_n = book.pub_year_n if is_valid_year_b(book.pub_year_n) else book.rel_year_n
yearscore_n = 0
if is_valid_year_b(book_year_n):
if not is_valid_year_b(series_year_n):
yearscore_n = -100
elif series_year_n > book_year_n:
yearscore_n = -500
if is_valid_year_b(series_year_n):
recency_score_n = -(current_year_n - series_year_n) / 100.0
else:
recency_score_n = -1.0
return bookscore_n + namescore_n + publisherscore_n + priorscore_n + yearscore_n + recency_score_n
|
comic-vine-scraper
|
positive
|
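The compute_n entry above normalises series and book names before word-matching them (strip apostrophes, lowercase, collapse non-word characters, canonicalise "giant-sized" and friends). A rough, self-contained illustration of that normalisation step; the example title is my own.

import re

def normalize(name_s):
    # Same normalisation order as split() above: apostrophes, lowercase,
    # non-word characters, then the "giant size" canonicalisation.
    name_s = re.sub("'", '', (name_s or '')).lower()
    name_s = re.sub(r'\W+', ' ', name_s)
    name_s = re.sub(r'giant[- ]*sized?', 'giant size', name_s)
    return name_s.split()

print(normalize("Giant-Sized X-Men"))  # ['giant', 'size', 'x', 'men']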
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
n = args[2]
sc1 = (d_a >> 16 == 32768) & (d_b & 65535 == 32768) & (n == 1).cast_to(Type.int_32)
sc0 = (d_a & 65535 == 32768) & (d_b & 65535 == 32768) & (n == 1).cast_to(Type.int_32)
mul_res1 = 2147483647 & sc1 | (d_a >> 16) * (d_b & 65535) << n.value & (sc1 ^ 65535)
mul_res0 = 2147483647 & sc0 | (d_a & 65535) * (d_b & 65535) << n.value & (sc0 ^ 65535)
e_d_0 = self.get('d{0}'.format(self.data['d']), Type.int_32)
e_d_1 = self.get('d{0}'.format(self.data['d'] + 1), Type.int_32)
max_pos = self.constant(INT32_MAX_POS, Type.int_32)
max_neg = self.constant(INT32_MAX_NEG, Type.int_32)
result_w0 = ssov32(e_d_0 - mul_res0, max_pos, max_neg)
result_w1 = ssov32(e_d_1 - mul_res1, max_pos, max_neg)
self.put(result_w0, 'd{0}'.format(self.data['c']))
self.put(result_w1, 'd{0}'.format(self.data['c'] + 1))
c = 0
ov_w0 = overflow(result_w0)
ov_w1 = overflow(result_w1)
v = ov_w1 | ov_w0
aov_w0 = advanced_overflow(result_w0)
aov_w1 = advanced_overflow(result_w1)
av = aov_w1 | aov_w0
<DeepExtract>
psw = self.get('psw', Type.int_32)
</DeepExtract>
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, 'psw')
|
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
n = args[2]
sc1 = (d_a >> 16 == 32768) & (d_b & 65535 == 32768) & (n == 1).cast_to(Type.int_32)
sc0 = (d_a & 65535 == 32768) & (d_b & 65535 == 32768) & (n == 1).cast_to(Type.int_32)
mul_res1 = 2147483647 & sc1 | (d_a >> 16) * (d_b & 65535) << n.value & (sc1 ^ 65535)
mul_res0 = 2147483647 & sc0 | (d_a & 65535) * (d_b & 65535) << n.value & (sc0 ^ 65535)
e_d_0 = self.get('d{0}'.format(self.data['d']), Type.int_32)
e_d_1 = self.get('d{0}'.format(self.data['d'] + 1), Type.int_32)
max_pos = self.constant(INT32_MAX_POS, Type.int_32)
max_neg = self.constant(INT32_MAX_NEG, Type.int_32)
result_w0 = ssov32(e_d_0 - mul_res0, max_pos, max_neg)
result_w1 = ssov32(e_d_1 - mul_res1, max_pos, max_neg)
self.put(result_w0, 'd{0}'.format(self.data['c']))
self.put(result_w1, 'd{0}'.format(self.data['c'] + 1))
c = 0
ov_w0 = overflow(result_w0)
ov_w1 = overflow(result_w1)
v = ov_w1 | ov_w0
aov_w0 = advanced_overflow(result_w0)
aov_w1 = advanced_overflow(result_w1)
av = aov_w1 | aov_w0
psw = self.get('psw', Type.int_32)
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, 'psw')
|
angr-platforms
|
positive
|
@parameterized.named_parameters(jtu.cases_from_list(({'testcase_name': rec.test_name.capitalize(), 'name': rec.name, 'np_op': getattr(np, rec.name), 'bm_op': getattr(bm, rec.name)} for rec in JAX_ARGMINMAX_RECORDS)))
def testArgMinMaxEmpty(self, name, np_op, bm_op):
name = name[3:] if name.startswith('nan') else name
msg = 'attempt to get {} of an empty sequence'.format(name)
with self.assertRaises(ValueError, msg=msg):
<DeepExtract>
return bm.geomspace(np.array([]), stop, num, endpoint=endpoint, dtype=dtype, axis=axis)
</DeepExtract>
with self.assertRaises(ValueError, msg=msg):
<DeepExtract>
return bm.geomspace(np.zeros((2, 0)), stop, num, endpoint=endpoint, dtype=dtype, axis=1)
</DeepExtract>
np_fun = jtu.with_jax_dtype_defaults(partial(np_op, axis=0))
bm_fun = partial(bm_op, axis=0)
args_maker = lambda : [np.zeros((2, 0))]
self._CheckAgainstNumpy(np_fun, bm_func(bm_fun), args_maker)
self._CompileAndCheck(bm_func(bm_fun), args_maker)
|
@parameterized.named_parameters(jtu.cases_from_list(({'testcase_name': rec.test_name.capitalize(), 'name': rec.name, 'np_op': getattr(np, rec.name), 'bm_op': getattr(bm, rec.name)} for rec in JAX_ARGMINMAX_RECORDS)))
def testArgMinMaxEmpty(self, name, np_op, bm_op):
name = name[3:] if name.startswith('nan') else name
msg = 'attempt to get {} of an empty sequence'.format(name)
with self.assertRaises(ValueError, msg=msg):
return bm.geomspace(np.array([]), stop, num, endpoint=endpoint, dtype=dtype, axis=axis)
with self.assertRaises(ValueError, msg=msg):
return bm.geomspace(np.zeros((2, 0)), stop, num, endpoint=endpoint, dtype=dtype, axis=1)
np_fun = jtu.with_jax_dtype_defaults(partial(np_op, axis=0))
bm_fun = partial(bm_op, axis=0)
args_maker = lambda : [np.zeros((2, 0))]
self._CheckAgainstNumpy(np_fun, bm_func(bm_fun), args_maker)
self._CompileAndCheck(bm_func(bm_fun), args_maker)
|
BrainPy
|
positive
|
def load_dataset(self):
<DeepExtract>
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main', self._image_set + '.txt')
assert os.path.exists(image_set_file), 'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
self._image_indexes = image_index
</DeepExtract>
self._image_names = [self.image_path_from_index(index) for index in self.image_indexes]
<DeepExtract>
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
self._annotations = roidb
gt_roidb = [self._annotation_from_index(index) for index in self.image_indexes]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
self._annotations = gt_roidb
</DeepExtract>
|
def load_dataset(self):
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main', self._image_set + '.txt')
assert os.path.exists(image_set_file), 'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
self._image_indexes = image_index
self._image_names = [self.image_path_from_index(index) for index in self.image_indexes]
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
self._annotations = roidb
gt_roidb = [self._annotation_from_index(index) for index in self.image_indexes]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
self._annotations = gt_roidb
|
Detection-PyTorch-Notebook
|
positive
|
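The load_dataset entry above uses a pickle cache for the ground-truth roidb, although as written it still rebuilds the annotations even when the cache file loads. A generic cache-or-build sketch that returns the cached value early; the helper name is hypothetical and not part of the dataset code.

import os
import pickle

def cached(path, build_fn):
    # Load the pickled result if it exists, otherwise build and persist it.
    if os.path.exists(path):
        with open(path, 'rb') as fid:
            return pickle.load(fid)
    data = build_fn()
    with open(path, 'wb') as fid:
        pickle.dump(data, fid, pickle.HIGHEST_PROTOCOL)
    return data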
def getAllUsersByRole(roleId):
payload = {}
params = {}
params['pgSize'] = 250
params['pgNum'] = 1
params['active'] = True
params['roleId'] = roleId
allUsers = []
<DeepExtract>
address = serverAddress + '/api/user'
if 'get' == 'get':
r = requests.get(address, auth=(credsUsername, userPassword), params=params, verify=False)
elif 'get' == 'post' and execute:
r = requests.post(address, auth=(credsUsername, userPassword), data={}, params=params, verify=False)
elif 'get' == 'put' and execute:
r = requests.put(address, auth=(credsUsername, userPassword), data={}, params=params, verify=False)
elif 'get' == 'delete' and execute:
r = requests.delete(address, auth=(credsUsername, userPassword), data={}, params=params, verify=False)
elif not execute:
r = False
else:
print('ERROR: Invalid Request type. Try again')
r = False
response = r
</DeepExtract>
firstContent = response.text
firstData = json.loads(firstContent)['data']
totalUsers = firstData['totalCount']
pagesNeeded = totalUsers // params['pgSize'] + (totalUsers % params['pgSize'] > 0)
while params['pgNum'] <= pagesNeeded:
<DeepExtract>
address = serverAddress + '/api/user'
if 'get' == 'get':
r = requests.get(address, auth=(credsUsername, userPassword), params=params, verify=False)
elif 'get' == 'post' and execute:
r = requests.post(address, auth=(credsUsername, userPassword), data={}, params=params, verify=False)
elif 'get' == 'put' and execute:
r = requests.put(address, auth=(credsUsername, userPassword), data={}, params=params, verify=False)
elif 'get' == 'delete' and execute:
r = requests.delete(address, auth=(credsUsername, userPassword), data={}, params=params, verify=False)
elif not execute:
r = False
else:
print('ERROR: Invalid Request type. Try again')
r = False
response = r
</DeepExtract>
content = response.text
data = json.loads(content)['data']
allUsers.extend(data['users'])
params['pgNum'] += 1
return allUsers
|
def getAllUsersByRole(roleId):
payload = {}
params = {}
params['pgSize'] = 250
params['pgNum'] = 1
params['active'] = True
params['roleId'] = roleId
allUsers = []
address = serverAddress + '/api/user'
if 'get' == 'get':
r = requests.get(address, auth=(credsUsername, userPassword), params=params, verify=False)
elif 'get' == 'post' and execute:
r = requests.post(address, auth=(credsUsername, userPassword), data={}, params=params, verify=False)
elif 'get' == 'put' and execute:
r = requests.put(address, auth=(credsUsername, userPassword), data={}, params=params, verify=False)
elif 'get' == 'delete' and execute:
r = requests.delete(address, auth=(credsUsername, userPassword), data={}, params=params, verify=False)
elif not execute:
r = False
else:
print('ERROR: Invalid Request type. Try again')
r = False
response = r
firstContent = response.text
firstData = json.loads(firstContent)['data']
totalUsers = firstData['totalCount']
pagesNeeded = totalUsers // params['pgSize'] + (totalUsers % params['pgSize'] > 0)
while params['pgNum'] <= pagesNeeded:
address = serverAddress + '/api/user'
if 'get' == 'get':
r = requests.get(address, auth=(credsUsername, userPassword), params=params, verify=False)
elif 'get' == 'post' and execute:
r = requests.post(address, auth=(credsUsername, userPassword), data={}, params=params, verify=False)
elif 'get' == 'put' and execute:
r = requests.put(address, auth=(credsUsername, userPassword), data={}, params=params, verify=False)
elif 'get' == 'delete' and execute:
r = requests.delete(address, auth=(credsUsername, userPassword), data={}, params=params, verify=False)
elif not execute:
r = False
else:
print('ERROR: Invalid Request type. Try again')
r = False
response = r
content = response.text
data = json.loads(content)['data']
allUsers.extend(data['users'])
params['pgNum'] += 1
return allUsers
|
crashplan_api_examples
|
positive
|
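The paging logic in getAllUsersByRole above computes the page count with an integer ceiling division (floor division plus one extra page when there is a remainder). A compact equivalent, written as a standalone helper for illustration:

def pages_needed(total_items, page_size):
    # Ceiling division without floats: add one page if there is a remainder.
    return total_items // page_size + (total_items % page_size > 0)

assert pages_needed(0, 250) == 0
assert pages_needed(250, 250) == 1
assert pages_needed(251, 250) == 2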
def get_quarter_start(self, year: int, quarter: int, fy_start_month: int=None) -> date:
"""
The fiscal year quarter starting date of the EntityModel, according to its settings.
Parameters
----------
year: int
The fiscal year associated with the requested start date.
quarter: int
The quarter number associated with the requested start date.
fy_start_month: int
Optional fiscal year month start. If passed, it will override the EntityModel setting.
Returns
-------
date
The date when the requested EntityModel quarter starts.
"""
if fy_start_month:
<DeepExtract>
if fy_start_month not in self.VALID_MONTHS:
raise ValidationError(f'Specified month is not valid: {fy_start_month}')
</DeepExtract>
fy_start_month = self.get_fy_start_month() if not fy_start_month else fy_start_month
<DeepExtract>
if quarter not in self.VALID_QUARTERS:
raise ValidationError(f'Specified quarter is not valid: {quarter}')
</DeepExtract>
quarter_month_start = (quarter - 1) * 3 + fy_start_month
year_start = year
if quarter_month_start > 12:
quarter_month_start -= 12
year_start = year + 1
return date(year_start, quarter_month_start, 1)
|
def get_quarter_start(self, year: int, quarter: int, fy_start_month: int=None) -> date:
"""
The fiscal year quarter starting date of the EntityModel, according to its settings.
Parameters
----------
year: int
The fiscal year associated with the requested start date.
quarter: int
The quarter number associated with the requested start date.
fy_start_month: int
Optional fiscal year month start. If passed, it will override the EntityModel setting.
Returns
-------
date
The date when the requested EntityModel quarter starts.
"""
if fy_start_month:
if fy_start_month not in self.VALID_MONTHS:
raise ValidationError(f'Specified month is not valid: {fy_start_month}')
fy_start_month = self.get_fy_start_month() if not fy_start_month else fy_start_month
if quarter not in self.VALID_QUARTERS:
raise ValidationError(f'Specified quarter is not valid: {quarter}')
quarter_month_start = (quarter - 1) * 3 + fy_start_month
year_start = year
if quarter_month_start > 12:
quarter_month_start -= 12
year_start = year + 1
return date(year_start, quarter_month_start, 1)
|
django-ledger
|
positive
|
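get_quarter_start above maps a fiscal quarter to a calendar month by counting three months per quarter from the fiscal start month, wrapping into the next calendar year when the result passes December. A standalone sketch of that wrap-around arithmetic with example dates of my own choosing:

from datetime import date

def quarter_start(year, quarter, fy_start_month):
    # Quarters count from the fiscal start month and spill into the next
    # calendar year when the computed month exceeds 12.
    month = (quarter - 1) * 3 + fy_start_month
    if month > 12:
        month -= 12
        year += 1
    return date(year, month, 1)

assert quarter_start(2023, 1, 1) == date(2023, 1, 1)    # calendar fiscal year
assert quarter_start(2023, 3, 10) == date(2024, 4, 1)   # fiscal year starting in October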
def test_create(self):
<DeepExtract>
namespace = self.namespace if namespace is None else namespace
d_kwargs = {'app_type': kwargs.get('app_type', 'web'), 'version': kwargs.get('version', 'v99'), 'replicas': kwargs.get('replicas', 1), 'pod_termination_grace_period_seconds': 2, 'image': 'quay.io/fake/image', 'entrypoint': 'sh', 'command': 'start'}
deployment = self.scheduler.deployment.create(namespace, name, **d_kwargs)
self.assertEqual(deployment.status_code, 201, deployment.json())
data = {'min': kwargs.get('min', 2), 'max': kwargs.get('max', 4), 'cpu_percent': 45, 'wait': True}
horizontalpodautoscaler = self.scheduler.hpa.create(namespace, name, kwargs.get('app_type'), deployment.json(), **data)
self.assertEqual(horizontalpodautoscaler.status_code, 201, horizontalpodautoscaler.json())
name = name
</DeepExtract>
deployment = self.scheduler.deployment.get(self.namespace, name).json()
self.assertEqual(deployment['spec']['replicas'], 2, deployment)
labels = {'app': self.namespace, 'type': 'web', 'version': 'v99'}
pods = self.scheduler.pod.get(self.namespace, labels=labels).json()
self.assertEqual(len(pods['items']), 2)
|
def test_create(self):
namespace = self.namespace if namespace is None else namespace
d_kwargs = {'app_type': kwargs.get('app_type', 'web'), 'version': kwargs.get('version', 'v99'), 'replicas': kwargs.get('replicas', 1), 'pod_termination_grace_period_seconds': 2, 'image': 'quay.io/fake/image', 'entrypoint': 'sh', 'command': 'start'}
deployment = self.scheduler.deployment.create(namespace, name, **d_kwargs)
self.assertEqual(deployment.status_code, 201, deployment.json())
data = {'min': kwargs.get('min', 2), 'max': kwargs.get('max', 4), 'cpu_percent': 45, 'wait': True}
horizontalpodautoscaler = self.scheduler.hpa.create(namespace, name, kwargs.get('app_type'), deployment.json(), **data)
self.assertEqual(horizontalpodautoscaler.status_code, 201, horizontalpodautoscaler.json())
name = name
deployment = self.scheduler.deployment.get(self.namespace, name).json()
self.assertEqual(deployment['spec']['replicas'], 2, deployment)
labels = {'app': self.namespace, 'type': 'web', 'version': 'v99'}
pods = self.scheduler.pod.get(self.namespace, labels=labels).json()
self.assertEqual(len(pods['items']), 2)
|
controller
|
positive
|
def testMockCallback(self):
<DeepExtract>
self.assertEquals('', self.PerformSingleLineLint('MockCallback<bool(int)>'))
</DeepExtract>
<DeepExtract>
self.assertEquals('', self.PerformSingleLineLint('MockCallback<int(float, char)>'))
</DeepExtract>
|
def testMockCallback(self):
self.assertEquals('', self.PerformSingleLineLint('MockCallback<bool(int)>'))
self.assertEquals('', self.PerformSingleLineLint('MockCallback<int(float, char)>'))
|
cpplint
|
positive
|
def get_anchors(self, featmap_sizes, shape_preds, loc_preds, img_metas, use_loc_filter=False, device='cuda'):
"""Get squares according to feature map sizes and guided
anchors.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
shape_preds (list[tensor]): Multi-level shape predictions.
loc_preds (list[tensor]): Multi-level location predictions.
img_metas (list[dict]): Image meta info.
use_loc_filter (bool): Use loc filter or not.
device (torch.device | str): device for returned tensors
Returns:
tuple: square approxs of each image, guided anchors of each image,
loc masks of each image
"""
num_imgs = len(img_metas)
num_levels = len(featmap_sizes)
multi_level_squares = []
for i in range(num_levels):
squares = self.square_generators[i].grid_anchors(featmap_sizes[i], self.anchor_strides[i], device=device)
multi_level_squares.append(squares)
squares_list = [multi_level_squares for _ in range(num_imgs)]
guided_anchors_list = []
loc_mask_list = []
for (img_id, img_meta) in enumerate(img_metas):
multi_level_guided_anchors = []
multi_level_loc_mask = []
for i in range(num_levels):
squares = squares_list[img_id][i]
shape_pred = shape_preds[i][img_id]
loc_pred = loc_preds[i][img_id]
<DeepExtract>
loc_pred = loc_pred.sigmoid().detach()
if use_loc_filter:
loc_mask = loc_pred >= self.loc_filter_thr
else:
loc_mask = loc_pred >= 0.0
mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_anchors)
mask = mask.contiguous().view(-1)
squares = squares[mask]
anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view(-1, 2).detach()[mask]
bbox_deltas = anchor_deltas.new_full(squares.size(), 0)
bbox_deltas[:, 2:] = anchor_deltas
guided_anchors = delta2bbox(squares, bbox_deltas, self.anchoring_means, self.anchoring_stds, wh_ratio_clip=1e-06)
(guided_anchors, loc_mask) = (guided_anchors, mask)
</DeepExtract>
multi_level_guided_anchors.append(guided_anchors)
multi_level_loc_mask.append(loc_mask)
guided_anchors_list.append(multi_level_guided_anchors)
loc_mask_list.append(multi_level_loc_mask)
return (squares_list, guided_anchors_list, loc_mask_list)
|
def get_anchors(self, featmap_sizes, shape_preds, loc_preds, img_metas, use_loc_filter=False, device='cuda'):
"""Get squares according to feature map sizes and guided
anchors.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
shape_preds (list[tensor]): Multi-level shape predictions.
loc_preds (list[tensor]): Multi-level location predictions.
img_metas (list[dict]): Image meta info.
use_loc_filter (bool): Use loc filter or not.
device (torch.device | str): device for returned tensors
Returns:
tuple: square approxs of each image, guided anchors of each image,
loc masks of each image
"""
num_imgs = len(img_metas)
num_levels = len(featmap_sizes)
multi_level_squares = []
for i in range(num_levels):
squares = self.square_generators[i].grid_anchors(featmap_sizes[i], self.anchor_strides[i], device=device)
multi_level_squares.append(squares)
squares_list = [multi_level_squares for _ in range(num_imgs)]
guided_anchors_list = []
loc_mask_list = []
for (img_id, img_meta) in enumerate(img_metas):
multi_level_guided_anchors = []
multi_level_loc_mask = []
for i in range(num_levels):
squares = squares_list[img_id][i]
shape_pred = shape_preds[i][img_id]
loc_pred = loc_preds[i][img_id]
loc_pred = loc_pred.sigmoid().detach()
if use_loc_filter:
loc_mask = loc_pred >= self.loc_filter_thr
else:
loc_mask = loc_pred >= 0.0
mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_anchors)
mask = mask.contiguous().view(-1)
squares = squares[mask]
anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view(-1, 2).detach()[mask]
bbox_deltas = anchor_deltas.new_full(squares.size(), 0)
bbox_deltas[:, 2:] = anchor_deltas
guided_anchors = delta2bbox(squares, bbox_deltas, self.anchoring_means, self.anchoring_stds, wh_ratio_clip=1e-06)
(guided_anchors, loc_mask) = (guided_anchors, mask)
multi_level_guided_anchors.append(guided_anchors)
multi_level_loc_mask.append(loc_mask)
guided_anchors_list.append(multi_level_guided_anchors)
loc_mask_list.append(multi_level_loc_mask)
return (squares_list, guided_anchors_list, loc_mask_list)
|
DetectoRS
|
positive
|
def get_inception_resnet_v2_unet_softmax(input_shape, weights='imagenet'):
inp = Input(input_shape + (4,))
x = conv2d_bn(inp, 32, 3, strides=2, padding='same')
x = conv2d_bn(x, 32, 3, padding='same')
x = conv2d_bn(x, 64, 3)
conv1 = x
x = MaxPooling2D(3, strides=2, padding='same')(x)
x = conv2d_bn(x, 80, 1, padding='same')
x = conv2d_bn(x, 192, 3, padding='same')
conv2 = x
x = MaxPooling2D(3, strides=2, padding='same')(x)
branch_0 = conv2d_bn(x, 96, 1)
branch_1 = conv2d_bn(x, 48, 1)
branch_1 = conv2d_bn(branch_1, 64, 5)
branch_2 = conv2d_bn(x, 64, 1)
branch_2 = conv2d_bn(branch_2, 96, 3)
branch_2 = conv2d_bn(branch_2, 96, 3)
branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1)
branches = [branch_0, branch_1, branch_2, branch_pool]
channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
x = Concatenate(axis=channel_axis, name='mixed_5b')(branches)
for block_idx in range(1, 11):
x = inception_resnet_block(x, scale=0.17, block_type='block35', block_idx=block_idx)
conv3 = x
branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='same')
branch_1 = conv2d_bn(x, 256, 1)
branch_1 = conv2d_bn(branch_1, 256, 3)
branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='same')
branch_pool = MaxPooling2D(3, strides=2, padding='same')(x)
branches = [branch_0, branch_1, branch_pool]
x = Concatenate(axis=channel_axis, name='mixed_6a')(branches)
for block_idx in range(1, 21):
x = inception_resnet_block(x, scale=0.1, block_type='block17', block_idx=block_idx)
conv4 = x
branch_0 = conv2d_bn(x, 256, 1)
branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='same')
branch_1 = conv2d_bn(x, 256, 1)
branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='same')
branch_2 = conv2d_bn(x, 256, 1)
branch_2 = conv2d_bn(branch_2, 288, 3)
branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='same')
branch_pool = MaxPooling2D(3, strides=2, padding='same')(x)
branches = [branch_0, branch_1, branch_2, branch_pool]
x = Concatenate(axis=channel_axis, name='mixed_7a')(branches)
for block_idx in range(1, 10):
x = inception_resnet_block(x, scale=0.2, block_type='block8', block_idx=block_idx)
x = inception_resnet_block(x, scale=1.0, activation=None, block_type='block8', block_idx=10)
x = conv2d_bn(x, 1536, 1, name='conv_7b')
conv5 = x
<DeepExtract>
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(320, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(UpSampling2D()(conv5))
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv6 = conv
</DeepExtract>
conv6 = concatenate([conv6, conv4], axis=-1)
<DeepExtract>
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(320, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(conv6)
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv6 = conv
</DeepExtract>
<DeepExtract>
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(256, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(UpSampling2D()(conv6))
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv7 = conv
</DeepExtract>
conv7 = concatenate([conv7, conv3], axis=-1)
<DeepExtract>
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(256, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(conv7)
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv7 = conv
</DeepExtract>
<DeepExtract>
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(128, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(UpSampling2D()(conv7))
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv8 = conv
</DeepExtract>
conv8 = concatenate([conv8, conv2], axis=-1)
<DeepExtract>
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(128, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(conv8)
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv8 = conv
</DeepExtract>
<DeepExtract>
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(96, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(UpSampling2D()(conv8))
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv9 = conv
</DeepExtract>
conv9 = concatenate([conv9, conv1], axis=-1)
<DeepExtract>
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(96, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(conv9)
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv9 = conv
</DeepExtract>
<DeepExtract>
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(64, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(UpSampling2D()(conv9))
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv10 = conv
</DeepExtract>
<DeepExtract>
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(64, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(conv10)
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv10 = conv
</DeepExtract>
res = Conv2D(3, (1, 1), activation='softmax')(conv10)
model = Model(inp, res)
if weights == 'imagenet':
inception_resnet_v2 = InceptionResNetV2(weights=weights, include_top=False, input_shape=input_shape + (3,))
for i in range(2, len(inception_resnet_v2.layers) - 1):
model.layers[i].set_weights(inception_resnet_v2.layers[i].get_weights())
model.layers[i].trainable = False
return model
|
def get_inception_resnet_v2_unet_softmax(input_shape, weights='imagenet'):
inp = Input(input_shape + (4,))
x = conv2d_bn(inp, 32, 3, strides=2, padding='same')
x = conv2d_bn(x, 32, 3, padding='same')
x = conv2d_bn(x, 64, 3)
conv1 = x
x = MaxPooling2D(3, strides=2, padding='same')(x)
x = conv2d_bn(x, 80, 1, padding='same')
x = conv2d_bn(x, 192, 3, padding='same')
conv2 = x
x = MaxPooling2D(3, strides=2, padding='same')(x)
branch_0 = conv2d_bn(x, 96, 1)
branch_1 = conv2d_bn(x, 48, 1)
branch_1 = conv2d_bn(branch_1, 64, 5)
branch_2 = conv2d_bn(x, 64, 1)
branch_2 = conv2d_bn(branch_2, 96, 3)
branch_2 = conv2d_bn(branch_2, 96, 3)
branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1)
branches = [branch_0, branch_1, branch_2, branch_pool]
channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
x = Concatenate(axis=channel_axis, name='mixed_5b')(branches)
for block_idx in range(1, 11):
x = inception_resnet_block(x, scale=0.17, block_type='block35', block_idx=block_idx)
conv3 = x
branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='same')
branch_1 = conv2d_bn(x, 256, 1)
branch_1 = conv2d_bn(branch_1, 256, 3)
branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='same')
branch_pool = MaxPooling2D(3, strides=2, padding='same')(x)
branches = [branch_0, branch_1, branch_pool]
x = Concatenate(axis=channel_axis, name='mixed_6a')(branches)
for block_idx in range(1, 21):
x = inception_resnet_block(x, scale=0.1, block_type='block17', block_idx=block_idx)
conv4 = x
branch_0 = conv2d_bn(x, 256, 1)
branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='same')
branch_1 = conv2d_bn(x, 256, 1)
branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='same')
branch_2 = conv2d_bn(x, 256, 1)
branch_2 = conv2d_bn(branch_2, 288, 3)
branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='same')
branch_pool = MaxPooling2D(3, strides=2, padding='same')(x)
branches = [branch_0, branch_1, branch_2, branch_pool]
x = Concatenate(axis=channel_axis, name='mixed_7a')(branches)
for block_idx in range(1, 10):
x = inception_resnet_block(x, scale=0.2, block_type='block8', block_idx=block_idx)
x = inception_resnet_block(x, scale=1.0, activation=None, block_type='block8', block_idx=10)
x = conv2d_bn(x, 1536, 1, name='conv_7b')
conv5 = x
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(320, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(UpSampling2D()(conv5))
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv6 = conv
conv6 = concatenate([conv6, conv4], axis=-1)
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(320, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(conv6)
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv6 = conv
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(256, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(UpSampling2D()(conv6))
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv7 = conv
conv7 = concatenate([conv7, conv3], axis=-1)
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(256, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(conv7)
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv7 = conv
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(128, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(UpSampling2D()(conv7))
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv8 = conv
conv8 = concatenate([conv8, conv2], axis=-1)
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(128, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(conv8)
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv8 = conv
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(96, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(UpSampling2D()(conv8))
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv9 = conv
conv9 = concatenate([conv9, conv1], axis=-1)
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(96, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(conv9)
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv9 = conv
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(64, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(UpSampling2D()(conv9))
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv10 = conv
name = None
if prefix is not None:
name = prefix + '_conv'
conv = Conv2D(64, kernel, padding='same', kernel_initializer='he_normal', strides=strides, name=name)(conv10)
if prefix is not None:
name = prefix + '_norm'
conv = BatchNormalization(name=name, axis=bn_axis)(conv)
if prefix is not None:
name = prefix + '_act'
conv = Activation(act, name=name)(conv)
conv10 = conv
res = Conv2D(3, (1, 1), activation='softmax')(conv10)
model = Model(inp, res)
if weights == 'imagenet':
inception_resnet_v2 = InceptionResNetV2(weights=weights, include_top=False, input_shape=input_shape + (3,))
for i in range(2, len(inception_resnet_v2.layers) - 1):
model.layers[i].set_weights(inception_resnet_v2.layers[i].get_weights())
model.layers[i].trainable = False
return model
|
dsb2018_topcoders
|
positive
|
def _append_to_window(event):
<DeepExtract>
if self.initial_field_names is not None:
self.field_names = self.initial_field_names
else:
self.latest_names = self._extract_field_names(event)
self.field_names = set.union(self.field_names, self.latest_names)
</DeepExtract>
if self.static_sids is None:
sids = set(event.data.keys())
else:
sids = self.static_sids
self.latest_sids = sids
if self.rolling_panel is None:
<DeepExtract>
if self.downsample:
self.rolling_panel = RollingPanel(self.bars_in_day, self.field_names, sids)
self.daily_rolling_panel = RollingPanel(self.window_length, self.field_names, sids)
else:
self.rolling_panel = RollingPanel(self.window_length * self.bars_in_day, self.field_names, sids)
</DeepExtract>
self.rolling_panel.add_frame(event.dt, pd.DataFrame(event.data, index=self.field_names, columns=sids))
if trading.environment.is_trading_day(event.dt):
(_, mkt_close) = trading.environment.get_open_and_close(event.dt)
if self.bars == 'daily':
mkt_close = trading.environment.normalize_date(mkt_close)
if event.dt == mkt_close:
if self.downsample:
<DeepExtract>
cur_panel = self.rolling_panel.get_current()
sids = self.rolling_panel.minor_axis
day_frame = pd.DataFrame(columns=sids, index=cur_panel.items)
dt1 = trading.environment.normalize_date(mkt_close)
dt2 = trading.environment.next_trading_day(mkt_close)
by_close = functools.partial(get_date, mkt_close, dt1, dt2)
for item in self.rolling_panel.items:
frame = cur_panel[item]
func = get_sample_func(item)
dframe = frame.groupby(lambda d: by_close(d)).agg(func)
for stock in sids:
day_frame[stock][item] = dframe[stock].ix[dt1]
self.daily_rolling_panel.add_frame(dt1, day_frame)
</DeepExtract>
self.trading_days_total += 1
self.mkt_close = mkt_close
self.last_dt = event.dt
if self.trading_days_total >= self.window_length:
self.full = True
|
def _append_to_window(event):
if self.initial_field_names is not None:
self.field_names = self.initial_field_names
else:
self.latest_names = self._extract_field_names(event)
self.field_names = set.union(self.field_names, self.latest_names)
if self.static_sids is None:
sids = set(event.data.keys())
else:
sids = self.static_sids
self.latest_sids = sids
if self.rolling_panel is None:
if self.downsample:
self.rolling_panel = RollingPanel(self.bars_in_day, self.field_names, sids)
self.daily_rolling_panel = RollingPanel(self.window_length, self.field_names, sids)
else:
self.rolling_panel = RollingPanel(self.window_length * self.bars_in_day, self.field_names, sids)
self.rolling_panel.add_frame(event.dt, pd.DataFrame(event.data, index=self.field_names, columns=sids))
if trading.environment.is_trading_day(event.dt):
(_, mkt_close) = trading.environment.get_open_and_close(event.dt)
if self.bars == 'daily':
mkt_close = trading.environment.normalize_date(mkt_close)
if event.dt == mkt_close:
if self.downsample:
cur_panel = self.rolling_panel.get_current()
sids = self.rolling_panel.minor_axis
day_frame = pd.DataFrame(columns=sids, index=cur_panel.items)
dt1 = trading.environment.normalize_date(mkt_close)
dt2 = trading.environment.next_trading_day(mkt_close)
by_close = functools.partial(get_date, mkt_close, dt1, dt2)
for item in self.rolling_panel.items:
frame = cur_panel[item]
func = get_sample_func(item)
dframe = frame.groupby(lambda d: by_close(d)).agg(func)
for stock in sids:
day_frame[stock][item] = dframe[stock].ix[dt1]
self.daily_rolling_panel.add_frame(dt1, day_frame)
self.trading_days_total += 1
self.mkt_close = mkt_close
self.last_dt = event.dt
if self.trading_days_total >= self.window_length:
self.full = True
|
AlephNull
|
positive
|
def render_with(template=None, json=False, jsonp=False):
"""
Decorator to render the wrapped function with the given template (or dictionary
of mimetype keys to templates, where the template is a string name of a template
file or a callable that returns a Response). The function's return value must be
a dictionary and is passed to the template as parameters. Callable templates get
a single parameter with the function's return value. Usage::
@app.route('/myview')
@render_with('myview.html')
def myview():
return {'data': 'value'}
@app.route('/myview_with_json')
@render_with('myview.html', json=True)
def myview_no_json():
return {'data': 'value'}
@app.route('/otherview')
@render_with({
'text/html': 'otherview.html',
'text/xml': 'otherview.xml'})
def otherview():
return {'data': 'value'}
@app.route('/404view')
@render_with('myview.html')
def myview():
return {'error': '404 Not Found'}, 404
@app.route('/headerview')
@render_with('myview.html')
def myview():
return {'data': 'value'}, 200, {'X-Header': 'Header value'}
When a mimetype is specified and the template is not a callable, the response is
returned with the same mimetype. Callable templates must return Response objects
to ensure the correct mimetype is set.
If a dictionary of templates is provided and does not include a handler for ``*/*``,
render_with will attempt to use the handler for (in order) ``text/html``,
``text/plain`` and the various JSON types, falling back to rendering the value into
a unicode string.
If the method is called outside a request context, the wrapped method's original
return value is returned. This is meant to facilitate testing and should not be
used to call the method from within another view handler as the presence of a
request context will trigger template rendering.
Rendering may also be suspended by calling the view handler with ``_render=False``.
render_with provides JSON and JSONP handlers for the ``application/json``,
``text/json`` and ``text/x-json`` mimetypes if ``json`` or ``jsonp`` is True
(default is False).
:param template: Single template, or dictionary of MIME type to templates. If the
template is a callable, it is called with the output of the wrapped function
:param json: Helper to add a JSON handler (default is False)
:param jsonp: Helper to add a JSONP handler (if True, also provides JSON, default
is False)
"""
if jsonp:
templates = {'application/json': dict_jsonp, 'application/javascript': dict_jsonp}
elif json:
templates = {'application/json': dict_jsonify}
else:
templates = {}
if isinstance(template, str):
templates['text/html'] = template
elif isinstance(template, dict):
templates.update(template)
elif template is None and (json or jsonp):
pass
else:
raise ValueError('Expected string or dict for template')
default_mimetype = '*/*'
if '*/*' not in templates:
templates['*/*'] = str
default_mimetype = 'text/plain'
for mimetype in ('text/html', 'text/plain', 'application/json'):
if mimetype in templates:
templates['*/*'] = templates[mimetype]
default_mimetype = mimetype
break
template_mimetypes = list(templates.keys())
template_mimetypes.remove('*/*')
def inner(f):
@wraps(f)
def decorated_function(*args, **kwargs):
render = kwargs.pop('_render', True)
result = ensure_sync(f)(*args, **kwargs)
if isinstance(result, (Response, WerkzeugResponse, current_app.response_class)):
return result
if isinstance(result, tuple):
resultset = result
result = resultset[0]
if len(resultset) > 1:
status_code = resultset[1]
else:
status_code = None
if len(resultset) > 2:
headers = Headers(resultset[2])
else:
headers = Headers()
else:
status_code = None
headers = Headers()
if len(templates) > 1:
if 'Vary' in headers:
vary_values = [item.strip() for item in headers['Vary'].split(',')]
if 'Accept' not in vary_values:
vary_values.append('Accept')
headers['Vary'] = ', '.join(vary_values)
else:
headers['Vary'] = 'Accept'
use_mimetype = None
if render and request:
<DeepExtract>
for (use_mimetype, _quality) in request.accept_mimetypes:
for mimetype in template_mimetypes:
if use_mimetype.lower() == mimetype.lower():
use_mimetype = use_mimetype.lower()
use_mimetype = '*/*'
</DeepExtract>
if use_mimetype is not None:
if callable(templates[use_mimetype]):
rendered = templates[use_mimetype](result)
if isinstance(rendered, Response):
if status_code is not None:
rendered.status_code = status_code
if headers is not None:
rendered.headers.extend(headers)
else:
rendered = current_app.response_class(rendered, status=status_code, headers=headers, mimetype=default_mimetype if use_mimetype == '*/*' else use_mimetype)
else:
rendered = current_app.response_class(render_template(templates[use_mimetype], **result), status=status_code or 200, headers=headers, mimetype=default_mimetype if use_mimetype == '*/*' else use_mimetype)
return rendered
return result
return decorated_function
return inner
|
def render_with(template=None, json=False, jsonp=False):
"""
Decorator to render the wrapped function with the given template (or dictionary
of mimetype keys to templates, where the template is a string name of a template
file or a callable that returns a Response). The function's return value must be
a dictionary and is passed to the template as parameters. Callable templates get
a single parameter with the function's return value. Usage::
@app.route('/myview')
@render_with('myview.html')
def myview():
return {'data': 'value'}
@app.route('/myview_with_json')
@render_with('myview.html', json=True)
def myview_no_json():
return {'data': 'value'}
@app.route('/otherview')
@render_with({
'text/html': 'otherview.html',
'text/xml': 'otherview.xml'})
def otherview():
return {'data': 'value'}
@app.route('/404view')
@render_with('myview.html')
def myview():
return {'error': '404 Not Found'}, 404
@app.route('/headerview')
@render_with('myview.html')
def myview():
return {'data': 'value'}, 200, {'X-Header': 'Header value'}
When a mimetype is specified and the template is not a callable, the response is
returned with the same mimetype. Callable templates must return Response objects
to ensure the correct mimetype is set.
If a dictionary of templates is provided and does not include a handler for ``*/*``,
render_with will attempt to use the handler for (in order) ``text/html``,
``text/plain`` and the various JSON types, falling back to rendering the value into
a unicode string.
If the method is called outside a request context, the wrapped method's original
return value is returned. This is meant to facilitate testing and should not be
used to call the method from within another view handler as the presence of a
request context will trigger template rendering.
Rendering may also be suspended by calling the view handler with ``_render=False``.
render_with provides JSON and JSONP handlers for the ``application/json``,
``text/json`` and ``text/x-json`` mimetypes if ``json`` or ``jsonp`` is True
(default is False).
:param template: Single template, or dictionary of MIME type to templates. If the
template is a callable, it is called with the output of the wrapped function
:param json: Helper to add a JSON handler (default is False)
:param jsonp: Helper to add a JSONP handler (if True, also provides JSON, default
is False)
"""
if jsonp:
templates = {'application/json': dict_jsonp, 'application/javascript': dict_jsonp}
elif json:
templates = {'application/json': dict_jsonify}
else:
templates = {}
if isinstance(template, str):
templates['text/html'] = template
elif isinstance(template, dict):
templates.update(template)
elif template is None and (json or jsonp):
pass
else:
raise ValueError('Expected string or dict for template')
default_mimetype = '*/*'
if '*/*' not in templates:
templates['*/*'] = str
default_mimetype = 'text/plain'
for mimetype in ('text/html', 'text/plain', 'application/json'):
if mimetype in templates:
templates['*/*'] = templates[mimetype]
default_mimetype = mimetype
break
template_mimetypes = list(templates.keys())
template_mimetypes.remove('*/*')
def inner(f):
@wraps(f)
def decorated_function(*args, **kwargs):
render = kwargs.pop('_render', True)
result = ensure_sync(f)(*args, **kwargs)
if isinstance(result, (Response, WerkzeugResponse, current_app.response_class)):
return result
if isinstance(result, tuple):
resultset = result
result = resultset[0]
if len(resultset) > 1:
status_code = resultset[1]
else:
status_code = None
if len(resultset) > 2:
headers = Headers(resultset[2])
else:
headers = Headers()
else:
status_code = None
headers = Headers()
if len(templates) > 1:
if 'Vary' in headers:
vary_values = [item.strip() for item in headers['Vary'].split(',')]
if 'Accept' not in vary_values:
vary_values.append('Accept')
headers['Vary'] = ', '.join(vary_values)
else:
headers['Vary'] = 'Accept'
use_mimetype = None
if render and request:
                for (accept_mimetype, _quality) in request.accept_mimetypes:
                    for mimetype in template_mimetypes:
                        if accept_mimetype.lower() == mimetype.lower():
                            use_mimetype = mimetype.lower()
                            break
                    if use_mimetype is not None:
                        break
                if use_mimetype is None:
                    use_mimetype = '*/*'
if use_mimetype is not None:
if callable(templates[use_mimetype]):
rendered = templates[use_mimetype](result)
if isinstance(rendered, Response):
if status_code is not None:
rendered.status_code = status_code
if headers is not None:
rendered.headers.extend(headers)
else:
rendered = current_app.response_class(rendered, status=status_code, headers=headers, mimetype=default_mimetype if use_mimetype == '*/*' else use_mimetype)
else:
rendered = current_app.response_class(render_template(templates[use_mimetype], **result), status=status_code or 200, headers=headers, mimetype=default_mimetype if use_mimetype == '*/*' else use_mimetype)
return rendered
return result
return decorated_function
return inner
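A minimal usage sketch of the decorator above, assuming a Flask app and that render_with is importable as shown; the view name, template filename and helper report_json are illustrative only:
from flask import Flask, jsonify

app = Flask(__name__)

def report_json(result):
    # Callable templates receive the view's return value and must return a
    # Response so the mimetype is preserved.
    return jsonify(result)

@app.route('/report')
@render_with({'text/html': 'report.html', 'application/json': report_json})
def report():
    return {'title': 'Monthly report', 'total': 42}
A client sending Accept: application/json gets the JSON handler, a browser asking for text/html gets the rendered template, and any other Accept value falls back to the */* handler chosen above.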
|
coaster
|
positive
|
def test_without_normalization(self):
generator = SequenceGenerator([self.model], self.tgt_dict, normalize_scores=False)
hypos = generator.generate(self.src_tokens, self.src_lengths, beam_size=2)
(eos, w1, w2) = (self.eos, self.w1, self.w2)
<DeepExtract>
self.assertTensorEqual(hypos[0][0]['tokens'], torch.LongTensor([w1, eos]))
</DeepExtract>
<DeepExtract>
pos_scores = torch.FloatTensor([0.9, 1.0]).log()
self.assertAlmostEqual(hypos[0][0]['positional_scores'], pos_scores)
self.assertEqual(pos_scores.numel(), hypos[0][0]['tokens'].numel())
score = pos_scores.sum()
if False:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypos[0][0]['score']), 1e-06)
</DeepExtract>
<DeepExtract>
self.assertTensorEqual(hypos[0][1]['tokens'], torch.LongTensor([w2, w1, w2, eos]))
</DeepExtract>
<DeepExtract>
pos_scores = torch.FloatTensor([0.1, 0.9, 0.9, 1.0]).log()
self.assertAlmostEqual(hypos[0][1]['positional_scores'], pos_scores)
self.assertEqual(pos_scores.numel(), hypos[0][1]['tokens'].numel())
score = pos_scores.sum()
if False:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypos[0][1]['score']), 1e-06)
</DeepExtract>
<DeepExtract>
self.assertTensorEqual(hypos[1][0]['tokens'], torch.LongTensor([w1, w2, eos]))
</DeepExtract>
<DeepExtract>
pos_scores = torch.FloatTensor([0.7, 0.4, 0.6]).log()
self.assertAlmostEqual(hypos[1][0]['positional_scores'], pos_scores)
self.assertEqual(pos_scores.numel(), hypos[1][0]['tokens'].numel())
score = pos_scores.sum()
if False:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypos[1][0]['score']), 1e-06)
</DeepExtract>
<DeepExtract>
self.assertTensorEqual(hypos[1][1]['tokens'], torch.LongTensor([w1, w2, w1, eos]))
</DeepExtract>
<DeepExtract>
pos_scores = torch.FloatTensor([0.7, 0.4, 0.4, 1.0]).log()
self.assertAlmostEqual(hypos[1][1]['positional_scores'], pos_scores)
self.assertEqual(pos_scores.numel(), hypos[1][1]['tokens'].numel())
score = pos_scores.sum()
if False:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypos[1][1]['score']), 1e-06)
</DeepExtract>
|
def test_without_normalization(self):
generator = SequenceGenerator([self.model], self.tgt_dict, normalize_scores=False)
hypos = generator.generate(self.src_tokens, self.src_lengths, beam_size=2)
(eos, w1, w2) = (self.eos, self.w1, self.w2)
self.assertTensorEqual(hypos[0][0]['tokens'], torch.LongTensor([w1, eos]))
pos_scores = torch.FloatTensor([0.9, 1.0]).log()
self.assertAlmostEqual(hypos[0][0]['positional_scores'], pos_scores)
self.assertEqual(pos_scores.numel(), hypos[0][0]['tokens'].numel())
score = pos_scores.sum()
if False:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypos[0][0]['score']), 1e-06)
self.assertTensorEqual(hypos[0][1]['tokens'], torch.LongTensor([w2, w1, w2, eos]))
pos_scores = torch.FloatTensor([0.1, 0.9, 0.9, 1.0]).log()
self.assertAlmostEqual(hypos[0][1]['positional_scores'], pos_scores)
self.assertEqual(pos_scores.numel(), hypos[0][1]['tokens'].numel())
score = pos_scores.sum()
if False:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypos[0][1]['score']), 1e-06)
self.assertTensorEqual(hypos[1][0]['tokens'], torch.LongTensor([w1, w2, eos]))
pos_scores = torch.FloatTensor([0.7, 0.4, 0.6]).log()
self.assertAlmostEqual(hypos[1][0]['positional_scores'], pos_scores)
self.assertEqual(pos_scores.numel(), hypos[1][0]['tokens'].numel())
score = pos_scores.sum()
if False:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypos[1][0]['score']), 1e-06)
self.assertTensorEqual(hypos[1][1]['tokens'], torch.LongTensor([w1, w2, w1, eos]))
pos_scores = torch.FloatTensor([0.7, 0.4, 0.4, 1.0]).log()
self.assertAlmostEqual(hypos[1][1]['positional_scores'], pos_scores)
self.assertEqual(pos_scores.numel(), hypos[1][1]['tokens'].numel())
score = pos_scores.sum()
if False:
score /= pos_scores.numel() ** lenpen
self.assertLess(abs(score - hypos[1][1]['score']), 1e-06)
|
dlcl
|
positive
|
def Discriminator(nc_in, input_size=64):
inp = Input(shape=(input_size, input_size, nc_in))
<DeepExtract>
x = inp
x = Conv2D(64, kernel_size=4, strides=2, kernel_initializer=conv_init, use_bias=False, padding='same')(x)
x = LeakyReLU(alpha=0.2)(x)
x = x
</DeepExtract>
<DeepExtract>
x = x
x = Conv2D(128, kernel_size=4, strides=2, kernel_initializer=conv_init, use_bias=False, padding='same')(x)
x = LeakyReLU(alpha=0.2)(x)
x = x
</DeepExtract>
<DeepExtract>
x = x
x = Conv2D(256, kernel_size=4, strides=2, kernel_initializer=conv_init, use_bias=False, padding='same')(x)
x = LeakyReLU(alpha=0.2)(x)
x = x
</DeepExtract>
out = Conv2D(1, kernel_size=4, kernel_initializer=conv_init, use_bias=False, padding='same', activation='sigmoid')(x)
return Model(inputs=[inp], outputs=out)
|
def Discriminator(nc_in, input_size=64):
inp = Input(shape=(input_size, input_size, nc_in))
x = inp
x = Conv2D(64, kernel_size=4, strides=2, kernel_initializer=conv_init, use_bias=False, padding='same')(x)
x = LeakyReLU(alpha=0.2)(x)
x = x
x = x
x = Conv2D(128, kernel_size=4, strides=2, kernel_initializer=conv_init, use_bias=False, padding='same')(x)
x = LeakyReLU(alpha=0.2)(x)
x = x
x = x
x = Conv2D(256, kernel_size=4, strides=2, kernel_initializer=conv_init, use_bias=False, padding='same')(x)
x = LeakyReLU(alpha=0.2)(x)
x = x
out = Conv2D(1, kernel_size=4, kernel_initializer=conv_init, use_bias=False, padding='same', activation='sigmoid')(x)
return Model(inputs=[inp], outputs=out)
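A quick shape check for the discriminator above, assuming conv_init (e.g. a RandomNormal initializer) and the Keras imports used above are already defined in this module:
D = Discriminator(nc_in=3, input_size=64)
D.summary()  # three stride-2 convolutions reduce 64x64 to 8x8, ending in an 8x8x1 sigmoid map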
|
DeepFakeTutorial
|
positive
|
def _advance(self, i, value):
"""Do a linear search through all items with the same key"""
<DeepExtract>
if self._key is None:
key = value
else:
key = self._key(value)
</DeepExtract>
while i < len(self._blist):
<DeepExtract>
n = 0
while i < len(self._blist):
v = self._i2u(self._blist[i])
if v is None:
del self._blist[i]
n += 1
else:
(n, v) = (n, v)
(n, v) = (n, None)
</DeepExtract>
if v is None:
break
if v == value:
return i
elif key < self._i2key(self._blist[i]):
break
i += 1
return -1
|
def _advance(self, i, value):
"""Do a linear search through all items with the same key"""
if self._key is None:
key = value
else:
key = self._key(value)
while i < len(self._blist):
            # Skip or remove entries that no longer resolve to a value,
            # stopping at the first live value (v stays None if none remain).
            n = 0
            v = None
            while i < len(self._blist):
                v = self._i2u(self._blist[i])
                if v is None:
                    del self._blist[i]
                    n += 1
                else:
                    break
if v is None:
break
if v == value:
return i
elif key < self._i2key(self._blist[i]):
break
i += 1
return -1
|
blist
|
positive
|
def __init__(self, inplanes, planes, dilation=1, stride=1, downsample=None):
super(BasicBlock, self).__init__()
<DeepExtract>
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=int(dilation * (3 - 1) / 2), dilation=dilation, bias=False)
</DeepExtract>
self.relu = nn.ReLU(inplace=True)
<DeepExtract>
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=int(dilation * (3 - 1) / 2), dilation=dilation, bias=False)
</DeepExtract>
self.downsample = downsample
self.stride = stride
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
|
def __init__(self, inplanes, planes, dilation=1, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=int(dilation * (3 - 1) / 2), dilation=dilation, bias=False)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=int(dilation * (3 - 1) / 2), dilation=dilation, bias=False)
self.downsample = downsample
self.stride = stride
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
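A small instantiation sketch for the block above; only the constructor shown here is exercised, and the padding arithmetic (padding = dilation for a 3x3 kernel) keeps the spatial size unchanged when stride is 1:
import torch

block = BasicBlock(inplanes=64, planes=64, dilation=2)
x = torch.randn(1, 64, 56, 56)
print(block.conv1(x).shape)  # torch.Size([1, 64, 56, 56])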
|
Dain-App
|
positive
|
def read(self):
<DeepExtract>
node_data = NodeData()
for node_data_file in self.node_data_files:
try:
node_data.update(node_data_file)
except DuplicatedNonDictAttributeError as e:
print(f'Error parsing {node_data_file.fname}: Mixed dict and non-dict values found for attribute `{e.key}`. Augur requires attributes of the same name to be uniformly dicts or non-dicts.', file=sys.stderr)
sys.exit(2)
node_data = node_data.attrs
</DeepExtract>
<DeepExtract>
if not self.tree_file:
return
if set(node_data['nodes'].keys()) != self.node_names_from_tree_file:
print(f"Names of nodes (including internal nodes) of tree {self.tree_file} don't match node names in the node data files.", file=sys.stderr)
sys.exit(2)
</DeepExtract>
return node_data
|
def read(self):
node_data = NodeData()
for node_data_file in self.node_data_files:
try:
node_data.update(node_data_file)
except DuplicatedNonDictAttributeError as e:
print(f'Error parsing {node_data_file.fname}: Mixed dict and non-dict values found for attribute `{e.key}`. Augur requires attributes of the same name to be uniformly dicts or non-dicts.', file=sys.stderr)
sys.exit(2)
node_data = node_data.attrs
if not self.tree_file:
return
if set(node_data['nodes'].keys()) != self.node_names_from_tree_file:
print(f"Names of nodes (including internal nodes) of tree {self.tree_file} don't match node names in the node data files.", file=sys.stderr)
sys.exit(2)
return node_data
|
augur
|
positive
|
def simulate_aggregated_moments(params, x, y):
"""Calculate aggregated moments for example from Honore, DePaula, Jorgensen."""
<DeepExtract>
y_estimated = x.to_numpy() @ params['value'].to_numpy()
x_np = x.T.to_numpy()
residual = y.T.to_numpy() - stats.norm.cdf(y_estimated)
mom_value = []
length = len(x_np)
for i in range(length):
for j in range(i, length):
moment = residual * x_np[i] * x_np[j]
mom_value.append(moment)
mom_value = np.stack(mom_value, axis=1)[0]
mom_value = pd.DataFrame(data=mom_value)
mom_value = mom_value
</DeepExtract>
moments = mom_value.mean(axis=1)
return moments
|
def simulate_aggregated_moments(params, x, y):
"""Calculate aggregated moments for example from Honore, DePaula, Jorgensen."""
y_estimated = x.to_numpy() @ params['value'].to_numpy()
x_np = x.T.to_numpy()
residual = y.T.to_numpy() - stats.norm.cdf(y_estimated)
mom_value = []
length = len(x_np)
for i in range(length):
for j in range(i, length):
moment = residual * x_np[i] * x_np[j]
mom_value.append(moment)
mom_value = np.stack(mom_value, axis=1)[0]
mom_value = pd.DataFrame(data=mom_value)
mom_value = mom_value
moments = mom_value.mean(axis=1)
return moments
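A hedged call sketch, assuming the estimagic convention that params is a DataFrame with a 'value' column; the column names and data below are illustrative:
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
x = pd.DataFrame(rng.normal(size=(200, 2)), columns=['x1', 'x2'])
params = pd.DataFrame({'value': [0.5, -0.3]})
# Binary outcomes as a single-column DataFrame so y.T broadcasts against the residuals.
y = pd.DataFrame((x.to_numpy() @ params['value'].to_numpy() + rng.normal(size=200) > 0).astype(float))
moments = simulate_aggregated_moments(params, x, y)
print(moments)  # one aggregated moment per regressor pair (i <= j), here 3 values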
|
estimagic
|
positive
|
def forward(self, x, label=None, size=None):
if size is None:
size = x.size()[2:]
x = self.layer1_conv1(x)
x = self.layer1_relu1(x)
x = self.layer1_conv2(x)
x = self.layer1_relu2(x)
x = self.layer1_maxpool(x)
x = self.layer2_conv1(x)
x = self.layer2_relu1(x)
x = self.layer2_conv2(x)
x = self.layer2_relu2(x)
x = self.layer2_maxpool(x)
x = self.layer3_conv1(x)
x = self.layer3_relu1(x)
x = self.layer3_conv2(x)
x = self.layer3_relu2(x)
x = self.layer3_conv3(x)
x = self.layer3_relu3(x)
x = self.layer3_maxpool(x)
x = self.layer4_conv1(x)
x = self.layer4_relu1(x)
x = self.layer4_conv2(x)
x = self.layer4_relu2(x)
x = self.layer4_conv3(x)
x = self.layer4_relu3(x)
x = self.layer4_maxpool(x)
x = self.layer5_conv1(x)
x = self.layer5_relu1(x)
x = self.layer5_conv2(x)
x = self.layer5_relu2(x)
x = self.layer5_conv3(x)
x = self.layer5_relu3(x)
x = self.extra_conv1(x)
x = self.extra_relu1(x)
x = self.extra_conv2(x)
x = self.extra_relu2(x)
x = self.extra_conv3(x)
x = self.extra_relu3(x)
x = self.extra_conv4(x)
<DeepExtract>
x = F.avg_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=0)
x = x.view(-1, 20)
logit = x
</DeepExtract>
if label is None:
return logit
else:
<DeepExtract>
x.detach() = F.relu(x.detach())
x.detach() = F.interpolate(x.detach(), size=size, mode='bilinear', align_corners=True)
x.detach() /= F.adaptive_max_pool2d(x.detach(), 1) + 1e-05
x.detach() = x.detach() * label[:, :, None, None]
x.detach() = x.detach()
</DeepExtract>
return (logit, cam)
|
def forward(self, x, label=None, size=None):
if size is None:
size = x.size()[2:]
x = self.layer1_conv1(x)
x = self.layer1_relu1(x)
x = self.layer1_conv2(x)
x = self.layer1_relu2(x)
x = self.layer1_maxpool(x)
x = self.layer2_conv1(x)
x = self.layer2_relu1(x)
x = self.layer2_conv2(x)
x = self.layer2_relu2(x)
x = self.layer2_maxpool(x)
x = self.layer3_conv1(x)
x = self.layer3_relu1(x)
x = self.layer3_conv2(x)
x = self.layer3_relu2(x)
x = self.layer3_conv3(x)
x = self.layer3_relu3(x)
x = self.layer3_maxpool(x)
x = self.layer4_conv1(x)
x = self.layer4_relu1(x)
x = self.layer4_conv2(x)
x = self.layer4_relu2(x)
x = self.layer4_conv3(x)
x = self.layer4_relu3(x)
x = self.layer4_maxpool(x)
x = self.layer5_conv1(x)
x = self.layer5_relu1(x)
x = self.layer5_conv2(x)
x = self.layer5_relu2(x)
x = self.layer5_conv3(x)
x = self.layer5_relu3(x)
x = self.extra_conv1(x)
x = self.extra_relu1(x)
x = self.extra_conv2(x)
x = self.extra_relu2(x)
x = self.extra_conv3(x)
x = self.extra_relu3(x)
x = self.extra_conv4(x)
x = F.avg_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=0)
x = x.view(-1, 20)
logit = x
if label is None:
return logit
else:
            cam = F.relu(x.detach())
            cam = F.interpolate(cam, size=size, mode='bilinear', align_corners=True)
            cam /= F.adaptive_max_pool2d(cam, 1) + 1e-05
            cam = cam * label[:, :, None, None]
return (logit, cam)
|
DRS
|
positive
|
def compile_version(version, target_dir, verbose=False):
<DeepExtract>
logfile = os.path.join(__get_dir(), 'ccm-repository.log')
</DeepExtract>
<DeepExtract>
logger = logging.getLogger('repository')
logger.addHandler(handlers.RotatingFileHandler(logfile, maxBytes=1024 * 1024 * 5, backupCount=5))
logger = logger
</DeepExtract>
common.info('Compiling Cassandra {} ...'.format(version))
logger.info('--- Cassandra Build -------------------\n')
env = update_java_version(install_dir=target_dir, for_build=True, info_message='Cassandra {} build'.format(version))
default_build_properties = os.path.join(common.get_default_path(), 'build.properties.default')
if os.path.exists(default_build_properties):
target_build_properties = os.path.join(target_dir, 'build.properties')
logger.info('Copying %s to %s\n' % (default_build_properties, target_build_properties))
shutil.copyfile(default_build_properties, target_build_properties)
try:
attempt = 0
ret_val = 1
gradlew = os.path.join(target_dir, platform_binary('gradlew'))
if os.path.exists(gradlew):
cmd = [gradlew, 'jar']
else:
cmd = [platform_binary('ant'), 'jar']
if get_jdk_version_int() >= 11:
cmd.append('-Duse.jdk11=true')
while attempt < 3 and ret_val != 0:
if attempt > 0:
logger.info('\n\n`{}` failed. Retry #{}...\n\n'.format(' '.join(cmd), attempt))
process = subprocess.Popen(cmd, cwd=target_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
<DeepExtract>
(stdoutdata, stderrdata) = process.communicate()
rc = process.returncode
logger.info(stdoutdata.decode())
logger.info(stderrdata.decode())
(ret_val, stdout, stderr) = (rc, stdoutdata, stderrdata)
</DeepExtract>
attempt += 1
if ret_val != 0:
raise CCMError('Error compiling Cassandra. See {logfile} or run "ccm showlastlog" for details, stdout=\'{stdout}\' stderr=\'{stderr}\''.format(logfile=logfile, stdout=stdout.decode(), stderr=stderr.decode()))
except OSError as e:
raise CCMError('Error compiling Cassandra. Is ant installed? See %s for details' % logfile)
stress_dir = os.path.join(target_dir, 'tools', 'stress') if version >= '0.8.0' else os.path.join(target_dir, 'contrib', 'stress')
build_xml = os.path.join(stress_dir, 'build.xml')
if os.path.exists(build_xml):
logger.info('\n\n--- cassandra/stress build ------------\n')
try:
stress_bin_dir = os.path.join(stress_dir, 'bin')
for f in os.listdir(stress_bin_dir):
full_path = os.path.join(stress_bin_dir, f)
os.chmod(full_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
process = subprocess.Popen([platform_binary('ant'), 'build'], cwd=stress_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
<DeepExtract>
(stdoutdata, stderrdata) = process.communicate()
rc = process.returncode
logger.info(stdoutdata.decode())
logger.info(stderrdata.decode())
(ret_val, _, _) = (rc, stdoutdata, stderrdata)
</DeepExtract>
if ret_val != 0:
process = subprocess.Popen([platform_binary('ant'), 'stress-build'], cwd=target_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
<DeepExtract>
(stdoutdata, stderrdata) = process.communicate()
rc = process.returncode
logger.info(stdoutdata.decode())
logger.info(stderrdata.decode())
(ret_val, _, _) = (rc, stdoutdata, stderrdata)
</DeepExtract>
if ret_val != 0:
raise CCMError('Error compiling Cassandra stress tool. See %s for details (you will still be able to use ccm but not the stress related commands)' % logfile)
except IOError as e:
raise CCMError('Error compiling Cassandra stress tool: %s (you will still be able to use ccm but not the stress related commands)' % str(e))
|
def compile_version(version, target_dir, verbose=False):
logfile = os.path.join(__get_dir(), 'ccm-repository.log')
logger = logging.getLogger('repository')
logger.addHandler(handlers.RotatingFileHandler(logfile, maxBytes=1024 * 1024 * 5, backupCount=5))
logger = logger
common.info('Compiling Cassandra {} ...'.format(version))
logger.info('--- Cassandra Build -------------------\n')
env = update_java_version(install_dir=target_dir, for_build=True, info_message='Cassandra {} build'.format(version))
default_build_properties = os.path.join(common.get_default_path(), 'build.properties.default')
if os.path.exists(default_build_properties):
target_build_properties = os.path.join(target_dir, 'build.properties')
logger.info('Copying %s to %s\n' % (default_build_properties, target_build_properties))
shutil.copyfile(default_build_properties, target_build_properties)
try:
attempt = 0
ret_val = 1
gradlew = os.path.join(target_dir, platform_binary('gradlew'))
if os.path.exists(gradlew):
cmd = [gradlew, 'jar']
else:
cmd = [platform_binary('ant'), 'jar']
if get_jdk_version_int() >= 11:
cmd.append('-Duse.jdk11=true')
while attempt < 3 and ret_val != 0:
if attempt > 0:
logger.info('\n\n`{}` failed. Retry #{}...\n\n'.format(' '.join(cmd), attempt))
process = subprocess.Popen(cmd, cwd=target_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutdata, stderrdata) = process.communicate()
rc = process.returncode
logger.info(stdoutdata.decode())
logger.info(stderrdata.decode())
(ret_val, stdout, stderr) = (rc, stdoutdata, stderrdata)
attempt += 1
if ret_val != 0:
raise CCMError('Error compiling Cassandra. See {logfile} or run "ccm showlastlog" for details, stdout=\'{stdout}\' stderr=\'{stderr}\''.format(logfile=logfile, stdout=stdout.decode(), stderr=stderr.decode()))
except OSError as e:
raise CCMError('Error compiling Cassandra. Is ant installed? See %s for details' % logfile)
stress_dir = os.path.join(target_dir, 'tools', 'stress') if version >= '0.8.0' else os.path.join(target_dir, 'contrib', 'stress')
build_xml = os.path.join(stress_dir, 'build.xml')
if os.path.exists(build_xml):
logger.info('\n\n--- cassandra/stress build ------------\n')
try:
stress_bin_dir = os.path.join(stress_dir, 'bin')
for f in os.listdir(stress_bin_dir):
full_path = os.path.join(stress_bin_dir, f)
os.chmod(full_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
process = subprocess.Popen([platform_binary('ant'), 'build'], cwd=stress_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutdata, stderrdata) = process.communicate()
rc = process.returncode
logger.info(stdoutdata.decode())
logger.info(stderrdata.decode())
(ret_val, _, _) = (rc, stdoutdata, stderrdata)
if ret_val != 0:
process = subprocess.Popen([platform_binary('ant'), 'stress-build'], cwd=target_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutdata, stderrdata) = process.communicate()
rc = process.returncode
logger.info(stdoutdata.decode())
logger.info(stderrdata.decode())
(ret_val, _, _) = (rc, stdoutdata, stderrdata)
if ret_val != 0:
raise CCMError('Error compiling Cassandra stress tool. See %s for details (you will still be able to use ccm but not the stress related commands)' % logfile)
except IOError as e:
raise CCMError('Error compiling Cassandra stress tool: %s (you will still be able to use ccm but not the stress related commands)' % str(e))
|
ccm
|
positive
|
def _scramble_323(password, message):
<DeepExtract>
nr = 1345345333
add = 7
nr2 = 305419889
for c in [byte2int(x) for x in password if x not in (' ', '\t', 32, 9)]:
nr ^= ((nr & 63) + add) * c + (nr << 8) & 4294967295
nr2 = nr2 + (nr2 << 8 ^ nr) & 4294967295
add = add + c & 4294967295
r1 = nr & (1 << 31) - 1
r2 = nr2 & (1 << 31) - 1
hash_pass = struct.pack('>LL', r1, r2)
</DeepExtract>
<DeepExtract>
nr = 1345345333
add = 7
nr2 = 305419889
for c in [byte2int(x) for x in message[:SCRAMBLE_LENGTH_323] if x not in (' ', '\t', 32, 9)]:
nr ^= ((nr & 63) + add) * c + (nr << 8) & 4294967295
nr2 = nr2 + (nr2 << 8 ^ nr) & 4294967295
add = add + c & 4294967295
r1 = nr & (1 << 31) - 1
r2 = nr2 & (1 << 31) - 1
hash_message = struct.pack('>LL', r1, r2)
</DeepExtract>
hash_pass_n = struct.unpack('>LL', hash_pass)
hash_message_n = struct.unpack('>LL', hash_message)
rand_st = RandStruct_323(hash_pass_n[0] ^ hash_message_n[0], hash_pass_n[1] ^ hash_message_n[1])
outbuf = io.BytesIO()
for _ in range_type(min(SCRAMBLE_LENGTH_323, len(message))):
outbuf.write(int2byte(int(rand_st.my_rnd() * 31) + 64))
extra = int2byte(int(rand_st.my_rnd() * 31))
out = outbuf.getvalue()
outbuf = io.BytesIO()
for c in out:
outbuf.write(int2byte(byte2int(c) ^ byte2int(extra)))
return outbuf.getvalue()
|
def _scramble_323(password, message):
nr = 1345345333
add = 7
nr2 = 305419889
for c in [byte2int(x) for x in password if x not in (' ', '\t', 32, 9)]:
nr ^= ((nr & 63) + add) * c + (nr << 8) & 4294967295
nr2 = nr2 + (nr2 << 8 ^ nr) & 4294967295
add = add + c & 4294967295
r1 = nr & (1 << 31) - 1
r2 = nr2 & (1 << 31) - 1
hash_pass = struct.pack('>LL', r1, r2)
nr = 1345345333
add = 7
nr2 = 305419889
for c in [byte2int(x) for x in message[:SCRAMBLE_LENGTH_323] if x not in (' ', '\t', 32, 9)]:
nr ^= ((nr & 63) + add) * c + (nr << 8) & 4294967295
nr2 = nr2 + (nr2 << 8 ^ nr) & 4294967295
add = add + c & 4294967295
r1 = nr & (1 << 31) - 1
r2 = nr2 & (1 << 31) - 1
hash_message = struct.pack('>LL', r1, r2)
hash_pass_n = struct.unpack('>LL', hash_pass)
hash_message_n = struct.unpack('>LL', hash_message)
rand_st = RandStruct_323(hash_pass_n[0] ^ hash_message_n[0], hash_pass_n[1] ^ hash_message_n[1])
outbuf = io.BytesIO()
for _ in range_type(min(SCRAMBLE_LENGTH_323, len(message))):
outbuf.write(int2byte(int(rand_st.my_rnd() * 31) + 64))
extra = int2byte(int(rand_st.my_rnd() * 31))
out = outbuf.getvalue()
outbuf = io.BytesIO()
for c in out:
outbuf.write(int2byte(byte2int(c) ^ byte2int(extra)))
return outbuf.getvalue()
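An illustrative call, assuming the module-level helpers referenced above (byte2int, int2byte, range_type, RandStruct_323, SCRAMBLE_LENGTH_323) are defined as in the original old-password module; the salt value is made up:
salt = b'abcdefgh'                       # scramble bytes sent by the server
token = _scramble_323(b'secret', salt)   # bytes echoed back in the authentication packet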
|
autoops
|
positive
|
@property
def end_position(self):
"""Last position in the binlog"""
last_position = self.start_position
with open(self._binlog, 'rb') as binlog_descriptor:
<DeepExtract>
return binlog_descriptor.read(4)
</DeepExtract>
while True:
<DeepExtract>
position = binlog_descriptor.tell()
try:
event = BinlogV4Event(timestamp=self.__read_int(binlog_descriptor, 4), type_code=self.__read_int(binlog_descriptor, 1), server_id=self.__read_int(binlog_descriptor, 4), event_length=self.__read_int(binlog_descriptor, 4), curr_position=position, next_position=self.__read_int(binlog_descriptor, 4), flags=self.__read_int(binlog_descriptor, 2))
binlog_descriptor.read(event.event_length - 19)
event = event
except struct.error:
event = None
</DeepExtract>
if event:
last_position = event.curr_position
else:
break
return last_position
|
@property
def end_position(self):
"""Last position in the binlog"""
last_position = self.start_position
with open(self._binlog, 'rb') as binlog_descriptor:
            binlog_descriptor.read(4)  # skip the 4-byte binlog magic header
while True:
position = binlog_descriptor.tell()
try:
event = BinlogV4Event(timestamp=self.__read_int(binlog_descriptor, 4), type_code=self.__read_int(binlog_descriptor, 1), server_id=self.__read_int(binlog_descriptor, 4), event_length=self.__read_int(binlog_descriptor, 4), curr_position=position, next_position=self.__read_int(binlog_descriptor, 4), flags=self.__read_int(binlog_descriptor, 2))
binlog_descriptor.read(event.event_length - 19)
event = event
except struct.error:
event = None
if event:
last_position = event.curr_position
else:
break
return last_position
|
backup
|
positive
|