| before (string, lengths 0–955k) | after (string, lengths 0–877k) | repo (string, lengths 1–74) | type (stringclasses, 1 value) |
|---|---|---|---|
def __init__(self, data, data_type, shared=None, valid_idxs=None):
self.data = data
self.data_type = data_type
self.shared = shared
<DeepExtract>
if isinstance(self.data, dict):
total_num_examples = len(next(iter(self.data.values())))
elif isinstance(self.data, Data):
total_num_examples = self.data.get_size()
raise Exception()
</DeepExtract>
self.valid_idxs = range(total_num_examples) if valid_idxs is None else valid_idxs
self.num_examples = len(self.valid_idxs)
|
def __init__(self, data, data_type, shared=None, valid_idxs=None):
self.data = data
self.data_type = data_type
self.shared = shared
if isinstance(self.data, dict):
total_num_examples = len(next(iter(self.data.values())))
elif isinstance(self.data, Data):
total_num_examples = self.data.get_size()
raise Exception()
self.valid_idxs = range(total_num_examples) if valid_idxs is None else valid_idxs
self.num_examples = len(self.valid_idxs)
|
bi-att-flow
|
positive
|
def __init__(self, n=35):
<DeepExtract>
if n == 35:
cmap = np.array([(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (111, 74, 0), (81, 0, 81), (128, 64, 128), (244, 35, 232), (250, 170, 160), (230, 150, 140), (70, 70, 70), (102, 102, 156), (190, 153, 153), (180, 165, 180), (150, 100, 100), (150, 120, 90), (153, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0), (107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 0, 90), (0, 0, 110), (0, 80, 100), (0, 0, 230), (119, 11, 32), (0, 0, 142)], dtype=np.uint8)
else:
cmap = np.zeros((n, 3), dtype=np.uint8)
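# build each color from the bits of the label id: one bit per channel per iteration, packed from the MSB down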
for i in range(n):
(r, g, b) = (0, 0, 0)
id = i
for j in range(7):
str_id = uint82bin(id)
r = r ^ np.uint8(str_id[-1]) << 7 - j
g = g ^ np.uint8(str_id[-2]) << 7 - j
b = b ^ np.uint8(str_id[-3]) << 7 - j
id = id >> 3
cmap[i, 0] = r
cmap[i, 1] = g
cmap[i, 2] = b
self.cmap = cmap
</DeepExtract>
self.cmap = torch.from_numpy(self.cmap[:n])
|
def __init__(self, n=35):
if n == 35:
cmap = np.array([(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (111, 74, 0), (81, 0, 81), (128, 64, 128), (244, 35, 232), (250, 170, 160), (230, 150, 140), (70, 70, 70), (102, 102, 156), (190, 153, 153), (180, 165, 180), (150, 100, 100), (150, 120, 90), (153, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0), (107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 0, 90), (0, 0, 110), (0, 80, 100), (0, 0, 230), (119, 11, 32), (0, 0, 142)], dtype=np.uint8)
else:
cmap = np.zeros((n, 3), dtype=np.uint8)
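# build each color from the bits of the label id: one bit per channel per iteration, packed from the MSB down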
for i in range(n):
(r, g, b) = (0, 0, 0)
id = i
for j in range(7):
str_id = uint82bin(id)
r = r ^ np.uint8(str_id[-1]) << 7 - j
g = g ^ np.uint8(str_id[-2]) << 7 - j
b = b ^ np.uint8(str_id[-3]) << 7 - j
id = id >> 3
cmap[i, 0] = r
cmap[i, 1] = g
cmap[i, 2] = b
self.cmap = cmap
self.cmap = torch.from_numpy(self.cmap[:n])
|
DeepFashion_Try_On
|
positive
|
@pytest.mark.usefixtures('products')
def test_get_property(get_product_flat):
<DeepExtract>
def helper(queries, *args, **kwargs):
res = 2(queries, *args, **kwargs)
if res['body'] == []:
assert res['cols'] == []
assert res['rows'] == []
data = []
else:
assert res['cols'] == [[]]
data = [r + b for (r, b) in zip(res['rows'], res['body'][0])]
data = helper
</DeepExtract>
<DeepExtract>
assert sorted(data, key=str) == sorted([[None], ['bad'], ['tom']], key=str)
</DeepExtract>
|
@pytest.mark.usefixtures('products')
def test_get_property(get_product_flat):
def helper(queries, *args, **kwargs):
res = 2(queries, *args, **kwargs)
if res['body'] == []:
assert res['cols'] == []
assert res['rows'] == []
data = []
else:
assert res['cols'] == [[]]
data = [r + b for (r, b) in zip(res['rows'], res['body'][0])]
data = helper
assert sorted(data, key=str) == sorted([[None], ['bad'], ['tom']], key=str)
|
django-data-browser
|
positive
|
def __sub__(self, other, context=None):
<DeepExtract>
if not other:
other = self.__class__('0')
if self._currency_code != getattr(other, '_currency_code', None):
raise ValueError('Can not add/substract money in different currencies.')
other = other
</DeepExtract>
amount = Decimal.__add__(self, other.copy_negate())
return self.__class__(amount)
|
def __sub__(self, other, context=None):
if not other:
other = self.__class__('0')
if self._currency_code != getattr(other, '_currency_code', None):
raise ValueError('Can not add/substract money in different currencies.')
other = other
amount = Decimal.__add__(self, other.copy_negate())
return self.__class__(amount)
|
django-shop
|
positive
|
@patch('edx_proctoring.api.get_backend_provider')
def test_get_studentview_practice_from_wrong_browser(self, mocked_get_backend):
"""
Test for get_student_view practice proctored exam as viewed
from an insecure browser.
"""
self._create_started_practice_exam_attempt()
mocked_get_backend.return_value.should_block_access_to_exam_material.return_value = True
mocked_get_backend.return_value.supports_onboarding = False
<DeepExtract>
exam_context_overrides = {'is_proctored': True, 'is_practice_exam': True}
if context_overrides:
exam_context_overrides.update(context_overrides)
rendered_response = self._render_exam(self.practice_exam_id, context_overrides=exam_context_overrides)
</DeepExtract>
self.assertIn(self.wrong_browser_msg, rendered_response)
|
@patch('edx_proctoring.api.get_backend_provider')
def test_get_studentview_practice_from_wrong_browser(self, mocked_get_backend):
"""
Test for get_student_view practice proctored exam as viewed
from an insecure browser.
"""
self._create_started_practice_exam_attempt()
mocked_get_backend.return_value.should_block_access_to_exam_material.return_value = True
mocked_get_backend.return_value.supports_onboarding = False
exam_context_overrides = {'is_proctored': True, 'is_practice_exam': True}
if context_overrides:
exam_context_overrides.update(context_overrides)
rendered_response = self._render_exam(self.practice_exam_id, context_overrides=exam_context_overrides)
self.assertIn(self.wrong_browser_msg, rendered_response)
|
edx-proctoring
|
positive
|
def _grab(self):
"""
Await the :attr:`~MLX90640._grab_event` and then average over the frames stored in
:attr:`~MLX90640._frames`
Returns:
(:class:`~numpy.ndarray`) Averaged and interpolated frame
"""
ret = self._grab_event.wait(1)
if not ret:
return None
frame = np.mean(self._frames, axis=2)
self._grab_event.clear()
if self.interpolate is not None:
<DeepExtract>
frame = griddata(self._points, frame.flatten(), (self._grid_x, self._grid_y), method='cubic')
</DeepExtract>
return (self._timestamp(), frame)
|
def _grab(self):
"""
Await the :attr:`~MLX90640._grab_event` and then average over the frames stored in
:attr:`~MLX90640._frames`
Returns:
(:class:`~numpy.ndarray`) Averaged and interpolated frame
"""
ret = self._grab_event.wait(1)
if not ret:
return None
frame = np.mean(self._frames, axis=2)
self._grab_event.clear()
if self.interpolate is not None:
frame = griddata(self._points, frame.flatten(), (self._grid_x, self._grid_y), method='cubic')
return (self._timestamp(), frame)
|
autopilot
|
positive
|
def converse(self, batch, mode):
(context_tokens, context_entities, context_words, response) = batch
entity_graph_representations = self.entity_encoder(None, self.entity_edge_idx, self.entity_edge_type)
word_graph_representations = self.word_encoder(self.word_kg_embedding.weight, self.word_edges)
entity_padding_mask = context_entities.eq(self.pad_entity_idx)
word_padding_mask = context_words.eq(self.pad_word_idx)
entity_representations = entity_graph_representations[context_entities]
word_representations = word_graph_representations[context_words]
entity_attn_rep = self.entity_self_attn(entity_representations, entity_padding_mask)
word_attn_rep = self.word_self_attn(word_representations, word_padding_mask)
tokens_encoding = self.conv_encoder(context_tokens)
conv_entity_emb = self.conv_entity_attn_norm(entity_attn_rep)
conv_word_emb = self.conv_word_attn_norm(word_attn_rep)
conv_entity_reps = self.conv_entity_norm(entity_representations)
conv_word_reps = self.conv_word_norm(word_representations)
if mode != 'test':
<DeepExtract>
(batch_size, seq_len) = response.shape
start = self._starts(batch_size)
inputs = torch.cat((start, response[:, :-1]), dim=-1).long()
(dialog_latent, _) = self.conv_decoder(inputs, tokens_encoding, conv_word_reps, word_padding_mask, conv_entity_reps, entity_padding_mask)
entity_latent = conv_entity_emb.unsqueeze(1).expand(-1, seq_len, -1)
word_latent = conv_word_emb.unsqueeze(1).expand(-1, seq_len, -1)
copy_latent = self.copy_norm(torch.cat((entity_latent, word_latent, dialog_latent), dim=-1))
copy_logits = self.copy_output(copy_latent) * self.copy_mask.unsqueeze(0).unsqueeze(0)
gen_logits = F.linear(dialog_latent, self.token_embedding.weight)
sum_logits = copy_logits + gen_logits
preds = sum_logits.argmax(dim=-1)
(logits, preds) = (sum_logits, preds)
</DeepExtract>
logits = logits.view(-1, logits.shape[-1])
response = response.view(-1)
loss = self.conv_loss(logits, response)
return (loss, preds)
else:
<DeepExtract>
batch_size = tokens_encoding[0].shape[0]
inputs = self._starts(batch_size).long()
incr_state = None
logits = []
for _ in range(self.response_truncate):
(dialog_latent, incr_state) = self.conv_decoder(inputs, tokens_encoding, conv_word_reps, word_padding_mask, conv_entity_reps, entity_padding_mask, incr_state)
dialog_latent = dialog_latent[:, -1:, :]
db_latent = conv_entity_emb.unsqueeze(1)
concept_latent = conv_word_emb.unsqueeze(1)
copy_latent = self.copy_norm(torch.cat((db_latent, concept_latent, dialog_latent), dim=-1))
copy_logits = self.copy_output(copy_latent) * self.copy_mask.unsqueeze(0).unsqueeze(0)
gen_logits = F.linear(dialog_latent, self.token_embedding.weight)
sum_logits = copy_logits + gen_logits
preds = sum_logits.argmax(dim=-1).long()
logits.append(sum_logits)
inputs = torch.cat((inputs, preds), dim=1)
finished = ((inputs == self.end_token_idx).sum(dim=-1) > 0).sum().item() == batch_size
if finished:
break
logits = torch.cat(logits, dim=1)
(logits, preds) = (logits, inputs)
</DeepExtract>
return preds
|
def converse(self, batch, mode):
(context_tokens, context_entities, context_words, response) = batch
entity_graph_representations = self.entity_encoder(None, self.entity_edge_idx, self.entity_edge_type)
word_graph_representations = self.word_encoder(self.word_kg_embedding.weight, self.word_edges)
entity_padding_mask = context_entities.eq(self.pad_entity_idx)
word_padding_mask = context_words.eq(self.pad_word_idx)
entity_representations = entity_graph_representations[context_entities]
word_representations = word_graph_representations[context_words]
entity_attn_rep = self.entity_self_attn(entity_representations, entity_padding_mask)
word_attn_rep = self.word_self_attn(word_representations, word_padding_mask)
tokens_encoding = self.conv_encoder(context_tokens)
conv_entity_emb = self.conv_entity_attn_norm(entity_attn_rep)
conv_word_emb = self.conv_word_attn_norm(word_attn_rep)
conv_entity_reps = self.conv_entity_norm(entity_representations)
conv_word_reps = self.conv_word_norm(word_representations)
if mode != 'test':
(batch_size, seq_len) = response.shape
start = self._starts(batch_size)
inputs = torch.cat((start, response[:, :-1]), dim=-1).long()
(dialog_latent, _) = self.conv_decoder(inputs, tokens_encoding, conv_word_reps, word_padding_mask, conv_entity_reps, entity_padding_mask)
entity_latent = conv_entity_emb.unsqueeze(1).expand(-1, seq_len, -1)
word_latent = conv_word_emb.unsqueeze(1).expand(-1, seq_len, -1)
copy_latent = self.copy_norm(torch.cat((entity_latent, word_latent, dialog_latent), dim=-1))
copy_logits = self.copy_output(copy_latent) * self.copy_mask.unsqueeze(0).unsqueeze(0)
gen_logits = F.linear(dialog_latent, self.token_embedding.weight)
sum_logits = copy_logits + gen_logits
preds = sum_logits.argmax(dim=-1)
(logits, preds) = (sum_logits, preds)
logits = logits.view(-1, logits.shape[-1])
response = response.view(-1)
loss = self.conv_loss(logits, response)
return (loss, preds)
else:
batch_size = tokens_encoding[0].shape[0]
inputs = self._starts(batch_size).long()
incr_state = None
logits = []
for _ in range(self.response_truncate):
(dialog_latent, incr_state) = self.conv_decoder(inputs, tokens_encoding, conv_word_reps, word_padding_mask, conv_entity_reps, entity_padding_mask, incr_state)
dialog_latent = dialog_latent[:, -1:, :]
db_latent = conv_entity_emb.unsqueeze(1)
concept_latent = conv_word_emb.unsqueeze(1)
copy_latent = self.copy_norm(torch.cat((db_latent, concept_latent, dialog_latent), dim=-1))
copy_logits = self.copy_output(copy_latent) * self.copy_mask.unsqueeze(0).unsqueeze(0)
gen_logits = F.linear(dialog_latent, self.token_embedding.weight)
sum_logits = copy_logits + gen_logits
preds = sum_logits.argmax(dim=-1).long()
logits.append(sum_logits)
inputs = torch.cat((inputs, preds), dim=1)
finished = ((inputs == self.end_token_idx).sum(dim=-1) > 0).sum().item() == batch_size
if finished:
break
logits = torch.cat(logits, dim=1)
(logits, preds) = (logits, inputs)
return preds
|
CRSLab
|
positive
|
def retrieve_margin_values(self):
"""
Retrieves margin values and sets them to instance variables.
"""
if self.isolated:
assets = self.get_isolated_margin_account()['assets']
coin = [asset for asset in assets if asset['baseAsset']['asset'] == self.coin_name][0]['baseAsset']
usdt = [asset for asset in assets if asset['baseAsset']['asset'] == self.coin_name and asset['quoteAsset']['asset'] == 'USDT'][0]['quoteAsset']
else:
assets = self.binance_client.get_margin_account()['userAssets']
coin = [asset for asset in assets if asset['asset'] == self.coin_name][0]
usdt = [asset for asset in assets if asset['asset'] == 'USDT'][0]
<DeepExtract>
factor = 10.0 ** self.purchase_precision
self.balance = math.floor(float(float(usdt['free'])) * factor) / factor
</DeepExtract>
<DeepExtract>
factor = 10.0 ** self.purchase_precision
self.coin = math.floor(float(float(coin['free'])) * factor) / factor
</DeepExtract>
<DeepExtract>
factor = 10.0 ** self.purchase_precision
self.coin_owed = math.floor(float(float(coin['borrowed'])) * factor) / factor
</DeepExtract>
|
def retrieve_margin_values(self):
"""
Retrieves margin values and sets them to instance variables.
"""
if self.isolated:
assets = self.get_isolated_margin_account()['assets']
coin = [asset for asset in assets if asset['baseAsset']['asset'] == self.coin_name][0]['baseAsset']
usdt = [asset for asset in assets if asset['baseAsset']['asset'] == self.coin_name and asset['quoteAsset']['asset'] == 'USDT'][0]['quoteAsset']
else:
assets = self.binance_client.get_margin_account()['userAssets']
coin = [asset for asset in assets if asset['asset'] == self.coin_name][0]
usdt = [asset for asset in assets if asset['asset'] == 'USDT'][0]
factor = 10.0 ** self.purchase_precision
self.balance = math.floor(float(float(usdt['free'])) * factor) / factor
factor = 10.0 ** self.purchase_precision
self.coin = math.floor(float(float(coin['free'])) * factor) / factor
factor = 10.0 ** self.purchase_precision
self.coin_owed = math.floor(float(float(coin['borrowed'])) * factor) / factor
|
algobot
|
positive
|
def learn_loop(self, max_total_steps, batch_size=32):
throughput = 0.0
while self.pull_getattr('env.T') < max_total_steps:
t_start = time.time()
<DeepExtract>
assert self.param_store is not None, 'cannot call pull_state on param_store itself'
if isinstance(self.param_store, self.__ray.actor.ActorHandle):
self.set_state(self.__ray.get(self.param_store.get_state.remote()))
else:
self.set_state(self.param_store.get_state())
</DeepExtract>
<DeepExtract>
pass
</DeepExtract>
metrics['throughput/learn_loop'] = throughput
<DeepExtract>
assert self.param_store is not None, 'cannot call push_state on param_store itself'
if isinstance(self.param_store, self.__ray.actor.ActorHandle):
self.__ray.get(self.param_store.set_state.remote(self.get_state()))
else:
self.param_store.set_state(self.get_state())
</DeepExtract>
<DeepExtract>
if self.param_store is None:
self.env.record_metrics(metrics)
elif isinstance(self.param_store, self.__ray.actor.ActorHandle):
self.__ray.get(self.param_store.push_metrics.remote(metrics))
else:
self.param_store.push_metrics(metrics)
</DeepExtract>
throughput = batch_size / (time.time() - t_start)
|
def learn_loop(self, max_total_steps, batch_size=32):
throughput = 0.0
while self.pull_getattr('env.T') < max_total_steps:
t_start = time.time()
assert self.param_store is not None, 'cannot call pull_state on param_store itself'
if isinstance(self.param_store, self.__ray.actor.ActorHandle):
self.set_state(self.__ray.get(self.param_store.get_state.remote()))
else:
self.set_state(self.param_store.get_state())
pass
metrics['throughput/learn_loop'] = throughput
assert self.param_store is not None, 'cannot call push_state on param_store itself'
if isinstance(self.param_store, self.__ray.actor.ActorHandle):
self.__ray.get(self.param_store.set_state.remote(self.get_state()))
else:
self.param_store.set_state(self.get_state())
if self.param_store is None:
self.env.record_metrics(metrics)
elif isinstance(self.param_store, self.__ray.actor.ActorHandle):
self.__ray.get(self.param_store.push_metrics.remote(metrics))
else:
self.param_store.push_metrics(metrics)
throughput = batch_size / (time.time() - t_start)
|
coax
|
positive
|
def is_vertical_coordinate(var_name, var):
"""
Determines if a variable is a vertical coordinate variable
4.3
A vertical coordinate will be identifiable by: units of pressure; or the presence of the positive attribute with a
value of up or down (case insensitive). Optionally, the vertical type may be indicated additionally by providing
the standard_name attribute with an appropriate value, and/or the axis attribute with the value Z.
"""
satisfied = var_name.lower() in _possiblez
satisfied |= getattr(var, 'standard_name', '') in _possiblez
satisfied |= getattr(var, 'axis', '').lower() == 'z'
<DeepExtract>
try:
u1 = Unit(getattr(var, 'units', '1'))
u2 = Unit('dbar')
except ValueError:
is_pressure = False
is_pressure = u1.is_convertible(u2)
</DeepExtract>
satisfied |= is_pressure
if not is_pressure:
satisfied |= getattr(var, 'positive', '').lower() in ('up', 'down')
return satisfied
|
def is_vertical_coordinate(var_name, var):
"""
Determines if a variable is a vertical coordinate variable
4.3
A vertical coordinate will be identifiable by: units of pressure; or the presence of the positive attribute with a
value of up or down (case insensitive). Optionally, the vertical type may be indicated additionally by providing
the standard_name attribute with an appropriate value, and/or the axis attribute with the value Z.
"""
satisfied = var_name.lower() in _possiblez
satisfied |= getattr(var, 'standard_name', '') in _possiblez
satisfied |= getattr(var, 'axis', '').lower() == 'z'
try:
u1 = Unit(getattr(var, 'units', '1'))
u2 = Unit('dbar')
except ValueError:
is_pressure = False
is_pressure = u1.is_convertible(u2)
satisfied |= is_pressure
if not is_pressure:
satisfied |= getattr(var, 'positive', '').lower() in ('up', 'down')
return satisfied
|
compliance-checker
|
positive
|
def save_weights(save_dirs):
import joblib
all_vars = self.sess.run(self.var_list)
all_save_dict = {v.name: value for (v, value) in zip(self.var_list, all_vars)}
trainable_save_dict = {name_encode(v.name, convert=False)[0]: all_save_dict[v.name] for v in self.trainable_var_list}
for save_dir in save_dirs:
all_save_path = os.path.join(save_dir, 'agent{}.weights'.format(self.index))
<DeepExtract>
dirname = os.path.dirname(all_save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
</DeepExtract>
joblib.dump(all_save_dict, all_save_path)
trainable_save_path = os.path.join(save_dir, 'agent{}.trainable-weights'.format(self.index))
<DeepExtract>
dirname = os.path.dirname(trainable_save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
</DeepExtract>
joblib.dump(trainable_save_dict, trainable_save_path)
return trainable_save_dict
|
def save_weights(save_dirs):
import joblib
all_vars = self.sess.run(self.var_list)
all_save_dict = {v.name: value for (v, value) in zip(self.var_list, all_vars)}
trainable_save_dict = {name_encode(v.name, convert=False)[0]: all_save_dict[v.name] for v in self.trainable_var_list}
for save_dir in save_dirs:
all_save_path = os.path.join(save_dir, 'agent{}.weights'.format(self.index))
dirname = os.path.dirname(all_save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
joblib.dump(all_save_dict, all_save_path)
trainable_save_path = os.path.join(save_dir, 'agent{}.trainable-weights'.format(self.index))
dirname = os.path.dirname(trainable_save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
joblib.dump(trainable_save_dict, trainable_save_path)
return trainable_save_dict
|
epciclr2020
|
positive
|
def testEndPointsV1(self):
"""Test the end points of a tiny v1 bottleneck network."""
blocks = [resnet_v1.resnet_v1_block('block1', base_depth=1, num_units=2, stride=2), resnet_v1.resnet_v1_block('block2', base_depth=2, num_units=2, stride=1)]
<DeepExtract>
if None in [2, 32, 16, 3]:
inputs = tf.placeholder(tf.float32, (2, 32, 16, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(32), [32, 1]) + np.reshape(np.arange(16), [1, 16]), [1, 32, 16, 1]), [2, 1, 1, 3]))
</DeepExtract>
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
<DeepExtract>
with tf.variable_scope('tiny', values=[inputs]):
with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = slim.utils.convert_collection_to_dict('end_points')
(_, end_points) = (net, end_points)
</DeepExtract>
expected = ['tiny/block1/unit_1/bottleneck_v1/shortcut', 'tiny/block1/unit_1/bottleneck_v1/conv1', 'tiny/block1/unit_1/bottleneck_v1/conv2', 'tiny/block1/unit_1/bottleneck_v1/conv3', 'tiny/block1/unit_2/bottleneck_v1/conv1', 'tiny/block1/unit_2/bottleneck_v1/conv2', 'tiny/block1/unit_2/bottleneck_v1/conv3', 'tiny/block2/unit_1/bottleneck_v1/shortcut', 'tiny/block2/unit_1/bottleneck_v1/conv1', 'tiny/block2/unit_1/bottleneck_v1/conv2', 'tiny/block2/unit_1/bottleneck_v1/conv3', 'tiny/block2/unit_2/bottleneck_v1/conv1', 'tiny/block2/unit_2/bottleneck_v1/conv2', 'tiny/block2/unit_2/bottleneck_v1/conv3']
self.assertItemsEqual(expected, end_points.keys())
|
def testEndPointsV1(self):
"""Test the end points of a tiny v1 bottleneck network."""
blocks = [resnet_v1.resnet_v1_block('block1', base_depth=1, num_units=2, stride=2), resnet_v1.resnet_v1_block('block2', base_depth=2, num_units=2, stride=1)]
if None in [2, 32, 16, 3]:
inputs = tf.placeholder(tf.float32, (2, 32, 16, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(32), [32, 1]) + np.reshape(np.arange(16), [1, 16]), [1, 32, 16, 1]), [2, 1, 1, 3]))
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
with tf.variable_scope('tiny', values=[inputs]):
with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = slim.utils.convert_collection_to_dict('end_points')
(_, end_points) = (net, end_points)
expected = ['tiny/block1/unit_1/bottleneck_v1/shortcut', 'tiny/block1/unit_1/bottleneck_v1/conv1', 'tiny/block1/unit_1/bottleneck_v1/conv2', 'tiny/block1/unit_1/bottleneck_v1/conv3', 'tiny/block1/unit_2/bottleneck_v1/conv1', 'tiny/block1/unit_2/bottleneck_v1/conv2', 'tiny/block1/unit_2/bottleneck_v1/conv3', 'tiny/block2/unit_1/bottleneck_v1/shortcut', 'tiny/block2/unit_1/bottleneck_v1/conv1', 'tiny/block2/unit_1/bottleneck_v1/conv2', 'tiny/block2/unit_1/bottleneck_v1/conv3', 'tiny/block2/unit_2/bottleneck_v1/conv1', 'tiny/block2/unit_2/bottleneck_v1/conv2', 'tiny/block2/unit_2/bottleneck_v1/conv3']
self.assertItemsEqual(expected, end_points.keys())
|
CVTron
|
positive
|
def set_annealer(self, func_type, annealer):
"""
Common interface for attaching a function to this Scheduler.
:param func_type: str, must be in self.allowed_func_types
:param annealer: Annealer, must have a get_value method
:return: None
"""
<DeepExtract>
allowed_func_types = frozenset(['lr', 'rew_shape', 'noise'])
if func_type not in allowed_func_types:
raise KeyError('func_type not in allowed_func_types')
</DeepExtract>
if not isinstance(annealer, Annealer):
raise TypeError('set_annealer_and_func requires an Annealer as input')
self.annealer_dict[func_type] = annealer
|
def set_annealer(self, func_type, annealer):
"""
Common interface for attaching a function to this Scheduler.
:param func_type: str, must be in self.allowed_func_types
:param annealer: Annealer, must have a get_value method
:return: None
"""
allowed_func_types = frozenset(['lr', 'rew_shape', 'noise'])
if func_type not in allowed_func_types:
raise KeyError('func_type not in allowed_func_types')
if not isinstance(annealer, Annealer):
raise TypeError('set_annealer_and_func requires an Annealer as input')
self.annealer_dict[func_type] = annealer
|
adversarial-policies
|
positive
|
def _size(query_params: 'QueryParams', post_processing: List['PostProcessingAction']) -> Optional[int]:
size = query_params.size
<DeepExtract>
size = None
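# keep the smallest size requested by any SizeTask among the post-processing actions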
for action in post_processing:
if isinstance(action, SizeTask):
if size is None or action.size() < size:
size = action.size()
pp_size = size
</DeepExtract>
if pp_size is not None:
if size is not None:
size = min(size, pp_size)
else:
size = pp_size
return size
|
def _size(query_params: 'QueryParams', post_processing: List['PostProcessingAction']) -> Optional[int]:
size = query_params.size
size = None
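# keep the smallest size requested by any SizeTask among the post-processing actions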
for action in post_processing:
if isinstance(action, SizeTask):
if size is None or action.size() < size:
size = action.size()
pp_size = size
if pp_size is not None:
if size is not None:
size = min(size, pp_size)
else:
size = pp_size
return size
|
eland
|
positive
|
@pytest.fixture(scope='module')
def cleanup_jobs():
yield
if not JOBIDS:
return
for job in JOBIDS:
<DeepExtract>
try:
subprocess.check_output(['/opt/pbs/bin/qdel', job], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
if b'Job has finished' not in exc.output:
print(f'Failed to stop {job}, output: {exc.output.decode()}')
</DeepExtract>
print('-- Stopped %d lost clusters --' % len(JOBIDS))
|
@pytest.fixture(scope='module')
def cleanup_jobs():
yield
if not JOBIDS:
return
for job in JOBIDS:
try:
subprocess.check_output(['/opt/pbs/bin/qdel', job], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
if b'Job has finished' not in exc.output:
print(f'Failed to stop {job}, output: {exc.output.decode()}')
print('-- Stopped %d lost clusters --' % len(JOBIDS))
|
dask-gateway
|
positive
|
def visit_Repr(self, node):
<DeepExtract>
if len('`') == 0:
return
if len(self.blame_stack) == 0:
if self.last is not None:
self.last = None
self.line_info.append((len(self.line), self.last))
elif self.last != self.blame_stack[-1]:
self.last = self.blame_stack[-1]
self.line_info.append((len(self.line), self.last))
self.line += '`'
</DeepExtract>
<DeepExtract>
if node.value is None:
return None
if isinstance(node.value, tuple):
return tuple([self.visit(n) for n in node.value])
try:
self.blame_stack.append((node.value.lineno, node.value.col_offset))
info = True
except AttributeError:
info = False
visitor = getattr(self, 'visit_%s' % node.value.__class__.__name__, None)
if visitor is None:
raise Exception('No handler for ``{}`` ({}).'.format(node.value.__class__.__name__, repr(node.value)))
ret = visitor(node.value)
if info:
self.blame_stack.pop()
return ret
</DeepExtract>
<DeepExtract>
if len('`') == 0:
return
if len(self.blame_stack) == 0:
if self.last is not None:
self.last = None
self.line_info.append((len(self.line), self.last))
elif self.last != self.blame_stack[-1]:
self.last = self.blame_stack[-1]
self.line_info.append((len(self.line), self.last))
self.line += '`'
</DeepExtract>
|
def visit_Repr(self, node):
if len('`') == 0:
return
if len(self.blame_stack) == 0:
if self.last is not None:
self.last = None
self.line_info.append((len(self.line), self.last))
elif self.last != self.blame_stack[-1]:
self.last = self.blame_stack[-1]
self.line_info.append((len(self.line), self.last))
self.line += '`'
if node.value is None:
return None
if isinstance(node.value, tuple):
return tuple([self.visit(n) for n in node.value])
try:
self.blame_stack.append((node.value.lineno, node.value.col_offset))
info = True
except AttributeError:
info = False
visitor = getattr(self, 'visit_%s' % node.value.__class__.__name__, None)
if visitor is None:
raise Exception('No handler for ``{}`` ({}).'.format(node.value.__class__.__name__, repr(node.value)))
ret = visitor(node.value)
if info:
self.blame_stack.pop()
return ret
if len('`') == 0:
return
if len(self.blame_stack) == 0:
if self.last is not None:
self.last = None
self.line_info.append((len(self.line), self.last))
elif self.last != self.blame_stack[-1]:
self.last = self.blame_stack[-1]
self.line_info.append((len(self.line), self.last))
self.line += '`'
|
chameleon
|
positive
|
def reset_parameters(self):
<DeepExtract>
self.running_mean.zero_()
self.running_var.fill_(1)
</DeepExtract>
init.uniform_(self.weight)
init.zeros_(self.bias)
|
def reset_parameters(self):
self.running_mean.zero_()
self.running_var.fill_(1)
init.uniform_(self.weight)
init.zeros_(self.bias)
|
CAG_UDA
|
positive
|
def get_similarity(sig1, sig2):
"""
Obtains a measure of the similarity between two multi-lead signals, as the
mean of the cross-correlation maximum value for each lead.
"""
cleads = set(sig1.keys()).intersection(set(sig2.keys()))
corrs = []
for lead in set(sig1.keys()).union(set(sig2.keys())):
if lead not in cleads:
corrs.append(0.0)
else:
(arr1, arr2) = (sig1[lead].sig, sig2[lead].sig)
if len(arr2) > len(arr1):
(arr1, arr2) = (arr2, arr1)
<DeepExtract>
tr1 = arr1 - arr1[0] if arr1[0] != 0 else arr1
tr2 = arr2 - arr2[0] if arr2[0] != 0 else arr2
corr = np.correlate(tr1, tr2, mode='full')
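# normalize (when both signals are non-zero) so the correlation peak lies in [-1, 1]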
if np.any(tr1) and np.any(tr2):
corr /= np.sqrt(np.dot(tr1, tr1) * np.dot(tr2, tr2))
idx = np.argmax(corr)
(corr, _) = (corr[idx], idx - len(tr2) + 1)
</DeepExtract>
corrs.append(corr)
return np.mean(corrs)
|
def get_similarity(sig1, sig2):
"""
Obtains a measure of the similarity between two multi-lead signals, as the
mean of the cross-correlation maximum value for each lead.
"""
cleads = set(sig1.keys()).intersection(set(sig2.keys()))
corrs = []
for lead in set(sig1.keys()).union(set(sig2.keys())):
if lead not in cleads:
corrs.append(0.0)
else:
(arr1, arr2) = (sig1[lead].sig, sig2[lead].sig)
if len(arr2) > len(arr1):
(arr1, arr2) = (arr2, arr1)
tr1 = arr1 - arr1[0] if arr1[0] != 0 else arr1
tr2 = arr2 - arr2[0] if arr2[0] != 0 else arr2
corr = np.correlate(tr1, tr2, mode='full')
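# normalize (when both signals are non-zero) so the correlation peak lies in [-1, 1]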
if np.any(tr1) and np.any(tr2):
corr /= np.sqrt(np.dot(tr1, tr1) * np.dot(tr2, tr2))
idx = np.argmax(corr)
(corr, _) = (corr[idx], idx - len(tr2) + 1)
corrs.append(corr)
return np.mean(corrs)
|
construe
|
positive
|
@dxpy.entry_point('main')
def main(input_bam, paired_end, spp_version):
input_bam_file = dxpy.DXFile(input_bam)
input_bam_filename = input_bam_file.name
input_bam_basename = input_bam_file.name.rstrip('.bam')
dxpy.download_dxfile(input_bam_file.get_id(), input_bam_filename)
intermediate_TA_filename = input_bam_basename + '.tagAlign'
if paired_end:
end_infix = 'PE2SE'
else:
end_infix = 'SE'
final_TA_filename = input_bam_basename + '.' + end_infix + '.tagAlign.gz'
(out, err) = common.run_pipe(['bamToBed -i %s' % input_bam_filename, 'awk \'BEGIN{OFS="\\t"}{$4="N";$5="1000";print $0}\'', 'tee %s' % intermediate_TA_filename, 'gzip -cn'], outfile=final_TA_filename)
if paired_end:
final_BEDPE_filename = input_bam_basename + '.bedpe.gz'
final_nmsrt_bam_prefix = input_bam_basename + '.nmsrt'
final_nmsrt_bam_filename = final_nmsrt_bam_prefix + '.bam'
samtools_sort_command = 'samtools sort -n %s %s' % (input_bam_filename, final_nmsrt_bam_prefix)
logger.info(samtools_sort_command)
subprocess.check_output(shlex.split(samtools_sort_command))
(out, err) = common.run_pipe(['bamToBed -bedpe -mate1 -i %s' % final_nmsrt_bam_filename, 'gzip -cn'], outfile=final_BEDPE_filename)
logger.info('Intermediate tA md5: %s' % common.md5(intermediate_TA_filename))
NREADS = 15000000
if paired_end:
end_infix = 'MATE1'
else:
end_infix = 'SE'
subsampled_TA_filename = input_bam_basename + '.filt.nodup.sample.%d.%s.tagAlign.gz' % (NREADS / 1000000, end_infix)
steps = ['grep -v "chrM" %s' % intermediate_TA_filename, 'shuf -n %d --random-source=%s' % (NREADS, intermediate_TA_filename)]
if paired_end:
steps.extend(['awk \'BEGIN{OFS="\\t"}{$4="N";$5="1000";print $0}\''])
steps.extend(['gzip -cn'])
(out, err) = common.run_pipe(steps, outfile=subsampled_TA_filename)
logger.info('Subsampled tA md5: %s' % common.md5(subsampled_TA_filename))
CC_scores_filename = subsampled_TA_filename + '.cc.qc'
CC_plot_filename = subsampled_TA_filename + '.cc.plot.pdf'
run_spp_command = '/phantompeakqualtools/run_spp.R'
(out, err) = common.run_pipe(['Rscript %s -c=%s -p=%d -filtchr=chrM -savp=%s -out=%s' % (run_spp_command, subsampled_TA_filename, cpu_count(), CC_plot_filename, CC_scores_filename)])
(out, err) = common.run_pipe(["sed -r 's/,[^\\t]+//g' %s" % CC_scores_filename], outfile='temp')
(out, err) = common.run_pipe(['mv temp %s' % CC_scores_filename])
tagAlign_file = dxpy.upload_local_file(final_TA_filename)
if paired_end:
BEDPE_file = dxpy.upload_local_file(final_BEDPE_filename)
CC_scores_file = dxpy.upload_local_file(CC_scores_filename)
CC_plot_file = dxpy.upload_local_file(CC_plot_filename)
<DeepExtract>
with open(CC_scores_filename, 'r') as xcor_file:
if not xcor_file:
xcor_qc = None
lines = xcor_file.read().splitlines()
line = lines[0].rstrip('\n')
headers = ['Filename', 'numReads', 'estFragLen', 'corr_estFragLen', 'PhantomPeak', 'corr_phantomPeak', 'argmin_corr', 'min_corr', 'phantomPeakCoef', 'relPhantomPeakCoef', 'QualityTag']
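# drop the leading Filename column, then map the remaining headers onto the tab-separated metric values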
metrics = line.split('\t')
headers.pop(0)
metrics.pop(0)
xcor_qc = dict(zip(headers, metrics))
xcor_qc = xcor_qc
</DeepExtract>
output = {'tagAlign_file': dxpy.dxlink(tagAlign_file), 'CC_scores_file': dxpy.dxlink(CC_scores_file), 'CC_plot_file': dxpy.dxlink(CC_plot_file), 'paired_end': paired_end, 'RSC': float(xcor_qc.get('relPhantomPeakCoef')), 'NSC': float(xcor_qc.get('phantomPeakCoef')), 'est_frag_len': float(xcor_qc.get('estFragLen'))}
if paired_end:
output.update({'BEDPE_file': dxpy.dxlink(BEDPE_file)})
return output
|
@dxpy.entry_point('main')
def main(input_bam, paired_end, spp_version):
input_bam_file = dxpy.DXFile(input_bam)
input_bam_filename = input_bam_file.name
input_bam_basename = input_bam_file.name.rstrip('.bam')
dxpy.download_dxfile(input_bam_file.get_id(), input_bam_filename)
intermediate_TA_filename = input_bam_basename + '.tagAlign'
if paired_end:
end_infix = 'PE2SE'
else:
end_infix = 'SE'
final_TA_filename = input_bam_basename + '.' + end_infix + '.tagAlign.gz'
(out, err) = common.run_pipe(['bamToBed -i %s' % input_bam_filename, 'awk \'BEGIN{OFS="\\t"}{$4="N";$5="1000";print $0}\'', 'tee %s' % intermediate_TA_filename, 'gzip -cn'], outfile=final_TA_filename)
if paired_end:
final_BEDPE_filename = input_bam_basename + '.bedpe.gz'
final_nmsrt_bam_prefix = input_bam_basename + '.nmsrt'
final_nmsrt_bam_filename = final_nmsrt_bam_prefix + '.bam'
samtools_sort_command = 'samtools sort -n %s %s' % (input_bam_filename, final_nmsrt_bam_prefix)
logger.info(samtools_sort_command)
subprocess.check_output(shlex.split(samtools_sort_command))
(out, err) = common.run_pipe(['bamToBed -bedpe -mate1 -i %s' % final_nmsrt_bam_filename, 'gzip -cn'], outfile=final_BEDPE_filename)
logger.info('Intermediate tA md5: %s' % common.md5(intermediate_TA_filename))
NREADS = 15000000
if paired_end:
end_infix = 'MATE1'
else:
end_infix = 'SE'
subsampled_TA_filename = input_bam_basename + '.filt.nodup.sample.%d.%s.tagAlign.gz' % (NREADS / 1000000, end_infix)
steps = ['grep -v "chrM" %s' % intermediate_TA_filename, 'shuf -n %d --random-source=%s' % (NREADS, intermediate_TA_filename)]
if paired_end:
steps.extend(['awk \'BEGIN{OFS="\\t"}{$4="N";$5="1000";print $0}\''])
steps.extend(['gzip -cn'])
(out, err) = common.run_pipe(steps, outfile=subsampled_TA_filename)
logger.info('Subsampled tA md5: %s' % common.md5(subsampled_TA_filename))
CC_scores_filename = subsampled_TA_filename + '.cc.qc'
CC_plot_filename = subsampled_TA_filename + '.cc.plot.pdf'
run_spp_command = '/phantompeakqualtools/run_spp.R'
(out, err) = common.run_pipe(['Rscript %s -c=%s -p=%d -filtchr=chrM -savp=%s -out=%s' % (run_spp_command, subsampled_TA_filename, cpu_count(), CC_plot_filename, CC_scores_filename)])
(out, err) = common.run_pipe(["sed -r 's/,[^\\t]+//g' %s" % CC_scores_filename], outfile='temp')
(out, err) = common.run_pipe(['mv temp %s' % CC_scores_filename])
tagAlign_file = dxpy.upload_local_file(final_TA_filename)
if paired_end:
BEDPE_file = dxpy.upload_local_file(final_BEDPE_filename)
CC_scores_file = dxpy.upload_local_file(CC_scores_filename)
CC_plot_file = dxpy.upload_local_file(CC_plot_filename)
with open(CC_scores_filename, 'r') as xcor_file:
if not xcor_file:
xcor_qc = None
lines = xcor_file.read().splitlines()
line = lines[0].rstrip('\n')
headers = ['Filename', 'numReads', 'estFragLen', 'corr_estFragLen', 'PhantomPeak', 'corr_phantomPeak', 'argmin_corr', 'min_corr', 'phantomPeakCoef', 'relPhantomPeakCoef', 'QualityTag']
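# drop the leading Filename column, then map the remaining headers onto the tab-separated metric values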
metrics = line.split('\t')
headers.pop(0)
metrics.pop(0)
xcor_qc = dict(zip(headers, metrics))
xcor_qc = xcor_qc
output = {'tagAlign_file': dxpy.dxlink(tagAlign_file), 'CC_scores_file': dxpy.dxlink(CC_scores_file), 'CC_plot_file': dxpy.dxlink(CC_plot_file), 'paired_end': paired_end, 'RSC': float(xcor_qc.get('relPhantomPeakCoef')), 'NSC': float(xcor_qc.get('phantomPeakCoef')), 'est_frag_len': float(xcor_qc.get('estFragLen'))}
if paired_end:
output.update({'BEDPE_file': dxpy.dxlink(BEDPE_file)})
return output
|
chip-seq-pipeline
|
positive
|
def next_stim(self):
"""
Compute and return the next stimulus
If we are doing correction trials, compute that.
Same thing with bias correction.
Otherwise, randomly select a stimulus to present.
Returns:
('L'/'R' Target, 'L'/'R' distractor, Stimulus to present)
"""
if self.correction:
<DeepExtract>
if self.target is None:
self.correction_trial = False
if (self.correction_trial or self.last_was_correction) and (not self.correct):
self.correction_trial = True
elif (self.correction_trial or self.last_was_correction) and self.correct:
self.last_was_correction = False
self.correction_trial = False
elif np.random.rand() < self.correction_pct:
self.last_was_correction = True
self.correction_trial = False
else:
self.correction_trial = False
</DeepExtract>
if self.correction_trial:
return (self.target, self.distractor, self.last_stim)
if self.bias:
threshold = self.bias.next_bias()
else:
threshold = 0.5
if np.random.rand() < threshold:
self.target = 'L'
else:
self.target = 'R'
if self.target == 'L':
self.distractor = 'R'
elif self.target == 'R':
self.distractor = 'L'
self.last_stim = np.random.choice(self.stimuli[self.target])
return (self.target, self.distractor, self.last_stim)
|
def next_stim(self):
"""
Compute and return the next stimulus
If we are doing correction trials, compute that.
Same thing with bias correction.
Otherwise, randomly select a stimulus to present.
Returns:
('L'/'R' Target, 'L'/'R' distractor, Stimulus to present)
"""
if self.correction:
if self.target is None:
self.correction_trial = False
if (self.correction_trial or self.last_was_correction) and (not self.correct):
self.correction_trial = True
elif (self.correction_trial or self.last_was_correction) and self.correct:
self.last_was_correction = False
self.correction_trial = False
elif np.random.rand() < self.correction_pct:
self.last_was_correction = True
self.correction_trial = False
else:
self.correction_trial = False
if self.correction_trial:
return (self.target, self.distractor, self.last_stim)
if self.bias:
threshold = self.bias.next_bias()
else:
threshold = 0.5
if np.random.rand() < threshold:
self.target = 'L'
else:
self.target = 'R'
if self.target == 'L':
self.distractor = 'R'
elif self.target == 'R':
self.distractor = 'L'
self.last_stim = np.random.choice(self.stimuli[self.target])
return (self.target, self.distractor, self.last_stim)
|
autopilot
|
positive
|
def _update_cov_mat(self, rng_t, npc=None):
"""
Updates the covariance matrix.
Parameters
----------
seed_t: numpy.ndarray
2 dimensional array. The first entry defines the initial seed of the random number generator.
The second entry defines the way in which the accepted covariance matrix is transformed.
Returns
-------
numpy.ndarray
accepted covariance matrix
"""
rng = rng_t[0]
t = rng_t[1]
rng.seed(rng.randint(np.iinfo(np.uint32).max, dtype=np.uint32))
acceptance = 0
accepted_cov_mats_transformed = [cov_mat * pow(2.0, -2.0 * t) for cov_mat in self.accepted_parameters_manager.accepted_cov_mats_bds.value()]
theta = self.accepted_parameters_manager.accepted_parameters_bds.value()[0]
(mapping_for_kernels, garbage_index) = self.accepted_parameters_manager.get_mapping(self.accepted_parameters_manager.model)
counter = 0
for ind in range(0, self.chain_length):
self.logger.debug('Parameter acceptance loop step {}.'.format(ind))
while True:
<DeepExtract>
current_epoch = 0
if accepted_parameters_manager is None:
accepted_parameters_manager = self.accepted_parameters_manager
while current_epoch < epochs:
new_parameters = self.kernel.update(accepted_parameters_manager, 0, rng=rng)
self._reset_flags()
correctly_ordered_parameters = self.get_correct_ordering(new_parameters)
(accepted, last_index) = self.set_parameters(correctly_ordered_parameters, 0)
if accepted:
break
current_epoch += 1
if current_epoch == 10:
perturbation_output = [False]
perturbation_output = [True, correctly_ordered_parameters]
</DeepExtract>
if perturbation_output[0] and self.pdf_of_prior(self.model, perturbation_output[1]) != 0:
break
y_sim = self.simulate(self.n_samples_per_param, rng=rng, npc=npc)
counter += 1
new_distance = self.distance.distance(self.accepted_parameters_manager.observations_bds.value(), y_sim)
self.logger.debug('Calculate acceptance probability.')
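# acceptance probability: prior ratio times proposal-kernel ratio, capped at 1 and gated by the distance threshold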
ratio_prior_prob = self.pdf_of_prior(self.model, perturbation_output[1]) / self.pdf_of_prior(self.model, theta)
kernel_numerator = self.kernel.pdf(mapping_for_kernels, self.accepted_parameters_manager, perturbation_output[1], theta)
kernel_denominator = self.kernel.pdf(mapping_for_kernels, self.accepted_parameters_manager, theta, perturbation_output[1])
ratio_likelihood_prob = kernel_numerator / kernel_denominator
acceptance_prob = min(1, ratio_prior_prob * ratio_likelihood_prob) * (new_distance < self.anneal_parameter)
if rng.binomial(1, acceptance_prob) == 1:
theta = perturbation_output[1]
acceptance = acceptance + 1
self.logger.debug('Return accepted parameters.')
if acceptance / 10 <= 0.5 and acceptance / 10 >= 0.3:
return (accepted_cov_mats_transformed, t, 1, counter)
else:
return (accepted_cov_mats_transformed, t, 0, counter)
|
def _update_cov_mat(self, rng_t, npc=None):
"""
Updates the covariance matrix.
Parameters
----------
seed_t: numpy.ndarray
2 dimensional array. The first entry defines the initial seed of the random number generator.
The second entry defines the way in which the accepted covariance matrix is transformed.
Returns
-------
numpy.ndarray
accepted covariance matrix
"""
rng = rng_t[0]
t = rng_t[1]
rng.seed(rng.randint(np.iinfo(np.uint32).max, dtype=np.uint32))
acceptance = 0
accepted_cov_mats_transformed = [cov_mat * pow(2.0, -2.0 * t) for cov_mat in self.accepted_parameters_manager.accepted_cov_mats_bds.value()]
theta = self.accepted_parameters_manager.accepted_parameters_bds.value()[0]
(mapping_for_kernels, garbage_index) = self.accepted_parameters_manager.get_mapping(self.accepted_parameters_manager.model)
counter = 0
for ind in range(0, self.chain_length):
self.logger.debug('Parameter acceptance loop step {}.'.format(ind))
while True:
current_epoch = 0
if accepted_parameters_manager is None:
accepted_parameters_manager = self.accepted_parameters_manager
while current_epoch < epochs:
new_parameters = self.kernel.update(accepted_parameters_manager, 0, rng=rng)
self._reset_flags()
correctly_ordered_parameters = self.get_correct_ordering(new_parameters)
(accepted, last_index) = self.set_parameters(correctly_ordered_parameters, 0)
if accepted:
break
current_epoch += 1
if current_epoch == 10:
perturbation_output = [False]
perturbation_output = [True, correctly_ordered_parameters]
if perturbation_output[0] and self.pdf_of_prior(self.model, perturbation_output[1]) != 0:
break
y_sim = self.simulate(self.n_samples_per_param, rng=rng, npc=npc)
counter += 1
new_distance = self.distance.distance(self.accepted_parameters_manager.observations_bds.value(), y_sim)
self.logger.debug('Calculate acceptance probability.')
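# acceptance probability: prior ratio times proposal-kernel ratio, capped at 1 and gated by the distance threshold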
ratio_prior_prob = self.pdf_of_prior(self.model, perturbation_output[1]) / self.pdf_of_prior(self.model, theta)
kernel_numerator = self.kernel.pdf(mapping_for_kernels, self.accepted_parameters_manager, perturbation_output[1], theta)
kernel_denominator = self.kernel.pdf(mapping_for_kernels, self.accepted_parameters_manager, theta, perturbation_output[1])
ratio_likelihood_prob = kernel_numerator / kernel_denominator
acceptance_prob = min(1, ratio_prior_prob * ratio_likelihood_prob) * (new_distance < self.anneal_parameter)
if rng.binomial(1, acceptance_prob) == 1:
theta = perturbation_output[1]
acceptance = acceptance + 1
self.logger.debug('Return accepted parameters.')
if acceptance / 10 <= 0.5 and acceptance / 10 >= 0.3:
return (accepted_cov_mats_transformed, t, 1, counter)
else:
return (accepted_cov_mats_transformed, t, 0, counter)
|
abcpy
|
positive
|
def testClassificationShapes(self):
global_pool = True
num_classes = 10
<DeepExtract>
if None in [2, 224, 224, 3]:
inputs = tf.placeholder(tf.float32, (2, 224, 224, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(224), [224, 1]) + np.reshape(np.arange(224), [1, 224]), [1, 224, 224, 1]), [2, 1, 1, 3]))
</DeepExtract>
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
<DeepExtract>
block = resnet_v1.resnet_v1_block
blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
(_, end_points) = resnet_v1.resnet_v1(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, reuse=reuse, scope='resnet')
</DeepExtract>
endpoint_to_shape = {'resnet/block1': [2, 28, 28, 4], 'resnet/block2': [2, 14, 14, 8], 'resnet/block3': [2, 7, 7, 16], 'resnet/block4': [2, 7, 7, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
|
def testClassificationShapes(self):
global_pool = True
num_classes = 10
if None in [2, 224, 224, 3]:
inputs = tf.placeholder(tf.float32, (2, 224, 224, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(224), [224, 1]) + np.reshape(np.arange(224), [1, 224]), [1, 224, 224, 1]), [2, 1, 1, 3]))
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
block = resnet_v1.resnet_v1_block
blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
(_, end_points) = resnet_v1.resnet_v1(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, reuse=reuse, scope='resnet')
endpoint_to_shape = {'resnet/block1': [2, 28, 28, 4], 'resnet/block2': [2, 14, 14, 8], 'resnet/block3': [2, 7, 7, 16], 'resnet/block4': [2, 7, 7, 32]}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
|
CSL_RetinaNet_Tensorflow
|
positive
|
def get_all(self):
<DeepExtract>
self.start = self.pdict.get('start')
</DeepExtract>
<DeepExtract>
self.error_func = self.pdict.get('p_error')
</DeepExtract>
<DeepExtract>
tokens = self.pdict.get('tokens', None)
if not tokens:
self.log.error('No token list is defined')
self.error = 1
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = 1
return
if not tokens:
self.log.error('tokens is empty')
self.error = 1
return
self.tokens = tokens
</DeepExtract>
<DeepExtract>
self.prec = self.pdict.get('precedence', None)
</DeepExtract>
<DeepExtract>
p_functions = []
for (name, item) in self.pdict.items():
if name[:2] != 'p_':
continue
if name == 'p_error':
continue
if isinstance(item, (types.FunctionType, types.MethodType)):
line = func_code(item).co_firstlineno
file = func_code(item).co_filename
p_functions.append((line, file, name, item.__doc__))
p_functions.sort()
self.pfuncs = p_functions
</DeepExtract>
|
def get_all(self):
self.start = self.pdict.get('start')
self.error_func = self.pdict.get('p_error')
tokens = self.pdict.get('tokens', None)
if not tokens:
self.log.error('No token list is defined')
self.error = 1
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = 1
return
if not tokens:
self.log.error('tokens is empty')
self.error = 1
return
self.tokens = tokens
self.prec = self.pdict.get('precedence', None)
p_functions = []
for (name, item) in self.pdict.items():
if name[:2] != 'p_':
continue
if name == 'p_error':
continue
if isinstance(item, (types.FunctionType, types.MethodType)):
line = func_code(item).co_firstlineno
file = func_code(item).co_filename
p_functions.append((line, file, name, item.__doc__))
p_functions.sort()
self.pfuncs = p_functions
|
asp
|
positive
|
def test_uptime_emails(self):
<DeepExtract>
alerts_history_model.collection.remove()
alerts_model.collection.remove()
mail.outbox = []
</DeepExtract>
uptime_alert = {'above_below': 'above', 'rule_type': 'uptime', 'server': self.server_id, 'process': self.process_id, 'account_id': self.account_id, 'period': 0, 'notifications': self.notifications_list}
down_alert = {**uptime_alert, 'metric': 'Down', 'metric_value': 0}
alerts_model.collection.insert(down_alert)
data = {'data': []}
uptime_alerter.check(data, self.server)
unsent_alerts = alerts_history_model.get_unsent(server_id=self.server_id)
eq_(unsent_alerts['data'].count(), 1)
notifications = generate_notifications()
for n in notifications:
send_notification_email(notification=n, emails=self.emails)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Server: test / testprocess is Down')
self.assertEqual(mail.outbox[0].to, ['foo@test.com'])
<DeepExtract>
alerts_history_model.collection.remove()
alerts_model.collection.remove()
mail.outbox = []
</DeepExtract>
|
def test_uptime_emails(self):
alerts_history_model.collection.remove()
alerts_model.collection.remove()
mail.outbox = []
uptime_alert = {'above_below': 'above', 'rule_type': 'uptime', 'server': self.server_id, 'process': self.process_id, 'account_id': self.account_id, 'period': 0, 'notifications': self.notifications_list}
down_alert = {**uptime_alert, 'metric': 'Down', 'metric_value': 0}
alerts_model.collection.insert(down_alert)
data = {'data': []}
uptime_alerter.check(data, self.server)
unsent_alerts = alerts_history_model.get_unsent(server_id=self.server_id)
eq_(unsent_alerts['data'].count(), 1)
notifications = generate_notifications()
for n in notifications:
send_notification_email(notification=n, emails=self.emails)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Server: test / testprocess is Down')
self.assertEqual(mail.outbox[0].to, ['foo@test.com'])
alerts_history_model.collection.remove()
alerts_model.collection.remove()
mail.outbox = []
|
amon
|
positive
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--train_file', default=None, type=str, required=True, help='SQuAD json for training. E.g., train-v1.1.json')
parser.add_argument('--predict_file', default=None, type=str, required=True, help='SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json')
parser.add_argument('--model_type', default=None, type=str, required=True, help='Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys()))
parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Path to pre-trained model or shortcut name selected in the list: ' + ', '.join(ALL_MODELS))
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.')
parser.add_argument('--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.')
parser.add_argument('--max_seq_length', default=384, type=int, help='The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded.')
parser.add_argument('--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.')
parser.add_argument('--max_query_length', default=64, type=int, help='The maximum number of tokens for the question. Questions longer than this will be truncated to this length.')
parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
parser.add_argument('--evaluate_during_training', action='store_true', help='Rul evaluation during training at each logging step.')
parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
parser.add_argument('--per_gpu_train_batch_size', default=8, type=int, help='Batch size per GPU/CPU for training.')
parser.add_argument('--per_gpu_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight deay if we apply some.')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
parser.add_argument('--max_steps', default=-1, type=int, help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
parser.add_argument('--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.')
parser.add_argument('--max_answer_length', default=30, type=int, help='The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another.')
parser.add_argument('--verbose_logging', action='store_true', help='If true, all of the warnings related to data processing will be printed. A number of warnings are expected for a normal SQuAD evaluation.')
parser.add_argument('--logging_steps', type=int, default=50, help='Log every X updates steps.')
parser.add_argument('--save_steps', type=int, default=50, help='Save checkpoint every X updates steps.')
parser.add_argument('--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number')
parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
parser.add_argument('--overwrite_output_dir', action='store_true', help='Overwrite the content of the output directory')
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--server_ip', type=str, default='', help='Can be used for remote debugging.')
parser.add_argument('--server_port', type=str, default='', help='Can be used for remote debugging.')
parser.add_argument('--debug_mode', action='store_true', help='whether to use debug mode')
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and (not args.overwrite_output_dir):
raise ValueError('Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.'.format(args.output_dir))
if args.server_ip and args.server_port:
import ptvsd
print('Waiting for debugger attach')
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
if args.local_rank == -1 or args.no_cuda:
device = torch.device('cuda' if torch.cuda.is_available() and (not args.no_cuda) else 'cpu')
args.n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
<DeepExtract>
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
</DeepExtract>
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
args.model_type = args.model_type.lower()
(config_class, model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
if args.local_rank == 0:
torch.distributed.barrier()
model.to(args.device)
logger.info('Training/evaluation parameters %s', args)
if args.do_train:
<DeepExtract>
input_file = args.predict_file if False else args.train_file
cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format('dev' if False else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length)))
if os.path.exists(cached_features_file) and (not args.overwrite_cache) and (not False):
logger.info('Loading features from cached file %s', cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info('Creating features from dataset file at %s', input_file)
examples = read_squad_examples(input_file=input_file, is_training=not False, version_2_with_negative=args.version_2_with_negative)
if args.debug_mode:
examples = examples[:10]
features = convert_examples_to_features(examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not False)
if args.local_rank in [-1, 0]:
logger.info('Saving features into cached file %s', cached_features_file)
torch.save(features, cached_features_file)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
if False:
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index, all_cls_index, all_p_mask)
else:
all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask)
if False:
train_dataset = (dataset, examples, features)
train_dataset = dataset
</DeepExtract>
<DeepExtract>
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if not any((nd in n for nd in no_decay))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any((nd in n for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
(model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
logger.info('***** Running training *****')
logger.info(' Num examples = %d', len(train_dataset))
logger.info(' Num Epochs = %d', args.num_train_epochs)
logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
logger.info(' Total optimization steps = %d', t_total)
global_step = 0
(tr_loss, logging_loss) = (0.0, 0.0)
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc='Epoch', disable=args.local_rank not in [-1, 0])
set_seed(args)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])
for (step, batch) in enumerate(epoch_iterator):
model.train()
batch = tuple((t.to(args.device) for t in batch))
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': None if args.model_type == 'xlm' else batch[2], 'start_positions': batch[3], 'end_positions': batch[4]}
if args.model_type in ['xlnet', 'xlm']:
inputs.update({'cls_index': batch[5], 'p_mask': batch[6]})
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
scheduler.step()
optimizer.step()
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and (global_step % args.logging_steps == 0):
if args.local_rank == -1 and args.evaluate_during_training:
results = evaluate(args, model, tokenizer)
for (key, value) in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and (global_step % args.save_steps == 0):
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info('Saving model checkpoint to %s', output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
(global_step, tr_loss) = (global_step, tr_loss / global_step)
</DeepExtract>
logger.info(' global_step = %s, average loss = %s', global_step, tr_loss)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info('Saving model checkpoint to %s', args.output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list((os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))))
logging.getLogger('pytorch_transformers.modeling_utils').setLevel(logging.WARN)
logger.info('Evaluate the following checkpoints: %s', checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ''
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
<DeepExtract>
(dataset, examples, features) = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
logger.info('***** Running evaluation {} *****'.format(global_step))
logger.info(' Num examples = %d', len(dataset))
logger.info(' Batch size = %d', args.eval_batch_size)
all_results = []
for batch in tqdm(eval_dataloader, desc='Evaluating'):
model.eval()
batch = tuple((t.to(args.device) for t in batch))
with torch.no_grad():
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': None if args.model_type == 'xlm' else batch[2]}
example_indices = batch[3]
if args.model_type in ['xlnet', 'xlm']:
inputs.update({'cls_index': batch[4], 'p_mask': batch[5]})
outputs = model(**inputs)
for (i, example_index) in enumerate(example_indices):
eval_feature = features[example_index.item()]
unique_id = int(eval_feature.unique_id)
if args.model_type in ['xlnet', 'xlm']:
result = RawResultExtended(unique_id=unique_id, start_top_log_probs=to_list(outputs[0][i]), start_top_index=to_list(outputs[1][i]), end_top_log_probs=to_list(outputs[2][i]), end_top_index=to_list(outputs[3][i]), cls_logits=to_list(outputs[4][i]))
else:
result = RawResult(unique_id=unique_id, start_logits=to_list(outputs[0][i]), end_logits=to_list(outputs[1][i]))
all_results.append(result)
output_prediction_file = os.path.join(args.output_dir, 'predictions_{}.json'.format(global_step))
output_nbest_file = os.path.join(args.output_dir, 'nbest_predictions_{}.json'.format(global_step))
if args.version_2_with_negative:
output_null_log_odds_file = os.path.join(args.output_dir, 'null_odds_{}.json'.format(global_step))
else:
output_null_log_odds_file = None
if args.model_type in ['xlnet', 'xlm']:
write_predictions_extended(examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.predict_file, model.config.start_n_top, model.config.end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging)
else:
write_predictions(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold)
evaluate_options = EVAL_OPTS(data_file=args.predict_file, pred_file=output_prediction_file, na_prob_file=output_null_log_odds_file)
(results, exact_raw, f1_raw) = evaluate_on_squad(evaluate_options)
fout = open(args.output_dir + 'evaluation_metrics_per_example.txt', 'w')
fout.write('example_id\texact_match\tf1\n')
for e_id in exact_raw:
fout.write(str(e_id) + '\t' + str(exact_raw[e_id]) + '\t' + str(f1_raw[e_id]) + '\n')
fout.close()
(result, exact_raw, f1_raw) = (results, exact_raw, f1_raw)
</DeepExtract>
result = dict(((k + ('_{}'.format(global_step) if global_step else ''), v) for (k, v) in result.items()))
results.update(result)
logger.info('Results: {}'.format(results))
return results
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--train_file', default=None, type=str, required=True, help='SQuAD json for training. E.g., train-v1.1.json')
parser.add_argument('--predict_file', default=None, type=str, required=True, help='SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json')
parser.add_argument('--model_type', default=None, type=str, required=True, help='Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys()))
parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Path to pre-trained model or shortcut name selected in the list: ' + ', '.join(ALL_MODELS))
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.')
parser.add_argument('--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.')
parser.add_argument('--max_seq_length', default=384, type=int, help='The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded.')
parser.add_argument('--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.')
parser.add_argument('--max_query_length', default=64, type=int, help='The maximum number of tokens for the question. Questions longer than this will be truncated to this length.')
parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
parser.add_argument('--evaluate_during_training', action='store_true', help='Run evaluation during training at each logging step.')
parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
parser.add_argument('--per_gpu_train_batch_size', default=8, type=int, help='Batch size per GPU/CPU for training.')
parser.add_argument('--per_gpu_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
parser.add_argument('--max_steps', default=-1, type=int, help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
parser.add_argument('--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.')
parser.add_argument('--max_answer_length', default=30, type=int, help='The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another.')
parser.add_argument('--verbose_logging', action='store_true', help='If true, all of the warnings related to data processing will be printed. A number of warnings are expected for a normal SQuAD evaluation.')
parser.add_argument('--logging_steps', type=int, default=50, help='Log every X updates steps.')
parser.add_argument('--save_steps', type=int, default=50, help='Save checkpoint every X updates steps.')
parser.add_argument('--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name and ending with step number')
parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
parser.add_argument('--overwrite_output_dir', action='store_true', help='Overwrite the content of the output directory')
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--server_ip', type=str, default='', help='Can be used for remote debugging.')
parser.add_argument('--server_port', type=str, default='', help='Can be used for remote debugging.')
parser.add_argument('--debug_mode', action='store_true', help='whether to use debug mode')
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and (not args.overwrite_output_dir):
raise ValueError('Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.'.format(args.output_dir))
if args.server_ip and args.server_port:
import ptvsd
print('Waiting for debugger attach')
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
if args.local_rank == -1 or args.no_cuda:
device = torch.device('cuda' if torch.cuda.is_available() and (not args.no_cuda) else 'cpu')
args.n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
args.model_type = args.model_type.lower()
(config_class, model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
if args.local_rank == 0:
torch.distributed.barrier()
model.to(args.device)
logger.info('Training/evaluation parameters %s', args)
if args.do_train:
input_file = args.predict_file if False else args.train_file
cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format('dev' if False else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length)))
if os.path.exists(cached_features_file) and (not args.overwrite_cache) and (not False):
logger.info('Loading features from cached file %s', cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info('Creating features from dataset file at %s', input_file)
examples = read_squad_examples(input_file=input_file, is_training=not False, version_2_with_negative=args.version_2_with_negative)
if args.debug_mode:
examples = examples[:10]
features = convert_examples_to_features(examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not False)
if args.local_rank in [-1, 0]:
logger.info('Saving features into cached file %s', cached_features_file)
torch.save(features, cached_features_file)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
if False:
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index, all_cls_index, all_p_mask)
else:
all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask)
if False:
train_dataset = (dataset, examples, features)
train_dataset = dataset
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if not any((nd in n for nd in no_decay))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any((nd in n for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
(model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
logger.info('***** Running training *****')
logger.info(' Num examples = %d', len(train_dataset))
logger.info(' Num Epochs = %d', args.num_train_epochs)
logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
logger.info(' Total optimization steps = %d', t_total)
global_step = 0
(tr_loss, logging_loss) = (0.0, 0.0)
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc='Epoch', disable=args.local_rank not in [-1, 0])
set_seed(args)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])
for (step, batch) in enumerate(epoch_iterator):
model.train()
batch = tuple((t.to(args.device) for t in batch))
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': None if args.model_type == 'xlm' else batch[2], 'start_positions': batch[3], 'end_positions': batch[4]}
if args.model_type in ['xlnet', 'xlm']:
inputs.update({'cls_index': batch[5], 'p_mask': batch[6]})
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
scheduler.step()
optimizer.step()
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and (global_step % args.logging_steps == 0):
if args.local_rank == -1 and args.evaluate_during_training:
results = evaluate(args, model, tokenizer)
for (key, value) in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and (global_step % args.save_steps == 0):
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info('Saving model checkpoint to %s', output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
(global_step, tr_loss) = (global_step, tr_loss / global_step)
logger.info(' global_step = %s, average loss = %s', global_step, tr_loss)
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info('Saving model checkpoint to %s', args.output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list((os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))))
logging.getLogger('pytorch_transformers.modeling_utils').setLevel(logging.WARN)
logger.info('Evaluate the following checkpoints: %s', checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ''
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
(dataset, examples, features) = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
logger.info('***** Running evaluation {} *****'.format(global_step))
logger.info(' Num examples = %d', len(dataset))
logger.info(' Batch size = %d', args.eval_batch_size)
all_results = []
for batch in tqdm(eval_dataloader, desc='Evaluating'):
model.eval()
batch = tuple((t.to(args.device) for t in batch))
with torch.no_grad():
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': None if args.model_type == 'xlm' else batch[2]}
example_indices = batch[3]
if args.model_type in ['xlnet', 'xlm']:
inputs.update({'cls_index': batch[4], 'p_mask': batch[5]})
outputs = model(**inputs)
for (i, example_index) in enumerate(example_indices):
eval_feature = features[example_index.item()]
unique_id = int(eval_feature.unique_id)
if args.model_type in ['xlnet', 'xlm']:
result = RawResultExtended(unique_id=unique_id, start_top_log_probs=to_list(outputs[0][i]), start_top_index=to_list(outputs[1][i]), end_top_log_probs=to_list(outputs[2][i]), end_top_index=to_list(outputs[3][i]), cls_logits=to_list(outputs[4][i]))
else:
result = RawResult(unique_id=unique_id, start_logits=to_list(outputs[0][i]), end_logits=to_list(outputs[1][i]))
all_results.append(result)
output_prediction_file = os.path.join(args.output_dir, 'predictions_{}.json'.format(global_step))
output_nbest_file = os.path.join(args.output_dir, 'nbest_predictions_{}.json'.format(global_step))
if args.version_2_with_negative:
output_null_log_odds_file = os.path.join(args.output_dir, 'null_odds_{}.json'.format(global_step))
else:
output_null_log_odds_file = None
if args.model_type in ['xlnet', 'xlm']:
write_predictions_extended(examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.predict_file, model.config.start_n_top, model.config.end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging)
else:
write_predictions(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold)
evaluate_options = EVAL_OPTS(data_file=args.predict_file, pred_file=output_prediction_file, na_prob_file=output_null_log_odds_file)
(results, exact_raw, f1_raw) = evaluate_on_squad(evaluate_options)
fout = open(args.output_dir + 'evaluation_metrics_per_example.txt', 'w')
fout.write('example_id\texact_match\tf1\n')
for e_id in exact_raw:
fout.write(str(e_id) + '\t' + str(exact_raw[e_id]) + '\t' + str(f1_raw[e_id]) + '\n')
fout.close()
(result, exact_raw, f1_raw) = (results, exact_raw, f1_raw)
result = dict(((k + ('_{}'.format(global_step) if global_step else ''), v) for (k, v) in result.items()))
results.update(result)
logger.info('Results: {}'.format(results))
return results
|
ACS-QG
|
positive
|
def remove_data(self, nodename, group=None):
<DeepExtract>
if not group:
in_file_path = '/' + nodename
else:
in_file_path = group + '/' + nodename
</DeepExtract>
del self[in_file_path]
|
def remove_data(self, nodename, group=None):
if not group:
in_file_path = '/' + nodename
else:
in_file_path = group + '/' + nodename
del self[in_file_path]
|
acoular
|
positive
|
def build_nasnet_cifar(images, num_classes, is_training=True, config=None):
"""Build NASNet model for the Cifar Dataset."""
hparams = cifar_config() if config is None else copy.deepcopy(config)
<DeepExtract>
if not is_training:
hparams.set_hparam('drop_path_keep_prob', 1.0)
</DeepExtract>
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
total_num_cells = hparams.num_cells + 2
normal_cell = nasnet_utils.NasNetANormalCell(hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps)
reduction_cell = nasnet_utils.NasNetAReductionCell(hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps)
with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm], is_training=is_training):
with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm, slim.separable_conv2d, nasnet_utils.factorized_reduction, nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index, nasnet_utils.get_channel_dim], data_format=hparams.data_format):
return _build_nasnet_base(images, normal_cell=normal_cell, reduction_cell=reduction_cell, num_classes=num_classes, hparams=hparams, is_training=is_training, stem_type='cifar')
|
def build_nasnet_cifar(images, num_classes, is_training=True, config=None):
"""Build NASNet model for the Cifar Dataset."""
hparams = cifar_config() if config is None else copy.deepcopy(config)
if not is_training:
hparams.set_hparam('drop_path_keep_prob', 1.0)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
total_num_cells = hparams.num_cells + 2
normal_cell = nasnet_utils.NasNetANormalCell(hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps)
reduction_cell = nasnet_utils.NasNetAReductionCell(hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps)
with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm], is_training=is_training):
with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm, slim.separable_conv2d, nasnet_utils.factorized_reduction, nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index, nasnet_utils.get_channel_dim], data_format=hparams.data_format):
return _build_nasnet_base(images, normal_cell=normal_cell, reduction_cell=reduction_cell, num_classes=num_classes, hparams=hparams, is_training=is_training, stem_type='cifar')
|
CBAM-tensorflow-slim
|
positive
|
def add_scalar(self, name, value):
<DeepExtract>
assert isinstance(name, str) and name
if re.search('[^a-z0-9/_-]+', name):
message = "Invalid metric name '{}'. Names must contain only lower case letters, digits, dashes, underscores, and forward slashes."
raise NameError(message.format(name))
</DeepExtract>
record = self._tags.copy()
record['value'] = float(value)
self._records[name].append(record)
|
def add_scalar(self, name, value):
assert isinstance(name, str) and name
if re.search('[^a-z0-9/_-]+', name):
message = "Invalid metric name '{}'. Names must contain only lower case letters, digits, dashes, underscores, and forward slashes."
raise NameError(message.format(name))
record = self._tags.copy()
record['value'] = float(value)
self._records[name].append(record)
|
dreamer
|
positive
|
def main():
if sys.version_info >= (3, 6):
if not multiprocessing.get_start_method(allow_none=True):
multiprocessing.set_start_method('spawn')
parser = argparse.ArgumentParser()
parser.add_argument('--config-file', default='projects.ini', help='Config file. Default: projects.ini')
parser.add_argument('--config-override', action='append', help='Override a configuration option from the config file. Specify it as: section.option=value. Can be used multiple times if more than one override is needed.')
parser.add_argument('--info-repo', help='use a local distroinfo repo instead of fetching the default one. Only applies when pkginfo_driver is rdoinfo or downstream in projects.ini')
parser.add_argument('--build-env', action='append', help='Variables for the build environment.')
parser.add_argument('--local', action='store_true', help='Use local git repos if possible. Only committed changes in the local repo will be used in the build.')
parser.add_argument('--head-only', action='store_true', help='Build from the most recent Git commit only.')
group = parser.add_mutually_exclusive_group()
group.add_argument('--project-name', action='append', help='Build a specific project name only. Use multiple times to build more than one project in a run.')
group.add_argument('--package-name', action='append', help='Build a specific package name only. Use multiple times to build more than one package in a run.')
parser.add_argument('--dev', action='store_true', help="Don't reset packaging git repo, force build and add public master repo for dependencies (dev mode).")
parser.add_argument('--log-commands', action='store_true', help='Log the commands run by dlrn.')
parser.add_argument('--use-public', action='store_true', help='Use the public master repo for dependencies when doing install verification.')
parser.add_argument('--order', action='store_true', help='Compute the build order according to the spec files instead of the dates of the commits. Implies --sequential.')
parser.add_argument('--sequential', action='store_true', help='Run all actions sequentially, regardless of the number of workers specified in projects.ini.')
parser.add_argument('--status', action='store_true', help='Get the status of packages.')
parser.add_argument('--recheck', action='store_true', help='Force a rebuild for a particular package. Implies --package-name')
parser.add_argument('--force-recheck', action='store_true', help='Force a rebuild for a particular package, even if its last build was successful. Requires setting allow_force_rechecks=True in projects.ini. Implies --package-name and --recheck')
parser.add_argument('--version', action='version', version=version.version_info.version_string())
parser.add_argument('--run', help='Run a program instead of trying to build. Implies --head-only')
parser.add_argument('--stop', action='store_true', help='Stop on error.')
parser.add_argument('--verbose-build', action='store_true', help='Show verbose output during the package build.')
parser.add_argument('--verbose-mock', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--no-repo', action='store_true', help='Do not generate a repo with all the built packages.')
parser.add_argument('--debug', action='store_true', help='Print debug logs')
options = parser.parse_args(sys.argv[1:])
setup_logging(options.debug)
if options.verbose_mock:
logger.warning('The --verbose-mock command-line option is deprecated. Please use --verbose-build instead.')
options.verbose_build = options.verbose_mock
cp = configparser.RawConfigParser()
cp.read(options.config_file)
if options.log_commands is True:
logging.getLogger('sh.command').setLevel(logging.INFO)
if options.order is True:
options.sequential = True
config_options = ConfigOptions(cp, overrides=options.config_override)
if options.dev:
(_, tmpdb_path) = tempfile.mkstemp()
logger.info('Using file %s for temporary db' % tmpdb_path)
config_options.database_connection = 'sqlite:///%s' % tmpdb_path
config_options.verbose_build = options.verbose_build
session = getSession(config_options.database_connection)
pkginfo_driver = config_options.pkginfo_driver
global pkginfo
pkginfo = import_object(pkginfo_driver, cfg_options=config_options)
packages = pkginfo.getpackages(local_info_repo=options.info_repo, tags=config_options.tags, dev_mode=options.dev)
if options.project_name:
pkg_names = [p['name'] for p in packages if p['project'] in options.project_name]
elif options.package_name:
pkg_names = options.package_name
else:
pkg_names = None
if options.status is True:
if not pkg_names:
pkg_names = [p['name'] for p in packages]
for name in pkg_names:
package = [p for p in packages if p['name'] == name][0]
for build_type in package.get('types', ['rpm']):
commit = getLastProcessedCommit(session, name, 'invalid status', type=build_type)
if commit:
print('{:>9}'.format(build_type), name, commit.status)
else:
print('{:>9}'.format(build_type), name, 'NO_BUILD')
sys.exit(0)
if pkg_names:
pkg_name = pkg_names[0]
else:
pkg_name = None
def recheck_commit(commit, force):
if commit.status == 'SUCCESS':
if not force:
logger.error('Trying to recheck an already successful commit, ignoring. If you want to force it, use --force-recheck and set allow_force_rechecks=True in projects.ini')
sys.exit(1)
else:
logger.info('Forcefully rechecking a successfully built commit for %s' % commit.project_name)
elif commit.status == 'RETRY':
logger.warning('Trying to recheck a commit in RETRY state, ignoring.')
sys.exit(0)
session.delete(commit)
session.commit()
sys.exit(0)
if options.recheck is True:
if not pkg_name:
logger.error('Please use --package-name or --project-name with --recheck.')
sys.exit(1)
if options.force_recheck and config_options.allow_force_rechecks:
force_recheck = True
else:
force_recheck = False
package = [p for p in packages if p['name'] == pkg_name][0]
for build_type in package.get('types', ['rpm']):
commit = getLastProcessedCommit(session, pkg_name, type=build_type)
if commit:
<DeepExtract>
if commit.status == 'SUCCESS':
if not force_recheck:
logger.error('Trying to recheck an already successful commit, ignoring. If you want to force it, use --force-recheck and set allow_force_rechecks=True in projects.ini')
sys.exit(1)
else:
logger.info('Forcefully rechecking a successfully built commit for %s' % commit.project_name)
elif commit.status == 'RETRY':
logger.warning('Trying to recheck a commit in RETRY state, ignoring.')
sys.exit(0)
session.delete(commit)
session.commit()
sys.exit(0)
</DeepExtract>
else:
logger.error('There are no existing commits for package %s', pkg_name)
sys.exit(1)
if options.run:
options.head_only = True
toprocess = []
skipped_list = []
if not pkg_name and (not pkg_names):
pool = multiprocessing.Pool()
getinfo_wrapper = partial(getinfo, local=options.local, dev_mode=options.dev, head_only=options.head_only, db_connection=config_options.database_connection, branch=config_options.source, pkginfo=pkginfo)
iterator = pool.imap(getinfo_wrapper, packages)
while True:
try:
(project_toprocess, updated_pkg, skipped) = iterator.next()
for package in packages:
if package['name'] == updated_pkg['name']:
if package['upstream'] == 'Unknown':
package['upstream'] = updated_pkg['upstream']
logger.debug('Updated upstream for package %s to %s', package['name'], package['upstream'])
break
if skipped:
skipped_list.append(updated_pkg['name'])
<DeepExtract>
for commit_toprocess in project_toprocess:
if options.dev is True or options.run or (not session.query(Commit).filter(Commit.commit_hash == commit_toprocess.commit_hash, Commit.distro_hash == commit_toprocess.distro_hash, Commit.extended_hash == commit_toprocess.extended_hash, Commit.type == commit_toprocess.type, Commit.status != 'RETRY').all() and (not session.query(Commit).filter(Commit.dt_commit == commit_toprocess.dt_commit, Commit.distro_hash == commit_toprocess.distro_hash, Commit.extended_hash == commit_toprocess.extended_hash, Commit.type == commit_toprocess.type, Commit.status != 'RETRY').all())):
toprocess.append(commit_toprocess)
</DeepExtract>
except StopIteration:
break
pool.close()
pool.join()
else:
for package in packages:
if package['name'] in pkg_names:
<DeepExtract>
project = package['name']
since = '-1'
session = getSession(config_options.database_connection)
commit = getLastProcessedCommit(session, project, type=type)
if commit:
if commit.commit_branch == getsourcebranch(package, default_branch=config_options.source):
since = '--after=%d' % commit.dt_commit
else:
commit = getLastBuiltCommit(session, project, getsourcebranch(package, default_branch=config_options.source), type=type)
if commit:
logger.info("Last commit belongs to another branch, but we're ok with that")
since = '--after=%d' % commit.dt_commit
options.head_only = True
(project_toprocess, skipped) = pkginfo.getinfo(project=project, package=package, since=since, local=options.local, dev_mode=options.dev, type=type)
closeSession(session)
if since == '-1' or options.head_only:
del project_toprocess[:-1]
(project_toprocess, _, skipped) = (project_toprocess, package, skipped)
</DeepExtract>
if skipped:
skipped_list.append(package['name'])
<DeepExtract>
for commit_toprocess in project_toprocess:
if options.dev is True or options.run or (not session.query(Commit).filter(Commit.commit_hash == commit_toprocess.commit_hash, Commit.distro_hash == commit_toprocess.distro_hash, Commit.extended_hash == commit_toprocess.extended_hash, Commit.type == commit_toprocess.type, Commit.status != 'RETRY').all() and (not session.query(Commit).filter(Commit.dt_commit == commit_toprocess.dt_commit, Commit.distro_hash == commit_toprocess.distro_hash, Commit.extended_hash == commit_toprocess.extended_hash, Commit.type == commit_toprocess.type, Commit.status != 'RETRY').all())):
toprocess.append(commit_toprocess)
</DeepExtract>
closeSession(session)
datadir = os.path.realpath(config_options.datadir)
if not os.path.exists(os.path.join(datadir, 'repos')):
os.makedirs(os.path.join(datadir, 'repos'))
with open(os.path.join(datadir, 'repos', 'skiplist.txt'), 'w') as fp:
for pkg in skipped_list:
fp.write(pkg + '\n')
if len(toprocess) == 0:
if not pkg_name:
logger.info('No commits to build.')
else:
logger.info('No commits to build. If this is not expected, please make sure the package name(s) are correct, and that any failed commit you want to rebuild has been removed from the database.')
return 0
if options.order is True:
logger.info('Reading rpm spec files')
projects = sorted([c.project_name for c in toprocess])
speclist = []
bootstraplist = []
for project_name in projects:
filename = None
for f in os.listdir(pkginfo.distgit_dir(project_name)):
if f.endswith('.spec'):
filename = f
if filename:
specpath = os.path.join(pkginfo.distgit_dir(project_name), filename)
speclist.append(sh.rpmspec('-D', 'repo_bootstrap 1', '-P', specpath))
rawspec = open(specpath).read(-1)
if 'repo_bootstrap' in rawspec:
bootstraplist.append(project_name)
else:
logger.warning('Could not find a spec for package %s' % project_name)
logger.debug('Packages to rebuild: %s' % bootstraplist)
specs = RpmSpecCollection([RpmSpecFile(spec) for spec in speclist])
logger.info('Computing build order')
orders = specs.compute_order()
if 'python-networking_arista' in orders:
orders.insert(orders.index('python-networking_arista'), 'python-networking-arista')
def my_cmp(a, b):
if a.project_name == b.project_name:
_a = a.dt_commit
_b = b.dt_commit
else:
_a = orders.index(a.project_name) if a.project_name in orders else sys.maxsize
_b = orders.index(b.project_name) if b.project_name in orders else sys.maxsize
return (_a > _b) - (_a < _b)
toprocess.sort(key=cmp_to_key(my_cmp))
else:
toprocess.sort()
exit_code = 0
if options.sequential is True:
toprocess_copy = deepcopy(toprocess)
for commit in toprocess:
status = build_worker(packages, commit, run_cmd=options.run, build_env=options.build_env, dev_mode=options.dev, use_public=options.use_public, order=options.order, sequential=True, config_options=config_options, pkginfo=pkginfo)
exception = status[3]
consistent = False
datadir = os.path.realpath(config_options.datadir)
with lock_file(os.path.join(datadir, 'remote.lck')):
session = getSession(config_options.database_connection)
if exception is not None:
logger.error('Received exception %s' % exception)
failures = 1
elif not options.run:
<DeepExtract>
if status[0].type == 'rpm':
failures = post_build_rpm(status, *args, **kwargs)
elif status[0].type == 'container':
failures = post_build_container(status, *args, **kwargs)
else:
raise Exception('Unknown type %s' % status[0].type)
</DeepExtract>
consistent = failures == 0
<DeepExtract>
if status[0].type == 'rpm':
exit_value = process_build_result_rpm(status, *args, **kwargs)
elif status[0].type == 'container':
exit_value = process_build_result_container(status, *args, **kwargs)
else:
raise Exception('Unknown type %s' % status[0].type)
</DeepExtract>
closeSession(session)
if exit_value != 0:
exit_code = exit_value
if options.stop and exit_code != 0:
return exit_code
else:
pool = multiprocessing.Pool(config_options.workers)
build_worker_wrapper = partial(build_worker, packages, run_cmd=options.run, build_env=options.build_env, dev_mode=options.dev, use_public=options.use_public, order=options.order, sequential=False, config_options=config_options, pkginfo=pkginfo)
iterator = pool.imap(build_worker_wrapper, toprocess)
while True:
try:
status = iterator.next()
exception = status[3]
consistent = False
datadir = os.path.realpath(config_options.datadir)
with lock_file(os.path.join(datadir, 'remote.lck')):
session = getSession(config_options.database_connection)
if exception is not None:
logger.info('Received exception %s' % exception)
failures = 1
elif not options.run:
<DeepExtract>
if status[0].type == 'rpm':
failures = post_build_rpm(status, *args, **kwargs)
elif status[0].type == 'container':
failures = post_build_container(status, *args, **kwargs)
else:
raise Exception('Unknown type %s' % status[0].type)
</DeepExtract>
consistent = failures == 0
<DeepExtract>
if status[0].type == 'rpm':
exit_value = process_build_result_rpm(status, *args, **kwargs)
elif status[0].type == 'container':
exit_value = process_build_result_container(status, *args, **kwargs)
else:
raise Exception('Unknown type %s' % status[0].type)
</DeepExtract>
closeSession(session)
if exit_value != 0:
exit_code = exit_value
if options.stop and exit_code != 0:
return exit_code
except StopIteration:
break
pool.close()
pool.join()
session = getSession(config_options.database_connection)
if options.order is True and (not pkg_name):
for bpackage in bootstraplist:
commit = getLastProcessedCommit(session, bpackage)
commit.status = 'RETRY'
session.add(commit)
session.commit()
genreports(packages, options.head_only, session, [])
closeSession(session)
if options.dev:
os.remove(tmpdb_path)
return exit_code
|
def main():
if sys.version_info >= (3, 6):
if not multiprocessing.get_start_method(allow_none=True):
multiprocessing.set_start_method('spawn')
parser = argparse.ArgumentParser()
parser.add_argument('--config-file', default='projects.ini', help='Config file. Default: projects.ini')
parser.add_argument('--config-override', action='append', help='Override a configuration option from the config file. Specify it as: section.option=value. Can be used multiple times if more than one override is needed.')
parser.add_argument('--info-repo', help='use a local distroinfo repo instead of fetching the default one. Only applies when pkginfo_driver is rdoinfo or downstream in projects.ini')
parser.add_argument('--build-env', action='append', help='Variables for the build environment.')
parser.add_argument('--local', action='store_true', help='Use local git repos if possible. Only committed changes in the local repo will be used in the build.')
parser.add_argument('--head-only', action='store_true', help='Build from the most recent Git commit only.')
group = parser.add_mutually_exclusive_group()
group.add_argument('--project-name', action='append', help='Build a specific project name only. Use multiple times to build more than one project in a run.')
group.add_argument('--package-name', action='append', help='Build a specific package name only. Use multiple times to build more than one package in a run.')
parser.add_argument('--dev', action='store_true', help="Don't reset packaging git repo, force build and add public master repo for dependencies (dev mode).")
parser.add_argument('--log-commands', action='store_true', help='Log the commands run by dlrn.')
parser.add_argument('--use-public', action='store_true', help='Use the public master repo for dependencies when doing install verification.')
parser.add_argument('--order', action='store_true', help='Compute the build order according to the spec files instead of the dates of the commits. Implies --sequential.')
parser.add_argument('--sequential', action='store_true', help='Run all actions sequentially, regardless of the number of workers specified in projects.ini.')
parser.add_argument('--status', action='store_true', help='Get the status of packages.')
parser.add_argument('--recheck', action='store_true', help='Force a rebuild for a particular package. Implies --package-name')
parser.add_argument('--force-recheck', action='store_true', help='Force a rebuild for a particular package, even if its last build was successful. Requires setting allow_force_rechecks=True in projects.ini. Implies --package-name and --recheck')
parser.add_argument('--version', action='version', version=version.version_info.version_string())
parser.add_argument('--run', help='Run a program instead of trying to build. Implies --head-only')
parser.add_argument('--stop', action='store_true', help='Stop on error.')
parser.add_argument('--verbose-build', action='store_true', help='Show verbose output during the package build.')
parser.add_argument('--verbose-mock', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--no-repo', action='store_true', help='Do not generate a repo with all the built packages.')
parser.add_argument('--debug', action='store_true', help='Print debug logs')
options = parser.parse_args(sys.argv[1:])
setup_logging(options.debug)
if options.verbose_mock:
logger.warning('The --verbose-mock command-line option is deprecated. Please use --verbose-build instead.')
options.verbose_build = options.verbose_mock
cp = configparser.RawConfigParser()
cp.read(options.config_file)
if options.log_commands is True:
logging.getLogger('sh.command').setLevel(logging.INFO)
if options.order is True:
options.sequential = True
config_options = ConfigOptions(cp, overrides=options.config_override)
if options.dev:
(_, tmpdb_path) = tempfile.mkstemp()
logger.info('Using file %s for temporary db' % tmpdb_path)
config_options.database_connection = 'sqlite:///%s' % tmpdb_path
config_options.verbose_build = options.verbose_build
session = getSession(config_options.database_connection)
pkginfo_driver = config_options.pkginfo_driver
global pkginfo
pkginfo = import_object(pkginfo_driver, cfg_options=config_options)
packages = pkginfo.getpackages(local_info_repo=options.info_repo, tags=config_options.tags, dev_mode=options.dev)
if options.project_name:
pkg_names = [p['name'] for p in packages if p['project'] in options.project_name]
elif options.package_name:
pkg_names = options.package_name
else:
pkg_names = None
if options.status is True:
if not pkg_names:
pkg_names = [p['name'] for p in packages]
for name in pkg_names:
package = [p for p in packages if p['name'] == name][0]
for build_type in package.get('types', ['rpm']):
commit = getLastProcessedCommit(session, name, 'invalid status', type=build_type)
if commit:
print('{:>9}'.format(build_type), name, commit.status)
else:
print('{:>9}'.format(build_type), name, 'NO_BUILD')
sys.exit(0)
if pkg_names:
pkg_name = pkg_names[0]
else:
pkg_name = None
def recheck_commit(commit, force):
if commit.status == 'SUCCESS':
if not force:
logger.error('Trying to recheck an already successful commit, ignoring. If you want to force it, use --force-recheck and set allow_force_rechecks=True in projects.ini')
sys.exit(1)
else:
logger.info('Forcefully rechecking a successfully built commit for %s' % commit.project_name)
elif commit.status == 'RETRY':
logger.warning('Trying to recheck a commit in RETRY state, ignoring.')
sys.exit(0)
session.delete(commit)
session.commit()
sys.exit(0)
if options.recheck is True:
if not pkg_name:
logger.error('Please use --package-name or --project-name with --recheck.')
sys.exit(1)
if options.force_recheck and config_options.allow_force_rechecks:
force_recheck = True
else:
force_recheck = False
package = [p for p in packages if p['name'] == pkg_name][0]
for build_type in package.get('types', ['rpm']):
commit = getLastProcessedCommit(session, pkg_name, type=build_type)
if commit:
if commit.status == 'SUCCESS':
if not force_recheck:
logger.error('Trying to recheck an already successful commit, ignoring. If you want to force it, use --force-recheck and set allow_force_rechecks=True in projects.ini')
sys.exit(1)
else:
logger.info('Forcefully rechecking a successfully built commit for %s' % commit.project_name)
elif commit.status == 'RETRY':
logger.warning('Trying to recheck a commit in RETRY state, ignoring.')
sys.exit(0)
session.delete(commit)
session.commit()
sys.exit(0)
else:
logger.error('There are no existing commits for package %s', pkg_name)
sys.exit(1)
if options.run:
options.head_only = True
toprocess = []
skipped_list = []
if not pkg_name and (not pkg_names):
pool = multiprocessing.Pool()
getinfo_wrapper = partial(getinfo, local=options.local, dev_mode=options.dev, head_only=options.head_only, db_connection=config_options.database_connection, branch=config_options.source, pkginfo=pkginfo)
iterator = pool.imap(getinfo_wrapper, packages)
while True:
try:
(project_toprocess, updated_pkg, skipped) = iterator.next()
for package in packages:
if package['name'] == updated_pkg['name']:
if package['upstream'] == 'Unknown':
package['upstream'] = updated_pkg['upstream']
logger.debug('Updated upstream for package %s to %s', package['name'], package['upstream'])
break
if skipped:
skipped_list.append(updated_pkg['name'])
for commit_toprocess in project_toprocess:
if options.dev is True or options.run or (not session.query(Commit).filter(Commit.commit_hash == commit_toprocess.commit_hash, Commit.distro_hash == commit_toprocess.distro_hash, Commit.extended_hash == commit_toprocess.extended_hash, Commit.type == commit_toprocess.type, Commit.status != 'RETRY').all() and (not session.query(Commit).filter(Commit.dt_commit == commit_toprocess.dt_commit, Commit.distro_hash == commit_toprocess.distro_hash, Commit.extended_hash == commit_toprocess.extended_hash, Commit.type == commit_toprocess.type, Commit.status != 'RETRY').all())):
toprocess.append(commit_toprocess)
except StopIteration:
break
pool.close()
pool.join()
else:
for package in packages:
if package['name'] in pkg_names:
project = package['name']
since = '-1'
session = getSession(config_options.database_connection)
commit = getLastProcessedCommit(session, project, type=type)
if commit:
if commit.commit_branch == getsourcebranch(package, default_branch=config_options.source):
since = '--after=%d' % commit.dt_commit
else:
commit = getLastBuiltCommit(session, project, getsourcebranch(package, default_branch=config_options.source), type=type)
if commit:
logger.info("Last commit belongs to another branch, but we're ok with that")
since = '--after=%d' % commit.dt_commit
options.head_only = True
(project_toprocess, skipped) = pkginfo.getinfo(project=project, package=package, since=since, local=options.local, dev_mode=options.dev, type=type)
closeSession(session)
if since == '-1' or options.head_only:
del project_toprocess[:-1]
(project_toprocess, _, skipped) = (project_toprocess, package, skipped)
if skipped:
skipped_list.append(package['name'])
for commit_toprocess in project_toprocess:
if options.dev is True or options.run or (not session.query(Commit).filter(Commit.commit_hash == commit_toprocess.commit_hash, Commit.distro_hash == commit_toprocess.distro_hash, Commit.extended_hash == commit_toprocess.extended_hash, Commit.type == commit_toprocess.type, Commit.status != 'RETRY').all() and (not session.query(Commit).filter(Commit.dt_commit == commit_toprocess.dt_commit, Commit.distro_hash == commit_toprocess.distro_hash, Commit.extended_hash == commit_toprocess.extended_hash, Commit.type == commit_toprocess.type, Commit.status != 'RETRY').all())):
toprocess.append(commit_toprocess)
closeSession(session)
datadir = os.path.realpath(config_options.datadir)
if not os.path.exists(os.path.join(datadir, 'repos')):
os.makedirs(os.path.join(datadir, 'repos'))
with open(os.path.join(datadir, 'repos', 'skiplist.txt'), 'w') as fp:
for pkg in skipped_list:
fp.write(pkg + '\n')
if len(toprocess) == 0:
if not pkg_name:
logger.info('No commits to build.')
else:
logger.info('No commits to build. If this is not expected, please make sure the package name(s) are correct, and that any failed commit you want to rebuild has been removed from the database.')
return 0
if options.order is True:
logger.info('Reading rpm spec files')
projects = sorted([c.project_name for c in toprocess])
speclist = []
bootstraplist = []
for project_name in projects:
filename = None
for f in os.listdir(pkginfo.distgit_dir(project_name)):
if f.endswith('.spec'):
filename = f
if filename:
specpath = os.path.join(pkginfo.distgit_dir(project_name), filename)
speclist.append(sh.rpmspec('-D', 'repo_bootstrap 1', '-P', specpath))
rawspec = open(specpath).read(-1)
if 'repo_bootstrap' in rawspec:
bootstraplist.append(project_name)
else:
logger.warning('Could not find a spec for package %s' % project_name)
logger.debug('Packages to rebuild: %s' % bootstraplist)
specs = RpmSpecCollection([RpmSpecFile(spec) for spec in speclist])
logger.info('Computing build order')
orders = specs.compute_order()
if 'python-networking_arista' in orders:
orders.insert(orders.index('python-networking_arista'), 'python-networking-arista')
def my_cmp(a, b):
if a.project_name == b.project_name:
_a = a.dt_commit
_b = b.dt_commit
else:
_a = orders.index(a.project_name) if a.project_name in orders else sys.maxsize
_b = orders.index(b.project_name) if b.project_name in orders else sys.maxsize
return (_a > _b) - (_a < _b)
toprocess.sort(key=cmp_to_key(my_cmp))
else:
toprocess.sort()
exit_code = 0
if options.sequential is True:
toprocess_copy = deepcopy(toprocess)
for commit in toprocess:
status = build_worker(packages, commit, run_cmd=options.run, build_env=options.build_env, dev_mode=options.dev, use_public=options.use_public, order=options.order, sequential=True, config_options=config_options, pkginfo=pkginfo)
exception = status[3]
consistent = False
datadir = os.path.realpath(config_options.datadir)
with lock_file(os.path.join(datadir, 'remote.lck')):
session = getSession(config_options.database_connection)
if exception is not None:
logger.error('Received exception %s' % exception)
failures = 1
elif not options.run:
if status[0].type == 'rpm':
failures = post_build_rpm(status, *args, **kwargs)
elif status[0].type == 'container':
failures = post_build_container(status, *args, **kwargs)
else:
raise Exception('Unknown type %s' % status[0].type)
consistent = failures == 0
if status[0].type == 'rpm':
exit_value = process_build_result_rpm(status, *args, **kwargs)
elif status[0].type == 'container':
exit_value = process_build_result_container(status, *args, **kwargs)
else:
raise Exception('Unknown type %s' % status[0].type)
closeSession(session)
if exit_value != 0:
exit_code = exit_value
if options.stop and exit_code != 0:
return exit_code
else:
pool = multiprocessing.Pool(config_options.workers)
build_worker_wrapper = partial(build_worker, packages, run_cmd=options.run, build_env=options.build_env, dev_mode=options.dev, use_public=options.use_public, order=options.order, sequential=False, config_options=config_options, pkginfo=pkginfo)
iterator = pool.imap(build_worker_wrapper, toprocess)
while True:
try:
status = iterator.next()
exception = status[3]
consistent = False
datadir = os.path.realpath(config_options.datadir)
with lock_file(os.path.join(datadir, 'remote.lck')):
session = getSession(config_options.database_connection)
if exception is not None:
logger.info('Received exception %s' % exception)
failures = 1
elif not options.run:
if status[0].type == 'rpm':
failures = post_build_rpm(status, *args, **kwargs)
elif status[0].type == 'container':
failures = post_build_container(status, *args, **kwargs)
else:
raise Exception('Unknown type %s' % status[0].type)
consistent = failures == 0
if status[0].type == 'rpm':
exit_value = process_build_result_rpm(status, *args, **kwargs)
elif status[0].type == 'container':
exit_value = process_build_result_container(status, *args, **kwargs)
else:
raise Exception('Unknown type %s' % status[0].type)
closeSession(session)
if exit_value != 0:
exit_code = exit_value
if options.stop and exit_code != 0:
return exit_code
except StopIteration:
break
pool.close()
pool.join()
session = getSession(config_options.database_connection)
if options.order is True and (not pkg_name):
for bpackage in bootstraplist:
commit = getLastProcessedCommit(session, bpackage)
commit.status = 'RETRY'
session.add(commit)
session.commit()
genreports(packages, options.head_only, session, [])
closeSession(session)
if options.dev:
os.remove(tmpdb_path)
return exit_code
|
DLRN
|
positive
|
def test_lstm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lstm') as data_dir:
<DeepExtract>
def _create_dummy_data(filename):
data = torch.rand(num_examples * maxlen)
data = 97 + torch.floor(26 * data).int()
with open(os.path.join(data_dir, filename), 'w') as h:
offset = 0
for _ in range(num_examples):
ex_len = random.randint(1, maxlen)
ex_str = ' '.join(map(chr, data[offset:offset + ex_len]))
print(ex_str, file=h)
offset += ex_len
_create_dummy_data('train.in')
_create_dummy_data('train.out')
_create_dummy_data('valid.in')
_create_dummy_data('valid.out')
_create_dummy_data('test.in')
_create_dummy_data('test.out')
</DeepExtract>
<DeepExtract>
preprocess_parser = preprocess.get_parser()
preprocess_args = preprocess_parser.parse_args(['--source-lang', 'in', '--target-lang', 'out', '--trainpref', os.path.join(data_dir, 'train'), '--validpref', os.path.join(data_dir, 'valid'), '--testpref', os.path.join(data_dir, 'test'), '--thresholdtgt', '0', '--thresholdsrc', '0', '--destdir', data_dir])
preprocess.main(preprocess_args)
</DeepExtract>
<DeepExtract>
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(train_parser, ['--task', 'translation', data_dir, '--save-dir', data_dir, '--arch', 'lstm_wiseman_iwslt_de_en', '--optimizer', 'nag', '--lr', '0.05', '--max-tokens', '500', '--max-epoch', '1', '--no-progress-bar', '--distributed-world-size', '1', '--source-lang', 'in', '--target-lang', 'out'] + (['--encoder-layers', '2', '--decoder-layers', '2'] or []))
train.main(train_args)
</DeepExtract>
<DeepExtract>
generate_parser = options.get_generation_parser()
generate_args = options.parse_args_and_arch(generate_parser, [data_dir, '--path', os.path.join(data_dir, 'checkpoint_last.pt'), '--beam', '3', '--batch-size', '64', '--max-len-b', '5', '--gen-subset', 'valid', '--no-progress-bar'])
generate.main(generate_args)
generate_args.buffer_size = 0
generate_args.max_sentences = None
orig_stdin = sys.stdin
sys.stdin = StringIO('h e l l o\n')
interactive.main(generate_args)
sys.stdin = orig_stdin
</DeepExtract>
|
def test_lstm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lstm') as data_dir:
def _create_dummy_data(filename):
data = torch.rand(num_examples * maxlen)
data = 97 + torch.floor(26 * data).int()
with open(os.path.join(data_dir, filename), 'w') as h:
offset = 0
for _ in range(num_examples):
ex_len = random.randint(1, maxlen)
ex_str = ' '.join(map(chr, data[offset:offset + ex_len]))
print(ex_str, file=h)
offset += ex_len
_create_dummy_data('train.in')
_create_dummy_data('train.out')
_create_dummy_data('valid.in')
_create_dummy_data('valid.out')
_create_dummy_data('test.in')
_create_dummy_data('test.out')
preprocess_parser = preprocess.get_parser()
preprocess_args = preprocess_parser.parse_args(['--source-lang', 'in', '--target-lang', 'out', '--trainpref', os.path.join(data_dir, 'train'), '--validpref', os.path.join(data_dir, 'valid'), '--testpref', os.path.join(data_dir, 'test'), '--thresholdtgt', '0', '--thresholdsrc', '0', '--destdir', data_dir])
preprocess.main(preprocess_args)
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(train_parser, ['--task', 'translation', data_dir, '--save-dir', data_dir, '--arch', 'lstm_wiseman_iwslt_de_en', '--optimizer', 'nag', '--lr', '0.05', '--max-tokens', '500', '--max-epoch', '1', '--no-progress-bar', '--distributed-world-size', '1', '--source-lang', 'in', '--target-lang', 'out'] + (['--encoder-layers', '2', '--decoder-layers', '2'] or []))
train.main(train_args)
generate_parser = options.get_generation_parser()
generate_args = options.parse_args_and_arch(generate_parser, [data_dir, '--path', os.path.join(data_dir, 'checkpoint_last.pt'), '--beam', '3', '--batch-size', '64', '--max-len-b', '5', '--gen-subset', 'valid', '--no-progress-bar'])
generate.main(generate_args)
generate_args.buffer_size = 0
generate_args.max_sentences = None
orig_stdin = sys.stdin
sys.stdin = StringIO('h e l l o\n')
interactive.main(generate_args)
sys.stdin = orig_stdin
|
dlcl
|
positive
|
def test_get_exec_commmands(self):
assert_a = lambda a: self.assertEqual(ExecCommandList(a), [ExecCommand('a b c', None, ExecPolicy.RESTART)])
assert_b = lambda b: six.assertCountEqual(self, ExecCommandList(b), [ExecCommand(['a', 'b', 'c'], None, ExecPolicy.RESTART), ExecCommand('a b c', 'user', ExecPolicy.RESTART), ExecCommand(['a', 'b', 'c'], 'root', ExecPolicy.INITIAL)])
<DeepExtract>
self.assertEqual(InputConfigIdList('a b c', map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids('a b c', maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
</DeepExtract>
<DeepExtract>
self.assertEqual(InputConfigIdList([ExecCommand('a b c', None, ExecPolicy.RESTART)], map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids([ExecCommand('a b c', None, ExecPolicy.RESTART)], maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
</DeepExtract>
<DeepExtract>
self.assertEqual(InputConfigIdList(['a b c'], map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids(['a b c'], maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
</DeepExtract>
<DeepExtract>
self.assertEqual(InputConfigIdList(({'cmd': 'a b c'},), map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids(({'cmd': 'a b c'},), maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
</DeepExtract>
<DeepExtract>
six.assertCountEqual(self, InputConfigIdList([(['a', 'b', 'c'],), ('a b c', 'user'), [['a', 'b', 'c'], 'root', ExecPolicy.INITIAL]], map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c', ('i',)), InputConfigId(ItemType.CONTAINER, 'm', 'd', ('i',)), InputConfigId(ItemType.CONTAINER, 'n', 'e', ('i', 'j'))])
six.assertCountEqual(self, get_map_config_ids([(['a', 'b', 'c'],), ('a b c', 'user'), [['a', 'b', 'c'], 'root', ExecPolicy.INITIAL]], maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i'), MapConfigId(ItemType.CONTAINER, 'm', 'd', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'j')])
</DeepExtract>
<DeepExtract>
six.assertCountEqual(self, InputConfigIdList([(['a', 'b', 'c'], None), {'cmd': 'a b c', 'user': 'user', 'policy': ExecPolicy.RESTART}, [['a', 'b', 'c'], 'root', ExecPolicy.INITIAL]], map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c', ('i',)), InputConfigId(ItemType.CONTAINER, 'm', 'd', ('i',)), InputConfigId(ItemType.CONTAINER, 'n', 'e', ('i', 'j'))])
six.assertCountEqual(self, get_map_config_ids([(['a', 'b', 'c'], None), {'cmd': 'a b c', 'user': 'user', 'policy': ExecPolicy.RESTART}, [['a', 'b', 'c'], 'root', ExecPolicy.INITIAL]], maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i'), MapConfigId(ItemType.CONTAINER, 'm', 'd', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'j')])
</DeepExtract>
|
def test_get_exec_commmands(self):
assert_a = lambda a: self.assertEqual(ExecCommandList(a), [ExecCommand('a b c', None, ExecPolicy.RESTART)])
assert_b = lambda b: six.assertCountEqual(self, ExecCommandList(b), [ExecCommand(['a', 'b', 'c'], None, ExecPolicy.RESTART), ExecCommand('a b c', 'user', ExecPolicy.RESTART), ExecCommand(['a', 'b', 'c'], 'root', ExecPolicy.INITIAL)])
self.assertEqual(InputConfigIdList('a b c', map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids('a b c', maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
self.assertEqual(InputConfigIdList([ExecCommand('a b c', None, ExecPolicy.RESTART)], map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids([ExecCommand('a b c', None, ExecPolicy.RESTART)], maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
self.assertEqual(InputConfigIdList(['a b c'], map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids(['a b c'], maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
self.assertEqual(InputConfigIdList(({'cmd': 'a b c'},), map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c')])
self.assertEqual(get_map_config_ids(({'cmd': 'a b c'},), maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i')])
six.assertCountEqual(self, InputConfigIdList([(['a', 'b', 'c'],), ('a b c', 'user'), [['a', 'b', 'c'], 'root', ExecPolicy.INITIAL]], map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c', ('i',)), InputConfigId(ItemType.CONTAINER, 'm', 'd', ('i',)), InputConfigId(ItemType.CONTAINER, 'n', 'e', ('i', 'j'))])
six.assertCountEqual(self, get_map_config_ids([(['a', 'b', 'c'],), ('a b c', 'user'), [['a', 'b', 'c'], 'root', ExecPolicy.INITIAL]], maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i'), MapConfigId(ItemType.CONTAINER, 'm', 'd', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'j')])
six.assertCountEqual(self, InputConfigIdList([(['a', 'b', 'c'], None), {'cmd': 'a b c', 'user': 'user', 'policy': ExecPolicy.RESTART}, [['a', 'b', 'c'], 'root', ExecPolicy.INITIAL]], map_name=m, instances=i), [InputConfigId(ItemType.CONTAINER, 'm', 'c', ('i',)), InputConfigId(ItemType.CONTAINER, 'm', 'd', ('i',)), InputConfigId(ItemType.CONTAINER, 'n', 'e', ('i', 'j'))])
six.assertCountEqual(self, get_map_config_ids([(['a', 'b', 'c'], None), {'cmd': 'a b c', 'user': 'user', 'policy': ExecPolicy.RESTART}, [['a', 'b', 'c'], 'root', ExecPolicy.INITIAL]], maps, default_map_name=m, default_instances=i), [MapConfigId(ItemType.CONTAINER, 'm', 'c', 'i'), MapConfigId(ItemType.CONTAINER, 'm', 'd', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'i'), MapConfigId(ItemType.CONTAINER, 'n', 'e', 'j')])
|
docker-map
|
positive
|
def __init__(self, num_classes, builder: ConvBuilder, deps):
super(VCNet, self).__init__()
if deps is None:
deps = VGG_ORIGIN_DEPS
<DeepExtract>
sq = builder.Sequential()
sq.add_module('conv1', builder.Conv2dBNReLU(in_channels=3, out_channels=deps[0], kernel_size=3, stride=1, padding=1))
sq.add_module('conv2', builder.Conv2dBNReLU(in_channels=deps[0], out_channels=deps[1], kernel_size=3, stride=1, padding=1))
sq.add_module('maxpool1', builder.Maxpool2d(kernel_size=2))
sq.add_module('conv3', builder.Conv2dBNReLU(in_channels=deps[1], out_channels=deps[2], kernel_size=3, stride=1, padding=1))
sq.add_module('conv4', builder.Conv2dBNReLU(in_channels=deps[2], out_channels=deps[3], kernel_size=3, stride=1, padding=1))
sq.add_module('maxpool2', builder.Maxpool2d(kernel_size=2))
sq.add_module('conv5', builder.Conv2dBNReLU(in_channels=deps[3], out_channels=deps[4], kernel_size=3, stride=1, padding=1))
sq.add_module('conv6', builder.Conv2dBNReLU(in_channels=deps[4], out_channels=deps[5], kernel_size=3, stride=1, padding=1))
sq.add_module('conv7', builder.Conv2dBNReLU(in_channels=deps[5], out_channels=deps[6], kernel_size=3, stride=1, padding=1))
sq.add_module('maxpool3', builder.Maxpool2d(kernel_size=2))
sq.add_module('conv8', builder.Conv2dBNReLU(in_channels=deps[6], out_channels=deps[7], kernel_size=3, stride=1, padding=1))
sq.add_module('conv9', builder.Conv2dBNReLU(in_channels=deps[7], out_channels=deps[8], kernel_size=3, stride=1, padding=1))
sq.add_module('conv10', builder.Conv2dBNReLU(in_channels=deps[8], out_channels=deps[9], kernel_size=3, stride=1, padding=1))
sq.add_module('maxpool4', builder.Maxpool2d(kernel_size=2))
sq.add_module('conv11', builder.Conv2dBNReLU(in_channels=deps[9], out_channels=deps[10], kernel_size=3, stride=1, padding=1))
sq.add_module('conv12', builder.Conv2dBNReLU(in_channels=deps[10], out_channels=deps[11], kernel_size=3, stride=1, padding=1))
sq.add_module('conv13', builder.Conv2dBNReLU(in_channels=deps[11], out_channels=deps[12], kernel_size=3, stride=1, padding=1))
sq.add_module('maxpool5', builder.Maxpool2d(kernel_size=2))
self.stem = sq
</DeepExtract>
self.flatten = builder.Flatten()
self.linear1 = builder.IntermediateLinear(in_features=deps[12], out_features=512)
self.relu = builder.ReLU()
self.linear2 = builder.Linear(in_features=512, out_features=num_classes)
|
def __init__(self, num_classes, builder: ConvBuilder, deps):
super(VCNet, self).__init__()
if deps is None:
deps = VGG_ORIGIN_DEPS
sq = builder.Sequential()
sq.add_module('conv1', builder.Conv2dBNReLU(in_channels=3, out_channels=deps[0], kernel_size=3, stride=1, padding=1))
sq.add_module('conv2', builder.Conv2dBNReLU(in_channels=deps[0], out_channels=deps[1], kernel_size=3, stride=1, padding=1))
sq.add_module('maxpool1', builder.Maxpool2d(kernel_size=2))
sq.add_module('conv3', builder.Conv2dBNReLU(in_channels=deps[1], out_channels=deps[2], kernel_size=3, stride=1, padding=1))
sq.add_module('conv4', builder.Conv2dBNReLU(in_channels=deps[2], out_channels=deps[3], kernel_size=3, stride=1, padding=1))
sq.add_module('maxpool2', builder.Maxpool2d(kernel_size=2))
sq.add_module('conv5', builder.Conv2dBNReLU(in_channels=deps[3], out_channels=deps[4], kernel_size=3, stride=1, padding=1))
sq.add_module('conv6', builder.Conv2dBNReLU(in_channels=deps[4], out_channels=deps[5], kernel_size=3, stride=1, padding=1))
sq.add_module('conv7', builder.Conv2dBNReLU(in_channels=deps[5], out_channels=deps[6], kernel_size=3, stride=1, padding=1))
sq.add_module('maxpool3', builder.Maxpool2d(kernel_size=2))
sq.add_module('conv8', builder.Conv2dBNReLU(in_channels=deps[6], out_channels=deps[7], kernel_size=3, stride=1, padding=1))
sq.add_module('conv9', builder.Conv2dBNReLU(in_channels=deps[7], out_channels=deps[8], kernel_size=3, stride=1, padding=1))
sq.add_module('conv10', builder.Conv2dBNReLU(in_channels=deps[8], out_channels=deps[9], kernel_size=3, stride=1, padding=1))
sq.add_module('maxpool4', builder.Maxpool2d(kernel_size=2))
sq.add_module('conv11', builder.Conv2dBNReLU(in_channels=deps[9], out_channels=deps[10], kernel_size=3, stride=1, padding=1))
sq.add_module('conv12', builder.Conv2dBNReLU(in_channels=deps[10], out_channels=deps[11], kernel_size=3, stride=1, padding=1))
sq.add_module('conv13', builder.Conv2dBNReLU(in_channels=deps[11], out_channels=deps[12], kernel_size=3, stride=1, padding=1))
sq.add_module('maxpool5', builder.Maxpool2d(kernel_size=2))
self.stem = sq
self.flatten = builder.Flatten()
self.linear1 = builder.IntermediateLinear(in_features=deps[12], out_features=512)
self.relu = builder.ReLU()
self.linear2 = builder.Linear(in_features=512, out_features=num_classes)
|
ACNet
|
positive
|
def send_notification_email(notification=None, emails=None):
sent = False
rule_type = notification.alert.get('rule_type', 'system')
if len(emails) > 0:
compile_functions = {'system': compile_system_email, 'global': compile_system_email, 'process': compile_process_email, 'process_global': compile_process_email, 'uptime': compile_uptime_email, 'plugin': compile_plugin_email, 'plugin_global': compile_plugin_email, 'notsendingdata': compile_notsendingdata_email, 'health_check': compile_health_check_email}
message = None
if rule_type in compile_functions.keys():
try:
message = compile_functions[rule_type](notification=notification)
except Exception as e:
logger.exception('Can not generate {0} email notification'.format(rule_type))
if message:
<DeepExtract>
for to in emails:
msg = EmailMultiAlternatives(message['subject'], '', settings.DEFAULT_FROM_EMAIL, [to])
msg.attach_alternative(message['html_content'], 'text/html')
msg.send()
</DeepExtract>
sent = True
return sent
|
def send_notification_email(notification=None, emails=None):
sent = False
rule_type = notification.alert.get('rule_type', 'system')
if len(emails) > 0:
compile_functions = {'system': compile_system_email, 'global': compile_system_email, 'process': compile_process_email, 'process_global': compile_process_email, 'uptime': compile_uptime_email, 'plugin': compile_plugin_email, 'plugin_global': compile_plugin_email, 'notsendingdata': compile_notsendingdata_email, 'health_check': compile_health_check_email}
message = None
if rule_type in compile_functions.keys():
try:
message = compile_functions[rule_type](notification=notification)
except Exception as e:
logger.exception('Can not generate {0} email notification'.format(rule_type))
if message:
for to in emails:
msg = EmailMultiAlternatives(message['subject'], '', settings.DEFAULT_FROM_EMAIL, [to])
msg.attach_alternative(message['html_content'], 'text/html')
msg.send()
sent = True
return sent
|
amon
|
positive
|
def initialize(self, ply_path, dem_path, offset=True):
self.dem = gdal.Open(dem_path)
transform = self.dem.GetGeoTransform()
xOrigin = transform[0]
yOrigin = transform[3]
pixelWidth = transform[1]
pixelHeight = transform[5]
band = self.dem.GetRasterBand(1)
data = band.ReadAsArray()
r1 = [[0, i] for i in range(data.shape[1])]
r2 = [[data.shape[0] - 1, i] for i in range(data.shape[1])]
r3 = [[i, 0] for i in range(data.shape[0])]
r4 = [[i, data.shape[1] - 1] for i in range(data.shape[0])]
r = np.r_[r1, r2, r3, r4]
dem_parameter = [xOrigin, yOrigin, pixelWidth, pixelHeight, data, r]
self.ply_path = ply_path
self.obj_path = ply_path + '_obj'
self.surface_path = ply_path + '_surface'
self.offset_flag = offset
if not os.path.exists(self.obj_path):
os.makedirs(self.obj_path)
if not os.path.exists(self.surface_path):
os.makedirs(self.surface_path)
file_name = os.listdir(self.ply_path)
log_file = open(self.obj_path + '_model_log.txt', 'w')
log_file.write(time.asctime() + '\n######\n')
start_time = time.time()
self.building_name = [fp.replace('.ply', '') for fp in file_name]
for fp in file_name:
<DeepExtract>
try:
plydata = PlyData.read(os.path.join(self.ply_path, fp))
if plydata['vertex'].count == 0:
return
cor = np.vstack((plydata['vertex']['x'], plydata['vertex']['y'], plydata['vertex']['z'])).transpose()
if self.x_offset is None:
self.x_offset = min(cor[:, 0])
self.y_offset = min(cor[:, 1])
self.z_offset = min(cor[:, 2])
else:
self.x_offset = min(self.x_offset, min(cor[:, 0]))
self.y_offset = min(self.y_offset, min(cor[:, 1]))
self.z_offset = min(self.z_offset, min(cor[:, 2]))
except:
(cor, f) = ply_parser(os.path.join(self.ply_path, fp))
for i in range(0, len(f)):
for j in range(0, len(f[i])):
f[i][j] = int(f[i][j])
del f[i][0]
for face_index in f:
face_cor = cor[face_index]
if self.x_offset is None:
self.x_offset = min(face_cor[:, 0])
self.y_offset = min(face_cor[:, 1])
self.z_offset = min(face_cor[:, 2])
else:
self.x_offset = min(self.x_offset, min(face_cor[:, 0]))
self.y_offset = min(self.y_offset, min(face_cor[:, 1]))
self.z_offset = min(self.z_offset, min(face_cor[:, 2]))
</DeepExtract>
for fp in file_name:
process = ''.join(['Now loading the PLY: ' + fp + '\n'])
sys.stdout.write(process)
if os.path.splitext(fp)[-1] == '.ply':
if 'curve' in fp:
self.buildings.append(self.load_from_curved_ply(os.path.join(self.ply_path, fp)))
else:
self.buildings.append(self.load_from_ply(os.path.join(self.ply_path, fp)))
self.building_num += 1
sys.stdout.write('Loading PLY finished! \n')
for i in range(0, self.building_num):
process = ''.join(['Now processing intersected surfaces: ' + str(i) + '\r'])
sys.stdout.write(process)
self.buildings[i].split_surface()
sys.stdout.flush()
sys.stdout.write('Processing intersected surfaces finished!\n')
for i in range(0, self.building_num):
process = ''.join(['Now generating bottom surfaces: ' + str(i) + '\r'])
sys.stdout.write(process)
self.buildings[i].get_bottomsurface(dem_parameter)
self.buildings[i].get_flatsurface()
sys.stdout.flush()
sys.stdout.write('Generating bottom surfaces finished!\n')
generate_model_time = time.time()
log_file.write('Generate model time:')
log_file.write(str(generate_model_time - start_time) + 's' + '\n######\n')
log_file.close()
|
def initialize(self, ply_path, dem_path, offset=True):
self.dem = gdal.Open(dem_path)
transform = self.dem.GetGeoTransform()
xOrigin = transform[0]
yOrigin = transform[3]
pixelWidth = transform[1]
pixelHeight = transform[5]
band = self.dem.GetRasterBand(1)
data = band.ReadAsArray()
r1 = [[0, i] for i in range(data.shape[1])]
r2 = [[data.shape[0] - 1, i] for i in range(data.shape[1])]
r3 = [[i, 0] for i in range(data.shape[0])]
r4 = [[i, data.shape[1] - 1] for i in range(data.shape[0])]
r = np.r_[r1, r2, r3, r4]
dem_parameter = [xOrigin, yOrigin, pixelWidth, pixelHeight, data, r]
self.ply_path = ply_path
self.obj_path = ply_path + '_obj'
self.surface_path = ply_path + '_surface'
self.offset_flag = offset
if not os.path.exists(self.obj_path):
os.makedirs(self.obj_path)
if not os.path.exists(self.surface_path):
os.makedirs(self.surface_path)
file_name = os.listdir(self.ply_path)
log_file = open(self.obj_path + '_model_log.txt', 'w')
log_file.write(time.asctime() + '\n######\n')
start_time = time.time()
self.building_name = [fp.replace('.ply', '') for fp in file_name]
for fp in file_name:
try:
plydata = PlyData.read(os.path.join(self.ply_path, fp))
if plydata['vertex'].count == 0:
return
cor = np.vstack((plydata['vertex']['x'], plydata['vertex']['y'], plydata['vertex']['z'])).transpose()
if self.x_offset is None:
self.x_offset = min(cor[:, 0])
self.y_offset = min(cor[:, 1])
self.z_offset = min(cor[:, 2])
else:
self.x_offset = min(self.x_offset, min(cor[:, 0]))
self.y_offset = min(self.y_offset, min(cor[:, 1]))
self.z_offset = min(self.z_offset, min(cor[:, 2]))
except:
(cor, f) = ply_parser(os.path.join(self.ply_path, fp))
for i in range(0, len(f)):
for j in range(0, len(f[i])):
f[i][j] = int(f[i][j])
del f[i][0]
for face_index in f:
face_cor = cor[face_index]
if self.x_offset is None:
self.x_offset = min(face_cor[:, 0])
self.y_offset = min(face_cor[:, 1])
self.z_offset = min(face_cor[:, 2])
else:
self.x_offset = min(self.x_offset, min(face_cor[:, 0]))
self.y_offset = min(self.y_offset, min(face_cor[:, 1]))
self.z_offset = min(self.z_offset, min(face_cor[:, 2]))
for fp in file_name:
process = ''.join(['Now loading the PLY: ' + fp + '\n'])
sys.stdout.write(process)
if os.path.splitext(fp)[-1] == '.ply':
if 'curve' in fp:
self.buildings.append(self.load_from_curved_ply(os.path.join(self.ply_path, fp)))
else:
self.buildings.append(self.load_from_ply(os.path.join(self.ply_path, fp)))
self.building_num += 1
sys.stdout.write('Loading PLY finished! \n')
for i in range(0, self.building_num):
process = ''.join(['Now processing intersected surfaces: ' + str(i) + '\r'])
sys.stdout.write(process)
self.buildings[i].split_surface()
sys.stdout.flush()
sys.stdout.write('Processing intersected surfaces finished!\n')
for i in range(0, self.building_num):
process = ''.join(['Now generating bottom surfaces: ' + str(i) + '\r'])
sys.stdout.write(process)
self.buildings[i].get_bottomsurface(dem_parameter)
self.buildings[i].get_flatsurface()
sys.stdout.flush()
sys.stdout.write('Generating bottom surfaces finished!\n')
generate_model_time = time.time()
log_file.write('Generate model time:')
log_file.write(str(generate_model_time - start_time) + 's' + '\n######\n')
log_file.close()
|
Danesfield
|
positive
|
def is_hashed_base58_valid(base58):
"""Return True if and only if base58 is valid hashed_base58."""
try:
<DeepExtract>
data = a2b_base58(base58)
(data, the_hash) = (data[:-4], data[-4:])
if double_sha256(data)[:4] == the_hash:
return data
raise EncodingError('hashed base58 has bad checksum %s' % base58)
</DeepExtract>
except EncodingError:
return False
return True
|
def is_hashed_base58_valid(base58):
"""Return True if and only if base58 is valid hashed_base58."""
try:
data = a2b_base58(base58)
(data, the_hash) = (data[:-4], data[-4:])
if double_sha256(data)[:4] == the_hash:
return data
raise EncodingError('hashed base58 has bad checksum %s' % base58)
except EncodingError:
return False
return True
|
dashman
|
positive
|
def on_release_on_area(self, event, surface, event_x, event_y):
<DeepExtract>
cairo_context = self.get_context()
if self._path is None:
cairo_context.move_to(self.x_press, self.y_press)
elif self._didnt_really_move(cairo_context, event_x, event_y):
length = -1
for pts in self._path:
if pts[1] == ():
continue
length += 1
for (index, pts) in enumerate(self._path):
if pts[1] == ():
continue
if pts[0] == cairo.PathDataType.MOVE_TO:
cairo_context.move_to(pts[1][0], pts[1][1])
elif index == length:
event_x = (pts[1][0] + event_x) / 2
event_y = (pts[1][1] + event_y) / 2
break
else:
cairo_context.line_to(pts[1][0], pts[1][1])
cairo_context.line_to(event_x, event_y)
self._path = cairo_context.copy_path()
</DeepExtract>
<DeepExtract>
operation = {'tool_id': self.id, 'rgba': self.main_color, 'width': self.tool_width, 'path': self._path, 'bg-type': self._bg_type, 'halpha': self._force_alpha}
operation = operation
</DeepExtract>
self.apply_operation(operation)
|
def on_release_on_area(self, event, surface, event_x, event_y):
cairo_context = self.get_context()
if self._path is None:
cairo_context.move_to(self.x_press, self.y_press)
elif self._didnt_really_move(cairo_context, event_x, event_y):
length = -1
for pts in self._path:
if pts[1] == ():
continue
length += 1
for (index, pts) in enumerate(self._path):
if pts[1] == ():
continue
if pts[0] == cairo.PathDataType.MOVE_TO:
cairo_context.move_to(pts[1][0], pts[1][1])
elif index == length:
event_x = (pts[1][0] + event_x) / 2
event_y = (pts[1][1] + event_y) / 2
break
else:
cairo_context.line_to(pts[1][0], pts[1][1])
cairo_context.line_to(event_x, event_y)
self._path = cairo_context.copy_path()
operation = {'tool_id': self.id, 'rgba': self.main_color, 'width': self.tool_width, 'path': self._path, 'bg-type': self._bg_type, 'halpha': self._force_alpha}
operation = operation
self.apply_operation(operation)
|
drawing
|
positive
|
def create_proto_files():
print('###CREATING PROTO FILES###')
<DeepExtract>
proto_directories = [x[0] for x in os.walk(get_dir()) if 'api' in x[0] and '__pycache__' not in x[0]]
file_result = []
path_lists = []
for path in proto_directories:
if exclude_dir is not None and exclude_dir in path:
continue
path_list = split_to_list(path)
deepness = get_deepness('api', path_list)
left_over_paths = path_list[-deepness:]
path_lists.append((path, deepness, left_over_paths))
for path_item in path_lists:
path = path_item[0]
only_files = [(os.path.join(path, f), f) for f in os.listdir(path) if os.path.isfile(os.path.join(path, f)) and '.proto' in f and ('__init__' not in f)]
for file in only_files:
file_result.append((path_item[1], file[0]))
file_list = file_result
</DeepExtract>
for file in file_list:
path = file[0]
file = file[1]
print('creating proto file', file, end='\t')
result = call([get_proto(), '--python_out=' + proto_dir, '--proto_path=' + current_dir, file])
if result != 0:
raise ValueError(result)
print(result)
|
def create_proto_files():
print('###CREATING PROTO FILES###')
proto_directories = [x[0] for x in os.walk(get_dir()) if 'api' in x[0] and '__pycache__' not in x[0]]
file_result = []
path_lists = []
for path in proto_directories:
if exclude_dir is not None and exclude_dir in path:
continue
path_list = split_to_list(path)
deepness = get_deepness('api', path_list)
left_over_paths = path_list[-deepness:]
path_lists.append((path, deepness, left_over_paths))
for path_item in path_lists:
path = path_item[0]
only_files = [(os.path.join(path, f), f) for f in os.listdir(path) if os.path.isfile(os.path.join(path, f)) and '.proto' in f and ('__init__' not in f)]
for file in only_files:
file_result.append((path_item[1], file[0]))
file_list = file_result
for file in file_list:
path = file[0]
file = file[1]
print('creating proto file', file, end='\t')
result = call([get_proto(), '--python_out=' + proto_dir, '--proto_path=' + current_dir, file])
if result != 0:
raise ValueError(result)
print(result)
|
carball
|
positive
|
def test_rql_operators(self):
(limit, offset) = (1, 2)
request = Request(factory.get('/?search=0&limit=eq={0},eq(offset,{1})'.format(limit, offset)))
<DeepExtract>
queryset = list(self.pagination.paginate_queryset(self.queryset, request))
</DeepExtract>
assert queryset == [3]
|
def test_rql_operators(self):
(limit, offset) = (1, 2)
request = Request(factory.get('/?search=0&limit=eq={0},eq(offset,{1})'.format(limit, offset)))
queryset = list(self.pagination.paginate_queryset(self.queryset, request))
assert queryset == [3]
|
django-rql
|
positive
|
def _remove(self):
<DeepExtract>
self.root._unregister(self._function, self.id)
</DeepExtract>
return super(Button, self)._remove()
|
def _remove(self):
self.root._unregister(self._function, self.id)
return super(Button, self)._remove()
|
awe
|
positive
|
def _test_many_to_may_field_simple(self, order_class, product_class, items_field_attribute):
items_field = order_class._meta.get_field(items_field_attribute)
self.assertTrue(items_field.is_relation)
self.assertTrue(items_field.many_to_many)
<DeepExtract>
if isinstance(items_field.related_model, str):
self.assertEqual(items_field.related_model, product_class.__name__)
else:
self.assertIs(items_field.related_model, product_class)
</DeepExtract>
m2m_field_name = items_field.m2m_field_name()
m2m_field = items_field.remote_field.through._meta.get_field(m2m_field_name)
m2m_reverse_field_name = items_field.m2m_reverse_field_name()
m2m_reverse_field = items_field.remote_field.through._meta.get_field(m2m_reverse_field_name)
<DeepExtract>
if isinstance(m2m_field.related_model, str):
self.assertEqual(m2m_field.related_model, order_class.__name__)
else:
self.assertIs(m2m_field.related_model, order_class)
</DeepExtract>
<DeepExtract>
if isinstance(m2m_reverse_field.related_model, str):
self.assertEqual(m2m_reverse_field.related_model, product_class.__name__)
else:
self.assertIs(m2m_reverse_field.related_model, product_class)
</DeepExtract>
|
def _test_many_to_may_field_simple(self, order_class, product_class, items_field_attribute):
items_field = order_class._meta.get_field(items_field_attribute)
self.assertTrue(items_field.is_relation)
self.assertTrue(items_field.many_to_many)
if isinstance(items_field.related_model, str):
self.assertEqual(items_field.related_model, product_class.__name__)
else:
self.assertIs(items_field.related_model, product_class)
m2m_field_name = items_field.m2m_field_name()
m2m_field = items_field.remote_field.through._meta.get_field(m2m_field_name)
m2m_reverse_field_name = items_field.m2m_reverse_field_name()
m2m_reverse_field = items_field.remote_field.through._meta.get_field(m2m_reverse_field_name)
if isinstance(m2m_field.related_model, str):
self.assertEqual(m2m_field.related_model, order_class.__name__)
else:
self.assertIs(m2m_field.related_model, order_class)
if isinstance(m2m_reverse_field.related_model, str):
self.assertEqual(m2m_reverse_field.related_model, product_class.__name__)
else:
self.assertIs(m2m_reverse_field.related_model, product_class)
|
django-shop
|
positive
|
def checkin(self, info, run=True):
"""
Process attached device
"""
self.cache.listed = self.cache.listed + [info]
<DeepExtract>
for d in self.devices:
if info['ECID'] == d.ecid:
device = d
raise CacheError('{0!s}: not in cache'.format(info['ECID']))
</DeepExtract>
device.verified = False
if self.ignored(device):
self.task.remove([device.ecid])
self.log.info('ignored: %s', device)
return
with self.lock.acquire(timeout=-1):
if not self.isManaged(device):
try:
choice = prompt.automation(device)
except prompt.Cancelled:
self.log.info('user cancelled automation: %s', device)
return
if choice == 'Ignore':
<DeepExtract>
self.log.info('ignoring device: %s', device)
if not self.ignored(device):
self.config.add('IgnoredDevices', [device.ecid])
else:
self.log.debug('device already ignored: %s', device)
</DeepExtract>
return
elif choice == 'Erase':
<DeepExtract>
self.log.debug('%s: managing device', device)
device.managed = True
</DeepExtract>
self.task.erase([device.ecid])
else:
err = 'unsupported choice: {0!r}'.format(choice)
self.log.error(err)
raise Error(err)
else:
self.log.debug('%s: is managed', device)
if self.stopped:
<DeepExtract>
_reason = 'StopReason'
if self.stopped and (not self.config.get(_reason)):
self.log.debug('stopped for no reason...')
self.stopped = False
return
self.log.debug('instructed to wait for: {0}'.format('restart'))
if 'restart' != self.config.get(_reason):
raise Stopped('not stopped for: {0}'.format('restart'))
self.task.get('restart', only=[device.ecid])
device.restarting = False
self.log.debug('waiting for: {0}'.format('restart'))
lockfile = '/tmp/ipad-{0}.lock'.format('restart')
lock = config.FileLock(lockfile)
with lock.acquire(timeout=-1):
waiting = self.task.list('restart')
if not waiting:
if self.stopped and self.config.get(_reason):
self.stopped = False
self.config.delete(_reason)
return
stoptime = dt.datetime.now() + dt.timedelta(seconds=wait)
while waiting:
time.sleep(5)
msg = 'waiting on {0}: {1}'.format('restart', waiting)
self.log.debug(msg)
waiting = self.task.list('restart')
if dt.datetime.now() > stoptime:
self.log.debug('gave up waiting')
break
self.stopped = False
self.config.delete(_reason)
</DeepExtract>
if device.restarting:
device.restarting = False
if self.need_to_erase(device):
self.log.debug('%s: will be erased', device)
self.task.erase([device.ecid])
self.task.query('installedApps', [device.ecid])
else:
self.log.debug('%s: will not be erased', device)
device.checkin = dt.datetime.now()
if run:
<DeepExtract>
with self.lock.acquire(timeout=-1):
self.log.info('running automation')
if self.stopped:
self.log.info('automation stopped')
return
self.verified = False
if self.task.alldone() and self.verified:
self.log.info('all tasks have been completed')
return
else:
for (k, v) in self.task.record.items():
if v:
self.log.info('tasked: %s: %s', k, v)
time.sleep(5)
self.run_queries()
devices = self.available()
self.log.debug('available: %s', devices)
try:
self.erase(devices)
try:
self.supervise(devices)
except SkipSupervision:
self.log.info('supervision skipped')
self.installapps(devices)
except Stopped as e:
self.log.info(e)
return
except:
self.log.exception('unexpected error occurred')
raise
self.finalize()
self.log.info('finished')
</DeepExtract>
|
def checkin(self, info, run=True):
"""
Process attached device
"""
self.cache.listed = self.cache.listed + [info]
for d in self.devices:
if info['ECID'] == d.ecid:
device = d
raise CacheError('{0!s}: not in cache'.format(info['ECID']))
device.verified = False
if self.ignored(device):
self.task.remove([device.ecid])
self.log.info('ignored: %s', device)
return
with self.lock.acquire(timeout=-1):
if not self.isManaged(device):
try:
choice = prompt.automation(device)
except prompt.Cancelled:
self.log.info('user cancelled automation: %s', device)
return
if choice == 'Ignore':
self.log.info('ignoring device: %s', device)
if not self.ignored(device):
self.config.add('IgnoredDevices', [device.ecid])
else:
self.log.debug('device already ignored: %s', device)
return
elif choice == 'Erase':
self.log.debug('%s: managing device', device)
device.managed = True
self.task.erase([device.ecid])
else:
err = 'unsupported choice: {0!r}'.format(choice)
self.log.error(err)
raise Error(err)
else:
self.log.debug('%s: is managed', device)
if self.stopped:
_reason = 'StopReason'
if self.stopped and (not self.config.get(_reason)):
self.log.debug('stopped for no reason...')
self.stopped = False
return
self.log.debug('instructed to wait for: {0}'.format('restart'))
if 'restart' != self.config.get(_reason):
raise Stopped('not stopped for: {0}'.format('restart'))
self.task.get('restart', only=[device.ecid])
device.restarting = False
self.log.debug('waiting for: {0}'.format('restart'))
lockfile = '/tmp/ipad-{0}.lock'.format('restart')
lock = config.FileLock(lockfile)
with lock.acquire(timeout=-1):
waiting = self.task.list('restart')
if not waiting:
if self.stopped and self.config.get(_reason):
self.stopped = False
self.config.delete(_reason)
return
stoptime = dt.datetime.now() + dt.timedelta(seconds=wait)
while waiting:
time.sleep(5)
msg = 'waiting on {0}: {1}'.format('restart', waiting)
self.log.debug(msg)
waiting = self.task.list('restart')
if dt.datetime.now() > stoptime:
self.log.debug('gave up waiting')
break
self.stopped = False
self.config.delete(_reason)
if device.restarting:
device.restarting = False
if self.need_to_erase(device):
self.log.debug('%s: will be erased', device)
self.task.erase([device.ecid])
self.task.query('installedApps', [device.ecid])
else:
self.log.debug('%s: will not be erased', device)
device.checkin = dt.datetime.now()
if run:
with self.lock.acquire(timeout=-1):
self.log.info('running automation')
if self.stopped:
self.log.info('automation stopped')
return
self.verified = False
if self.task.alldone() and self.verified:
self.log.info('all tasks have been completed')
return
else:
for (k, v) in self.task.record.items():
if v:
self.log.info('tasked: %s: %s', k, v)
time.sleep(5)
self.run_queries()
devices = self.available()
self.log.debug('available: %s', devices)
try:
self.erase(devices)
try:
self.supervise(devices)
except SkipSupervision:
self.log.info('supervision skipped')
self.installapps(devices)
except Stopped as e:
self.log.info(e)
return
except:
self.log.exception('unexpected error occurred')
raise
self.finalize()
self.log.info('finished')
|
aeios
|
positive
|
@registry.ROI_KEYPOINT_FEATURE_EXTRACTORS.register('FBNet.roi_head_keypoints')
def add_roi_head_keypoints(cfg, in_channels):
<DeepExtract>
bn_type = cfg.MODEL.FBNET.BN_TYPE
if bn_type == 'gn':
bn_type = (bn_type, cfg.GROUP_NORM.NUM_GROUPS)
factor = cfg.MODEL.FBNET.SCALE_FACTOR
arch = cfg.MODEL.FBNET.ARCH
arch_def = cfg.MODEL.FBNET.ARCH_DEF
if len(arch_def) > 0:
arch_def = json.loads(arch_def)
if arch in modeldef.MODEL_ARCH:
if len(arch_def) > 0:
assert arch_def == modeldef.MODEL_ARCH[arch], 'Two architectures with the same name {},\n{},\n{}'.format(arch, arch_def, modeldef.MODEL_ARCH[arch])
arch_def = modeldef.MODEL_ARCH[arch]
else:
assert arch_def is not None and len(arch_def) > 0
arch_def = mbuilder.unify_arch_def(arch_def)
rpn_stride = arch_def.get('rpn_stride', None)
if rpn_stride is not None:
assert cfg.MODEL.RPN.ANCHOR_STRIDE[0] == rpn_stride, 'Needs to set cfg.MODEL.RPN.ANCHOR_STRIDE to {}, got {}'.format(rpn_stride, cfg.MODEL.RPN.ANCHOR_STRIDE)
width_divisor = cfg.MODEL.FBNET.WIDTH_DIVISOR
dw_skip_bn = cfg.MODEL.FBNET.DW_CONV_SKIP_BN
dw_skip_relu = cfg.MODEL.FBNET.DW_CONV_SKIP_RELU
logger.info('Building fbnet model with arch {} (without scaling):\n{}'.format(arch, arch_def))
builder = mbuilder.FBNetBuilder(width_ratio=factor, bn_type=bn_type, width_divisor=width_divisor, dw_skip_bn=dw_skip_bn, dw_skip_relu=dw_skip_relu)
(builder, model_arch) = (builder, arch_def)
</DeepExtract>
builder.last_depth = in_channels
return FBNetROIHead(cfg, in_channels, builder, model_arch, head_name='kpts', use_blocks=cfg.MODEL.FBNET.KPTS_HEAD_BLOCKS, stride_init=cfg.MODEL.FBNET.KPTS_HEAD_STRIDE, last_layer_scale=cfg.MODEL.FBNET.KPTS_HEAD_LAST_SCALE)
|
@registry.ROI_KEYPOINT_FEATURE_EXTRACTORS.register('FBNet.roi_head_keypoints')
def add_roi_head_keypoints(cfg, in_channels):
bn_type = cfg.MODEL.FBNET.BN_TYPE
if bn_type == 'gn':
bn_type = (bn_type, cfg.GROUP_NORM.NUM_GROUPS)
factor = cfg.MODEL.FBNET.SCALE_FACTOR
arch = cfg.MODEL.FBNET.ARCH
arch_def = cfg.MODEL.FBNET.ARCH_DEF
if len(arch_def) > 0:
arch_def = json.loads(arch_def)
if arch in modeldef.MODEL_ARCH:
if len(arch_def) > 0:
assert arch_def == modeldef.MODEL_ARCH[arch], 'Two architectures with the same name {},\n{},\n{}'.format(arch, arch_def, modeldef.MODEL_ARCH[arch])
arch_def = modeldef.MODEL_ARCH[arch]
else:
assert arch_def is not None and len(arch_def) > 0
arch_def = mbuilder.unify_arch_def(arch_def)
rpn_stride = arch_def.get('rpn_stride', None)
if rpn_stride is not None:
assert cfg.MODEL.RPN.ANCHOR_STRIDE[0] == rpn_stride, 'Needs to set cfg.MODEL.RPN.ANCHOR_STRIDE to {}, got {}'.format(rpn_stride, cfg.MODEL.RPN.ANCHOR_STRIDE)
width_divisor = cfg.MODEL.FBNET.WIDTH_DIVISOR
dw_skip_bn = cfg.MODEL.FBNET.DW_CONV_SKIP_BN
dw_skip_relu = cfg.MODEL.FBNET.DW_CONV_SKIP_RELU
logger.info('Building fbnet model with arch {} (without scaling):\n{}'.format(arch, arch_def))
builder = mbuilder.FBNetBuilder(width_ratio=factor, bn_type=bn_type, width_divisor=width_divisor, dw_skip_bn=dw_skip_bn, dw_skip_relu=dw_skip_relu)
(builder, model_arch) = (builder, arch_def)
builder.last_depth = in_channels
return FBNetROIHead(cfg, in_channels, builder, model_arch, head_name='kpts', use_blocks=cfg.MODEL.FBNET.KPTS_HEAD_BLOCKS, stride_init=cfg.MODEL.FBNET.KPTS_HEAD_STRIDE, last_layer_scale=cfg.MODEL.FBNET.KPTS_HEAD_LAST_SCALE)
|
dgmn
|
positive
|
def __init__(self, backend_name):
self.backend_name = backend_name
<DeepExtract>
opts = Config()
self.database = create_database('mysql://' + opts.db_user_out + ':' + opts.db_password_out + '@' + opts.db_hostname_out + ':' + opts.db_port_out + '/' + opts.db_database_out)
self.store = Store(self.database)
</DeepExtract>
<DeepExtract>
print('self.backend_name = %s' % self.backend_name)
if self.backend_is_bugzilla():
self.store.execute(__sql_table_bugzilla__)
elif self.backend_is_jira():
self.store.execute(__sql_table_jira__)
</DeepExtract>
|
def __init__(self, backend_name):
self.backend_name = backend_name
opts = Config()
self.database = create_database('mysql://' + opts.db_user_out + ':' + opts.db_password_out + '@' + opts.db_hostname_out + ':' + opts.db_port_out + '/' + opts.db_database_out)
self.store = Store(self.database)
print('self.backend_name = %s' % self.backend_name)
if self.backend_is_bugzilla():
self.store.execute(__sql_table_bugzilla__)
elif self.backend_is_jira():
self.store.execute(__sql_table_jira__)
|
Bicho
|
positive
|
@pytest.mark.end2end
@responses.activate
def test_slack_oauth_flow_first_time_installation(client, login_client, session, factory, patch_slack, create_slack_headers):
<DeepExtract>
def _patch_slack(module_to_patch_slack, *, is_admin=None, details=None):
obj = FakeSlackClient(is_admin=is_admin, details=details)
'busy_beaver.apps.slack_integration.oauth.workflow'(module_to_patch_slack, namespace='SlackClient', replacement=obj)
patched_slack = obj
patched_slack = _patch_slack
</DeepExtract>
authorizing_user_id = 'abc'
workspace_id = 'T9TK3CUKW'
workspace_name = 'Slack Softball Team'
bot_user_id = 'U0KRQLJ9H'
scope = 'app_mentions:read,channels:history'
bot_access_token = 'xoxb-17653672481-19874698323-pdFZKVeTuE8sk7oOcBrzbqgy'
responses.add(responses.POST, SlackInstallationOAuthFlow.TOKEN_URL, match_querystring=False, json={'access_token': bot_access_token, 'app_id': 'A0KRD7HC3', 'authed_user': {'id': authorizing_user_id}, 'bot_user_id': bot_user_id, 'enterprise': None, 'ok': True, 'response_metadata': {'warnings': ['superfluous_charset']}, 'scope': scope, 'team': {'name': workspace_name, 'id': workspace_id}, 'token_type': 'bot', 'warning': 'superfluous_charset'})
params = {'code': 'issued_code', 'state': ''}
response = client.get('/slack/installation-callback', query_string=params)
assert response.status_code == 200
installation = SlackInstallation.query.first()
assert installation.scope == scope
assert installation.workspace_name == workspace_name
assert installation.workspace_id == workspace_id
assert installation.authorizing_user_id == authorizing_user_id
assert installation.bot_user_id == bot_user_id
assert installation.bot_access_token == bot_access_token
post_message_args = patched_slack.mock.call_args_list[-1]
(args, kwargs) = post_message_args
assert '`/busybeaver settings` to configure Busy Beaver' in args[0]
assert kwargs['user_id'] == authorizing_user_id
|
@pytest.mark.end2end
@responses.activate
def test_slack_oauth_flow_first_time_installation(client, login_client, session, factory, patch_slack, create_slack_headers):
def _patch_slack(module_to_patch_slack, *, is_admin=None, details=None):
obj = FakeSlackClient(is_admin=is_admin, details=details)
'busy_beaver.apps.slack_integration.oauth.workflow'(module_to_patch_slack, namespace='SlackClient', replacement=obj)
patched_slack = obj
patched_slack = _patch_slack
authorizing_user_id = 'abc'
workspace_id = 'T9TK3CUKW'
workspace_name = 'Slack Softball Team'
bot_user_id = 'U0KRQLJ9H'
scope = 'app_mentions:read,channels:history'
bot_access_token = 'xoxb-17653672481-19874698323-pdFZKVeTuE8sk7oOcBrzbqgy'
responses.add(responses.POST, SlackInstallationOAuthFlow.TOKEN_URL, match_querystring=False, json={'access_token': bot_access_token, 'app_id': 'A0KRD7HC3', 'authed_user': {'id': authorizing_user_id}, 'bot_user_id': bot_user_id, 'enterprise': None, 'ok': True, 'response_metadata': {'warnings': ['superfluous_charset']}, 'scope': scope, 'team': {'name': workspace_name, 'id': workspace_id}, 'token_type': 'bot', 'warning': 'superfluous_charset'})
params = {'code': 'issued_code', 'state': ''}
response = client.get('/slack/installation-callback', query_string=params)
assert response.status_code == 200
installation = SlackInstallation.query.first()
assert installation.scope == scope
assert installation.workspace_name == workspace_name
assert installation.workspace_id == workspace_id
assert installation.authorizing_user_id == authorizing_user_id
assert installation.bot_user_id == bot_user_id
assert installation.bot_access_token == bot_access_token
post_message_args = patched_slack.mock.call_args_list[-1]
(args, kwargs) = post_message_args
assert '`/busybeaver settings` to configure Busy Beaver' in args[0]
assert kwargs['user_id'] == authorizing_user_id
|
busy-beaver
|
positive
|
def test_excludes(tmpdir):
with tmpdir.as_cwd():
<DeepExtract>
tree = dtr.Tree('sequoia').makedirs()
for file_ in ['hello.txt', 'data/hello.txt', 'data/world.dat', 'world.dat']:
py.path.local(os.path.join(tree.abspath, file_)).write('hello', ensure=True)
sequoia = tree
</DeepExtract>
sequoia2 = dtr.Tree('sequoia2').makedirs()
sequoia3 = dtr.Tree('sequoia3').makedirs()
sequoia.sync(sequoia2, exclude='*.txt')
assert os.path.exists('sequoia2/world.dat')
assert os.path.exists('sequoia2/data/world.dat')
assert not os.path.exists('sequoia2/hello.txt')
assert not os.path.exists('sequoia2/data/hello.txt')
sequoia.sync(sequoia3, exclude=['*.txt', '*.dat'])
assert not os.path.exists('sequoia3/hello.txt')
assert not os.path.exists('sequoia3/world.dat')
assert os.path.exists('sequoia3/data/')
|
def test_excludes(tmpdir):
with tmpdir.as_cwd():
tree = dtr.Tree('sequoia').makedirs()
for file_ in ['hello.txt', 'data/hello.txt', 'data/world.dat', 'world.dat']:
py.path.local(os.path.join(tree.abspath, file_)).write('hello', ensure=True)
sequoia = tree
sequoia2 = dtr.Tree('sequoia2').makedirs()
sequoia3 = dtr.Tree('sequoia3').makedirs()
sequoia.sync(sequoia2, exclude='*.txt')
assert os.path.exists('sequoia2/world.dat')
assert os.path.exists('sequoia2/data/world.dat')
assert not os.path.exists('sequoia2/hello.txt')
assert not os.path.exists('sequoia2/data/hello.txt')
sequoia.sync(sequoia3, exclude=['*.txt', '*.dat'])
assert not os.path.exists('sequoia3/hello.txt')
assert not os.path.exists('sequoia3/world.dat')
assert os.path.exists('sequoia3/data/')
|
datreant
|
positive
|
def test_missing_new_password(self):
<DeepExtract>
form = ProfileForm(user=self.standard_user, data={'full_name': '', 'bio': 'My bio', 'roles': [self.mentor.id, self.mentee.id]})
</DeepExtract>
errors = form['new_password'].errors.as_data()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].code, 'required')
|
def test_missing_new_password(self):
form = ProfileForm(user=self.standard_user, data={'full_name': '', 'bio': 'My bio', 'roles': [self.mentor.id, self.mentee.id]})
errors = form['new_password'].errors.as_data()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].code, 'required')
|
connect
|
positive
|
def generate_database_bfa(bvh_path, output_path, window, window_step, downsample=4, dataset_config='bfa_dataset.yml'):
with open(dataset_config, 'r') as f:
cfg = yaml.load(f, Loader=yaml.Loader)
style_names = cfg['style_names']
style_name_to_idx = {name: i for (i, name) in enumerate(style_names)}
skel = Skel()
<DeepExtract>
bvh_files = [os.path.join(bvh_path, f) for f in sorted(list(os.listdir(bvh_path))) if os.path.isfile(os.path.join(bvh_path, f)) and f.endswith('.bvh') and (f != 'rest.bvh')]
</DeepExtract>
train_inputs = []
test_inputs = []
trainfull_inputs = []
group_size = 10
test_window = window * 2
for (i, item) in enumerate(bvh_files):
print('Processing %i of %i (%s)' % (i, len(bvh_files), item))
filename = item.split('/')[-1]
(style, _) = filename.split('_')
style_idx = style_name_to_idx[style]
<DeepExtract>
anim = AnimationData.from_BVH(item, downsample=downsample, skel=skel)
full = anim.get_full()
phases = anim.get_phases()
raw = np.concatenate((full, phases), axis=-1)
</DeepExtract>
total_length = len(raw)
group_length = test_window * group_size
for st in range(0, total_length, group_length):
ed = st + group_length
if ed <= total_length:
<DeepExtract>
output = []
for full in [raw[ed - test_window:ed]]:
(motion, phase) = (full[:, :-1], full[:, -1])
phase_label = phase[len(phase) // 2]
meta_copy = deepcopy({'style': style})
meta_copy['phase'] = phase_label
output.append({'motion': motion, 'style': style_idx, 'meta': meta_copy})
test_clips = output
</DeepExtract>
test_inputs += test_clips
<DeepExtract>
output = []
for full in divide_clip_bfa(raw[st:ed - test_window], window=window, window_step=window_step, divide=True):
(motion, phase) = (full[:, :-1], full[:, -1])
phase_label = phase[len(phase) // 2]
meta_copy = deepcopy({'style': style})
meta_copy['phase'] = phase_label
output.append({'motion': motion, 'style': style_idx, 'meta': meta_copy})
train_clips = output
</DeepExtract>
<DeepExtract>
output = []
for full in divide_clip_bfa(raw[st:ed - test_window], window=test_window, window_step=test_window, divide=True):
(motion, phase) = (full[:, :-1], full[:, -1])
phase_label = phase[len(phase) // 2]
meta_copy = deepcopy({'style': style})
meta_copy['phase'] = phase_label
output.append({'motion': motion, 'style': style_idx, 'meta': meta_copy})
trainfull_clips = output
</DeepExtract>
train_inputs += train_clips
trainfull_inputs += trainfull_clips
data_dict = {}
data_info = {}
for (subset, inputs) in zip(['train', 'test', 'trainfull'], [train_inputs, test_inputs, trainfull_inputs]):
motions = [input['motion'] for input in inputs]
styles = [input['style'] for input in inputs]
meta = {key: [input['meta'][key] for input in inputs] for key in inputs[0]['meta'].keys()}
data_dict[subset] = {'motion': motions, 'style': styles, 'meta': meta}
'compute meta info'
num_clips = len(motions)
info = {'num_clips': num_clips, 'distribution': {style: len([i for i in range(num_clips) if meta['style'][i] == style]) for style in style_names}}
data_info[subset] = info
np.savez_compressed(output_path + '.npz', **data_dict)
info_file = output_path + '.info'
with open(info_file, 'w') as f:
yaml.dump(data_info, f, sort_keys=False)
|
def generate_database_bfa(bvh_path, output_path, window, window_step, downsample=4, dataset_config='bfa_dataset.yml'):
with open(dataset_config, 'r') as f:
cfg = yaml.load(f, Loader=yaml.Loader)
style_names = cfg['style_names']
style_name_to_idx = {name: i for (i, name) in enumerate(style_names)}
skel = Skel()
bvh_files = [os.path.join(bvh_path, f) for f in sorted(list(os.listdir(bvh_path))) if os.path.isfile(os.path.join(bvh_path, f)) and f.endswith('.bvh') and (f != 'rest.bvh')]
train_inputs = []
test_inputs = []
trainfull_inputs = []
group_size = 10
test_window = window * 2
for (i, item) in enumerate(bvh_files):
print('Processing %i of %i (%s)' % (i, len(bvh_files), item))
filename = item.split('/')[-1]
(style, _) = filename.split('_')
style_idx = style_name_to_idx[style]
anim = AnimationData.from_BVH(item, downsample=downsample, skel=skel)
full = anim.get_full()
phases = anim.get_phases()
raw = np.concatenate((full, phases), axis=-1)
total_length = len(raw)
group_length = test_window * group_size
for st in range(0, total_length, group_length):
ed = st + group_length
if ed <= total_length:
output = []
for full in [raw[ed - test_window:ed]]:
(motion, phase) = (full[:, :-1], full[:, -1])
phase_label = phase[len(phase) // 2]
meta_copy = deepcopy({'style': style})
meta_copy['phase'] = phase_label
output.append({'motion': motion, 'style': style_idx, 'meta': meta_copy})
test_clips = output
test_inputs += test_clips
output = []
for full in divide_clip_bfa(raw[st:ed - test_window], window=window, window_step=window_step, divide=True):
(motion, phase) = (full[:, :-1], full[:, -1])
phase_label = phase[len(phase) // 2]
meta_copy = deepcopy({'style': style})
meta_copy['phase'] = phase_label
output.append({'motion': motion, 'style': style_idx, 'meta': meta_copy})
train_clips = output
output = []
for full in divide_clip_bfa(raw[st:ed - test_window], window=test_window, window_step=test_window, divide=True):
(motion, phase) = (full[:, :-1], full[:, -1])
phase_label = phase[len(phase) // 2]
meta_copy = deepcopy({'style': style})
meta_copy['phase'] = phase_label
output.append({'motion': motion, 'style': style_idx, 'meta': meta_copy})
trainfull_clips = output
train_inputs += train_clips
trainfull_inputs += trainfull_clips
data_dict = {}
data_info = {}
for (subset, inputs) in zip(['train', 'test', 'trainfull'], [train_inputs, test_inputs, trainfull_inputs]):
motions = [input['motion'] for input in inputs]
styles = [input['style'] for input in inputs]
meta = {key: [input['meta'][key] for input in inputs] for key in inputs[0]['meta'].keys()}
data_dict[subset] = {'motion': motions, 'style': styles, 'meta': meta}
'compute meta info'
num_clips = len(motions)
info = {'num_clips': num_clips, 'distribution': {style: len([i for i in range(num_clips) if meta['style'][i] == style]) for style in style_names}}
data_info[subset] = info
np.savez_compressed(output_path + '.npz', **data_dict)
info_file = output_path + '.info'
with open(info_file, 'w') as f:
yaml.dump(data_info, f, sort_keys=False)
|
deep-motion-editing
|
positive
|
def get(self, request, *args, **kwargs):
<DeepExtract>
if 'view' not in kwargs:
kwargs['view'] = self
kwargs.update(self.add_context())
context = kwargs
</DeepExtract>
return self.render_to_response(context)
|
def get(self, request, *args, **kwargs):
if 'view' not in kwargs:
kwargs['view'] = self
kwargs.update(self.add_context())
context = kwargs
return self.render_to_response(context)
|
django-mcbv
|
positive
|
def _newfeature(self, cls, **kwargs):
"""Creates a new feature from the given class and attaches it to this
feature.
"""
feat = cls(**kwargs)
feat._parent = self
if isinstance(feat, Geometry):
self._features.append(feat._placemark)
feat._parent = self
if feat._style is not None:
<DeepExtract>
if feat._style not in self._styles:
self._styles.append(feat._style)
</DeepExtract>
else:
self._features.append(feat)
return feat
|
def _newfeature(self, cls, **kwargs):
"""Creates a new feature from the given class and attaches it to this
feature.
"""
feat = cls(**kwargs)
feat._parent = self
if isinstance(feat, Geometry):
self._features.append(feat._placemark)
feat._parent = self
if feat._style is not None:
if feat._style not in self._styles:
self._styles.append(feat._style)
else:
self._features.append(feat)
return feat
|
DROP
|
positive
|
def train_step(self, initial_env_step: dataset_lib.EnvStep, experience: dataset_lib.EnvStep, target_policy: tf_policy.TFPolicy):
"""Performs a single training step based on batch.
Args:
initial_env_step: A batch of initial steps.
experience: A batch of transitions. Elements must have shape [batch_size,
2, ...].
target_policy: The policy whose value we want to estimate.
Returns:
The losses and the train op.
"""
env_step = tf.nest.map_structure(lambda t: t[:, 0, ...], experience)
next_env_step = tf.nest.map_structure(lambda t: t[:, 1, ...], experience)
with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:
tape.watch(self._nu_network.variables)
tape.watch(self._zeta_network.variables)
tape.watch(self._weight_network.variables)
tape.watch([self._alpha])
<DeepExtract>
nu_values = self._get_value(self._nu_network, env_step)
initial_nu_values = self._get_average_value(self._nu_network, initial_env_step, target_policy)
next_nu_values = self._get_average_value(self._nu_network, next_env_step, target_policy)
zeta_values = self._get_value(self._zeta_network, env_step)
rewards = self._reward_fn(env_step)
discounts = self._gamma * next_env_step.discount
policy_ratio = 1.0
if not self._solve_for_state_action_ratio:
tfagents_step = dataset_lib.convert_to_tfagents_timestep(env_step)
policy_log_probabilities = target_policy.distribution(tfagents_step).action.log_prob(env_step.action)
policy_ratio = tf.exp(policy_log_probabilities - env_step.get_log_probability())
bellman_residuals = -nu_values + common_lib.reverse_broadcast(rewards, nu_values) + common_lib.reverse_broadcast(discounts * policy_ratio, nu_values) * next_nu_values
bellman_residuals *= self._algae_alpha_sign
zeta_loss = self._algae_alpha_abs * self._fstar_fn(zeta_values) - bellman_residuals * zeta_values
init_nu_loss = (1 - self._gamma) * initial_nu_values * self._algae_alpha_sign
if self._primal_form:
nu_loss = self._algae_alpha_abs * self._f_fn(bellman_residuals / self._algae_alpha_abs) + init_nu_loss
else:
nu_loss = -zeta_loss + init_nu_loss
if self._weight_by_gamma:
weights = self._gamma ** tf.cast(env_step.step_num, tf.float32)[:, None]
weights /= 1e-06 + tf.reduce_mean(weights)
nu_loss *= weights
zeta_loss *= weights
(nu_loss, zeta_loss) = (nu_loss, zeta_loss)
</DeepExtract>
if not self._unbias_algae_alpha:
nu_loss *= tf.constant([1.0, 0.0, 1.0, 0.0])
zeta_loss *= tf.constant([1.0, 0.0, 1.0, 0.0])
nu_reg = self._nu_regularizer * self._orthogonal_regularization(self._nu_network)
zeta_reg = self._zeta_regularizer * self._orthogonal_regularization(self._zeta_network)
left = self._alpha - 1 * tf.ones_like(self._two_sided_limit)
right = self._alpha + 1 * tf.ones_like(self._two_sided_limit)
for _ in range(4):
mid = 0.5 * (left + right)
<DeepExtract>
if mid is None:
mid = self._alpha
if not self._closed_form_weights:
network_output = self._weight_network((initial_env_step.observation, env_step.observation, env_step.action, next_env_step.observation))[0]
log_weights = network_output
elif self._divergence_type in ['kl', 'rkl']:
weight_loss_multiplier = self._algae_alpha_sign * tf.concat(2 * [tf.ones_like(self._divergence_limit)] + 2 * [-tf.ones_like(self._divergence_limit)], axis=-1)
multiplied_loss = weight_loss_multiplier * nu_loss
combined_loss = tf.reduce_mean(tf.reshape(multiplied_loss, [-1, self._num_limits, 2]), axis=-1)
log_weights = -combined_loss / tf.exp(mid)
else:
raise ValueError('Divergence is not implemented.')
batch_size = tf.cast(tf.shape(log_weights)[0], tf.float32)
(weights, log_weights) = (batch_size * tf.nn.softmax(log_weights, axis=0), tf.math.log(batch_size) + tf.nn.log_softmax(log_weights, 0))
</DeepExtract>
<DeepExtract>
if self._divergence_type == 'kl':
divergence = tf.reduce_mean(2 * weights * log_weights - 2 * weights + 2, axis=0)
elif self._divergence_type == 'rkl':
divergence = tf.reduce_mean(2 * -log_weights + 2 * weights - 2, axis=0)
else:
raise ValueError('Divergence is not implemented.')
</DeepExtract>
divergence_violation = divergence - self._two_sided_limit
left = tf.where(divergence_violation > 0.0, mid, left)
right = tf.where(divergence_violation > 0.0, right, mid)
best_alpha = 0.5 * (left + right)
self._alpha.assign(0.05 * best_alpha + 0.95 * self._alpha)
<DeepExtract>
if alpha is None:
alpha = self._alpha
if not self._closed_form_weights:
network_output = self._weight_network((initial_env_step.observation, env_step.observation, env_step.action, next_env_step.observation))[0]
log_weights = network_output
elif self._divergence_type in ['kl', 'rkl']:
weight_loss_multiplier = self._algae_alpha_sign * tf.concat(2 * [tf.ones_like(self._divergence_limit)] + 2 * [-tf.ones_like(self._divergence_limit)], axis=-1)
multiplied_loss = weight_loss_multiplier * nu_loss
combined_loss = tf.reduce_mean(tf.reshape(multiplied_loss, [-1, self._num_limits, 2]), axis=-1)
log_weights = -combined_loss / tf.exp(alpha)
else:
raise ValueError('Divergence is not implemented.')
batch_size = tf.cast(tf.shape(log_weights)[0], tf.float32)
(weights, log_weights) = (batch_size * tf.nn.softmax(log_weights, axis=0), tf.math.log(batch_size) + tf.nn.log_softmax(log_weights, 0))
</DeepExtract>
<DeepExtract>
if self._divergence_type == 'kl':
divergence = tf.reduce_mean(2 * weights * log_weights - 2 * weights + 2, axis=0)
elif self._divergence_type == 'rkl':
divergence = tf.reduce_mean(2 * -log_weights + 2 * weights - 2, axis=0)
else:
raise ValueError('Divergence is not implemented.')
</DeepExtract>
divergence_violation = divergence - self._two_sided_limit
weighted_nu_loss = tf.reshape(nu_loss, [-1, self._num_limits, 2]) * weights[:, :, None]
weighted_zeta_loss = tf.reshape(zeta_loss, [-1, self._num_limits, 2]) * weights[:, :, None]
weight_loss_multiplier = self._algae_alpha_sign * tf.concat(2 * [tf.ones_like(self._divergence_limit)] + 2 * [-tf.ones_like(self._divergence_limit)], axis=-1)
weight_loss = tf.reduce_mean(tf.reshape(weight_loss_multiplier * nu_loss, [-1, self._num_limits, 2]), 1)
weight_loss += tf.exp(self._alpha) * divergence_violation
reg_weighted_nu_loss = weighted_nu_loss + nu_reg
reg_weighted_zeta_loss = weighted_zeta_loss + nu_reg
nu_grads = tape.gradient(reg_weighted_nu_loss, self._nu_network.variables)
nu_grad_op = self._nu_optimizer.apply_gradients(zip(nu_grads, self._nu_network.variables))
zeta_grads = tape.gradient(reg_weighted_zeta_loss, self._zeta_network.variables)
zeta_grad_op = self._zeta_optimizer.apply_gradients(zip(zeta_grads, self._zeta_network.variables))
if not self._closed_form_weights:
weight_grads = tape.gradient(weight_loss, self._weight_network.variables)
weight_grad_op = self._weight_optimizer.apply_gradients(zip(weight_grads, self._weight_network.variables))
else:
weight_grad_op = tf.group()
for idx in range(self._num_limits):
tf.summary.scalar('divergence%d' % idx, divergence[idx])
tf.summary.scalar('nu_loss%d' % idx, tf.reduce_mean(nu_loss, 0)[idx])
tf.summary.scalar('zeta_loss%d' % idx, tf.reduce_mean(zeta_loss, 0)[idx])
tf.summary.scalar('exp_alpha%d' % idx, tf.exp(self._alpha[idx]))
tf.summary.histogram('weights%d' % idx, weights[:, idx])
estimate = tf.reduce_mean(weighted_nu_loss * tf.reshape(self._algae_alpha_sign, [self._num_limits, 2]), axis=[0, -1])
if not self._unbias_algae_alpha:
estimate = 2 * estimate
return ((estimate, tf.reshape(tf.reduce_mean(weighted_nu_loss, [0]), [-1]), tf.reshape(tf.reduce_mean(weighted_zeta_loss, [0]), [-1]), tf.reduce_mean(weight_loss, 0), divergence), tf.group(nu_grad_op, zeta_grad_op, weight_grad_op))
|
def train_step(self, initial_env_step: dataset_lib.EnvStep, experience: dataset_lib.EnvStep, target_policy: tf_policy.TFPolicy):
"""Performs a single training step based on batch.
Args:
initial_env_step: A batch of initial steps.
experience: A batch of transitions. Elements must have shape [batch_size,
2, ...].
target_policy: The policy whose value we want to estimate.
Returns:
The losses and the train op.
"""
env_step = tf.nest.map_structure(lambda t: t[:, 0, ...], experience)
next_env_step = tf.nest.map_structure(lambda t: t[:, 1, ...], experience)
with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:
tape.watch(self._nu_network.variables)
tape.watch(self._zeta_network.variables)
tape.watch(self._weight_network.variables)
tape.watch([self._alpha])
nu_values = self._get_value(self._nu_network, env_step)
initial_nu_values = self._get_average_value(self._nu_network, initial_env_step, target_policy)
next_nu_values = self._get_average_value(self._nu_network, next_env_step, target_policy)
zeta_values = self._get_value(self._zeta_network, env_step)
rewards = self._reward_fn(env_step)
discounts = self._gamma * next_env_step.discount
policy_ratio = 1.0
if not self._solve_for_state_action_ratio:
tfagents_step = dataset_lib.convert_to_tfagents_timestep(env_step)
policy_log_probabilities = target_policy.distribution(tfagents_step).action.log_prob(env_step.action)
policy_ratio = tf.exp(policy_log_probabilities - env_step.get_log_probability())
bellman_residuals = -nu_values + common_lib.reverse_broadcast(rewards, nu_values) + common_lib.reverse_broadcast(discounts * policy_ratio, nu_values) * next_nu_values
bellman_residuals *= self._algae_alpha_sign
zeta_loss = self._algae_alpha_abs * self._fstar_fn(zeta_values) - bellman_residuals * zeta_values
init_nu_loss = (1 - self._gamma) * initial_nu_values * self._algae_alpha_sign
if self._primal_form:
nu_loss = self._algae_alpha_abs * self._f_fn(bellman_residuals / self._algae_alpha_abs) + init_nu_loss
else:
nu_loss = -zeta_loss + init_nu_loss
if self._weight_by_gamma:
weights = self._gamma ** tf.cast(env_step.step_num, tf.float32)[:, None]
weights /= 1e-06 + tf.reduce_mean(weights)
nu_loss *= weights
zeta_loss *= weights
(nu_loss, zeta_loss) = (nu_loss, zeta_loss)
if not self._unbias_algae_alpha:
nu_loss *= tf.constant([1.0, 0.0, 1.0, 0.0])
zeta_loss *= tf.constant([1.0, 0.0, 1.0, 0.0])
nu_reg = self._nu_regularizer * self._orthogonal_regularization(self._nu_network)
zeta_reg = self._zeta_regularizer * self._orthogonal_regularization(self._zeta_network)
left = self._alpha - 1 * tf.ones_like(self._two_sided_limit)
right = self._alpha + 1 * tf.ones_like(self._two_sided_limit)
for _ in range(4):
mid = 0.5 * (left + right)
if mid is None:
mid = self._alpha
if not self._closed_form_weights:
network_output = self._weight_network((initial_env_step.observation, env_step.observation, env_step.action, next_env_step.observation))[0]
log_weights = network_output
elif self._divergence_type in ['kl', 'rkl']:
weight_loss_multiplier = self._algae_alpha_sign * tf.concat(2 * [tf.ones_like(self._divergence_limit)] + 2 * [-tf.ones_like(self._divergence_limit)], axis=-1)
multiplied_loss = weight_loss_multiplier * nu_loss
combined_loss = tf.reduce_mean(tf.reshape(multiplied_loss, [-1, self._num_limits, 2]), axis=-1)
log_weights = -combined_loss / tf.exp(mid)
else:
raise ValueError('Divergence is not implemented.')
batch_size = tf.cast(tf.shape(log_weights)[0], tf.float32)
(weights, log_weights) = (batch_size * tf.nn.softmax(log_weights, axis=0), tf.math.log(batch_size) + tf.nn.log_softmax(log_weights, 0))
if self._divergence_type == 'kl':
divergence = tf.reduce_mean(2 * weights * log_weights - 2 * weights + 2, axis=0)
elif self._divergence_type == 'rkl':
divergence = tf.reduce_mean(2 * -log_weights + 2 * weights - 2, axis=0)
else:
raise ValueError('Divergence is not implemented.')
divergence_violation = divergence - self._two_sided_limit
left = tf.where(divergence_violation > 0.0, mid, left)
right = tf.where(divergence_violation > 0.0, right, mid)
best_alpha = 0.5 * (left + right)
self._alpha.assign(0.05 * best_alpha + 0.95 * self._alpha)
if alpha is None:
alpha = self._alpha
if not self._closed_form_weights:
network_output = self._weight_network((initial_env_step.observation, env_step.observation, env_step.action, next_env_step.observation))[0]
log_weights = network_output
elif self._divergence_type in ['kl', 'rkl']:
weight_loss_multiplier = self._algae_alpha_sign * tf.concat(2 * [tf.ones_like(self._divergence_limit)] + 2 * [-tf.ones_like(self._divergence_limit)], axis=-1)
multiplied_loss = weight_loss_multiplier * nu_loss
combined_loss = tf.reduce_mean(tf.reshape(multiplied_loss, [-1, self._num_limits, 2]), axis=-1)
log_weights = -combined_loss / tf.exp(alpha)
else:
raise ValueError('Divergence is not implemented.')
batch_size = tf.cast(tf.shape(log_weights)[0], tf.float32)
(weights, log_weights) = (batch_size * tf.nn.softmax(log_weights, axis=0), tf.math.log(batch_size) + tf.nn.log_softmax(log_weights, 0))
if self._divergence_type == 'kl':
divergence = tf.reduce_mean(2 * weights * log_weights - 2 * weights + 2, axis=0)
elif self._divergence_type == 'rkl':
divergence = tf.reduce_mean(2 * -log_weights + 2 * weights - 2, axis=0)
else:
raise ValueError('Divergence is not implemented.')
divergence_violation = divergence - self._two_sided_limit
weighted_nu_loss = tf.reshape(nu_loss, [-1, self._num_limits, 2]) * weights[:, :, None]
weighted_zeta_loss = tf.reshape(zeta_loss, [-1, self._num_limits, 2]) * weights[:, :, None]
weight_loss_multiplier = self._algae_alpha_sign * tf.concat(2 * [tf.ones_like(self._divergence_limit)] + 2 * [-tf.ones_like(self._divergence_limit)], axis=-1)
weight_loss = tf.reduce_mean(tf.reshape(weight_loss_multiplier * nu_loss, [-1, self._num_limits, 2]), 1)
weight_loss += tf.exp(self._alpha) * divergence_violation
reg_weighted_nu_loss = weighted_nu_loss + nu_reg
reg_weighted_zeta_loss = weighted_zeta_loss + nu_reg
nu_grads = tape.gradient(reg_weighted_nu_loss, self._nu_network.variables)
nu_grad_op = self._nu_optimizer.apply_gradients(zip(nu_grads, self._nu_network.variables))
zeta_grads = tape.gradient(reg_weighted_zeta_loss, self._zeta_network.variables)
zeta_grad_op = self._zeta_optimizer.apply_gradients(zip(zeta_grads, self._zeta_network.variables))
if not self._closed_form_weights:
weight_grads = tape.gradient(weight_loss, self._weight_network.variables)
weight_grad_op = self._weight_optimizer.apply_gradients(zip(weight_grads, self._weight_network.variables))
else:
weight_grad_op = tf.group()
for idx in range(self._num_limits):
tf.summary.scalar('divergence%d' % idx, divergence[idx])
tf.summary.scalar('nu_loss%d' % idx, tf.reduce_mean(nu_loss, 0)[idx])
tf.summary.scalar('zeta_loss%d' % idx, tf.reduce_mean(zeta_loss, 0)[idx])
tf.summary.scalar('exp_alpha%d' % idx, tf.exp(self._alpha[idx]))
tf.summary.histogram('weights%d' % idx, weights[:, idx])
estimate = tf.reduce_mean(weighted_nu_loss * tf.reshape(self._algae_alpha_sign, [self._num_limits, 2]), axis=[0, -1])
if not self._unbias_algae_alpha:
estimate = 2 * estimate
return ((estimate, tf.reshape(tf.reduce_mean(weighted_nu_loss, [0]), [-1]), tf.reshape(tf.reduce_mean(weighted_zeta_loss, [0]), [-1]), tf.reduce_mean(weight_loss, 0), divergence), tf.group(nu_grad_op, zeta_grad_op, weight_grad_op))
|
dice_rl
|
positive
|
def get_git_hash():
def _minimal_ext_cmd(cmd):
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
<DeepExtract>
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, env=env).communicate()[0]
out = out
</DeepExtract>
sha = out.strip().decode('ascii')
except OSError:
sha = 'unknown'
return sha
|
def get_git_hash():
def _minimal_ext_cmd(cmd):
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, env=env).communicate()[0]
out = out
sha = out.strip().decode('ascii')
except OSError:
sha = 'unknown'
return sha
|
DNL-Object-Detection
|
positive
|
def run(script, jobs, machines=None, key=None, email=False, rm_status=True):
if not _executable_exists('parallel'):
raise RuntimeError('GNU Parallel executable not found.')
if not hasattr(config, 'JOBS_PATH'):
raise RuntimeError('Need to specify JOBS_PATH in config.py')
if not os.path.exists(config.JOBS_PATH):
raise RuntimeError('Path chosen for config.JOBS_PATH does not exist: %s' % config.JOBS_PATH)
if key is not None:
if not os.path.exists(_status_path(key)):
os.mkdir(_status_path(key))
outstr = open(_status_file(key), 'w')
for job in jobs:
(print >> outstr, 'queued:', job)
outstr.close()
if rm_status:
<DeepExtract>
fnames = os.listdir(_status_path(key))
for fname in fnames:
if re.match('status-.*.txt', fname):
full_path = os.path.join(_status_path(key), fname)
os.remove(full_path)
</DeepExtract>
command = 'python parallel.py %s %s' % (key, script)
<DeepExtract>
args = ['parallel', '--gnu']
if machines is not None:
for m in machines:
args += ['--sshlogin', m]
if os.getcwd() is not None:
command = 'cd %s; %s' % (os.getcwd(), command)
args += [command]
p = subprocess.Popen(args, shell=False, stdin=subprocess.PIPE)
p.communicate('\n'.join(map(escape, jobs)))
</DeepExtract>
if email:
if key is not None:
subject = '%s jobs finished' % key
p = subprocess.Popen(['check_status', key], stdout=subprocess.PIPE)
(body, _) = p.communicate()
else:
subject = 'jobs finished'
body = ''
msg = '\r\n'.join(['From: %s' % config.EMAIL, 'To: %s' % config.EMAIL, 'Subject: %s' % subject, '', body])
s = smtplib.SMTP('localhost')
s.sendmail(config.EMAIL, [config.EMAIL], msg)
s.quit()
|
def run(script, jobs, machines=None, key=None, email=False, rm_status=True):
if not _executable_exists('parallel'):
raise RuntimeError('GNU Parallel executable not found.')
if not hasattr(config, 'JOBS_PATH'):
raise RuntimeError('Need to specify JOBS_PATH in config.py')
if not os.path.exists(config.JOBS_PATH):
raise RuntimeError('Path chosen for config.JOBS_PATH does not exist: %s' % config.JOBS_PATH)
if key is not None:
if not os.path.exists(_status_path(key)):
os.mkdir(_status_path(key))
outstr = open(_status_file(key), 'w')
for job in jobs:
(print >> outstr, 'queued:', job)
outstr.close()
if rm_status:
fnames = os.listdir(_status_path(key))
for fname in fnames:
if re.match('status-.*.txt', fname):
full_path = os.path.join(_status_path(key), fname)
os.remove(full_path)
command = 'python parallel.py %s %s' % (key, script)
args = ['parallel', '--gnu']
if machines is not None:
for m in machines:
args += ['--sshlogin', m]
if os.getcwd() is not None:
command = 'cd %s; %s' % (os.getcwd(), command)
args += [command]
p = subprocess.Popen(args, shell=False, stdin=subprocess.PIPE)
p.communicate('\n'.join(map(escape, jobs)))
if email:
if key is not None:
subject = '%s jobs finished' % key
p = subprocess.Popen(['check_status', key], stdout=subprocess.PIPE)
(body, _) = p.communicate()
else:
subject = 'jobs finished'
body = ''
msg = '\r\n'.join(['From: %s' % config.EMAIL, 'To: %s' % config.EMAIL, 'Subject: %s' % subject, '', body])
s = smtplib.SMTP('localhost')
s.sendmail(config.EMAIL, [config.EMAIL], msg)
s.quit()
|
compositional_structure_search
|
positive
|
def evaluate_metrics(all_prediction, from_which, slot_temp):
(total, turn_acc, joint_acc, F1_pred, F1_count) = (0, 0, 0, 0, 0)
for (d, v) in all_prediction.items():
for t in range(len(v)):
cv = v[t]
if set(cv['turn_belief']) == set(cv[from_which]):
joint_acc += 1
total += 1
<DeepExtract>
miss_gold = 0
miss_slot = []
for g in set(cv['turn_belief']):
if g not in set(cv[from_which]):
miss_gold += 1
miss_slot.append(g.rsplit('-', 1)[0])
wrong_pred = 0
for p in set(cv[from_which]):
if p not in set(cv['turn_belief']) and p.rsplit('-', 1)[0] not in miss_slot:
wrong_pred += 1
ACC_TOTAL = len(slot_temp)
ACC = len(slot_temp) - miss_gold - wrong_pred
ACC = ACC / float(ACC_TOTAL)
temp_acc = ACC
</DeepExtract>
turn_acc += temp_acc
<DeepExtract>
(TP, FP, FN) = (0, 0, 0)
if len(set(cv['turn_belief'])) != 0:
count = 1
for g in set(cv['turn_belief']):
if g in set(cv[from_which]):
TP += 1
else:
FN += 1
for p in set(cv[from_which]):
if p not in set(cv['turn_belief']):
FP += 1
precision = TP / float(TP + FP) if TP + FP != 0 else 0
recall = TP / float(TP + FN) if TP + FN != 0 else 0
F1 = 2 * precision * recall / float(precision + recall) if precision + recall != 0 else 0
elif len(set(cv[from_which])) == 0:
(precision, recall, F1, count) = (1, 1, 1, 1)
else:
(precision, recall, F1, count) = (0, 0, 0, 1)
(temp_f1, temp_r, temp_p, count) = (F1, recall, precision, count)
</DeepExtract>
F1_pred += temp_f1
F1_count += count
joint_acc_score = joint_acc / float(total) if total != 0 else 0
turn_acc_score = turn_acc / float(total) if total != 0 else 0
F1_score = F1_pred / float(F1_count) if F1_count != 0 else 0
return (joint_acc_score, F1_score, turn_acc_score)
|
def evaluate_metrics(all_prediction, from_which, slot_temp):
(total, turn_acc, joint_acc, F1_pred, F1_count) = (0, 0, 0, 0, 0)
for (d, v) in all_prediction.items():
for t in range(len(v)):
cv = v[t]
if set(cv['turn_belief']) == set(cv[from_which]):
joint_acc += 1
total += 1
miss_gold = 0
miss_slot = []
for g in set(cv['turn_belief']):
if g not in set(cv[from_which]):
miss_gold += 1
miss_slot.append(g.rsplit('-', 1)[0])
wrong_pred = 0
for p in set(cv[from_which]):
if p not in set(cv['turn_belief']) and p.rsplit('-', 1)[0] not in miss_slot:
wrong_pred += 1
ACC_TOTAL = len(slot_temp)
ACC = len(slot_temp) - miss_gold - wrong_pred
ACC = ACC / float(ACC_TOTAL)
temp_acc = ACC
turn_acc += temp_acc
(TP, FP, FN) = (0, 0, 0)
if len(set(cv['turn_belief'])) != 0:
count = 1
for g in set(cv['turn_belief']):
if g in set(cv[from_which]):
TP += 1
else:
FN += 1
for p in set(cv[from_which]):
if p not in set(cv['turn_belief']):
FP += 1
precision = TP / float(TP + FP) if TP + FP != 0 else 0
recall = TP / float(TP + FN) if TP + FN != 0 else 0
F1 = 2 * precision * recall / float(precision + recall) if precision + recall != 0 else 0
elif len(set(cv[from_which])) == 0:
(precision, recall, F1, count) = (1, 1, 1, 1)
else:
(precision, recall, F1, count) = (0, 0, 0, 1)
(temp_f1, temp_r, temp_p, count) = (F1, recall, precision, count)
F1_pred += temp_f1
F1_count += count
joint_acc_score = joint_acc / float(total) if total != 0 else 0
turn_acc_score = turn_acc / float(total) if total != 0 else 0
F1_score = F1_pred / float(F1_count) if F1_count != 0 else 0
return (joint_acc_score, F1_score, turn_acc_score)
|
CrossWOZ
|
positive
|
def single_scene_precision_recall(labels, pred, iou_thresh, conf_thresh):
"""Compute P and R for predicted bounding boxes. Ignores classes!
Args:
labels: (N x bbox) ground-truth bounding boxes (6 dims)
pred: (M x (bbox + conf)) predicted bboxes with confidence and maybe classification
Returns:
TP, FP, FN
"""
gt_bboxes = labels[:, :6]
num_scene_bboxes = gt_bboxes.shape[0]
conf = pred[:, 6]
conf_pred_bbox = pred[np.where(conf > conf_thresh)[0], :6]
num_conf_pred_bboxes = conf_pred_bbox.shape[0]
iou_arr = np.zeros([num_conf_pred_bboxes, num_scene_bboxes])
for g_idx in range(num_conf_pred_bboxes):
for s_idx in range(num_scene_bboxes):
<DeepExtract>
max_a = conf_pred_bbox[g_idx, :][0:3] + conf_pred_bbox[g_idx, :][3:6] / 2
max_b = gt_bboxes[s_idx, :][0:3] + gt_bboxes[s_idx, :][3:6] / 2
min_max = np.array([max_a, max_b]).min(0)
min_a = conf_pred_bbox[g_idx, :][0:3] - conf_pred_bbox[g_idx, :][3:6] / 2
min_b = gt_bboxes[s_idx, :][0:3] - gt_bboxes[s_idx, :][3:6] / 2
max_min = np.array([min_a, min_b]).max(0)
if not (min_max > max_min).all():
iou_arr[g_idx, s_idx] = 0.0
intersection = (min_max - max_min).prod()
vol_a = conf_pred_bbox[g_idx, :][3:6].prod()
vol_b = gt_bboxes[s_idx, :][3:6].prod()
union = vol_a + vol_b - intersection
iou_arr[g_idx, s_idx] = 1.0 * intersection / union
</DeepExtract>
good_match_arr = iou_arr >= iou_thresh
TP = good_match_arr.any(axis=1).sum()
FP = num_conf_pred_bboxes - TP
FN = num_scene_bboxes - good_match_arr.any(axis=0).sum()
return (TP, FP, FN)
|
def single_scene_precision_recall(labels, pred, iou_thresh, conf_thresh):
"""Compute P and R for predicted bounding boxes. Ignores classes!
Args:
labels: (N x bbox) ground-truth bounding boxes (6 dims)
pred: (M x (bbox + conf)) predicted bboxes with confidence and maybe classification
Returns:
TP, FP, FN
"""
gt_bboxes = labels[:, :6]
num_scene_bboxes = gt_bboxes.shape[0]
conf = pred[:, 6]
conf_pred_bbox = pred[np.where(conf > conf_thresh)[0], :6]
num_conf_pred_bboxes = conf_pred_bbox.shape[0]
iou_arr = np.zeros([num_conf_pred_bboxes, num_scene_bboxes])
for g_idx in range(num_conf_pred_bboxes):
for s_idx in range(num_scene_bboxes):
max_a = conf_pred_bbox[g_idx, :][0:3] + conf_pred_bbox[g_idx, :][3:6] / 2
max_b = gt_bboxes[s_idx, :][0:3] + gt_bboxes[s_idx, :][3:6] / 2
min_max = np.array([max_a, max_b]).min(0)
min_a = conf_pred_bbox[g_idx, :][0:3] - conf_pred_bbox[g_idx, :][3:6] / 2
min_b = gt_bboxes[s_idx, :][0:3] - gt_bboxes[s_idx, :][3:6] / 2
max_min = np.array([min_a, min_b]).max(0)
if not (min_max > max_min).all():
iou_arr[g_idx, s_idx] = 0.0
intersection = (min_max - max_min).prod()
vol_a = conf_pred_bbox[g_idx, :][3:6].prod()
vol_b = gt_bboxes[s_idx, :][3:6].prod()
union = vol_a + vol_b - intersection
iou_arr[g_idx, s_idx] = 1.0 * intersection / union
good_match_arr = iou_arr >= iou_thresh
TP = good_match_arr.any(axis=1).sum()
FP = num_conf_pred_bboxes - TP
FN = num_scene_bboxes - good_match_arr.any(axis=0).sum()
return (TP, FP, FN)
|
3DIoUMatch
|
positive
|
def __iter__(self):
size = float(len(self.iterable))
for (i, obj) in enumerate(self.iterable):
yield obj
if self.stats is not None and i > 0 and (self.log_interval is not None) and (i % self.log_interval == 0):
update = self.epoch - 1 + float(i / size) if self.epoch is not None else None
<DeepExtract>
postfix = OrderedDict(self.stats)
for key in postfix.keys():
if isinstance(postfix[key], Number):
postfix[key] = '{:g}'.format(postfix[key])
elif isinstance(postfix[key], AverageMeter):
postfix[key] = '{:.2f} ({:.2f})'.format(postfix[key].val, postfix[key].avg)
elif not isinstance(postfix[key], str):
postfix[key] = str(postfix[key])
self.stats = postfix
</DeepExtract>
<DeepExtract>
raise NotImplementedError
</DeepExtract>
|
def __iter__(self):
size = float(len(self.iterable))
for (i, obj) in enumerate(self.iterable):
yield obj
if self.stats is not None and i > 0 and (self.log_interval is not None) and (i % self.log_interval == 0):
update = self.epoch - 1 + float(i / size) if self.epoch is not None else None
postfix = OrderedDict(self.stats)
for key in postfix.keys():
if isinstance(postfix[key], Number):
postfix[key] = '{:g}'.format(postfix[key])
elif isinstance(postfix[key], AverageMeter):
postfix[key] = '{:.2f} ({:.2f})'.format(postfix[key].val, postfix[key].avg)
elif not isinstance(postfix[key], str):
postfix[key] = str(postfix[key])
self.stats = postfix
raise NotImplementedError
|
crosentgec
|
positive
|
def _add_recursive_graph_node(graph_model, entry_uuid, entry_model, with_parameters):
"""
Recursive function to visualize model setup. For every model in a
MultiCompartmentModel or a distributed model it will check if it is
a distribution, in which case the function will call itself with the
sub-model as input and continue until it has found the bottom of the
model setup.
Parameters
----------
graph_model: graphviz model instance,
Instantiated model instance to keep growing with nodes.
entry_uuid: string,
Entry model unique identifier from which to keep growing the graph.
entry_model: dmipy model instance,
Entry dmipy model from which to keep growing the graph.
"""
for sub_model in entry_model.models:
model_name = sub_model.__class__.__name__
model_uuid = str(uuid4())
graph_model.node(model_uuid, model_name)
graph_model.edge(model_uuid, entry_uuid)
if sub_model._model_type == 'SphericalDistributedModel' or sub_model._model_type == 'SpatialDistributedModel' or sub_model._model_type == 'BundleModel':
<DeepExtract>
for sub_model in sub_model.models:
model_name = sub_model.__class__.__name__
model_uuid = str(uuid4())
graph_model.node(model_uuid, model_name)
graph_model.edge(model_uuid, model_uuid)
if sub_model._model_type == 'SphericalDistributedModel' or sub_model._model_type == 'SpatialDistributedModel' or sub_model._model_type == 'BundleModel':
self._add_recursive_graph_node(graph_model, model_uuid, sub_model, with_parameters)
elif with_parameters:
self._add_parameter_nodes(graph_model, model_uuid, sub_model)
if hasattr(sub_model, 'distribution'):
dist_name = sub_model.distribution.__class__.__name__
dist_uuid = str(uuid4())
graph_model.node(dist_uuid, dist_name)
graph_model.edge(dist_uuid, model_uuid)
if with_parameters:
self._add_parameter_nodes(graph_model, dist_uuid, sub_model.distribution)
</DeepExtract>
elif with_parameters:
<DeepExtract>
for parameter_name in sub_model.parameter_names:
parameter_uuid = str(uuid4())
graph_model.node(parameter_uuid, parameter_name)
graph_model.edge(parameter_uuid, model_uuid)
</DeepExtract>
if hasattr(entry_model, 'distribution'):
dist_name = entry_model.distribution.__class__.__name__
dist_uuid = str(uuid4())
graph_model.node(dist_uuid, dist_name)
graph_model.edge(dist_uuid, entry_uuid)
if with_parameters:
<DeepExtract>
for parameter_name in entry_model.distribution.parameter_names:
parameter_uuid = str(uuid4())
graph_model.node(parameter_uuid, parameter_name)
graph_model.edge(parameter_uuid, dist_uuid)
</DeepExtract>
|
def _add_recursive_graph_node(graph_model, entry_uuid, entry_model, with_parameters):
"""
Recursive function to visualize model setup. For every model in a
MultiCompartmentModel or a distributed model it will check if it is
a distribution, in which case the function will call itself with the
sub-model as input and continue until it has found the bottom of the
model setup.
Parameters
----------
graph_model: graphviz model instance,
Instantiated model instance to keep growing with nodes.
entry_uuid: string,
Entry model unique identifier from which to keep growing the graph.
entry_model: dmipy model instance,
Entry dmipy model from which to keep growing the graph.
"""
for sub_model in entry_model.models:
model_name = sub_model.__class__.__name__
model_uuid = str(uuid4())
graph_model.node(model_uuid, model_name)
graph_model.edge(model_uuid, entry_uuid)
if sub_model._model_type == 'SphericalDistributedModel' or sub_model._model_type == 'SpatialDistributedModel' or sub_model._model_type == 'BundleModel':
for sub_model in sub_model.models:
model_name = sub_model.__class__.__name__
model_uuid = str(uuid4())
graph_model.node(model_uuid, model_name)
graph_model.edge(model_uuid, model_uuid)
if sub_model._model_type == 'SphericalDistributedModel' or sub_model._model_type == 'SpatialDistributedModel' or sub_model._model_type == 'BundleModel':
self._add_recursive_graph_node(graph_model, model_uuid, sub_model, with_parameters)
elif with_parameters:
self._add_parameter_nodes(graph_model, model_uuid, sub_model)
if hasattr(sub_model, 'distribution'):
dist_name = sub_model.distribution.__class__.__name__
dist_uuid = str(uuid4())
graph_model.node(dist_uuid, dist_name)
graph_model.edge(dist_uuid, model_uuid)
if with_parameters:
self._add_parameter_nodes(graph_model, dist_uuid, sub_model.distribution)
elif with_parameters:
for parameter_name in sub_model.parameter_names:
parameter_uuid = str(uuid4())
graph_model.node(parameter_uuid, parameter_name)
graph_model.edge(parameter_uuid, model_uuid)
if hasattr(entry_model, 'distribution'):
dist_name = entry_model.distribution.__class__.__name__
dist_uuid = str(uuid4())
graph_model.node(dist_uuid, dist_name)
graph_model.edge(dist_uuid, entry_uuid)
if with_parameters:
for parameter_name in entry_model.distribution.parameter_names:
parameter_uuid = str(uuid4())
graph_model.node(parameter_uuid, parameter_name)
graph_model.edge(parameter_uuid, dist_uuid)
|
dmipy
|
positive
|
@weighted_loss
def dice_loss(pred, target, valid_mask, smooth=1, exponent=2, class_weight=None, ignore_index=255):
assert pred.shape[0] == target.shape[0]
total_loss = 0
num_classes = pred.shape[1]
for i in range(num_classes):
if i != ignore_index:
<DeepExtract>
assert pred[:, i].shape[0] == target[..., i].shape[0]
pred[:, i] = pred[:, i].reshape(pred[:, i].shape[0], -1)
target[..., i] = target[..., i].reshape(target[..., i].shape[0], -1)
valid_mask = valid_mask.reshape(valid_mask.shape[0], -1)
num = torch.sum(torch.mul(pred[:, i], target[..., i]) * valid_mask, dim=1) * 2 + smooth
den = torch.sum(pred[:, i].pow(exponent) + target[..., i].pow(exponent), dim=1) + smooth
dice_loss = 1 - num / den
</DeepExtract>
if class_weight is not None:
dice_loss *= class_weight[i]
total_loss += dice_loss
return total_loss / num_classes
|
@weighted_loss
def dice_loss(pred, target, valid_mask, smooth=1, exponent=2, class_weight=None, ignore_index=255):
assert pred.shape[0] == target.shape[0]
total_loss = 0
num_classes = pred.shape[1]
for i in range(num_classes):
if i != ignore_index:
assert pred[:, i].shape[0] == target[..., i].shape[0]
pred[:, i] = pred[:, i].reshape(pred[:, i].shape[0], -1)
target[..., i] = target[..., i].reshape(target[..., i].shape[0], -1)
valid_mask = valid_mask.reshape(valid_mask.shape[0], -1)
num = torch.sum(torch.mul(pred[:, i], target[..., i]) * valid_mask, dim=1) * 2 + smooth
den = torch.sum(pred[:, i].pow(exponent) + target[..., i].pow(exponent), dim=1) + smooth
dice_loss = 1 - num / den
if class_weight is not None:
dice_loss *= class_weight[i]
total_loss += dice_loss
return total_loss / num_classes
|
Auto-Seg-Loss
|
positive
|
def ifgm_attack(x, preds, loss_fn, y=None, eps=None, model=None, steps=3, alpha=None, randinit=False, **kwargs):
if len(kwargs) > 0:
assert set(kwargs.keys()).issubset(_extra_args)
if eps is None:
eps = 0.062
if alpha is None:
alpha = eps * 1.25 / steps
if y is None:
preds_max = preds.data.max(1)[1]
y = torch.equal(preds, preds_max).float()
x_adv = x.clone()
if randinit:
x_adv += torch.sign(2.0 * torch.rand_like(x_adv) - 1.0) * eps
x_adv.requires_grad = True
for _ in range(steps):
loss_adv0 = loss_fn(model(x_adv), y, reduction='sum')
grad0 = torch.autograd.grad(loss_adv0, x_adv, only_inputs=True)[0]
x_adv.data.add_(alpha * torch.sign(grad0.data))
<DeepExtract>
return tensor_clamp(x_adv, min=x - eps, max=x + eps, in_place=True)
</DeepExtract>
mean = [0.4914, 0.4822, 0.4465]
std = [0.2023, 0.1994, 0.201]
min_range = []
max_range = []
for i in range(3):
max_range.append((1.0 - mean[i]) / std[i])
min_range.append((0.0 - mean[i]) / std[i])
<DeepExtract>
(N, C, H, W) = x_adv.shape
xadv = x_adv.data.clone()
for i in range(C):
xadv[:, i, :, :] = torch.clamp(x_adv[:, i, :, :], max=max_range[i], min=min_range[i])
x_adv = xadv
</DeepExtract>
return x_adv
|
def ifgm_attack(x, preds, loss_fn, y=None, eps=None, model=None, steps=3, alpha=None, randinit=False, **kwargs):
if len(kwargs) > 0:
assert set(kwargs.keys()).issubset(_extra_args)
if eps is None:
eps = 0.062
if alpha is None:
alpha = eps * 1.25 / steps
if y is None:
preds_max = preds.data.max(1)[1]
y = torch.equal(preds, preds_max).float()
x_adv = x.clone()
if randinit:
x_adv += torch.sign(2.0 * torch.rand_like(x_adv) - 1.0) * eps
x_adv.requires_grad = True
for _ in range(steps):
loss_adv0 = loss_fn(model(x_adv), y, reduction='sum')
grad0 = torch.autograd.grad(loss_adv0, x_adv, only_inputs=True)[0]
x_adv.data.add_(alpha * torch.sign(grad0.data))
return tensor_clamp(x_adv, min=x - eps, max=x + eps, in_place=True)
mean = [0.4914, 0.4822, 0.4465]
std = [0.2023, 0.1994, 0.201]
min_range = []
max_range = []
for i in range(3):
max_range.append((1.0 - mean[i]) / std[i])
min_range.append((0.0 - mean[i]) / std[i])
(N, C, H, W) = x_adv.shape
xadv = x_adv.data.clone()
for i in range(C):
xadv[:, i, :, :] = torch.clamp(x_adv[:, i, :, :], max=max_range[i], min=min_range[i])
x_adv = xadv
return x_adv
|
ATMC
|
positive
|
def test_get_win_version_with_win_put(self):
"""Test version window is initialized, updated and cleared correctly with win put."""
size = bf.size()
rank = bf.rank()
if size <= 1:
fname = inspect.currentframe().f_code.co_name
warnings.warn('Skip {} due to size 1'.format(fname))
return
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if TEST_ON_GPU:
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor]
indegree = int(np.ceil(np.log2(size)))
neighbor_ranks = [(rank - 2 ** i) % size for i in range(indegree)]
dims = [1, 2, 3]
for (dtype, dim) in itertools.product(dtypes, dims):
tensor = torch.FloatTensor(*[23] * dim).fill_(1).mul_(rank)
<DeepExtract>
if dtype.is_cuda:
if bf.nccl_built() and bf.local_size() > torch.cuda.device_count():
raise EnvironmentError('Cannot run number of processes in one machine more than GPU device count in NCCL environment')
tensor = tensor.cuda(bf.local_rank() % torch.cuda.device_count()).type(dtype)
tensor = tensor.type(dtype)
</DeepExtract>
window_name = 'win_version_put_{}_{}'.format(dim, dtype)
bf.win_create(tensor, window_name)
original_versions = list(bf.get_win_version(window_name).values())
bf.barrier()
bf.win_put(tensor, window_name)
bf.barrier()
versions_after_win_get = list(bf.get_win_version(window_name).values())
bf.win_update(window_name)
versions_after_win_update = list(bf.get_win_version(window_name).values())
neighbor_ranks_number = len(neighbor_ranks)
zero_number_in_original_versions = len(original_versions) - np.count_nonzero(original_versions)
assert zero_number_in_original_versions == neighbor_ranks_number, 'version initialization is wrong.'
zero_number_after_win_update = len(versions_after_win_update) - np.count_nonzero(versions_after_win_update)
assert zero_number_after_win_update == neighbor_ranks_number, 'version clear up is wrong.'
expected_versions_after_win_get = [1] * neighbor_ranks_number
assert versions_after_win_get == expected_versions_after_win_get, 'version after win put is wrong.'
for (dtype, dim) in itertools.product(dtypes, dims):
window_name = 'win_version_put_{}_{}'.format(dim, dtype)
is_freed = bf.win_free(window_name)
assert is_freed, 'bf.win_free do not free window object successfully.'
|
def test_get_win_version_with_win_put(self):
"""Test version window is initialized, updated and cleared correctly with win put."""
size = bf.size()
rank = bf.rank()
if size <= 1:
fname = inspect.currentframe().f_code.co_name
warnings.warn('Skip {} due to size 1'.format(fname))
return
dtypes = [torch.FloatTensor, torch.DoubleTensor]
if TEST_ON_GPU:
dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor]
indegree = int(np.ceil(np.log2(size)))
neighbor_ranks = [(rank - 2 ** i) % size for i in range(indegree)]
dims = [1, 2, 3]
for (dtype, dim) in itertools.product(dtypes, dims):
tensor = torch.FloatTensor(*[23] * dim).fill_(1).mul_(rank)
if dtype.is_cuda:
if bf.nccl_built() and bf.local_size() > torch.cuda.device_count():
raise EnvironmentError('Cannot run number of processes in one machine more than GPU device count in NCCL environment')
tensor = tensor.cuda(bf.local_rank() % torch.cuda.device_count()).type(dtype)
tensor = tensor.type(dtype)
window_name = 'win_version_put_{}_{}'.format(dim, dtype)
bf.win_create(tensor, window_name)
original_versions = list(bf.get_win_version(window_name).values())
bf.barrier()
bf.win_put(tensor, window_name)
bf.barrier()
versions_after_win_get = list(bf.get_win_version(window_name).values())
bf.win_update(window_name)
versions_after_win_update = list(bf.get_win_version(window_name).values())
neighbor_ranks_number = len(neighbor_ranks)
zero_number_in_original_versions = len(original_versions) - np.count_nonzero(original_versions)
assert zero_number_in_original_versions == neighbor_ranks_number, 'version initialization is wrong.'
zero_number_after_win_update = len(versions_after_win_update) - np.count_nonzero(versions_after_win_update)
assert zero_number_after_win_update == neighbor_ranks_number, 'version clear up is wrong.'
expected_versions_after_win_get = [1] * neighbor_ranks_number
assert versions_after_win_get == expected_versions_after_win_get, 'version after win put is wrong.'
for (dtype, dim) in itertools.product(dtypes, dims):
window_name = 'win_version_put_{}_{}'.format(dim, dtype)
is_freed = bf.win_free(window_name)
assert is_freed, 'bf.win_free do not free window object successfully.'
|
bluefog
|
positive
|
def handle_tx(self, tx):
if tx not in self.mempool:
<DeepExtract>
in_sum = 0
out_sum = 0
for (index, tx_in) in enumerate(tx.tx_ins):
assert tx_in.outpoint in self.utxo_set
tx_out = self.utxo_set[tx_in.outpoint]
public_key = tx_out.public_key
tx.verify_input(index, public_key)
amount = tx_out.amount
in_sum += amount
for tx_out in tx.tx_outs:
out_sum += tx_out.amount
assert in_sum >= out_sum
</DeepExtract>
self.mempool.append(tx)
for peer in self.peers:
<DeepExtract>
message = prepare_message('tx', tx)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(peer)
s.sendall(message)
if response:
return read_message(s)
</DeepExtract>
|
def handle_tx(self, tx):
if tx not in self.mempool:
in_sum = 0
out_sum = 0
for (index, tx_in) in enumerate(tx.tx_ins):
assert tx_in.outpoint in self.utxo_set
tx_out = self.utxo_set[tx_in.outpoint]
public_key = tx_out.public_key
tx.verify_input(index, public_key)
amount = tx_out.amount
in_sum += amount
for tx_out in tx.tx_outs:
out_sum += tx_out.amount
assert in_sum >= out_sum
self.mempool.append(tx)
for peer in self.peers:
message = prepare_message('tx', tx)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(peer)
s.sendall(message)
if response:
return read_message(s)
|
digital-cash
|
positive
|
@classmethod
def emit_signal(cls, connection, object_path, interface_name, signal_name, parameters, destination=None):
"""Emit a DBus signal.
GLib doesn't seem to support Unix file descriptors in signals.
Swap Unix file descriptors with indexes into a list of Unix file
descriptors, but emit just the indexes. Log a warning to inform
users about the limited support.
"""
<DeepExtract>
if parameters is None:
(parameters, fd_list) = (None, None)
fd_list = []
def _get_idx(fd):
fd_list.append(fd)
(parameters, fd_list) = len(fd_list) - 1
variant_without_fds = UnixFDSwap.apply(parameters, _get_idx)
if not fd_list:
(parameters, fd_list) = (parameters, None)
(parameters, fd_list) = (variant_without_fds, Gio.UnixFDList.new_from_array(fd_list))
</DeepExtract>
if fd_list:
log.warning('Unix file descriptors in signals are unsupported.')
connection.emit_signal(destination, object_path, interface_name, signal_name, parameters)
|
@classmethod
def emit_signal(cls, connection, object_path, interface_name, signal_name, parameters, destination=None):
"""Emit a DBus signal.
GLib doesn't seem to support Unix file descriptors in signals.
Swap Unix file descriptors with indexes into a list of Unix file
descriptors, but emit just the indexes. Log a warning to inform
users about the limited support.
"""
if parameters is None:
(parameters, fd_list) = (None, None)
fd_list = []
def _get_idx(fd):
fd_list.append(fd)
(parameters, fd_list) = len(fd_list) - 1
variant_without_fds = UnixFDSwap.apply(parameters, _get_idx)
if not fd_list:
(parameters, fd_list) = (parameters, None)
(parameters, fd_list) = (variant_without_fds, Gio.UnixFDList.new_from_array(fd_list))
if fd_list:
log.warning('Unix file descriptors in signals are unsupported.')
connection.emit_signal(destination, object_path, interface_name, signal_name, parameters)
|
dasbus
|
positive
|
def test_acm_service_manager_delete_certificate_failure(acm_service_mgr):
arn = 'arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012'
<DeepExtract>
acm_service_mgr.client.delete_certificate.side_effect = botocore.exceptions.ClientError({'Error': {'Code': code}}, 'Certificate')
</DeepExtract>
with pytest.raises(SystemExit):
acm_service_mgr.delete_certificate(arn=arn)
|
def test_acm_service_manager_delete_certificate_failure(acm_service_mgr):
arn = 'arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012'
acm_service_mgr.client.delete_certificate.side_effect = botocore.exceptions.ClientError({'Error': {'Code': code}}, 'Certificate')
with pytest.raises(SystemExit):
acm_service_mgr.delete_certificate(arn=arn)
|
amazon.aws
|
positive
|
def transformed_points(self, transform: numpy.ndarray) -> Iterable[Tuple[float, Optional[numpy.ndarray], Optional[numpy.ndarray]]]:
"""
Get the locations of the transformed blocks and the source blocks they came from.
:param transform: The matrix that this box will be transformed by.
:return: An iterable of two Nx3 numpy arrays of the source block locations and the destination block locations. The destination locations will be unique but the source may not be and some may not be included.
"""
for (progress, box, mask, original) in self._iter_transformed_boxes(transform):
if isinstance(mask, bool) and mask:
new_points = numpy.transpose(numpy.mgrid[box.min_x:box.max_x, box.min_y:box.max_y, box.min_z:box.max_z], (1, 2, 3, 0)).reshape(-1, 3)
<DeepExtract>
assert isinstance(new_points, numpy.ndarray) and len(new_points.shape) == 2 and (new_points.shape[1] == 3)
assert isinstance(numpy.linalg.inv(numpy.matmul(displacement_matrix(-0.5, -0.5, -0.5), transform)), numpy.ndarray) and numpy.linalg.inv(numpy.matmul(displacement_matrix(-0.5, -0.5, -0.5), transform)).shape == (4, 4)
points_array = numpy.ones((new_points.shape[0], 4))
points_array[:, :3] = new_points
old_points = numpy.matmul(numpy.linalg.inv(numpy.matmul(displacement_matrix(-0.5, -0.5, -0.5), transform)), points_array.T).T[:, :3]
</DeepExtract>
yield (progress, old_points, new_points)
elif isinstance(mask, numpy.ndarray) and numpy.any(mask):
yield (progress, original[mask], box.min_array + numpy.argwhere(mask))
else:
yield (progress, None, None)
|
def transformed_points(self, transform: numpy.ndarray) -> Iterable[Tuple[float, Optional[numpy.ndarray], Optional[numpy.ndarray]]]:
"""
Get the locations of the transformed blocks and the source blocks they came from.
:param transform: The matrix that this box will be transformed by.
:return: An iterable of two Nx3 numpy arrays of the source block locations and the destination block locations. The destination locations will be unique but the source may not be and some may not be included.
"""
for (progress, box, mask, original) in self._iter_transformed_boxes(transform):
if isinstance(mask, bool) and mask:
new_points = numpy.transpose(numpy.mgrid[box.min_x:box.max_x, box.min_y:box.max_y, box.min_z:box.max_z], (1, 2, 3, 0)).reshape(-1, 3)
assert isinstance(new_points, numpy.ndarray) and len(new_points.shape) == 2 and (new_points.shape[1] == 3)
assert isinstance(numpy.linalg.inv(numpy.matmul(displacement_matrix(-0.5, -0.5, -0.5), transform)), numpy.ndarray) and numpy.linalg.inv(numpy.matmul(displacement_matrix(-0.5, -0.5, -0.5), transform)).shape == (4, 4)
points_array = numpy.ones((new_points.shape[0], 4))
points_array[:, :3] = new_points
old_points = numpy.matmul(numpy.linalg.inv(numpy.matmul(displacement_matrix(-0.5, -0.5, -0.5), transform)), points_array.T).T[:, :3]
yield (progress, old_points, new_points)
elif isinstance(mask, numpy.ndarray) and numpy.any(mask):
yield (progress, original[mask], box.min_array + numpy.argwhere(mask))
else:
yield (progress, None, None)
|
Amulet-Core
|
positive
|
def test_picamera_input():
picam.picamera_override = picamera_override
avsource = AVSourceElement(uri='picamera', type='video')
<DeepExtract>
_dir = os.path.dirname(os.path.abspath(__file__))
_good_tflite_model = os.path.join(_dir, '../ai/mobilenet_ssd_v2_coco_quant_postprocess.tflite')
_good_edgetpu_model = os.path.join(_dir, '../ai/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite')
_good_labels = os.path.join(_dir, '../ai/coco_labels.txt')
config = {'model': {'tflite': _good_tflite_model, 'edgetpu': _good_edgetpu_model}, 'labels': _good_labels, 'top_k': 3, 'confidence_threshold': 0.8}
object_config = config
</DeepExtract>
detection_received = threading.Event()
sample_image = None
detections = None
def sample_callback(image=None, inference_result=None, **kwargs):
nonlocal sample_image
nonlocal detection_received
sample_image = image
nonlocal detections
detections = inference_result
print(f'detections: {detections}')
print(f'len(detections): {len(detections)}')
if detections:
label = detections[0]['label']
confidence = detections[0]['confidence']
if label == 'person' and confidence > 0.9:
detection_received.set()
object_detector = ObjectDetector(**object_config)
avsource.connect_to_next_element(object_detector)
output = _OutPipeElement(sample_callback=sample_callback)
object_detector.connect_to_next_element(output)
t = threading.Thread(name='Test AVSourceElement', target=avsource.start, daemon=True)
t.start()
detection_received.wait(timeout=10)
assert sample_image
assert sample_image.size[0] == 1280
assert sample_image.size[1] == 720
assert detections
assert len(detections) == 1
label = detections[0]['label']
confidence = detections[0]['confidence']
(x0, y0) = (detections[0]['box']['xmin'], detections[0]['box']['ymin'])
(x1, y1) = (detections[0]['box']['xmax'], detections[0]['box']['ymax'])
assert label == 'person'
assert confidence > 0.9
assert x0 > 0 and x0 < x1
assert y0 > 0 and y0 < y1
avsource.stop()
t.join(timeout=10)
assert not t.is_alive()
|
def test_picamera_input():
picam.picamera_override = picamera_override
avsource = AVSourceElement(uri='picamera', type='video')
_dir = os.path.dirname(os.path.abspath(__file__))
_good_tflite_model = os.path.join(_dir, '../ai/mobilenet_ssd_v2_coco_quant_postprocess.tflite')
_good_edgetpu_model = os.path.join(_dir, '../ai/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite')
_good_labels = os.path.join(_dir, '../ai/coco_labels.txt')
config = {'model': {'tflite': _good_tflite_model, 'edgetpu': _good_edgetpu_model}, 'labels': _good_labels, 'top_k': 3, 'confidence_threshold': 0.8}
object_config = config
detection_received = threading.Event()
sample_image = None
detections = None
def sample_callback(image=None, inference_result=None, **kwargs):
nonlocal sample_image
nonlocal detection_received
sample_image = image
nonlocal detections
detections = inference_result
print(f'detections: {detections}')
print(f'len(detections): {len(detections)}')
if detections:
label = detections[0]['label']
confidence = detections[0]['confidence']
if label == 'person' and confidence > 0.9:
detection_received.set()
object_detector = ObjectDetector(**object_config)
avsource.connect_to_next_element(object_detector)
output = _OutPipeElement(sample_callback=sample_callback)
object_detector.connect_to_next_element(output)
t = threading.Thread(name='Test AVSourceElement', target=avsource.start, daemon=True)
t.start()
detection_received.wait(timeout=10)
assert sample_image
assert sample_image.size[0] == 1280
assert sample_image.size[1] == 720
assert detections
assert len(detections) == 1
label = detections[0]['label']
confidence = detections[0]['confidence']
(x0, y0) = (detections[0]['box']['xmin'], detections[0]['box']['ymin'])
(x1, y1) = (detections[0]['box']['xmax'], detections[0]['box']['ymax'])
assert label == 'person'
assert confidence > 0.9
assert x0 > 0 and x0 < x1
assert y0 > 0 and y0 < y1
avsource.stop()
t.join(timeout=10)
assert not t.is_alive()
|
ambianic-edge
|
positive
|
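The ambianic-edge test above hinges on a threading.Event that the inference callback sets once a confident 'person' detection arrives, while the main test thread blocks on wait(timeout=...). A stripped-down sketch of that synchronization pattern, with a fake worker thread standing in for the AV source and detector; all names here are illustrative.

import threading
import time

def wait_for_detection_pattern():
    detection_received = threading.Event()
    result = {}

    def sample_callback(label, confidence):
        # Called from the worker thread when an inference result arrives.
        if label == "person" and confidence > 0.9:
            result["label"] = label
            result["confidence"] = confidence
            detection_received.set()

    def fake_pipeline():
        # Stand-in for the AV source + object detector thread.
        time.sleep(0.1)
        sample_callback("person", 0.95)

    worker = threading.Thread(target=fake_pipeline, daemon=True)
    worker.start()
    # Block until a qualifying detection arrives or the timeout expires.
    assert detection_received.wait(timeout=5)
    worker.join(timeout=5)
    return result

if __name__ == "__main__":
    print(wait_for_detection_pattern())  # {'label': 'person', 'confidence': 0.95}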
def strategy(context, data):
<DeepExtract>
try:
pipeline_results = pipeline_output('strategy_pipeline')
except NoFurtherDataError:
context.long_securities = []
context.short_securities = []
return
p = context.params['percentile']
momentum = pipeline_results
long_candidates = momentum[momentum > 0].dropna().sort_values('momentum')
short_candidates = momentum[momentum < 0].dropna().sort_values('momentum')
n_long = len(long_candidates)
n_short = len(short_candidates)
n = int(min(n_long, n_short) * p)
if n == 0:
print('{}, no signals'.format(get_datetime()))
context.long_securities = []
context.short_securities = []
context.long_securities = long_candidates.index[-n:]
context.short_securities = short_candidates.index[:n]
</DeepExtract>
<DeepExtract>
n = len(context.long_securities)
if n < 1:
return
weight = 0.5 / n
for security in context.portfolio.positions:
if security not in context.long_securities and security not in context.short_securities:
order_target_percent(security, 0)
for security in context.long_securities:
order_target_percent(security, weight)
for security in context.short_securities:
order_target_percent(security, -weight)
</DeepExtract>
|
def strategy(context, data):
try:
pipeline_results = pipeline_output('strategy_pipeline')
except NoFurtherDataError:
context.long_securities = []
context.short_securities = []
return
p = context.params['percentile']
momentum = pipeline_results
long_candidates = momentum[momentum > 0].dropna().sort_values('momentum')
short_candidates = momentum[momentum < 0].dropna().sort_values('momentum')
n_long = len(long_candidates)
n_short = len(short_candidates)
n = int(min(n_long, n_short) * p)
if n == 0:
print('{}, no signals'.format(get_datetime()))
context.long_securities = []
context.short_securities = []
context.long_securities = long_candidates.index[-n:]
context.short_securities = short_candidates.index[:n]
n = len(context.long_securities)
if n < 1:
return
weight = 0.5 / n
for security in context.portfolio.positions:
if security not in context.long_securities and security not in context.short_securities:
order_target_percent(security, 0)
for security in context.long_securities:
order_target_percent(security, weight)
for security in context.short_securities:
order_target_percent(security, -weight)
|
blueshift-demo-strategies
|
positive
|
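The rebalance rule in the blueshift record above puts half the book into each leg, equally weighted, and flattens anything no longer selected. A framework-free sketch of that weighting step, with plain symbol strings in place of Blueshift security objects and a dict of target weights in place of order_target_percent calls.

def target_weights(long_securities, short_securities, current_positions):
    # Mirrors the rule above: weight = 0.5 / len(long_securities) per name on each leg.
    n = len(long_securities)
    if n < 1:
        return {}
    weight = 0.5 / n
    targets = {}
    for security in current_positions:
        if security not in long_securities and security not in short_securities:
            targets[security] = 0.0  # close positions that dropped out of both lists
    for security in long_securities:
        targets[security] = weight
    for security in short_securities:
        targets[security] = -weight
    return targets

if __name__ == "__main__":
    print(target_weights(["AAA", "BBB"], ["CCC", "DDD"], ["AAA", "ZZZ"]))
    # {'ZZZ': 0.0, 'AAA': 0.25, 'BBB': 0.25, 'CCC': -0.25, 'DDD': -0.25}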
def gettz(name=None):
tz = None
if not name:
try:
name = os.environ['TZ']
except KeyError:
pass
if name is None or name == ':':
for filepath in TZFILES:
if not os.path.isabs(filepath):
filename = filepath
for path in TZPATHS:
filepath = os.path.join(path, filename)
if os.path.isfile(filepath):
break
else:
continue
if os.path.isfile(filepath):
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = tzlocal()
else:
if name.startswith(':'):
name = name[:-1]
if os.path.isabs(name):
if os.path.isfile(name):
tz = tzfile(name)
else:
tz = None
else:
for path in TZPATHS:
filepath = os.path.join(path, name)
if not os.path.isfile(filepath):
filepath = filepath.replace(' ', '_')
if not os.path.isfile(filepath):
continue
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = None
if tzwin is not None:
try:
tz = tzwin(name)
except WindowsError:
tz = None
if not tz:
from dateutil.zoneinfo import gettz
<DeepExtract>
tz = None
if not name:
try:
name = os.environ['TZ']
except KeyError:
pass
if name is None or name == ':':
for filepath in TZFILES:
if not os.path.isabs(filepath):
filename = filepath
for path in TZPATHS:
filepath = os.path.join(path, filename)
if os.path.isfile(filepath):
break
else:
continue
if os.path.isfile(filepath):
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = tzlocal()
else:
if name.startswith(':'):
name = name[:-1]
if os.path.isabs(name):
if os.path.isfile(name):
tz = tzfile(name)
else:
tz = None
else:
for path in TZPATHS:
filepath = os.path.join(path, name)
if not os.path.isfile(filepath):
filepath = filepath.replace(' ', '_')
if not os.path.isfile(filepath):
continue
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = None
if tzwin is not None:
try:
tz = tzwin(name)
except WindowsError:
tz = None
if not tz:
from dateutil.zoneinfo import gettz
tz = gettz(name)
if not tz:
for c in name:
if c in '0123456789':
try:
tz = tzstr(name)
except ValueError:
pass
break
else:
if name in ('GMT', 'UTC'):
tz = tzutc()
elif name in time.tzname:
tz = tzlocal()
tz = tz
</DeepExtract>
if not tz:
for c in name:
if c in '0123456789':
try:
tz = tzstr(name)
except ValueError:
pass
break
else:
if name in ('GMT', 'UTC'):
tz = tzutc()
elif name in time.tzname:
tz = tzlocal()
return tz
|
def gettz(name=None):
tz = None
if not name:
try:
name = os.environ['TZ']
except KeyError:
pass
if name is None or name == ':':
for filepath in TZFILES:
if not os.path.isabs(filepath):
filename = filepath
for path in TZPATHS:
filepath = os.path.join(path, filename)
if os.path.isfile(filepath):
break
else:
continue
if os.path.isfile(filepath):
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = tzlocal()
else:
if name.startswith(':'):
name = name[:-1]
if os.path.isabs(name):
if os.path.isfile(name):
tz = tzfile(name)
else:
tz = None
else:
for path in TZPATHS:
filepath = os.path.join(path, name)
if not os.path.isfile(filepath):
filepath = filepath.replace(' ', '_')
if not os.path.isfile(filepath):
continue
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = None
if tzwin is not None:
try:
tz = tzwin(name)
except WindowsError:
tz = None
if not tz:
from dateutil.zoneinfo import gettz
tz = None
if not name:
try:
name = os.environ['TZ']
except KeyError:
pass
if name is None or name == ':':
for filepath in TZFILES:
if not os.path.isabs(filepath):
filename = filepath
for path in TZPATHS:
filepath = os.path.join(path, filename)
if os.path.isfile(filepath):
break
else:
continue
if os.path.isfile(filepath):
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = tzlocal()
else:
if name.startswith(':'):
name = name[:-1]
if os.path.isabs(name):
if os.path.isfile(name):
tz = tzfile(name)
else:
tz = None
else:
for path in TZPATHS:
filepath = os.path.join(path, name)
if not os.path.isfile(filepath):
filepath = filepath.replace(' ', '_')
if not os.path.isfile(filepath):
continue
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = None
if tzwin is not None:
try:
tz = tzwin(name)
except WindowsError:
tz = None
if not tz:
from dateutil.zoneinfo import gettz
tz = gettz(name)
if not tz:
for c in name:
if c in '0123456789':
try:
tz = tzstr(name)
except ValueError:
pass
break
else:
if name in ('GMT', 'UTC'):
tz = tzutc()
elif name in time.tzname:
tz = tzlocal()
tz = tz
if not tz:
for c in name:
if c in '0123456789':
try:
tz = tzstr(name)
except ValueError:
pass
break
else:
if name in ('GMT', 'UTC'):
tz = tzutc()
elif name in time.tzname:
tz = tzlocal()
return tz
|
Android-Free-Forensic-Toolkit
|
positive
|
def clean_up_previous_stunnel_pids(state_file_dir=STATE_FILE_DIR):
"""
Cleans up stunnel pids created by mount watchdog spawned by a previous efs-csi-driver pod after driver restart, upgrade
or crash. This method attempts to clean PIDs from persisted state files after efs-csi-driver restart to
ensure watchdog creates a new stunnel.
"""
<DeepExtract>
state_files = {}
if os.path.isdir(state_file_dir):
for sf in os.listdir(state_file_dir):
if not sf.startswith('fs-') or os.path.isdir(os.path.join(state_file_dir, sf)):
continue
first_period = sf.find('.')
mount_point_and_port = sf[first_period + 1:]
logging.debug('Translating "%s" into mount point and port "%s"', sf, mount_point_and_port)
state_files[mount_point_and_port] = sf
state_files = state_files
</DeepExtract>
logging.debug('Persisted state files in "%s": %s', state_file_dir, list(state_files.values()))
for state_file in state_files.values():
state_file_path = os.path.join(state_file_dir, state_file)
with open(state_file_path) as f:
try:
state = json.load(f)
except ValueError:
logging.exception('Unable to parse json in %s', state_file_path)
continue
try:
pid = state['pid']
except KeyError:
logging.debug('No PID found in state file %s', state_file)
continue
<DeepExtract>
if not check_if_running_on_macos():
cmd = ['cat', '/proc/{pid}/cmdline'.format(pid=pid)]
else:
cmd = ['ps', '-p', str(pid), '-o', 'command=']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
out = p.communicate()[0]
</DeepExtract>
if out and 'stunnel' in str(out):
logging.debug('PID %s in state file %s is active. Skipping clean up', pid, state_file)
continue
state.pop('pid')
logging.debug('Cleaning up pid %s in state file %s', pid, state_file)
<DeepExtract>
tmp_state_file = os.path.join(state_file_dir, '~%s' % state_file)
logging.debug('Rewriting state file: writing ' + str(len(json.dumps(state))) + ' characters into the state file ' + str(tmp_state_file))
with open(tmp_state_file, 'w') as f:
json.dump(state, f)
os.rename(tmp_state_file, os.path.join(state_file_dir, state_file))
</DeepExtract>
|
def clean_up_previous_stunnel_pids(state_file_dir=STATE_FILE_DIR):
"""
Cleans up stunnel pids created by mount watchdog spawned by a previous efs-csi-driver pod after driver restart, upgrade
or crash. This method attempts to clean PIDs from persisted state files after efs-csi-driver restart to
ensure watchdog creates a new stunnel.
"""
state_files = {}
if os.path.isdir(state_file_dir):
for sf in os.listdir(state_file_dir):
if not sf.startswith('fs-') or os.path.isdir(os.path.join(state_file_dir, sf)):
continue
first_period = sf.find('.')
mount_point_and_port = sf[first_period + 1:]
logging.debug('Translating "%s" into mount point and port "%s"', sf, mount_point_and_port)
state_files[mount_point_and_port] = sf
state_files = state_files
logging.debug('Persisted state files in "%s": %s', state_file_dir, list(state_files.values()))
for state_file in state_files.values():
state_file_path = os.path.join(state_file_dir, state_file)
with open(state_file_path) as f:
try:
state = json.load(f)
except ValueError:
logging.exception('Unable to parse json in %s', state_file_path)
continue
try:
pid = state['pid']
except KeyError:
logging.debug('No PID found in state file %s', state_file)
continue
if not check_if_running_on_macos():
cmd = ['cat', '/proc/{pid}/cmdline'.format(pid=pid)]
else:
cmd = ['ps', '-p', str(pid), '-o', 'command=']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
out = p.communicate()[0]
if out and 'stunnel' in str(out):
logging.debug('PID %s in state file %s is active. Skipping clean up', pid, state_file)
continue
state.pop('pid')
logging.debug('Cleaning up pid %s in state file %s', pid, state_file)
tmp_state_file = os.path.join(state_file_dir, '~%s' % state_file)
logging.debug('Rewriting state file: writing ' + str(len(json.dumps(state))) + ' characters into the state file ' + str(tmp_state_file))
with open(tmp_state_file, 'w') as f:
json.dump(state, f)
os.rename(tmp_state_file, os.path.join(state_file_dir, state_file))
|
efs-utils
|
positive
|
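The efs-utils record above decides whether a persisted PID is stale by inspecting the process command line (/proc on Linux, ps elsewhere) and checking for 'stunnel'. A small standalone sketch of that check; the function name is illustrative and /proc is read directly instead of through cat.

import os
import subprocess

def pid_is_stunnel(pid: int) -> bool:
    try:
        if os.path.exists(f"/proc/{pid}/cmdline"):
            with open(f"/proc/{pid}/cmdline", "rb") as f:
                cmdline = f.read().decode(errors="replace")
        else:
            # Fallback for platforms without /proc (e.g. macOS), mirroring the ps call above.
            cmdline = subprocess.run(
                ["ps", "-p", str(pid), "-o", "command="],
                capture_output=True, text=True,
            ).stdout
    except OSError:
        return False
    return "stunnel" in cmdline

if __name__ == "__main__":
    print(pid_is_stunnel(os.getpid()))  # False: this interpreter is not stunnel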
def CSTLoss(A, P, N, aperture, margin, sv_requires_grad=False):
"""
Conceptor Similarity Triplet Loss
:param A: Anchor sample
:param P: Positive sample
:param N: Negative sample
:param aperture: Aperture
:param margin: Margin
"""
<DeepExtract>
x_length = A.size(0)
x_dim = A.size(1)
Rx = torch.mm(A.t(), A) / x_length
Cx = torch.mm(torch.inverse(Rx + math.pow(aperture, -2) * torch.eye(x_dim)), Rx)
(URx, SRx, _) = torch.svd(Rx)
(UCx, SCx, _) = torch.svd(Cx)
(RA, CA, URA, SRA, UCA, SCA) = (Rx, Cx, URx, SRx, UCx, SCx)
</DeepExtract>
<DeepExtract>
x_length = P.size(0)
x_dim = P.size(1)
Rx = torch.mm(P.t(), P) / x_length
Cx = torch.mm(torch.inverse(Rx + math.pow(aperture, -2) * torch.eye(x_dim)), Rx)
(URx, SRx, _) = torch.svd(Rx)
(UCx, SCx, _) = torch.svd(Cx)
(RP, CP, URP, SRP, UCP, SCP) = (Rx, Cx, URx, SRx, UCx, SCx)
</DeepExtract>
<DeepExtract>
x_length = N.size(0)
x_dim = N.size(1)
Rx = torch.mm(N.t(), N) / x_length
Cx = torch.mm(torch.inverse(Rx + math.pow(aperture, -2) * torch.eye(x_dim)), Rx)
(URx, SRx, _) = torch.svd(Rx)
(UCx, SCx, _) = torch.svd(Cx)
(RN, CN, URN, SRN, UCN, SCN) = (Rx, Cx, URx, SRx, UCx, SCx)
</DeepExtract>
if not sv_requires_grad:
SCA = SCA.detach()
SCP = SCP.detach()
SCN = SCN.detach()
return gcsim(UCA, SCA, UCN, SCN) - gcsim(UCA, SCA, UCP, SCP) + margin
|
def CSTLoss(A, P, N, aperture, margin, sv_requires_grad=False):
"""
Conceptor Similarity Triplet Loss
:param A: Anchor sample
:param P: Positive sample
:param N: Negative sample
:param aperture: Aperture
:param margin: Margin
"""
x_length = A.size(0)
x_dim = A.size(1)
Rx = torch.mm(A.t(), A) / x_length
Cx = torch.mm(torch.inverse(Rx + math.pow(aperture, -2) * torch.eye(x_dim)), Rx)
(URx, SRx, _) = torch.svd(Rx)
(UCx, SCx, _) = torch.svd(Cx)
(RA, CA, URA, SRA, UCA, SCA) = (Rx, Cx, URx, SRx, UCx, SCx)
x_length = P.size(0)
x_dim = P.size(1)
Rx = torch.mm(P.t(), P) / x_length
Cx = torch.mm(torch.inverse(Rx + math.pow(aperture, -2) * torch.eye(x_dim)), Rx)
(URx, SRx, _) = torch.svd(Rx)
(UCx, SCx, _) = torch.svd(Cx)
(RP, CP, URP, SRP, UCP, SCP) = (Rx, Cx, URx, SRx, UCx, SCx)
x_length = N.size(0)
x_dim = N.size(1)
Rx = torch.mm(N.t(), N) / x_length
Cx = torch.mm(torch.inverse(Rx + math.pow(aperture, -2) * torch.eye(x_dim)), Rx)
(URx, SRx, _) = torch.svd(Rx)
(UCx, SCx, _) = torch.svd(Cx)
(RN, CN, URN, SRN, UCN, SCN) = (Rx, Cx, URx, SRx, UCx, SCx)
if not sv_requires_grad:
SCA = SCA.detach()
SCP = SCP.detach()
SCN = SCN.detach()
return gcsim(UCA, SCA, UCN, SCN) - gcsim(UCA, SCA, UCP, SCP) + margin
|
EchoTorch
|
positive
|
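The EchoTorch loss above computes, for each sample matrix, the correlation matrix R, the conceptor C = (R + aperture^-2 I)^-1 R, and its SVD. A compact sketch of just that building block, with a sanity check that conceptor singular values always fall in [0, 1); the helper name is illustrative.

import math
import torch

def conceptor(x: torch.Tensor, aperture: float):
    length, dim = x.shape
    r = x.t() @ x / length                                          # dim x dim correlation matrix
    c = torch.inverse(r + math.pow(aperture, -2) * torch.eye(dim)) @ r
    u, s, _ = torch.svd(c)                                          # basis and singular values used for similarity
    return c, u, s

if __name__ == "__main__":
    torch.manual_seed(0)
    x = torch.randn(200, 4)
    c, u, s = conceptor(x, aperture=10.0)
    print(s.min().item() >= 0.0, s.max().item() < 1.0)  # True True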
def test_quotient_dihedral_even(self):
N = 4
dg = dihedral_group(N)
for n in range(1, int(round(np.sqrt(N))) + 1):
if N % n == 0:
for f in range(N // n):
sg_id = (f, n)
(sg, _, _) = dg.subgroup(sg_id)
<DeepExtract>
(subgroup, parent, child) = dg.subgroup(sg_id)
assert sg.trivial_representation.group == subgroup
induced_repr = dg.induced_representation(sg_id, sg.trivial_representation)
assert induced_repr.group == dg
self.assertTrue(np.allclose(induced_repr.change_of_basis.T @ induced_repr.change_of_basis, np.eye(induced_repr.size)), 'Change of Basis not orthonormal')
self.assertTrue(np.allclose(induced_repr.change_of_basis @ induced_repr.change_of_basis.T, np.eye(induced_repr.size)), 'Change of Basis not orthonormal')
self.assertTrue(np.allclose(induced_repr.change_of_basis, induced_repr.change_of_basis_inv.T), 'Change of Basis not orthonormal')
restricted_repr = dg.restrict_representation(sg_id, induced_repr)
for e in subgroup.testing_elements():
repr_a = sg.trivial_representation(e)
repr_b = induced_repr(parent(e))[:sg.trivial_representation.size, :sg.trivial_representation.size]
repr_c = restricted_repr(e)[:sg.trivial_representation.size, :sg.trivial_representation.size]
np.set_printoptions(precision=2, threshold=2 * repr_a.size ** 2, suppress=True, linewidth=10 * repr_a.size + 3)
self.assertTrue(np.allclose(repr_a, repr_b), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {e}:\n{repr_a}\ndifferent from\n {repr_b}\n')
if not np.allclose(repr_c, repr_b):
print(e, parent(e))
print(induced_repr.change_of_basis_inv @ induced_repr(parent(e)) @ induced_repr.change_of_basis)
print(restricted_repr.change_of_basis_inv @ restricted_repr(e) @ restricted_repr.change_of_basis)
print(induced_repr.irreps)
print(restricted_repr.irreps)
print(np.allclose(induced_repr.change_of_basis, restricted_repr.change_of_basis))
self.assertTrue(np.allclose(repr_c, repr_b), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {e}:\n{repr_c}\ndifferent from\n {repr_b}\n')
quotient_size = int(dg.order() / subgroup.order())
size = sg.trivial_representation.size * quotient_size
cosets = {}
representatives = defaultdict(lambda : [])
for e in dg.elements:
if e not in cosets:
representatives[e] = []
for g in subgroup.elements:
eg = dg.combine(e, parent(g))
cosets[eg] = e
representatives[e].append(eg)
index = {e: i for (i, e) in enumerate(representatives)}
P = directsum([dg.irreps[irr] for irr in induced_repr.irreps], name='irreps')
for g in dg.testing_elements():
repr_g = np.zeros((size, size), dtype=np.float)
for r in representatives:
gr = dg.combine(g, r)
g_r = cosets[gr]
i = index[r]
j = index[g_r]
hp = dg.combine(dg.inverse(g_r), gr)
h = child(hp)
assert h is not None, (g, r, gr, g_r, dg.inverse(g_r), hp)
repr_g[j * sg.trivial_representation.size:(j + 1) * sg.trivial_representation.size, i * sg.trivial_representation.size:(i + 1) * sg.trivial_representation.size] = sg.trivial_representation(h)
ind_g = induced_repr(g)
self.assertTrue(np.allclose(repr_g, ind_g), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {g}:\n{repr_g}\ndifferent from\n {ind_g}\n')
ind_g2 = induced_repr.change_of_basis @ P(g) @ induced_repr.change_of_basis_inv
self.assertTrue(np.allclose(ind_g2, ind_g), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {g}:\n{ind_g2}\ndifferent from\n {ind_g}\n')
</DeepExtract>
sg_id = (None, n)
(sg, _, _) = dg.subgroup(sg_id)
<DeepExtract>
(subgroup, parent, child) = dg.subgroup(sg_id)
assert sg.trivial_representation.group == subgroup
induced_repr = dg.induced_representation(sg_id, sg.trivial_representation)
assert induced_repr.group == dg
self.assertTrue(np.allclose(induced_repr.change_of_basis.T @ induced_repr.change_of_basis, np.eye(induced_repr.size)), 'Change of Basis not orthonormal')
self.assertTrue(np.allclose(induced_repr.change_of_basis @ induced_repr.change_of_basis.T, np.eye(induced_repr.size)), 'Change of Basis not orthonormal')
self.assertTrue(np.allclose(induced_repr.change_of_basis, induced_repr.change_of_basis_inv.T), 'Change of Basis not orthonormal')
restricted_repr = dg.restrict_representation(sg_id, induced_repr)
for e in subgroup.testing_elements():
repr_a = sg.trivial_representation(e)
repr_b = induced_repr(parent(e))[:sg.trivial_representation.size, :sg.trivial_representation.size]
repr_c = restricted_repr(e)[:sg.trivial_representation.size, :sg.trivial_representation.size]
np.set_printoptions(precision=2, threshold=2 * repr_a.size ** 2, suppress=True, linewidth=10 * repr_a.size + 3)
self.assertTrue(np.allclose(repr_a, repr_b), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {e}:\n{repr_a}\ndifferent from\n {repr_b}\n')
if not np.allclose(repr_c, repr_b):
print(e, parent(e))
print(induced_repr.change_of_basis_inv @ induced_repr(parent(e)) @ induced_repr.change_of_basis)
print(restricted_repr.change_of_basis_inv @ restricted_repr(e) @ restricted_repr.change_of_basis)
print(induced_repr.irreps)
print(restricted_repr.irreps)
print(np.allclose(induced_repr.change_of_basis, restricted_repr.change_of_basis))
self.assertTrue(np.allclose(repr_c, repr_b), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {e}:\n{repr_c}\ndifferent from\n {repr_b}\n')
quotient_size = int(dg.order() / subgroup.order())
size = sg.trivial_representation.size * quotient_size
cosets = {}
representatives = defaultdict(lambda : [])
for e in dg.elements:
if e not in cosets:
representatives[e] = []
for g in subgroup.elements:
eg = dg.combine(e, parent(g))
cosets[eg] = e
representatives[e].append(eg)
index = {e: i for (i, e) in enumerate(representatives)}
P = directsum([dg.irreps[irr] for irr in induced_repr.irreps], name='irreps')
for g in dg.testing_elements():
repr_g = np.zeros((size, size), dtype=np.float)
for r in representatives:
gr = dg.combine(g, r)
g_r = cosets[gr]
i = index[r]
j = index[g_r]
hp = dg.combine(dg.inverse(g_r), gr)
h = child(hp)
assert h is not None, (g, r, gr, g_r, dg.inverse(g_r), hp)
repr_g[j * sg.trivial_representation.size:(j + 1) * sg.trivial_representation.size, i * sg.trivial_representation.size:(i + 1) * sg.trivial_representation.size] = sg.trivial_representation(h)
ind_g = induced_repr(g)
self.assertTrue(np.allclose(repr_g, ind_g), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {g}:\n{repr_g}\ndifferent from\n {ind_g}\n')
ind_g2 = induced_repr.change_of_basis @ P(g) @ induced_repr.change_of_basis_inv
self.assertTrue(np.allclose(ind_g2, ind_g), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {g}:\n{ind_g2}\ndifferent from\n {ind_g}\n')
</DeepExtract>
|
def test_quotient_dihedral_even(self):
N = 4
dg = dihedral_group(N)
for n in range(1, int(round(np.sqrt(N))) + 1):
if N % n == 0:
for f in range(N // n):
sg_id = (f, n)
(sg, _, _) = dg.subgroup(sg_id)
(subgroup, parent, child) = dg.subgroup(sg_id)
assert sg.trivial_representation.group == subgroup
induced_repr = dg.induced_representation(sg_id, sg.trivial_representation)
assert induced_repr.group == dg
self.assertTrue(np.allclose(induced_repr.change_of_basis.T @ induced_repr.change_of_basis, np.eye(induced_repr.size)), 'Change of Basis not orthonormal')
self.assertTrue(np.allclose(induced_repr.change_of_basis @ induced_repr.change_of_basis.T, np.eye(induced_repr.size)), 'Change of Basis not orthonormal')
self.assertTrue(np.allclose(induced_repr.change_of_basis, induced_repr.change_of_basis_inv.T), 'Change of Basis not orthonormal')
restricted_repr = dg.restrict_representation(sg_id, induced_repr)
for e in subgroup.testing_elements():
repr_a = sg.trivial_representation(e)
repr_b = induced_repr(parent(e))[:sg.trivial_representation.size, :sg.trivial_representation.size]
repr_c = restricted_repr(e)[:sg.trivial_representation.size, :sg.trivial_representation.size]
np.set_printoptions(precision=2, threshold=2 * repr_a.size ** 2, suppress=True, linewidth=10 * repr_a.size + 3)
self.assertTrue(np.allclose(repr_a, repr_b), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {e}:\n{repr_a}\ndifferent from\n {repr_b}\n')
if not np.allclose(repr_c, repr_b):
print(e, parent(e))
print(induced_repr.change_of_basis_inv @ induced_repr(parent(e)) @ induced_repr.change_of_basis)
print(restricted_repr.change_of_basis_inv @ restricted_repr(e) @ restricted_repr.change_of_basis)
print(induced_repr.irreps)
print(restricted_repr.irreps)
print(np.allclose(induced_repr.change_of_basis, restricted_repr.change_of_basis))
self.assertTrue(np.allclose(repr_c, repr_b), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {e}:\n{repr_c}\ndifferent from\n {repr_b}\n')
quotient_size = int(dg.order() / subgroup.order())
size = sg.trivial_representation.size * quotient_size
cosets = {}
representatives = defaultdict(lambda : [])
for e in dg.elements:
if e not in cosets:
representatives[e] = []
for g in subgroup.elements:
eg = dg.combine(e, parent(g))
cosets[eg] = e
representatives[e].append(eg)
index = {e: i for (i, e) in enumerate(representatives)}
P = directsum([dg.irreps[irr] for irr in induced_repr.irreps], name='irreps')
for g in dg.testing_elements():
repr_g = np.zeros((size, size), dtype=np.float)
for r in representatives:
gr = dg.combine(g, r)
g_r = cosets[gr]
i = index[r]
j = index[g_r]
hp = dg.combine(dg.inverse(g_r), gr)
h = child(hp)
assert h is not None, (g, r, gr, g_r, dg.inverse(g_r), hp)
repr_g[j * sg.trivial_representation.size:(j + 1) * sg.trivial_representation.size, i * sg.trivial_representation.size:(i + 1) * sg.trivial_representation.size] = sg.trivial_representation(h)
ind_g = induced_repr(g)
self.assertTrue(np.allclose(repr_g, ind_g), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {g}:\n{repr_g}\ndifferent from\n {ind_g}\n')
ind_g2 = induced_repr.change_of_basis @ P(g) @ induced_repr.change_of_basis_inv
self.assertTrue(np.allclose(ind_g2, ind_g), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {g}:\n{ind_g2}\ndifferent from\n {ind_g}\n')
sg_id = (None, n)
(sg, _, _) = dg.subgroup(sg_id)
(subgroup, parent, child) = dg.subgroup(sg_id)
assert sg.trivial_representation.group == subgroup
induced_repr = dg.induced_representation(sg_id, sg.trivial_representation)
assert induced_repr.group == dg
self.assertTrue(np.allclose(induced_repr.change_of_basis.T @ induced_repr.change_of_basis, np.eye(induced_repr.size)), 'Change of Basis not orthonormal')
self.assertTrue(np.allclose(induced_repr.change_of_basis @ induced_repr.change_of_basis.T, np.eye(induced_repr.size)), 'Change of Basis not orthonormal')
self.assertTrue(np.allclose(induced_repr.change_of_basis, induced_repr.change_of_basis_inv.T), 'Change of Basis not orthonormal')
restricted_repr = dg.restrict_representation(sg_id, induced_repr)
for e in subgroup.testing_elements():
repr_a = sg.trivial_representation(e)
repr_b = induced_repr(parent(e))[:sg.trivial_representation.size, :sg.trivial_representation.size]
repr_c = restricted_repr(e)[:sg.trivial_representation.size, :sg.trivial_representation.size]
np.set_printoptions(precision=2, threshold=2 * repr_a.size ** 2, suppress=True, linewidth=10 * repr_a.size + 3)
self.assertTrue(np.allclose(repr_a, repr_b), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {e}:\n{repr_a}\ndifferent from\n {repr_b}\n')
if not np.allclose(repr_c, repr_b):
print(e, parent(e))
print(induced_repr.change_of_basis_inv @ induced_repr(parent(e)) @ induced_repr.change_of_basis)
print(restricted_repr.change_of_basis_inv @ restricted_repr(e) @ restricted_repr.change_of_basis)
print(induced_repr.irreps)
print(restricted_repr.irreps)
print(np.allclose(induced_repr.change_of_basis, restricted_repr.change_of_basis))
self.assertTrue(np.allclose(repr_c, repr_b), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {e}:\n{repr_c}\ndifferent from\n {repr_b}\n')
quotient_size = int(dg.order() / subgroup.order())
size = sg.trivial_representation.size * quotient_size
cosets = {}
representatives = defaultdict(lambda : [])
for e in dg.elements:
if e not in cosets:
representatives[e] = []
for g in subgroup.elements:
eg = dg.combine(e, parent(g))
cosets[eg] = e
representatives[e].append(eg)
index = {e: i for (i, e) in enumerate(representatives)}
P = directsum([dg.irreps[irr] for irr in induced_repr.irreps], name='irreps')
for g in dg.testing_elements():
repr_g = np.zeros((size, size), dtype=np.float)
for r in representatives:
gr = dg.combine(g, r)
g_r = cosets[gr]
i = index[r]
j = index[g_r]
hp = dg.combine(dg.inverse(g_r), gr)
h = child(hp)
assert h is not None, (g, r, gr, g_r, dg.inverse(g_r), hp)
repr_g[j * sg.trivial_representation.size:(j + 1) * sg.trivial_representation.size, i * sg.trivial_representation.size:(i + 1) * sg.trivial_representation.size] = sg.trivial_representation(h)
ind_g = induced_repr(g)
self.assertTrue(np.allclose(repr_g, ind_g), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {g}:\n{repr_g}\ndifferent from\n {ind_g}\n')
ind_g2 = induced_repr.change_of_basis @ P(g) @ induced_repr.change_of_basis_inv
self.assertTrue(np.allclose(ind_g2, ind_g), msg=f'{dg.name}\\{subgroup.name}: {sg.trivial_representation.name} - {g}:\n{ind_g2}\ndifferent from\n {ind_g}\n')
|
e2cnn
|
positive
|
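The e2cnn test above builds an induced representation by enumerating cosets of a subgroup, choosing representatives, and filling a block permutation matrix per group element. A toy, numpy-only sketch of the same bookkeeping for the cyclic group Z_n with the trivial subgroup representation, plus a homomorphism check; this illustrates the construction and is not e2cnn's API.

import numpy as np

def coset_permutation_representation(n, subgroup):
    elements = list(range(n))
    cosets = {}
    representatives = []
    for e in elements:
        if e not in cosets:
            representatives.append(e)
            for h in subgroup:
                cosets[(e + h) % n] = e       # every element remembers its coset representative
    index = {r: i for i, r in enumerate(representatives)}
    reps = {}
    for g in elements:
        m = np.zeros((len(representatives), len(representatives)))
        for r in representatives:
            m[index[cosets[(g + r) % n]], index[r]] = 1.0   # g sends the coset of r to the coset of g+r
        reps[g] = m
    return reps

if __name__ == "__main__":
    reps = coset_permutation_representation(4, subgroup=[0, 2])
    ok = all(np.allclose(reps[(a + b) % 4], reps[a] @ reps[b]) for a in range(4) for b in range(4))
    print(ok)  # True: the coset action really is a group representation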
def load_network(network_dir=None, checkpoint=None, constructor_fun_name=None, constructor_module=None, **kwargs):
"""Loads a network checkpoint file.
Can be called in two different ways:
load_checkpoint(network_dir):
            Loads the checkpoint file given by the path. If checkpoint_dir is a directory,
it tries to find the latest checkpoint in that directory.
load_checkpoint(network_dir, checkpoint=epoch_num):
Loads the network at the given epoch number (int).
The extra keyword arguments are supplied to the network constructor to replace saved ones.
"""
if network_dir is not None:
net_path = Path(network_dir)
else:
net_path = None
if net_path.is_file():
checkpoint = str(net_path)
if checkpoint is None:
checkpoint_list = sorted(net_path.glob('*.pth.tar'))
if checkpoint_list:
checkpoint_path = checkpoint_list[-1]
else:
raise Exception('No matching checkpoint file found')
elif isinstance(checkpoint, int):
checkpoint_list = sorted(net_path.glob('*_ep{:04d}.pth.tar'.format(checkpoint)))
if not checkpoint_list or len(checkpoint_list) == 0:
raise Exception('No matching checkpoint file found')
if len(checkpoint_list) > 1:
raise Exception('Multiple matching checkpoint files found')
else:
checkpoint_path = checkpoint_list[0]
elif isinstance(checkpoint, str):
checkpoint_path = os.path.expanduser(checkpoint)
else:
raise TypeError
<DeepExtract>
_setup_legacy_env()
checkpoint_dict = torch.load(checkpoint_path)
_cleanup_legacy_env()
checkpoint_dict = checkpoint_dict
</DeepExtract>
if 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None:
net_constr = checkpoint_dict['constructor']
if constructor_fun_name is not None:
net_constr.fun_name = constructor_fun_name
if constructor_module is not None:
net_constr.fun_module = constructor_module
for (arg, val) in kwargs.items():
if arg in net_constr.kwds.keys():
net_constr.kwds[arg] = val
else:
print('WARNING: Keyword argument "{}" not found when loading network.'.format(arg))
if net_constr.fun_module.startswith('dlframework.'):
net_constr.fun_module = net_constr.fun_module[len('dlframework.'):]
net = net_constr.get()
else:
raise RuntimeError('No constructor for the given network.')
net.load_state_dict(checkpoint_dict['net'])
net.constructor = checkpoint_dict['constructor']
if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None:
net.info = checkpoint_dict['net_info']
return (net, checkpoint_dict)
|
def load_network(network_dir=None, checkpoint=None, constructor_fun_name=None, constructor_module=None, **kwargs):
"""Loads a network checkpoint file.
Can be called in two different ways:
load_checkpoint(network_dir):
            Loads the checkpoint file given by the path. If checkpoint_dir is a directory,
it tries to find the latest checkpoint in that directory.
load_checkpoint(network_dir, checkpoint=epoch_num):
Loads the network at the given epoch number (int).
The extra keyword arguments are supplied to the network constructor to replace saved ones.
"""
if network_dir is not None:
net_path = Path(network_dir)
else:
net_path = None
if net_path.is_file():
checkpoint = str(net_path)
if checkpoint is None:
checkpoint_list = sorted(net_path.glob('*.pth.tar'))
if checkpoint_list:
checkpoint_path = checkpoint_list[-1]
else:
raise Exception('No matching checkpoint file found')
elif isinstance(checkpoint, int):
checkpoint_list = sorted(net_path.glob('*_ep{:04d}.pth.tar'.format(checkpoint)))
if not checkpoint_list or len(checkpoint_list) == 0:
raise Exception('No matching checkpoint file found')
if len(checkpoint_list) > 1:
raise Exception('Multiple matching checkpoint files found')
else:
checkpoint_path = checkpoint_list[0]
elif isinstance(checkpoint, str):
checkpoint_path = os.path.expanduser(checkpoint)
else:
raise TypeError
_setup_legacy_env()
checkpoint_dict = torch.load(checkpoint_path)
_cleanup_legacy_env()
checkpoint_dict = checkpoint_dict
if 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None:
net_constr = checkpoint_dict['constructor']
if constructor_fun_name is not None:
net_constr.fun_name = constructor_fun_name
if constructor_module is not None:
net_constr.fun_module = constructor_module
for (arg, val) in kwargs.items():
if arg in net_constr.kwds.keys():
net_constr.kwds[arg] = val
else:
print('WARNING: Keyword argument "{}" not found when loading network.'.format(arg))
if net_constr.fun_module.startswith('dlframework.'):
net_constr.fun_module = net_constr.fun_module[len('dlframework.'):]
net = net_constr.get()
else:
raise RuntimeError('No constructor for the given network.')
net.load_state_dict(checkpoint_dict['net'])
net.constructor = checkpoint_dict['constructor']
if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None:
net.info = checkpoint_dict['net_info']
return (net, checkpoint_dict)
|
d3s
|
positive
|
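load_network above resolves its checkpoint three ways: an explicit file path, the lexicographically last *.pth.tar in a directory, or a *_epNNNN.pth.tar file selected by an integer epoch. A sketch of just that resolution logic, with no torch loading; names and the demo directory are illustrative.

from pathlib import Path

def resolve_checkpoint(network_dir, checkpoint=None):
    net_path = Path(network_dir)
    if net_path.is_file():
        return net_path
    if checkpoint is None:
        candidates = sorted(net_path.glob("*.pth.tar"))
        if not candidates:
            raise FileNotFoundError("No matching checkpoint file found")
        return candidates[-1]                     # latest checkpoint by name
    if isinstance(checkpoint, int):
        candidates = sorted(net_path.glob("*_ep{:04d}.pth.tar".format(checkpoint)))
        if len(candidates) != 1:
            raise FileNotFoundError("Expected exactly one checkpoint for epoch %d" % checkpoint)
        return candidates[0]
    raise TypeError("checkpoint must be None, an int epoch, or a file path")

if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as d:
        for name in ("net_ep0001.pth.tar", "net_ep0002.pth.tar"):
            (Path(d) / name).touch()
        print(resolve_checkpoint(d).name)     # net_ep0002.pth.tar
        print(resolve_checkpoint(d, 1).name)  # net_ep0001.pth.tar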
@position.setter
def position(self, new_position_ns: int):
new_position_ns = max(0, new_position_ns)
<DeepExtract>
success = False
counter = 0
while not success and counter < 10:
(success, value) = self._player.query_duration(Gst.Format.TIME)
if success:
duration = value
else:
counter += 1
time.sleep(0.01)
duration = None
</DeepExtract>
if duration:
new_position_ns = min(new_position_ns, duration)
<DeepExtract>
counter = 0
seeked = False
while not seeked and counter < 500:
seeked = self._player.seek(self._playback_speed, Gst.Format.TIME, Gst.SeekFlags.FLUSH, Gst.SeekType.SET, new_position_ns, Gst.SeekType.NONE, 0)
if not seeked:
counter += 1
time.sleep(0.01)
if not seeked:
log.info('Failed to seek, counter expired.')
reporter.warning('gst_player', 'Failed to seek, counter expired.')
</DeepExtract>
|
@position.setter
def position(self, new_position_ns: int):
new_position_ns = max(0, new_position_ns)
success = False
counter = 0
while not success and counter < 10:
(success, value) = self._player.query_duration(Gst.Format.TIME)
if success:
duration = value
else:
counter += 1
time.sleep(0.01)
duration = None
if duration:
new_position_ns = min(new_position_ns, duration)
counter = 0
seeked = False
while not seeked and counter < 500:
seeked = self._player.seek(self._playback_speed, Gst.Format.TIME, Gst.SeekFlags.FLUSH, Gst.SeekType.SET, new_position_ns, Gst.SeekType.NONE, 0)
if not seeked:
counter += 1
time.sleep(0.01)
if not seeked:
log.info('Failed to seek, counter expired.')
reporter.warning('gst_player', 'Failed to seek, counter expired.')
|
cozy
|
positive
|
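Both columns of the cozy record above wrap GStreamer calls in a bounded retry loop: try, sleep briefly, give up after a fixed number of attempts. A generic sketch of that pattern with a flaky stand-in operation instead of Gst; names are illustrative.

import time

def retry(operation, attempts=10, delay=0.01):
    # Call operation() until it reports success or the attempt budget runs out.
    for _ in range(attempts):
        ok, value = operation()
        if ok:
            return value
        time.sleep(delay)
    return None

if __name__ == "__main__":
    state = {"calls": 0}

    def flaky_query():
        # Succeeds on the third call, mimicking a pipeline that is not ready yet.
        state["calls"] += 1
        return (state["calls"] >= 3, 42)

    print(retry(flaky_query))  # 42
    print(state["calls"])      # 3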
def _get_DQN_prediction(self, state):
shape = state.shape.as_list()
net = tf.reshape(state, [-1, shape[-1]])
net = tf.concat([conv_block(net[:, :180], 32, 180, [[128, 3, 'identity'], [128, 3, 'identity'], [128, 3, 'downsampling'], [128, 3, 'identity'], [128, 3, 'identity'], [256, 3, 'downsampling'], [256, 3, 'identity'], [256, 3, 'identity']], 'handcards'), net[:, 180:]], -1)
units = [512, 512, 256, 256, 128, 128]
for (i, unit) in enumerate(units):
with tf.variable_scope('block%i' % i):
<DeepExtract>
residual = net
for i in range(stack):
residual = FullyConnected('fc%d' % i, residual, unit, activation=tf.nn.relu)
x = net
if net.shape[1].value != unit:
x = FullyConnected('fc', x, unit, activation=tf.nn.relu)
net = tf.contrib.layers.layer_norm(residual + x, scale=False)
</DeepExtract>
l = net
if self.method != 'Dueling':
Q = FullyConnected('fct', l, len(action_space))
else:
V = FullyConnected('fctV', l, 1)
As = FullyConnected('fctA', l, len(action_space))
Q = tf.add(As, V - tf.reduce_mean(As, 1, keep_dims=True))
return tf.identity(Q, name='Qvalue')
|
def _get_DQN_prediction(self, state):
shape = state.shape.as_list()
net = tf.reshape(state, [-1, shape[-1]])
net = tf.concat([conv_block(net[:, :180], 32, 180, [[128, 3, 'identity'], [128, 3, 'identity'], [128, 3, 'downsampling'], [128, 3, 'identity'], [128, 3, 'identity'], [256, 3, 'downsampling'], [256, 3, 'identity'], [256, 3, 'identity']], 'handcards'), net[:, 180:]], -1)
units = [512, 512, 256, 256, 128, 128]
for (i, unit) in enumerate(units):
with tf.variable_scope('block%i' % i):
residual = net
for i in range(stack):
residual = FullyConnected('fc%d' % i, residual, unit, activation=tf.nn.relu)
x = net
if net.shape[1].value != unit:
x = FullyConnected('fc', x, unit, activation=tf.nn.relu)
net = tf.contrib.layers.layer_norm(residual + x, scale=False)
l = net
if self.method != 'Dueling':
Q = FullyConnected('fct', l, len(action_space))
else:
V = FullyConnected('fctV', l, 1)
As = FullyConnected('fctA', l, len(action_space))
Q = tf.add(As, V - tf.reduce_mean(As, 1, keep_dims=True))
return tf.identity(Q, name='Qvalue')
|
doudizhu-C
|
positive
|
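The Dueling branch above combines the value and advantage streams as Q(s, a) = A(s, a) + V(s) - mean_a A(s, a). A numpy sketch of that combination step alone (the tensorpack/TensorFlow layers are out of scope here).

import numpy as np

def dueling_q(values: np.ndarray, advantages: np.ndarray) -> np.ndarray:
    # values: (batch, 1), advantages: (batch, n_actions)
    return advantages + values - advantages.mean(axis=1, keepdims=True)

if __name__ == "__main__":
    v = np.array([[1.0], [0.0]])
    a = np.array([[0.0, 2.0, 4.0], [3.0, 3.0, 3.0]])
    print(dueling_q(v, a))
    # [[-1.  1.  3.]
    #  [ 0.  0.  0.]]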
def getYesorNoResponse(handler_input, textType):
table = boto3.resource('dynamodb').Table('AdvgStoryDetails')
speak_output = GAME_END
try:
question_record = table.query(KeyConditionExpression=Key('CountryId').eq(get_country_id(handler_input.attributes_manager.session_attributes['country'])) & Key('QuestionNumber').eq(handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['QuestionNumber'] + 1))
if question_record['Count'] == 1:
speak_output = question_record['Items'][0][textType]
speak_output = '<voice name="' + get_polly_voice(handler_input.attributes_manager.session_attributes['country']) + '">' + speak_output + ' </voice>'
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['CurrentTurns'] += 1
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['QuestionNumber'] += 1
if textType == 'YesResponseText':
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['MoneyLevel'] += question_record['Items'][0]['YesWealthImpact']
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['EnergyLevel'] += question_record['Items'][0]['YesEnergyImpact']
elif textType == 'NoResponseText':
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['MoneyLevel'] += question_record['Items'][0]['NoWealthImpact']
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['EnergyLevel'] += question_record['Items'][0]['NoEnergyImpact']
current_wealth = handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['MoneyLevel']
current_energy = handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['EnergyLevel']
if is_game_over(handler_input.attributes_manager.session_attributes['stats_record']):
speak_output = '<voice name="' + get_polly_voice(handler_input.attributes_manager.session_attributes['country']) + '">Oh no adventurer, you don\'t have enough wealth or energy to continue on your adventure! This means your adventure is over. </voice> '
<DeepExtract>
if is_user_on_session(handler_input) and has_active_adventure(handler_input):
table = boto3.resource('dynamodb').Table('AdvgGameStats')
table.update_item(Key={'PlayerNumber': handler_input.attributes_manager.session_attributes['user']['Items'][0]['PlayerNumber'], 'CountryId': get_country_id(handler_input.attributes_manager.session_attributes['country'])}, UpdateExpression='set ActiveFlag=:n', ConditionExpression='ActiveFlag=:a', ExpressionAttributeValues={':n': 'N', ':a': 'Y'})
</DeepExtract>
elif is_warning_needed(current_wealth, current_energy):
speak_output = '<voice name="' + get_polly_voice(handler_input.attributes_manager.session_attributes['country']) + '">Be careful adventurer, you are running low on wealth or energy. If you need a travel tip, say speak to the guide.</voice> '
speak_output = speak_output + ' ' + get_next_question(handler_input.attributes_manager.session_attributes['country'], handler_input.attributes_manager.session_attributes['stats_record'], handler_input)
else:
speak_output = speak_output + ' ' + get_next_question(handler_input.attributes_manager.session_attributes['country'], handler_input.attributes_manager.session_attributes['stats_record'], handler_input)
else:
logger.error("That question number doesn't exist: {}".format(handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['QuestionNumber']))
except:
logger.error('An error in getYesorNoResponse for text type {} -- {}'.format(textType, handler_input))
speak_output = "Sorry, adventurer! I don't understand what you want to do. {}".format(VISIT_COUNTRY_REPROMPT)
return speak_output
|
def getYesorNoResponse(handler_input, textType):
table = boto3.resource('dynamodb').Table('AdvgStoryDetails')
speak_output = GAME_END
try:
question_record = table.query(KeyConditionExpression=Key('CountryId').eq(get_country_id(handler_input.attributes_manager.session_attributes['country'])) & Key('QuestionNumber').eq(handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['QuestionNumber'] + 1))
if question_record['Count'] == 1:
speak_output = question_record['Items'][0][textType]
speak_output = '<voice name="' + get_polly_voice(handler_input.attributes_manager.session_attributes['country']) + '">' + speak_output + ' </voice>'
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['CurrentTurns'] += 1
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['QuestionNumber'] += 1
if textType == 'YesResponseText':
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['MoneyLevel'] += question_record['Items'][0]['YesWealthImpact']
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['EnergyLevel'] += question_record['Items'][0]['YesEnergyImpact']
elif textType == 'NoResponseText':
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['MoneyLevel'] += question_record['Items'][0]['NoWealthImpact']
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['EnergyLevel'] += question_record['Items'][0]['NoEnergyImpact']
current_wealth = handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['MoneyLevel']
current_energy = handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['EnergyLevel']
if is_game_over(handler_input.attributes_manager.session_attributes['stats_record']):
speak_output = '<voice name="' + get_polly_voice(handler_input.attributes_manager.session_attributes['country']) + '">Oh no adventurer, you don\'t have enough wealth or energy to continue on your adventure! This means your adventure is over. </voice> '
if is_user_on_session(handler_input) and has_active_adventure(handler_input):
table = boto3.resource('dynamodb').Table('AdvgGameStats')
table.update_item(Key={'PlayerNumber': handler_input.attributes_manager.session_attributes['user']['Items'][0]['PlayerNumber'], 'CountryId': get_country_id(handler_input.attributes_manager.session_attributes['country'])}, UpdateExpression='set ActiveFlag=:n', ConditionExpression='ActiveFlag=:a', ExpressionAttributeValues={':n': 'N', ':a': 'Y'})
elif is_warning_needed(current_wealth, current_energy):
speak_output = '<voice name="' + get_polly_voice(handler_input.attributes_manager.session_attributes['country']) + '">Be careful adventurer, you are running low on wealth or energy. If you need a travel tip, say speak to the guide.</voice> '
speak_output = speak_output + ' ' + get_next_question(handler_input.attributes_manager.session_attributes['country'], handler_input.attributes_manager.session_attributes['stats_record'], handler_input)
else:
speak_output = speak_output + ' ' + get_next_question(handler_input.attributes_manager.session_attributes['country'], handler_input.attributes_manager.session_attributes['stats_record'], handler_input)
else:
logger.error("That question number doesn't exist: {}".format(handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['QuestionNumber']))
except:
logger.error('An error in getYesorNoResponse for text type {} -- {}'.format(textType, handler_input))
speak_output = "Sorry, adventurer! I don't understand what you want to do. {}".format(VISIT_COUNTRY_REPROMPT)
return speak_output
|
Course_Alexa_Skill_Builder
|
positive
|
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
        state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = pretrained_model_name_or_path
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error("Model name '{}' was not found in model name list ({}). We assumed '{}' was a path or url but couldn't find any file associated to this path or url.".format(pretrained_model_name_or_path, ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), archive_file))
return None
if resolved_archive_file == archive_file:
logger.info('loading archive file {}'.format(archive_file))
else:
logger.info('loading archive file {} from cache at {}'.format(archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file) or from_tf:
serialization_dir = resolved_archive_file
else:
tempdir = tempfile.mkdtemp()
logger.info('extracting archive file {} to temp dir {}'.format(resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
serialization_dir = tempdir
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info('Model config {}'.format(config))
model = cls(config, *inputs, **kwargs)
if state_dict is None and (not from_tf):
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None)
if tempdir:
shutil.rmtree(tempdir)
if from_tf:
weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
return load_tf_weights_in_bert(model, weights_path)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for (old_key, new_key) in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for (name, child) in module._modules.items():
if child is not None:
<DeepExtract>
local_metadata = {} if metadata is None else metadata.get(prefix + name + '.'[:-1], {})
child._load_from_state_dict(state_dict, prefix + name + '.', local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for (name, child) in child._modules.items():
if child is not None:
load(child, prefix + name + '.' + name + '.')
</DeepExtract>
start_prefix = ''
if not hasattr(model, 'bert') and any((s.startswith('bert.') for s in state_dict.keys())):
start_prefix = 'bert.'
<DeepExtract>
local_metadata = {} if metadata is None else metadata.get(start_prefix[:-1], {})
model._load_from_state_dict(state_dict, start_prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for (name, child) in model._modules.items():
if child is not None:
load(child, start_prefix + name + '.')
</DeepExtract>
if len(missing_keys) > 0:
logger.info('Weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info('Weights from pretrained model not used in {}: {}'.format(model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(model.__class__.__name__, '\n\t'.join(error_msgs)))
return model
|
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs):
"""
Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name_or_path: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `model.chkpt` a TensorFlow checkpoint
from_tf: should we load the weights from a locally saved TensorFlow checkpoint
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
        state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
archive_file = pretrained_model_name_or_path
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error("Model name '{}' was not found in model name list ({}). We assumed '{}' was a path or url but couldn't find any file associated to this path or url.".format(pretrained_model_name_or_path, ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), archive_file))
return None
if resolved_archive_file == archive_file:
logger.info('loading archive file {}'.format(archive_file))
else:
logger.info('loading archive file {} from cache at {}'.format(archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file) or from_tf:
serialization_dir = resolved_archive_file
else:
tempdir = tempfile.mkdtemp()
logger.info('extracting archive file {} to temp dir {}'.format(resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
serialization_dir = tempdir
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info('Model config {}'.format(config))
model = cls(config, *inputs, **kwargs)
if state_dict is None and (not from_tf):
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None)
if tempdir:
shutil.rmtree(tempdir)
if from_tf:
weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
return load_tf_weights_in_bert(model, weights_path)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for (old_key, new_key) in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for (name, child) in module._modules.items():
if child is not None:
local_metadata = {} if metadata is None else metadata.get(prefix + name + '.'[:-1], {})
child._load_from_state_dict(state_dict, prefix + name + '.', local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for (name, child) in child._modules.items():
if child is not None:
load(child, prefix + name + '.' + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any((s.startswith('bert.') for s in state_dict.keys())):
start_prefix = 'bert.'
local_metadata = {} if metadata is None else metadata.get(start_prefix[:-1], {})
model._load_from_state_dict(state_dict, start_prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for (name, child) in model._modules.items():
if child is not None:
load(child, start_prefix + name + '.')
if len(missing_keys) > 0:
logger.info('Weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info('Weights from pretrained model not used in {}: {}'.format(model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(model.__class__.__name__, '\n\t'.join(error_msgs)))
return model
|
Chinese-clinical-NER
|
positive
|
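from_pretrained above migrates legacy checkpoint keys by renaming gamma to weight and beta to bias before loading the state dict. A minimal sketch of that key migration on a plain OrderedDict; the demo keys are made up.

from collections import OrderedDict

def rename_legacy_keys(state_dict):
    # Old TF-style LayerNorm parameter names -> the PyTorch names expected by the model.
    renamed = OrderedDict()
    for key, value in state_dict.items():
        renamed[key.replace("gamma", "weight").replace("beta", "bias")] = value
    return renamed

if __name__ == "__main__":
    legacy = OrderedDict([
        ("encoder.layer.0.LayerNorm.gamma", [1.0]),
        ("encoder.layer.0.LayerNorm.beta", [0.0]),
        ("encoder.layer.0.attention.weight", [0.5]),
    ])
    print(list(rename_legacy_keys(legacy)))
    # ['encoder.layer.0.LayerNorm.weight', 'encoder.layer.0.LayerNorm.bias', 'encoder.layer.0.attention.weight']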
def __init__(self, n_inputs=None, n_outputs=None, input_shape=None, n_bypass=0, density='mog', n_hiddens=(10, 10), impute_missing=True, seed=None, n_filters=(), filter_sizes=3, pool_sizes=2, n_rnn=0, **density_opts):
"""Initialize a mixture density network with custom layers
Parameters
----------
n_inputs : int
Total input dimensionality (data/summary stats)
n_outputs : int
Dimensionality of output (simulator parameters)
input_shape : tuple
Size to which data are reshaped before CNN or RNN
n_bypass : int
Number of elements at end of input which bypass CNN or RNN
density : string
Type of density condition on the network, can be 'mog' or 'maf'
n_components : int
Number of components of the mixture density
n_filters : list of ints
Number of filters per convolutional layer
n_hiddens : list of ints
Number of hidden units per fully connected layer
n_rnn : None or int
Number of RNN units
impute_missing : bool
If set to True, learns replacement value for NaNs, otherwise those
inputs are set to zero
seed : int or None
If provided, random number generator will be seeded
density_opts : dict
Options for the density estimator
"""
if n_rnn > 0 and len(n_filters) > 0:
raise NotImplementedError
assert isint(n_inputs) and isint(n_outputs) and (n_inputs > 0) and (n_outputs > 0)
self.density = density.lower()
self.impute_missing = impute_missing
self.n_hiddens = list(n_hiddens)
(self.n_outputs, self.n_inputs) = (n_outputs, n_inputs)
self.n_bypass = n_bypass
self.n_rnn = n_rnn
(self.n_filters, self.filter_sizes, self.pool_sizes, n_cnn) = (list(n_filters), filter_sizes, pool_sizes, len(n_filters))
if type(self.filter_sizes) is int:
self.filter_sizes = [self.filter_sizes for _ in range(n_cnn)]
else:
assert len(self.filter_sizes) >= n_cnn
if type(self.pool_sizes) is int:
self.pool_sizes = [self.pool_sizes for _ in range(n_cnn)]
else:
assert len(self.pool_sizes) >= n_cnn
self.iws = tt.vector('iws', dtype=dtype)
self.seed = seed
if seed is not None:
self.rng = np.random.RandomState(seed=seed)
else:
self.rng = np.random.RandomState()
lasagne.random.set_rng(self.rng)
self.input_shape = (n_inputs,) if input_shape is None else input_shape
assert np.prod(self.input_shape) + self.n_bypass == self.n_inputs
assert 1 <= len(self.input_shape) <= 3
self.params = tensorN(2, name='params', dtype=dtype)
self.stats = tensorN(2, name='stats', dtype=dtype)
self.layer = collections.OrderedDict()
self.layer['input'] = ll.InputLayer((None, self.n_inputs), input_var=self.stats)
if self.impute_missing:
self.layer['missing'] = dl.ImputeMissingLayer(last(self.layer), n_inputs=(self.n_inputs,))
else:
self.layer['missing'] = dl.ReplaceMissingLayer(last(self.layer), n_inputs=(self.n_inputs,))
if self.n_bypass > 0 and (self.n_rnn > 0 or n_cnn > 0):
last_layer = last(self.layer)
bypass_slice = slice(self.n_inputs - self.n_bypass, self.n_inputs)
direct_slice = slice(0, self.n_inputs - self.n_bypass)
self.layer['bypass'] = ll.SliceLayer(last_layer, bypass_slice)
self.layer['direct'] = ll.SliceLayer(last_layer, direct_slice)
if self.n_rnn > 0 or n_cnn > 0:
if len(n_filters) > 0 and len(self.input_shape) == 2:
rs = (-1, 1, *self.input_shape)
else:
if self.n_rnn > 0:
assert len(self.input_shape) == 2
else:
assert len(self.input_shape) == 3
rs = (-1, *self.input_shape)
self.layer['reshape'] = ll.ReshapeLayer(last(self.layer), rs)
if self.n_rnn > 0:
self.layer['rnn'] = ll.GRULayer(last(self.layer), n_rnn, only_return_final=True)
if n_cnn > 0:
for l in range(n_cnn):
if self.pool_sizes[l] == 1:
padding = (self.filter_sizes[l] - 1) // 2
else:
padding = 0
self.layer['conv_' + str(l + 1)] = ll.Conv2DLayer(name='c' + str(l + 1), incoming=last(self.layer), num_filters=self.n_filters[l], filter_size=self.filter_sizes[l], stride=(1, 1), pad=padding, untie_biases=False, W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.0), nonlinearity=lnl.rectify, flip_filters=True, convolution=tt.nnet.conv2d)
if self.pool_sizes[l] > 1:
self.layer['pool_' + str(l + 1)] = ll.MaxPool2DLayer(name='p' + str(l + 1), incoming=last(self.layer), pool_size=self.pool_sizes[l], stride=None, ignore_border=True)
self.layer['flatten'] = ll.FlattenLayer(incoming=last(self.layer), outdim=2)
if self.n_bypass > 0 and (self.n_rnn > 0 or n_cnn > 0):
self.layer['bypass_merge'] = lasagne.layers.ConcatLayer([self.layer['bypass'], last(self.layer)], axis=1)
if self.density == 'mog':
<DeepExtract>
(self.svi, self.n_components, self.rank, self.mdn_actfun, self.homoscedastic, self.min_precisions) = (svi, n_components, rank, mdn_actfun, homoscedastic, min_precisions)
for key in unused_kwargs.keys():
print('MDN ignoring unused input {0}'.format(key))
for l in range(len(self.n_hiddens)):
self.layer['hidden_' + str(l + 1)] = dl.FullyConnectedLayer(last(self.layer), n_units=self.n_hiddens[l], actfun=self.mdn_actfun, svi=self.svi, name='h' + str(l + 1))
last_hidden = last(self.layer)
self.layer['mixture_weights'] = dl.MixtureWeightsLayer(last_hidden, n_units=self.n_components, actfun=lnl.softmax, svi=self.svi, name='weights')
self.layer['mixture_means'] = dl.MixtureMeansLayer(last_hidden, n_components=self.n_components, n_dim=self.n_outputs, svi=self.svi, name='means')
if self.homoscedastic:
PrecisionsLayer = dl.MixtureHomoscedasticPrecisionsLayer
else:
PrecisionsLayer = dl.MixturePrecisionsLayer
self.layer['mixture_precisions'] = PrecisionsLayer(last_hidden, n_components=self.n_components, n_dim=self.n_outputs, svi=self.svi, name='precisions', rank=self.rank, homoscedastic=self.homoscedastic, min_precisions=min_precisions)
last_mog = [self.layer['mixture_weights'], self.layer['mixture_means'], self.layer['mixture_precisions']]
(self.a, self.ms, precision_out) = ll.get_output(last_mog, deterministic=False)
self.Us = precision_out['Us']
self.ldetUs = precision_out['ldetUs']
self.comps = {**{'a': self.a}, **{'m' + str(i): self.ms[i] for i in range(self.n_components)}, **{'U' + str(i): self.Us[i] for i in range(self.n_components)}}
self.lprobs_comps = [-0.5 * tt.sum(tt.sum((self.params - m).dimshuffle([0, 'x', 1]) * U, axis=2) ** 2, axis=1) + ldetU for (m, U, ldetU) in zip(self.ms, self.Us, self.ldetUs)]
self.lprobs = (MyLogSumExp(tt.stack(self.lprobs_comps, axis=1) + tt.log(self.a), axis=1) - 0.5 * self.n_outputs * np.log(2 * np.pi)).squeeze()
(self.da, self.dms, dprecision_out) = ll.get_output(last_mog, deterministic=True)
self.dUs = dprecision_out['Us']
self.dldetUs = dprecision_out['ldetUs']
self.dcomps = {**{'a': self.da}, **{'m' + str(i): self.dms[i] for i in range(self.n_components)}, **{'U' + str(i): self.dUs[i] for i in range(self.n_components)}}
self.dlprobs_comps = [-0.5 * tt.sum(tt.sum((self.params - m).dimshuffle([0, 'x', 1]) * U, axis=2) ** 2, axis=1) + ldetU for (m, U, ldetU) in zip(self.dms, self.dUs, self.dldetUs)]
self.dlprobs = (MyLogSumExp(tt.stack(self.dlprobs_comps, axis=1) + tt.log(self.da), axis=1) - 0.5 * self.n_outputs * np.log(2 * np.pi)).squeeze()
self.aps = ll.get_all_params(last_mog)
self.mps = ll.get_all_params(last_mog, mp=True)
self.sps = ll.get_all_params(last_mog, sp=True)
self.mps_wp = ll.get_all_params(last_mog, mp=True, wp=True)
self.sps_wp = ll.get_all_params(last_mog, sp=True, wp=True)
self.mps_bp = ll.get_all_params(last_mog, mp=True, bp=True)
self.sps_bp = ll.get_all_params(last_mog, sp=True, bp=True)
</DeepExtract>
elif self.density == 'maf':
<DeepExtract>
if batch_norm:
raise NotImplementedError
(self.n_mades, self.batch_norm, self.output_order, self.maf_mode) = (n_mades, batch_norm, output_order, maf_mode)
self.maf_actfun = maf_actfun
for key in unused_kwargs.keys():
print('CMAF ignoring unused input {0}'.format(key))
self.maf_input = ll.get_output(last(self.layer))
prev_params = ll.get_all_params(last(self.layer))
input_shape_cmaf = last(self.layer).output_shape
assert len(input_shape_cmaf) == 2
n_inputs_cmaf = input_shape_cmaf[1]
rng_maf = np.random.RandomState(seed=self.gen_newseed())
self.cmaf = ConditionalMaskedAutoregressiveFlow(n_inputs=n_inputs_cmaf, n_outputs=self.n_outputs, n_hiddens=self.n_hiddens, act_fun=self.maf_actfun, n_mades=self.n_mades, batch_norm=self.batch_norm, output_order=self.output_order, mode=self.maf_mode, input=self.maf_input, output=self.params, rng=rng_maf)
self.aps = prev_params + self.cmaf.parms
self.lprobs = self.cmaf.L
self.dlprobs = self.lprobs
</DeepExtract>
else:
raise NotImplementedError
<DeepExtract>
self._f_eval_lprobs = theano.function(inputs=[self.params, self.stats], outputs=self.lprobs)
self._f_eval_dlprobs = theano.function(inputs=[self.params, self.stats], outputs=self.dlprobs)
if self.density == 'mog':
self._f_eval_comps = theano.function(inputs=[self.stats], outputs=self.comps)
self._f_eval_dcomps = theano.function(inputs=[self.stats], outputs=self.dcomps)
elif self.density == 'maf':
self._f_eval_maf_input = theano.function(inputs=[self.stats], outputs=self.maf_input)
</DeepExtract>
|
def __init__(self, n_inputs=None, n_outputs=None, input_shape=None, n_bypass=0, density='mog', n_hiddens=(10, 10), impute_missing=True, seed=None, n_filters=(), filter_sizes=3, pool_sizes=2, n_rnn=0, **density_opts):
"""Initialize a mixture density network with custom layers
Parameters
----------
n_inputs : int
Total input dimensionality (data/summary stats)
n_outputs : int
Dimensionality of output (simulator parameters)
input_shape : tuple
Size to which data are reshaped before CNN or RNN
n_bypass : int
Number of elements at end of input which bypass CNN or RNN
density : string
Type of density condition on the network, can be 'mog' or 'maf'
n_components : int
Number of components of the mixture density
n_filters : list of ints
Number of filters per convolutional layer
n_hiddens : list of ints
Number of hidden units per fully connected layer
n_rnn : None or int
Number of RNN units
impute_missing : bool
If set to True, learns replacement value for NaNs, otherwise those
inputs are set to zero
seed : int or None
If provided, random number generator will be seeded
density_opts : dict
Options for the density estimator
"""
if n_rnn > 0 and len(n_filters) > 0:
raise NotImplementedError
assert isint(n_inputs) and isint(n_outputs) and (n_inputs > 0) and (n_outputs > 0)
self.density = density.lower()
self.impute_missing = impute_missing
self.n_hiddens = list(n_hiddens)
(self.n_outputs, self.n_inputs) = (n_outputs, n_inputs)
self.n_bypass = n_bypass
self.n_rnn = n_rnn
(self.n_filters, self.filter_sizes, self.pool_sizes, n_cnn) = (list(n_filters), filter_sizes, pool_sizes, len(n_filters))
if type(self.filter_sizes) is int:
self.filter_sizes = [self.filter_sizes for _ in range(n_cnn)]
else:
assert len(self.filter_sizes) >= n_cnn
if type(self.pool_sizes) is int:
self.pool_sizes = [self.pool_sizes for _ in range(n_cnn)]
else:
assert len(self.pool_sizes) >= n_cnn
self.iws = tt.vector('iws', dtype=dtype)
self.seed = seed
if seed is not None:
self.rng = np.random.RandomState(seed=seed)
else:
self.rng = np.random.RandomState()
lasagne.random.set_rng(self.rng)
self.input_shape = (n_inputs,) if input_shape is None else input_shape
assert np.prod(self.input_shape) + self.n_bypass == self.n_inputs
assert 1 <= len(self.input_shape) <= 3
self.params = tensorN(2, name='params', dtype=dtype)
self.stats = tensorN(2, name='stats', dtype=dtype)
self.layer = collections.OrderedDict()
self.layer['input'] = ll.InputLayer((None, self.n_inputs), input_var=self.stats)
if self.impute_missing:
self.layer['missing'] = dl.ImputeMissingLayer(last(self.layer), n_inputs=(self.n_inputs,))
else:
self.layer['missing'] = dl.ReplaceMissingLayer(last(self.layer), n_inputs=(self.n_inputs,))
if self.n_bypass > 0 and (self.n_rnn > 0 or n_cnn > 0):
last_layer = last(self.layer)
bypass_slice = slice(self.n_inputs - self.n_bypass, self.n_inputs)
direct_slice = slice(0, self.n_inputs - self.n_bypass)
self.layer['bypass'] = ll.SliceLayer(last_layer, bypass_slice)
self.layer['direct'] = ll.SliceLayer(last_layer, direct_slice)
if self.n_rnn > 0 or n_cnn > 0:
if len(n_filters) > 0 and len(self.input_shape) == 2:
rs = (-1, 1, *self.input_shape)
else:
if self.n_rnn > 0:
assert len(self.input_shape) == 2
else:
assert len(self.input_shape) == 3
rs = (-1, *self.input_shape)
self.layer['reshape'] = ll.ReshapeLayer(last(self.layer), rs)
if self.n_rnn > 0:
self.layer['rnn'] = ll.GRULayer(last(self.layer), n_rnn, only_return_final=True)
if n_cnn > 0:
for l in range(n_cnn):
if self.pool_sizes[l] == 1:
padding = (self.filter_sizes[l] - 1) // 2
else:
padding = 0
self.layer['conv_' + str(l + 1)] = ll.Conv2DLayer(name='c' + str(l + 1), incoming=last(self.layer), num_filters=self.n_filters[l], filter_size=self.filter_sizes[l], stride=(1, 1), pad=padding, untie_biases=False, W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.0), nonlinearity=lnl.rectify, flip_filters=True, convolution=tt.nnet.conv2d)
if self.pool_sizes[l] > 1:
self.layer['pool_' + str(l + 1)] = ll.MaxPool2DLayer(name='p' + str(l + 1), incoming=last(self.layer), pool_size=self.pool_sizes[l], stride=None, ignore_border=True)
self.layer['flatten'] = ll.FlattenLayer(incoming=last(self.layer), outdim=2)
if self.n_bypass > 0 and (self.n_rnn > 0 or n_cnn > 0):
self.layer['bypass_merge'] = lasagne.layers.ConcatLayer([self.layer['bypass'], last(self.layer)], axis=1)
if self.density == 'mog':
(self.svi, self.n_components, self.rank, self.mdn_actfun, self.homoscedastic, self.min_precisions) = (svi, n_components, rank, mdn_actfun, homoscedastic, min_precisions)
for key in unused_kwargs.keys():
print('MDN ignoring unused input {0}'.format(key))
for l in range(len(self.n_hiddens)):
self.layer['hidden_' + str(l + 1)] = dl.FullyConnectedLayer(last(self.layer), n_units=self.n_hiddens[l], actfun=self.mdn_actfun, svi=self.svi, name='h' + str(l + 1))
last_hidden = last(self.layer)
self.layer['mixture_weights'] = dl.MixtureWeightsLayer(last_hidden, n_units=self.n_components, actfun=lnl.softmax, svi=self.svi, name='weights')
self.layer['mixture_means'] = dl.MixtureMeansLayer(last_hidden, n_components=self.n_components, n_dim=self.n_outputs, svi=self.svi, name='means')
if self.homoscedastic:
PrecisionsLayer = dl.MixtureHomoscedasticPrecisionsLayer
else:
PrecisionsLayer = dl.MixturePrecisionsLayer
self.layer['mixture_precisions'] = PrecisionsLayer(last_hidden, n_components=self.n_components, n_dim=self.n_outputs, svi=self.svi, name='precisions', rank=self.rank, homoscedastic=self.homoscedastic, min_precisions=min_precisions)
last_mog = [self.layer['mixture_weights'], self.layer['mixture_means'], self.layer['mixture_precisions']]
(self.a, self.ms, precision_out) = ll.get_output(last_mog, deterministic=False)
self.Us = precision_out['Us']
self.ldetUs = precision_out['ldetUs']
self.comps = {**{'a': self.a}, **{'m' + str(i): self.ms[i] for i in range(self.n_components)}, **{'U' + str(i): self.Us[i] for i in range(self.n_components)}}
self.lprobs_comps = [-0.5 * tt.sum(tt.sum((self.params - m).dimshuffle([0, 'x', 1]) * U, axis=2) ** 2, axis=1) + ldetU for (m, U, ldetU) in zip(self.ms, self.Us, self.ldetUs)]
self.lprobs = (MyLogSumExp(tt.stack(self.lprobs_comps, axis=1) + tt.log(self.a), axis=1) - 0.5 * self.n_outputs * np.log(2 * np.pi)).squeeze()
(self.da, self.dms, dprecision_out) = ll.get_output(last_mog, deterministic=True)
self.dUs = dprecision_out['Us']
self.dldetUs = dprecision_out['ldetUs']
self.dcomps = {**{'a': self.da}, **{'m' + str(i): self.dms[i] for i in range(self.n_components)}, **{'U' + str(i): self.dUs[i] for i in range(self.n_components)}}
self.dlprobs_comps = [-0.5 * tt.sum(tt.sum((self.params - m).dimshuffle([0, 'x', 1]) * U, axis=2) ** 2, axis=1) + ldetU for (m, U, ldetU) in zip(self.dms, self.dUs, self.dldetUs)]
self.dlprobs = (MyLogSumExp(tt.stack(self.dlprobs_comps, axis=1) + tt.log(self.da), axis=1) - 0.5 * self.n_outputs * np.log(2 * np.pi)).squeeze()
self.aps = ll.get_all_params(last_mog)
self.mps = ll.get_all_params(last_mog, mp=True)
self.sps = ll.get_all_params(last_mog, sp=True)
self.mps_wp = ll.get_all_params(last_mog, mp=True, wp=True)
self.sps_wp = ll.get_all_params(last_mog, sp=True, wp=True)
self.mps_bp = ll.get_all_params(last_mog, mp=True, bp=True)
self.sps_bp = ll.get_all_params(last_mog, sp=True, bp=True)
elif self.density == 'maf':
if batch_norm:
raise NotImplementedError
(self.n_mades, self.batch_norm, self.output_order, self.maf_mode) = (n_mades, batch_norm, output_order, maf_mode)
self.maf_actfun = maf_actfun
for key in unused_kwargs.keys():
print('CMAF ignoring unused input {0}'.format(key))
self.maf_input = ll.get_output(last(self.layer))
prev_params = ll.get_all_params(last(self.layer))
input_shape_cmaf = last(self.layer).output_shape
assert len(input_shape_cmaf) == 2
n_inputs_cmaf = input_shape_cmaf[1]
rng_maf = np.random.RandomState(seed=self.gen_newseed())
self.cmaf = ConditionalMaskedAutoregressiveFlow(n_inputs=n_inputs_cmaf, n_outputs=self.n_outputs, n_hiddens=self.n_hiddens, act_fun=self.maf_actfun, n_mades=self.n_mades, batch_norm=self.batch_norm, output_order=self.output_order, mode=self.maf_mode, input=self.maf_input, output=self.params, rng=rng_maf)
self.aps = prev_params + self.cmaf.parms
self.lprobs = self.cmaf.L
self.dlprobs = self.lprobs
else:
raise NotImplementedError
self._f_eval_lprobs = theano.function(inputs=[self.params, self.stats], outputs=self.lprobs)
self._f_eval_dlprobs = theano.function(inputs=[self.params, self.stats], outputs=self.dlprobs)
if self.density == 'mog':
self._f_eval_comps = theano.function(inputs=[self.stats], outputs=self.comps)
self._f_eval_dcomps = theano.function(inputs=[self.stats], outputs=self.dcomps)
elif self.density == 'maf':
self._f_eval_maf_input = theano.function(inputs=[self.stats], outputs=self.maf_input)
|
delfi
|
positive
|
def _data_response(request, query, media, privileged=False, strict=False):
orm_models = get_models(request)
if query.model_name not in orm_models:
raise http.Http404(f'{query.model_name} does not exist')
bound_query = BoundQuery.bind(query, orm_models)
if strict and (not all((f.is_valid for f in bound_query.filters))):
return http.HttpResponseBadRequest()
if media == 'csv':
results = get_results(request, bound_query, orm_models, False)
csv_rows = get_csv_rows(bound_query, results)
writer = csv.writer(Echo())
response = http.StreamingHttpResponse((writer.writerow(row) for row in csv_rows), content_type='text/csv')
response['Content-Disposition'] = f'attachment; filename={query.model_name}-{timezone.now().isoformat()}.csv'
return response
elif media == 'json':
results = get_results(request, bound_query, orm_models, True)
resp = _get_query_data(bound_query) if privileged else {}
resp.update(results)
return JsonResponse(resp)
elif privileged and media == 'query':
<DeepExtract>
resp = {'filters': [{'pathStr': filter_.path_str, 'lookup': filter_.lookup, 'value': filter_.value} for filter_ in bound_query.filters], 'filterErrors': [filter_.err_message for filter_ in bound_query.filters], 'parsedFilterValues': [filter_.formatted_value() for filter_ in bound_query.filters], 'fields': [{'pathStr': field.path_str, 'sort': field.direction, 'priority': field.priority, 'pivoted': field.pivoted} for field in bound_query.fields], 'model': bound_query.model_name, 'limit': bound_query.limit}
</DeepExtract>
return JsonResponse(resp)
elif privileged and media in ['sql', 'explain', 'qs']:
query_set = get_result_queryset(request, bound_query, media == 'qs')
if isinstance(query_set, list):
res = 'Not available for pure aggregates'
elif media == 'sql':
res = '/* This is an approximation of the main query.\nPages with pivoted or calculated data may do additional queries. */\n\n'
res += sqlparse.format(str(query_set.query), reindent=True, keyword_case='upper')
elif media == 'explain':
res = query_set.explain()
elif media == 'qs':
res = '# This is an approximation of the main queryset.\n# Pages with pivoted or calculated data may do additional queries.\n\n'
res += str(query_set)
else:
assert False
return HttpResponse(res, content_type='text/plain')
else:
raise http.Http404(f'Bad file format {media} requested')
|
def _data_response(request, query, media, privileged=False, strict=False):
orm_models = get_models(request)
if query.model_name not in orm_models:
raise http.Http404(f'{query.model_name} does not exist')
bound_query = BoundQuery.bind(query, orm_models)
if strict and (not all((f.is_valid for f in bound_query.filters))):
return http.HttpResponseBadRequest()
if media == 'csv':
results = get_results(request, bound_query, orm_models, False)
csv_rows = get_csv_rows(bound_query, results)
writer = csv.writer(Echo())
response = http.StreamingHttpResponse((writer.writerow(row) for row in csv_rows), content_type='text/csv')
response['Content-Disposition'] = f'attachment; filename={query.model_name}-{timezone.now().isoformat()}.csv'
return response
elif media == 'json':
results = get_results(request, bound_query, orm_models, True)
resp = _get_query_data(bound_query) if privileged else {}
resp.update(results)
return JsonResponse(resp)
elif privileged and media == 'query':
resp = {'filters': [{'pathStr': filter_.path_str, 'lookup': filter_.lookup, 'value': filter_.value} for filter_ in bound_query.filters], 'filterErrors': [filter_.err_message for filter_ in bound_query.filters], 'parsedFilterValues': [filter_.formatted_value() for filter_ in bound_query.filters], 'fields': [{'pathStr': field.path_str, 'sort': field.direction, 'priority': field.priority, 'pivoted': field.pivoted} for field in bound_query.fields], 'model': bound_query.model_name, 'limit': bound_query.limit}
return JsonResponse(resp)
elif privileged and media in ['sql', 'explain', 'qs']:
query_set = get_result_queryset(request, bound_query, media == 'qs')
if isinstance(query_set, list):
res = 'Not available for pure aggregates'
elif media == 'sql':
res = '/* This is an approximation of the main query.\nPages with pivoted or calculated data may do additional queries. */\n\n'
res += sqlparse.format(str(query_set.query), reindent=True, keyword_case='upper')
elif media == 'explain':
res = query_set.explain()
elif media == 'qs':
res = '# This is an approximation of the main queryset.\n# Pages with pivoted or calculated data may do additional queries.\n\n'
res += str(query_set)
else:
assert False
return HttpResponse(res, content_type='text/plain')
else:
raise http.Http404(f'Bad file format {media} requested')
|
django-data-browser
|
positive
|
def do_for_file(self, file_path):
try:
self.log.info('Extracting to {0}'.format(self.OUT_DIR))
<DeepExtract>
os.system('tar -C {0} -xzf {1}'.format(self.OUT_DIR, file_path))
</DeepExtract>
self.log.info('Archiving.')
self.archive_file(file_path)
except:
self.log.warning('Something went wrong with the file extraction or archival. Rejecting {0}'.format(os.path.basename(file_path)))
self.reject_file(file_path)
|
def do_for_file(self, file_path):
try:
self.log.info('Extracting to {0}'.format(self.OUT_DIR))
os.system('tar -C {0} -xzf {1}'.format(self.OUT_DIR, file_path))
self.log.info('Archiving.')
self.archive_file(file_path)
except:
self.log.warning('Something went wrong with the file extraction or archival. Rejecting {0}'.format(os.path.basename(file_path)))
self.reject_file(file_path)
|
datacommons
|
positive
|
def get_track_name(self):
<DeepExtract>
if not self.metadata:
self.metadata = self.proxy.get_property('metadata')
if self.metadata:
try:
self.track_name = self.return_best_string(self.metadata['xesam:title'])
except KeyError:
self.track_name = ''
try:
self.album_name = self.return_best_string(self.metadata['xesam:album'])
except KeyError:
self.album_name = ''
try:
self.artist_name = self.return_best_string(self.metadata['xesam:albumArtist'])
except KeyError:
try:
self.artist_name = self.return_best_string(self.metadata['xesam:artist'])
except:
self.artist_name = ''
try:
self.albumart_url = self.return_best_string(self.metadata['mpris:artUrl'])
except KeyError:
self.albumart_url = ''
</DeepExtract>
return self.track_name
|
def get_track_name(self):
if not self.metadata:
self.metadata = self.proxy.get_property('metadata')
if self.metadata:
try:
self.track_name = self.return_best_string(self.metadata['xesam:title'])
except KeyError:
self.track_name = ''
try:
self.album_name = self.return_best_string(self.metadata['xesam:album'])
except KeyError:
self.album_name = ''
try:
self.artist_name = self.return_best_string(self.metadata['xesam:albumArtist'])
except KeyError:
try:
self.artist_name = self.return_best_string(self.metadata['xesam:artist'])
except:
self.artist_name = ''
try:
self.albumart_url = self.return_best_string(self.metadata['mpris:artUrl'])
except KeyError:
self.albumart_url = ''
return self.track_name
|
cinnamon-screensaver
|
positive
|
def matches(self, t, dummy=False):
<DeepExtract>
if not isinstance(t, Textbound):
m = False
self_equiv_entities = [self]
other_equiv_entities = [t]
if self.equivs is not None:
self_equiv_entities += self.equivs
if t.equivs is not None:
other_equiv_entities += t.equivs
for se in self_equiv_entities:
for te in other_equiv_entities:
if se.matches_self(te):
m = True
m = False
</DeepExtract>
if m and t is not self and (t not in self.matching):
self.matching.append(t)
return m
|
def matches(self, t, dummy=False):
if not isinstance(t, Textbound):
m = False
self_equiv_entities = [self]
other_equiv_entities = [t]
if self.equivs is not None:
self_equiv_entities += self.equivs
if t.equivs is not None:
other_equiv_entities += t.equivs
for se in self_equiv_entities:
for te in other_equiv_entities:
if se.matches_self(te):
m = True
m = False
if m and t is not self and (t not in self.matching):
self.matching.append(t)
return m
|
DeepEventMine
|
positive
|
@pytest.mark.skipif(subprocess.Popen('type singularity', shell=True).wait(), reason='Singularity not installed')
def test_example1_exec_singularity(self):
<DeepExtract>
fls = os.listdir('./')
for fl in fls:
if (fl.startswith('log') or fl.startswith('config')) and fl.endswith('.txt'):
os.remove(fl)
</DeepExtract>
self.assert_successful_return(bosh.execute('launch', self.get_file_path('example1_sing.json'), self.get_file_path('invocation_sing.json'), '--skip-data-collection', '-v', '{}:/test_mount1'.format(self.get_file_path('example1_mount1')), '-v', '{}:/test_mount2'.format(self.get_file_path('example1_mount2'))), ['log-4.txt'], 2, self.assert_reflected_output)
<DeepExtract>
fls = os.listdir('./')
for fl in fls:
if (fl.startswith('log') or fl.startswith('config')) and fl.endswith('.txt'):
os.remove(fl)
</DeepExtract>
self.assert_successful_return(bosh.execute('launch', self.get_file_path('example1_sing.json'), '-x', self.get_file_path('invocation_sing.json'), '-v', '{}:/test_mount1'.format(self.get_file_path('example1_mount1')), '-v', '{}:/test_mount2'.format(self.get_file_path('example1_mount2'))), ['log-4.txt'], 2, self.assert_reflected_output)
|
@pytest.mark.skipif(subprocess.Popen('type singularity', shell=True).wait(), reason='Singularity not installed')
def test_example1_exec_singularity(self):
fls = os.listdir('./')
for fl in fls:
if (fl.startswith('log') or fl.startswith('config')) and fl.endswith('.txt'):
os.remove(fl)
self.assert_successful_return(bosh.execute('launch', self.get_file_path('example1_sing.json'), self.get_file_path('invocation_sing.json'), '--skip-data-collection', '-v', '{}:/test_mount1'.format(self.get_file_path('example1_mount1')), '-v', '{}:/test_mount2'.format(self.get_file_path('example1_mount2'))), ['log-4.txt'], 2, self.assert_reflected_output)
fls = os.listdir('./')
for fl in fls:
if (fl.startswith('log') or fl.startswith('config')) and fl.endswith('.txt'):
os.remove(fl)
self.assert_successful_return(bosh.execute('launch', self.get_file_path('example1_sing.json'), '-x', self.get_file_path('invocation_sing.json'), '-v', '{}:/test_mount1'.format(self.get_file_path('example1_mount1')), '-v', '{}:/test_mount2'.format(self.get_file_path('example1_mount2'))), ['log-4.txt'], 2, self.assert_reflected_output)
|
boutiques
|
positive
|
@pytest.mark.parametrize('name', ['paired', 'unpaired', 'grouped'])
def test_close(self, name):
<DeepExtract>
if name in ['paired', 'unpaired', 'grouped']:
dir_paths = [f'./data/test/h5/{name}/test']
name = 'fixed_images' if name == 'paired' else 'images'
grouped = name == 'grouped'
elif name == 'multi_dirs_grouped':
dir_paths = ['./data/test/h5/grouped/train', './data/test/h5/grouped/test']
name = 'images'
grouped = True
else:
raise ValueError
loader = H5FileLoader(dir_paths=dir_paths, name=name, grouped=grouped)
loader = loader
</DeepExtract>
loader.close()
for f in loader.h5_files.values():
assert not f.__bool__()
|
@pytest.mark.parametrize('name', ['paired', 'unpaired', 'grouped'])
def test_close(self, name):
if name in ['paired', 'unpaired', 'grouped']:
dir_paths = [f'./data/test/h5/{name}/test']
name = 'fixed_images' if name == 'paired' else 'images'
grouped = name == 'grouped'
elif name == 'multi_dirs_grouped':
dir_paths = ['./data/test/h5/grouped/train', './data/test/h5/grouped/test']
name = 'images'
grouped = True
else:
raise ValueError
loader = H5FileLoader(dir_paths=dir_paths, name=name, grouped=grouped)
loader = loader
loader.close()
for f in loader.h5_files.values():
assert not f.__bool__()
|
DeepReg
|
positive
|
def forward(self, query, key, value, mask=None):
"""Implements Figure 2"""
if mask is not None:
mask = mask.unsqueeze(1)
nbatches = query.size(0)
(query, key, value) = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for (l, x) in zip(self.linears, (query, key, value))]
<DeepExtract>
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask.to(device) if type(mask) != type(None) else mask is not None:
scores = scores.masked_fill(mask.to(device) if type(mask) != type(None) else mask == 0, -1000000000.0)
p_attn = F.softmax(scores, dim=-1)
if self.dropout is not None:
p_attn = self.dropout(p_attn)
(x, self.attn) = (torch.matmul(p_attn, value), p_attn)
</DeepExtract>
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
|
def forward(self, query, key, value, mask=None):
"""Implements Figure 2"""
if mask is not None:
mask = mask.unsqueeze(1)
nbatches = query.size(0)
(query, key, value) = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for (l, x) in zip(self.linears, (query, key, value))]
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask.to(device) if type(mask) != type(None) else mask is not None:
scores = scores.masked_fill(mask.to(device) if type(mask) != type(None) else mask == 0, -1000000000.0)
p_attn = F.softmax(scores, dim=-1)
if self.dropout is not None:
p_attn = self.dropout(p_attn)
(x, self.attn) = (torch.matmul(p_attn, value), p_attn)
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
|
DuaLUG
|
positive
|
def dst(self, dt):
if dt is None or dt.tzinfo is None:
return ZERO
assert dt.tzinfo is self
<DeepExtract>
days_to_go = 6 - DSTSTART.replace(year=dt.year).weekday()
if days_to_go:
DSTSTART.replace(year=dt.year) += timedelta(days_to_go)
start = DSTSTART.replace(year=dt.year)
</DeepExtract>
<DeepExtract>
days_to_go = 6 - DSTEND.replace(year=dt.year).weekday()
if days_to_go:
DSTEND.replace(year=dt.year) += timedelta(days_to_go)
end = DSTEND.replace(year=dt.year)
</DeepExtract>
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
|
def dst(self, dt):
if dt is None or dt.tzinfo is None:
return ZERO
assert dt.tzinfo is self
days_to_go = 6 - DSTSTART.replace(year=dt.year).weekday()
if days_to_go:
DSTSTART.replace(year=dt.year) += timedelta(days_to_go)
start = DSTSTART.replace(year=dt.year)
days_to_go = 6 - DSTEND.replace(year=dt.year).weekday()
if days_to_go:
DSTEND.replace(year=dt.year) += timedelta(days_to_go)
end = DSTEND.replace(year=dt.year)
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
|
blue-channel
|
positive
|
def gds_validate_decimal_list(self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
decimal_.Decimal(value)
except (TypeError, ValueError):
<DeepExtract>
if node is not None:
'Requires sequence of decimal values' = '%s (element %s/line %d)' % ('Requires sequence of decimal values', node.tag, node.sourceline)
raise GDSParseError('Requires sequence of decimal values')
</DeepExtract>
return values
|
def gds_validate_decimal_list(self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
decimal_.Decimal(value)
except (TypeError, ValueError):
if node is not None:
'Requires sequence of decimal values' = '%s (element %s/line %d)' % ('Requires sequence of decimal values', node.tag, node.sourceline)
raise GDSParseError('Requires sequence of decimal values')
return values
|
autopkg
|
positive
|
def save(self):
"""Saves node to database"""
conn = self.connect_db()
if self.node_id != self.__original__['node_id']:
raise ValueError('One cannot change the node_id')
if self.__new:
(data, sql) = self._save_new_with_geometry()
else:
<DeepExtract>
data = []
txts = []
for (key, val) in self.__dict__.items():
if key not in self.__original__:
continue
if val != self.__original__[key]:
if key == 'geometry' and val is not None:
data.append(val.wkb)
txts.append(f'geometry=GeomFromWKB(?, {self.__srid__})')
else:
data.append(val)
txts.append(f'"{key}"=?')
if not data:
self._logger.warning(f'Nothing to update for node {self.node_id}')
(data, sql) = ([], '')
txts = ','.join(txts) + ' where node_id=?'
data.append(self.node_id)
sql = f'Update Nodes set {txts}'
(data, sql) = (data, sql)
</DeepExtract>
if data:
conn.execute(sql, data)
conn.commit()
conn.close()
self.__new = False
|
def save(self):
"""Saves node to database"""
conn = self.connect_db()
if self.node_id != self.__original__['node_id']:
raise ValueError('One cannot change the node_id')
if self.__new:
(data, sql) = self._save_new_with_geometry()
else:
data = []
txts = []
for (key, val) in self.__dict__.items():
if key not in self.__original__:
continue
if val != self.__original__[key]:
if key == 'geometry' and val is not None:
data.append(val.wkb)
txts.append(f'geometry=GeomFromWKB(?, {self.__srid__})')
else:
data.append(val)
txts.append(f'"{key}"=?')
if not data:
self._logger.warning(f'Nothing to update for node {self.node_id}')
(data, sql) = ([], '')
txts = ','.join(txts) + ' where node_id=?'
data.append(self.node_id)
sql = f'Update Nodes set {txts}'
(data, sql) = (data, sql)
if data:
conn.execute(sql, data)
conn.commit()
conn.close()
self.__new = False
|
aequilibrae
|
positive
|
def tidy(x):
if x is None:
return x
if isinstance(x, (list, tuple)):
return [tidy(y) for y in x]
if isinstance(x, dict):
r = {}
for (k, v) in x.items():
<DeepExtract>
if v is None:
r[str(k)] = v
if isinstance(v, (list, tuple)):
r[str(k)] = [tidy(y) for y in v]
if isinstance(v, dict):
r = {}
for (k, v) in v.items():
r[str(k)] = tidy(v)
r[str(k)] = r
if isinstance(v, (int, float, str)):
r[str(k)] = v
import re
r[str(k)] = re.sub(' object at x\\w+', repr(v), '')
</DeepExtract>
return r
if isinstance(x, (int, float, str)):
return x
import re
return re.sub(' object at x\\w+', repr(x), '')
|
def tidy(x):
if x is None:
return x
if isinstance(x, (list, tuple)):
return [tidy(y) for y in x]
if isinstance(x, dict):
r = {}
for (k, v) in x.items():
if v is None:
r[str(k)] = v
if isinstance(v, (list, tuple)):
r[str(k)] = [tidy(y) for y in v]
if isinstance(v, dict):
r = {}
for (k, v) in v.items():
r[str(k)] = tidy(v)
r[str(k)] = r
if isinstance(v, (int, float, str)):
r[str(k)] = v
import re
r[str(k)] = re.sub(' object at x\\w+', repr(v), '')
return r
if isinstance(x, (int, float, str)):
return x
import re
return re.sub(' object at x\\w+', repr(x), '')
|
climetlab
|
positive
|
def _load_coco_keypoint_annotation_kernal(self, index):
"""
coco ann: [u'segmentation', u'area', u'iscrowd', u'image_id', u'bbox', u'category_id', u'id']
iscrowd:
crowd instances are handled by marking their overlaps with all categories to -1
and later excluded in training
bbox:
[x1, y1, w, h]
:param index: coco image id
:return: db entry
"""
im_ann = self.coco.loadImgs(index)[0]
width = im_ann['width']
height = im_ann['height']
annIds = self.coco.getAnnIds(imgIds=index, iscrowd=False)
objs = self.coco.loadAnns(annIds)
valid_objs = []
for obj in objs:
(x, y, w, h) = obj['bbox']
x1 = np.max((0, x))
y1 = np.max((0, y))
x2 = np.min((width - 1, x1 + np.max((0, w - 1))))
y2 = np.min((height - 1, y1 + np.max((0, h - 1))))
if obj['area'] > 0 and x2 >= x1 and (y2 >= y1):
obj['clean_bbox'] = [x1, y1, x2 - x1, y2 - y1]
valid_objs.append(obj)
objs = valid_objs
rec = []
for obj in objs:
cls = self._coco_ind_to_class_ind[obj['category_id']]
if cls != 1:
continue
if max(obj['keypoints']) == 0:
continue
joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)
joints_3d_vis = np.zeros((self.num_joints, 3), dtype=np.float)
for ipt in range(self.num_joints):
joints_3d[ipt, 0] = obj['keypoints'][ipt * 3 + 0]
joints_3d[ipt, 1] = obj['keypoints'][ipt * 3 + 1]
joints_3d[ipt, 2] = 0
t_vis = obj['keypoints'][ipt * 3 + 2]
if t_vis > 1:
t_vis = 1
joints_3d_vis[ipt, 0] = t_vis
joints_3d_vis[ipt, 1] = t_vis
joints_3d_vis[ipt, 2] = 0
<DeepExtract>
(x, y, w, h) = obj['clean_bbox'][:4][:4]
(center, scale) = self._xywh2cs(x, y, w, h)
</DeepExtract>
rec.append({'image': self.image_path_from_index(index), 'center': center, 'scale': scale, 'joints_3d': joints_3d, 'joints_3d_vis': joints_3d_vis, 'filename': '', 'imgnum': 0})
return rec
|
def _load_coco_keypoint_annotation_kernal(self, index):
"""
coco ann: [u'segmentation', u'area', u'iscrowd', u'image_id', u'bbox', u'category_id', u'id']
iscrowd:
crowd instances are handled by marking their overlaps with all categories to -1
and later excluded in training
bbox:
[x1, y1, w, h]
:param index: coco image id
:return: db entry
"""
im_ann = self.coco.loadImgs(index)[0]
width = im_ann['width']
height = im_ann['height']
annIds = self.coco.getAnnIds(imgIds=index, iscrowd=False)
objs = self.coco.loadAnns(annIds)
valid_objs = []
for obj in objs:
(x, y, w, h) = obj['bbox']
x1 = np.max((0, x))
y1 = np.max((0, y))
x2 = np.min((width - 1, x1 + np.max((0, w - 1))))
y2 = np.min((height - 1, y1 + np.max((0, h - 1))))
if obj['area'] > 0 and x2 >= x1 and (y2 >= y1):
obj['clean_bbox'] = [x1, y1, x2 - x1, y2 - y1]
valid_objs.append(obj)
objs = valid_objs
rec = []
for obj in objs:
cls = self._coco_ind_to_class_ind[obj['category_id']]
if cls != 1:
continue
if max(obj['keypoints']) == 0:
continue
joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)
joints_3d_vis = np.zeros((self.num_joints, 3), dtype=np.float)
for ipt in range(self.num_joints):
joints_3d[ipt, 0] = obj['keypoints'][ipt * 3 + 0]
joints_3d[ipt, 1] = obj['keypoints'][ipt * 3 + 1]
joints_3d[ipt, 2] = 0
t_vis = obj['keypoints'][ipt * 3 + 2]
if t_vis > 1:
t_vis = 1
joints_3d_vis[ipt, 0] = t_vis
joints_3d_vis[ipt, 1] = t_vis
joints_3d_vis[ipt, 2] = 0
(x, y, w, h) = obj['clean_bbox'][:4][:4]
(center, scale) = self._xywh2cs(x, y, w, h)
rec.append({'image': self.image_path_from_index(index), 'center': center, 'scale': scale, 'joints_3d': joints_3d, 'joints_3d_vis': joints_3d_vis, 'filename': '', 'imgnum': 0})
return rec
|
cvToolkit
|
positive
|
def test_link_type_keep_if_protected_type(self):
<DeepExtract>
for query in self.queries:
if 'link_type_keep_if_protected_type' in query:
cmd = query
raise FileNotFoundError('QUERY DOES NOT EXIST')
</DeepExtract>
self.curr.execute(cmd)
with self.assertRaises(sqlite3.IntegrityError):
self.curr.execute('update link_types set link_type="xsdfg" where link_type_id="z"')
with self.assertRaises(sqlite3.IntegrityError):
self.curr.execute('update link_types set link_type="xsdfg" where link_type_id="y"')
|
def test_link_type_keep_if_protected_type(self):
for query in self.queries:
if 'link_type_keep_if_protected_type' in query:
cmd = query
raise FileNotFoundError('QUERY DOES NOT EXIST')
self.curr.execute(cmd)
with self.assertRaises(sqlite3.IntegrityError):
self.curr.execute('update link_types set link_type="xsdfg" where link_type_id="z"')
with self.assertRaises(sqlite3.IntegrityError):
self.curr.execute('update link_types set link_type="xsdfg" where link_type_id="y"')
|
aequilibrae
|
positive
|
def setup_file(file):
"""Prepare new HDF5 file for writing."""
file.attrs['set_number'] = self.set_num
file.attrs['handler_name'] = self.name
file.attrs['writes'] = self.file_write_num
file.create_group('scales')
file['scales'].create_dataset(name='constant', data=np.zeros(1), dtype=np.float64)
file['scales']['constant'].make_scale('constant')
for name in ['sim_time', 'timestep', 'world_time', 'wall_time']:
file['scales'].create_dataset(name=name, shape=(0,), maxshape=(self.max_writes,), dtype=np.float64)
file['scales'][name].make_scale(name)
for name in ['iteration', 'write_number']:
file['scales'].create_dataset(name=name, shape=(0,), maxshape=(self.max_writes,), dtype=int)
file['scales'][name].make_scale(name)
file.create_group('tasks')
for task in self.tasks:
op = task['operator']
layout = task['layout']
scales = task['scales']
<DeepExtract>
shape = (1,) + task['global_shape']
maxshape = (self.max_writes,) + task['global_shape']
dset = file['tasks'].create_dataset(name=task['name'], shape=shape, maxshape=maxshape, dtype=task['dtype'])
dset = dset
</DeepExtract>
dset.attrs['constant'] = op.domain.constant
dset.attrs['grid_space'] = layout.grid_space
dset.attrs['scales'] = scales
dset.dims[0].label = 't'
for sn in ['sim_time', 'world_time', 'wall_time', 'timestep', 'iteration', 'write_number']:
dset.dims[0].attach_scale(file['scales'][sn])
rank = len(op.tensorsig)
for axis in range(self.dist.dim):
basis = op.domain.full_bases[axis]
if basis is None:
sn = lookup = 'constant'
else:
subaxis = axis - basis.axis
if layout.grid_space[axis]:
sn = basis.coordsystem.coords[subaxis].name
data = basis.global_grids(scales)[subaxis].ravel()
else:
sn = 'k' + basis.coordsystem.coords[subaxis].name
data = layout.global_group_arrays(op.domain, scales)[subaxis]
scale_hash = hashlib.sha1(data).hexdigest()
lookup = f'{sn}_hash_{scale_hash}'
if lookup not in file['scales']:
file['scales'].create_dataset(name=lookup, data=data)
file['scales'][lookup].make_scale(sn)
scale = file['scales'][lookup]
dset.dims[1 + rank + axis].label = sn
dset.dims[1 + rank + axis].attach_scale(scale)
|
def setup_file(file):
"""Prepare new HDF5 file for writing."""
file.attrs['set_number'] = self.set_num
file.attrs['handler_name'] = self.name
file.attrs['writes'] = self.file_write_num
file.create_group('scales')
file['scales'].create_dataset(name='constant', data=np.zeros(1), dtype=np.float64)
file['scales']['constant'].make_scale('constant')
for name in ['sim_time', 'timestep', 'world_time', 'wall_time']:
file['scales'].create_dataset(name=name, shape=(0,), maxshape=(self.max_writes,), dtype=np.float64)
file['scales'][name].make_scale(name)
for name in ['iteration', 'write_number']:
file['scales'].create_dataset(name=name, shape=(0,), maxshape=(self.max_writes,), dtype=int)
file['scales'][name].make_scale(name)
file.create_group('tasks')
for task in self.tasks:
op = task['operator']
layout = task['layout']
scales = task['scales']
shape = (1,) + task['global_shape']
maxshape = (self.max_writes,) + task['global_shape']
dset = file['tasks'].create_dataset(name=task['name'], shape=shape, maxshape=maxshape, dtype=task['dtype'])
dset = dset
dset.attrs['constant'] = op.domain.constant
dset.attrs['grid_space'] = layout.grid_space
dset.attrs['scales'] = scales
dset.dims[0].label = 't'
for sn in ['sim_time', 'world_time', 'wall_time', 'timestep', 'iteration', 'write_number']:
dset.dims[0].attach_scale(file['scales'][sn])
rank = len(op.tensorsig)
for axis in range(self.dist.dim):
basis = op.domain.full_bases[axis]
if basis is None:
sn = lookup = 'constant'
else:
subaxis = axis - basis.axis
if layout.grid_space[axis]:
sn = basis.coordsystem.coords[subaxis].name
data = basis.global_grids(scales)[subaxis].ravel()
else:
sn = 'k' + basis.coordsystem.coords[subaxis].name
data = layout.global_group_arrays(op.domain, scales)[subaxis]
scale_hash = hashlib.sha1(data).hexdigest()
lookup = f'{sn}_hash_{scale_hash}'
if lookup not in file['scales']:
file['scales'].create_dataset(name=lookup, data=data)
file['scales'][lookup].make_scale(sn)
scale = file['scales'][lookup]
dset.dims[1 + rank + axis].label = sn
dset.dims[1 + rank + axis].attach_scale(scale)
|
dedalus
|
positive
|
def get_node_pos(string_key):
"""Given a string key a corresponding node in the hash ring is returned
along with it's position in the ring.
If the hash ring is empty, (`None`, `None`) is returned.
"""
if not self.ring:
return None
<DeepExtract>
b_key = self._hash_digest(string_key)
string_key = self._hash_val(b_key, lambda x: x)
</DeepExtract>
nodes = self._sorted_keys
pos = bisect(nodes, key)
if pos == len(nodes):
return 0
else:
return pos
|
def get_node_pos(string_key):
"""Given a string key a corresponding node in the hash ring is returned
along with it's position in the ring.
If the hash ring is empty, (`None`, `None`) is returned.
"""
if not self.ring:
return None
b_key = self._hash_digest(string_key)
string_key = self._hash_val(b_key, lambda x: x)
nodes = self._sorted_keys
pos = bisect(nodes, key)
if pos == len(nodes):
return 0
else:
return pos
|
cola
|
positive
|
def listen(self, timeout=10):
"""
Listen for incoming messages. Timeout is used to check if the server must be switched off.
:param timeout: Socket Timeout in seconds
"""
self._socket.settimeout(float(timeout))
while not self.stopped.isSet():
try:
(data, client_address) = self._socket.recvfrom(4096)
except socket.timeout:
continue
try:
<DeepExtract>
(data, client_address) = (data, client_address)
serializer = Serializer()
message = serializer.deserialize(data, client_address)
if isinstance(message, int):
logger.error('receive_datagram - BAD REQUEST')
rst = Message()
rst.destination = client_address
rst.type = defines.Types['RST']
rst.code = message
self.send_datagram(rst)
return
logger.info('receive_datagram - ' + str(message))
if isinstance(message, Request):
transaction = self._messageLayer.receive_request(message)
if transaction.request.duplicated and transaction.completed:
logger.debug('message duplicated,transaction completed')
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
transaction = self._messageLayer.send_response(transaction)
self.send_datagram(transaction.response)
return
elif transaction.request.duplicated and (not transaction.completed):
logger.debug('message duplicated,transaction NOT completed')
self._send_ack(transaction)
return
transaction.separate_timer = self._start_separate_timer(transaction)
transaction = self._blockLayer.receive_request(transaction)
if transaction.block_transfer:
self._stop_separate_timer(transaction.separate_timer)
transaction = self._messageLayer.send_response(transaction)
self.send_datagram(transaction.response)
return
transaction = self._observeLayer.receive_request(transaction)
"\n call to the cache layer to check if there's a cached response for the request\n if not, call the forward layer\n "
if self._cacheLayer is not None:
transaction = self._cacheLayer.receive_request(transaction)
if transaction.cacheHit is False:
logger.debug(transaction.request)
transaction = self._forwardLayer.receive_request_reverse(transaction)
logger.debug(transaction.response)
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
transaction = self._cacheLayer.send_response(transaction)
else:
transaction = self._forwardLayer.receive_request_reverse(transaction)
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
self._stop_separate_timer(transaction.separate_timer)
transaction = self._messageLayer.send_response(transaction)
if transaction.response is not None:
if transaction.response.type == defines.Types['CON']:
self._start_retrasmission(transaction, transaction.response)
self.send_datagram(transaction.response)
elif isinstance(message, Message):
transaction = self._messageLayer.receive_empty(message)
if transaction is not None:
transaction = self._blockLayer.receive_empty(message, transaction)
self._observeLayer.receive_empty(message, transaction)
else:
logger.error('Received response from %s', message.source)
</DeepExtract>
except RuntimeError:
logger.exception('Exception with Executor')
self._socket.close()
|
def listen(self, timeout=10):
"""
Listen for incoming messages. Timeout is used to check if the server must be switched off.
:param timeout: Socket Timeout in seconds
"""
self._socket.settimeout(float(timeout))
while not self.stopped.isSet():
try:
(data, client_address) = self._socket.recvfrom(4096)
except socket.timeout:
continue
try:
(data, client_address) = (data, client_address)
serializer = Serializer()
message = serializer.deserialize(data, client_address)
if isinstance(message, int):
logger.error('receive_datagram - BAD REQUEST')
rst = Message()
rst.destination = client_address
rst.type = defines.Types['RST']
rst.code = message
self.send_datagram(rst)
return
logger.info('receive_datagram - ' + str(message))
if isinstance(message, Request):
transaction = self._messageLayer.receive_request(message)
if transaction.request.duplicated and transaction.completed:
logger.debug('message duplicated,transaction completed')
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
transaction = self._messageLayer.send_response(transaction)
self.send_datagram(transaction.response)
return
elif transaction.request.duplicated and (not transaction.completed):
logger.debug('message duplicated,transaction NOT completed')
self._send_ack(transaction)
return
transaction.separate_timer = self._start_separate_timer(transaction)
transaction = self._blockLayer.receive_request(transaction)
if transaction.block_transfer:
self._stop_separate_timer(transaction.separate_timer)
transaction = self._messageLayer.send_response(transaction)
self.send_datagram(transaction.response)
return
transaction = self._observeLayer.receive_request(transaction)
"\n call to the cache layer to check if there's a cached response for the request\n if not, call the forward layer\n "
if self._cacheLayer is not None:
transaction = self._cacheLayer.receive_request(transaction)
if transaction.cacheHit is False:
logger.debug(transaction.request)
transaction = self._forwardLayer.receive_request_reverse(transaction)
logger.debug(transaction.response)
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
transaction = self._cacheLayer.send_response(transaction)
else:
transaction = self._forwardLayer.receive_request_reverse(transaction)
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
self._stop_separate_timer(transaction.separate_timer)
transaction = self._messageLayer.send_response(transaction)
if transaction.response is not None:
if transaction.response.type == defines.Types['CON']:
self._start_retrasmission(transaction, transaction.response)
self.send_datagram(transaction.response)
elif isinstance(message, Message):
transaction = self._messageLayer.receive_empty(message)
if transaction is not None:
transaction = self._blockLayer.receive_empty(message, transaction)
self._observeLayer.receive_empty(message, transaction)
else:
logger.error('Received response from %s', message.source)
except RuntimeError:
logger.exception('Exception with Executor')
self._socket.close()
|
CoAPthon3
|
positive
|
def test_10_msdos_basic(self):
""" Test that we can create a single partition on MSDOS partition table """
blivet_disk = self.get_blivet_device(self.vdevs[0])
self.create_part_table(blivet_disk, 'msdos')
<DeepExtract>
children = self.blivet_utils.get_disk_children(blivet_disk)
if logical:
free = next((p for p in children.logicals if p.type == 'free space' and p.size >= min_size), None)
else:
free = next((p for p in children.partitions if p.type == 'free space' and p.size >= min_size), None)
self.assertIsNotNone(free)
free = free
</DeepExtract>
<DeepExtract>
if size is None:
size = free.size
parent_selection = ProxyDataContainer(parent_device=free.parents[0], free_space=free, selected_size=size)
size_selection = ProxyDataContainer(total_size=size, parents=[parent_selection])
user_input = ProxyDataContainer(device_type='partition', size_selection=size_selection, filesystem=fstype, name=None, label=label, mountpoint=None, encrypt=False, passphrase=None, raid_level=None, advanced={'parttype': ptype})
ret = self.blivet_utils.add_device(user_input)
self.assertTrue(ret.success)
self.assertIsNone(ret.message)
self.assertIsNone(ret.exception)
self.assertIsNone(ret.traceback)
actions = ret.actions
</DeepExtract>
blivet_part = self.get_blivet_device(self.vdevs[0] + '1')
self.assertTrue(blivet_part.is_primary)
self.assertAlmostEqual(blivet_part.size, free.size, delta=SIZE_DELTA)
<DeepExtract>
self.assertEqual(len(actions), 2)
part_ac = next((ac for ac in actions if ac.is_device), None)
self.assertIsNotNone(part_ac)
fmt_ac = next((ac for ac in actions if ac.is_format), None)
self.assertIsNotNone(fmt_ac)
self.assertIsNotNone(blivet_part)
self.assertIsInstance(blivet_part, blivet.devices.PartitionDevice)
self.assertIsNotNone(blivet_part.format)
self.assertIsInstance(blivet_part.format, blivet.formats.fs.Ext4FS)
</DeepExtract>
children = self.blivet_utils.get_disk_children(blivet_disk)
self.assertEqual(len(children.partitions), 1)
self.assertEqual(children.partitions[0], blivet_part)
self.assertFalse(children.extended)
self.assertFalse(children.logicals)
|
def test_10_msdos_basic(self):
""" Test that we can create a single partition on MSDOS partition table """
blivet_disk = self.get_blivet_device(self.vdevs[0])
self.create_part_table(blivet_disk, 'msdos')
children = self.blivet_utils.get_disk_children(blivet_disk)
if logical:
free = next((p for p in children.logicals if p.type == 'free space' and p.size >= min_size), None)
else:
free = next((p for p in children.partitions if p.type == 'free space' and p.size >= min_size), None)
self.assertIsNotNone(free)
free = free
if size is None:
size = free.size
parent_selection = ProxyDataContainer(parent_device=free.parents[0], free_space=free, selected_size=size)
size_selection = ProxyDataContainer(total_size=size, parents=[parent_selection])
user_input = ProxyDataContainer(device_type='partition', size_selection=size_selection, filesystem=fstype, name=None, label=label, mountpoint=None, encrypt=False, passphrase=None, raid_level=None, advanced={'parttype': ptype})
ret = self.blivet_utils.add_device(user_input)
self.assertTrue(ret.success)
self.assertIsNone(ret.message)
self.assertIsNone(ret.exception)
self.assertIsNone(ret.traceback)
actions = ret.actions
blivet_part = self.get_blivet_device(self.vdevs[0] + '1')
self.assertTrue(blivet_part.is_primary)
self.assertAlmostEqual(blivet_part.size, free.size, delta=SIZE_DELTA)
self.assertEqual(len(actions), 2)
part_ac = next((ac for ac in actions if ac.is_device), None)
self.assertIsNotNone(part_ac)
fmt_ac = next((ac for ac in actions if ac.is_format), None)
self.assertIsNotNone(fmt_ac)
self.assertIsNotNone(blivet_part)
self.assertIsInstance(blivet_part, blivet.devices.PartitionDevice)
self.assertIsNotNone(blivet_part.format)
self.assertIsInstance(blivet_part.format, blivet.formats.fs.Ext4FS)
children = self.blivet_utils.get_disk_children(blivet_disk)
self.assertEqual(len(children.partitions), 1)
self.assertEqual(children.partitions[0], blivet_part)
self.assertFalse(children.extended)
self.assertFalse(children.logicals)
|
blivet-gui
|
positive
|
def do_effect(self, can_msg, args):
if args.get('action') == 'read':
<DeepExtract>
if can_msg.CANData:
self.CANList.append(copy.deepcopy(can_msg.CANFrame))
self.dprint(2, 'Wrote message!')
can_msg = can_msg
</DeepExtract>
elif args.get('action') == 'write':
<DeepExtract>
if len(self.CANList) > 0:
can_msg.CANData = True
can_msg.CANFrame = self.CANList.pop(0)
can_msg.bus = self._bus
self.dprint(2, 'Got message!')
return can_msg
</DeepExtract>
else:
self.dprint(1, 'Command ' + args['action'] + ' not implemented 8(')
return can_msg
|
def do_effect(self, can_msg, args):
if args.get('action') == 'read':
if can_msg.CANData:
self.CANList.append(copy.deepcopy(can_msg.CANFrame))
self.dprint(2, 'Wrote message!')
can_msg = can_msg
elif args.get('action') == 'write':
if len(self.CANList) > 0:
can_msg.CANData = True
can_msg.CANFrame = self.CANList.pop(0)
can_msg.bus = self._bus
self.dprint(2, 'Got message!')
return can_msg
else:
self.dprint(1, 'Command ' + args['action'] + ' not implemented 8(')
return can_msg
|
CANToolz
|
positive
|
def compare(a, b):
if isinstance(a, dict) and isinstance(b, dict):
all_keys = list(a.keys()) + list(b.keys())
for k in all_keys:
<DeepExtract>
if isinstance(a[k], dict) and isinstance(b[k], dict):
all_keys = list(a[k].keys()) + list(b[k].keys())
for k in all_keys:
compare(a[k][k], b[k][k])
elif isinstance(a[k], dict) or isinstance(b[k], dict):
assert False
else:
if isinstance(a[k], np.ndarray) and isinstance(b[k], np.ndarray):
a[k] = a[k].tolist()
b[k] = b[k].tolist()
elif isinstance(a[k], np.ndarray) or isinstance(b[k], np.ndarray):
assert False
assert a[k] == b[k]
</DeepExtract>
elif isinstance(a, dict) or isinstance(b, dict):
assert False
else:
if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
a = a.tolist()
b = b.tolist()
elif isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
assert False
assert a == b
|
def compare(a, b):
if isinstance(a, dict) and isinstance(b, dict):
all_keys = list(a.keys()) + list(b.keys())
for k in all_keys:
if isinstance(a[k], dict) and isinstance(b[k], dict):
all_keys = list(a[k].keys()) + list(b[k].keys())
for k in all_keys:
compare(a[k][k], b[k][k])
elif isinstance(a[k], dict) or isinstance(b[k], dict):
assert False
else:
if isinstance(a[k], np.ndarray) and isinstance(b[k], np.ndarray):
a[k] = a[k].tolist()
b[k] = b[k].tolist()
elif isinstance(a[k], np.ndarray) or isinstance(b[k], np.ndarray):
assert False
assert a[k] == b[k]
elif isinstance(a, dict) or isinstance(b, dict):
assert False
else:
if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
a = a.tolist()
b = b.tolist()
elif isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
assert False
assert a == b
|
amset
|
positive
|
def ExplicitlyExcludeFromIndex(client, database_id):
""" The default index policy on a DocumentContainer will AUTOMATICALLY index ALL documents added.
There may be scenarios where you want to exclude a specific doc from the index even though all other
documents are being indexed automatically.
This method demonstrates how to use an index directive to control this
"""
try:
<DeepExtract>
try:
collection_link = GetContainerLink(database_id, COLLECTION_ID)
client.DeleteContainer(collection_link)
print("Collection with id '{0}' was deleted".format(COLLECTION_ID))
except errors.HTTPFailure as e:
if e.status_code == 404:
pass
elif e.status_code == 400:
print('Bad request for collection link', collection_link)
raise
else:
raise
</DeepExtract>
<DeepExtract>
database_link = 'dbs' + '/' + database_id
</DeepExtract>
created_Container = client.CreateContainer(database_link, {'id': COLLECTION_ID})
print(created_Container)
print('\n' + '-' * 25 + '\n1. Collection created with index policy')
<DeepExtract>
for (k, v) in created_Container['indexingPolicy'].items():
print('{:<15}'.format(k), v)
print()
</DeepExtract>
<DeepExtract>
collection_link = GetDatabaseLink(database_id) + '/' + 'colls' + '/' + COLLECTION_ID
</DeepExtract>
doc = client.CreateItem(collection_link, {'id': 'doc1', 'orderId': 'order1'})
print('\n' + '-' * 25 + 'Document doc1 created with order1' + '-' * 25)
print(doc)
query = {'query': 'SELECT * FROM r WHERE r.orderId=@orderNo', 'parameters': [{'name': '@orderNo', 'value': 'order1'}]}
<DeepExtract>
try:
results = list(client.QueryItems(collection_link, query))
print(message)
for doc in results:
print(doc)
return results
except errors.HTTPFailure as e:
if e.status_code == 404:
print("Document doesn't exist")
elif e.status_code == 400:
                print('Bad Request exception occurred: ', e)
pass
else:
raise
finally:
print()
</DeepExtract>
doc2 = client.CreateItem(collection_link, {'id': 'doc2', 'orderId': 'order2'}, {'indexingDirective': documents.IndexingDirective.Exclude})
print('\n' + '-' * 25 + 'Document doc2 created with order2' + '-' * 25)
print(doc2)
query = {'query': 'SELECT * FROM r WHERE r.orderId=@orderNo', 'parameters': [{'name': '@orderNo', 'value': 'order2'}]}
<DeepExtract>
try:
results = list(client.QueryItems(collection_link, query))
print(message)
for doc in results:
print(doc)
return results
except errors.HTTPFailure as e:
if e.status_code == 404:
print("Document doesn't exist")
elif e.status_code == 400:
                print('Bad Request exception occurred: ', e)
pass
else:
raise
finally:
print()
</DeepExtract>
docRead = client.ReadItem(GetDocumentLink(database_id, COLLECTION_ID, 'doc2'))
print('Document read by ID: \n', docRead['id'])
client.DeleteContainer(collection_link)
print('\n')
except errors.HTTPFailure as e:
if e.status_code == 409:
print('Entity already exists')
elif e.status_code == 404:
print("Entity doesn't exist")
else:
raise
|
def ExplicitlyExcludeFromIndex(client, database_id):
""" The default index policy on a DocumentContainer will AUTOMATICALLY index ALL documents added.
There may be scenarios where you want to exclude a specific doc from the index even though all other
documents are being indexed automatically.
This method demonstrates how to use an index directive to control this
"""
try:
try:
collection_link = GetContainerLink(database_id, COLLECTION_ID)
client.DeleteContainer(collection_link)
print("Collection with id '{0}' was deleted".format(COLLECTION_ID))
except errors.HTTPFailure as e:
if e.status_code == 404:
pass
elif e.status_code == 400:
print('Bad request for collection link', collection_link)
raise
else:
raise
database_link = 'dbs' + '/' + database_id
created_Container = client.CreateContainer(database_link, {'id': COLLECTION_ID})
print(created_Container)
print('\n' + '-' * 25 + '\n1. Collection created with index policy')
for (k, v) in created_Container['indexingPolicy'].items():
print('{:<15}'.format(k), v)
print()
collection_link = GetDatabaseLink(database_id) + '/' + 'colls' + '/' + COLLECTION_ID
doc = client.CreateItem(collection_link, {'id': 'doc1', 'orderId': 'order1'})
print('\n' + '-' * 25 + 'Document doc1 created with order1' + '-' * 25)
print(doc)
query = {'query': 'SELECT * FROM r WHERE r.orderId=@orderNo', 'parameters': [{'name': '@orderNo', 'value': 'order1'}]}
try:
results = list(client.QueryItems(collection_link, query))
print(message)
for doc in results:
print(doc)
return results
except errors.HTTPFailure as e:
if e.status_code == 404:
print("Document doesn't exist")
elif e.status_code == 400:
                print('Bad Request exception occurred: ', e)
pass
else:
raise
finally:
print()
doc2 = client.CreateItem(collection_link, {'id': 'doc2', 'orderId': 'order2'}, {'indexingDirective': documents.IndexingDirective.Exclude})
print('\n' + '-' * 25 + 'Document doc2 created with order2' + '-' * 25)
print(doc2)
query = {'query': 'SELECT * FROM r WHERE r.orderId=@orderNo', 'parameters': [{'name': '@orderNo', 'value': 'order2'}]}
try:
results = list(client.QueryItems(collection_link, query))
print(message)
for doc in results:
print(doc)
return results
except errors.HTTPFailure as e:
if e.status_code == 404:
print("Document doesn't exist")
elif e.status_code == 400:
                print('Bad Request exception occurred: ', e)
pass
else:
raise
finally:
print()
docRead = client.ReadItem(GetDocumentLink(database_id, COLLECTION_ID, 'doc2'))
print('Document read by ID: \n', docRead['id'])
client.DeleteContainer(collection_link)
print('\n')
except errors.HTTPFailure as e:
if e.status_code == 409:
print('Entity already exists')
elif e.status_code == 404:
print("Entity doesn't exist")
else:
raise
|
azure-cosmos-python
|
positive
|
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, **kwargs):
"""Constructs a BertTokenizer.
Args:
**vocab_file**: Path to a one-wordpiece-per-line vocabulary file
**do_lower_case**: (`optional`) boolean (default True)
Whether to lower case the input
Only has an effect when do_basic_tokenize=True
**do_basic_tokenize**: (`optional`) boolean (default True)
Whether to do basic tokenization before wordpiece.
**never_split**: (`optional`) list of string
List of tokens which will never be split during tokenization.
Only has an effect when do_basic_tokenize=True
**tokenize_chinese_chars**: (`optional`) boolean (default True)
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
"""
super(BertTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
if not os.path.isfile(vocab_file):
raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
<DeepExtract>
vocab = collections.OrderedDict()
with open(vocab_file, 'r', encoding='utf-8') as reader:
tokens = reader.readlines()
for (index, token) in enumerate(tokens):
token = token.rstrip('\n')
vocab[token] = index
self.vocab = vocab
</DeepExtract>
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
|
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, **kwargs):
"""Constructs a BertTokenizer.
Args:
**vocab_file**: Path to a one-wordpiece-per-line vocabulary file
**do_lower_case**: (`optional`) boolean (default True)
Whether to lower case the input
Only has an effect when do_basic_tokenize=True
**do_basic_tokenize**: (`optional`) boolean (default True)
Whether to do basic tokenization before wordpiece.
**never_split**: (`optional`) list of string
List of tokens which will never be split during tokenization.
Only has an effect when do_basic_tokenize=True
**tokenize_chinese_chars**: (`optional`) boolean (default True)
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
"""
super(BertTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
if not os.path.isfile(vocab_file):
raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
vocab = collections.OrderedDict()
with open(vocab_file, 'r', encoding='utf-8') as reader:
tokens = reader.readlines()
for (index, token) in enumerate(tokens):
token = token.rstrip('\n')
vocab[token] = index
self.vocab = vocab
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
|
DIAC2019-DQD-Based-on-Adversarial-Attack
|
positive
|
def _encode_constructLUT(sends):
<DeepExtract>
baseLUT = sorted(list(set([t[1] for t in sends])))
</DeepExtract>
lutNbits = math.ceil(math.log2(len(baseLUT)))
return {'nbits': lutNbits, 'addrs': baseLUT}
|
def _encode_constructLUT(sends):
baseLUT = sorted(list(set([t[1] for t in sends])))
lutNbits = math.ceil(math.log2(len(baseLUT)))
return {'nbits': lutNbits, 'addrs': baseLUT}
|
counterparty-lib
|
positive
|
def _lines_markdown(obj, **kwargs):
"""Yield lines for a Markdown report.
:param obj: Item, list of Items, or Document to publish
:param linkify: turn links into hyperlinks (for conversion to HTML)
:return: iterator of lines of text
"""
linkify = kwargs.get('linkify', False)
to_html = kwargs.get('to_html', False)
for item in iter_items(obj):
heading = '#' * item.depth
<DeepExtract>
text = str(item.level)
if text.endswith('.0') and len(text) > 3:
text = text[:-2]
item.level = text
</DeepExtract>
if item.heading:
text_lines = item.text.splitlines()
if item.header:
text_lines.insert(0, item.header)
if settings.PUBLISH_HEADING_LEVELS:
standard = '{h} {lev} {t}'.format(h=heading, lev=level, t=text_lines[0] if text_lines else '')
else:
standard = '{h} {t}'.format(h=heading, t=text_lines[0] if text_lines else '')
<DeepExtract>
attr_list = ' {{#{u} }}'.format(u=item.uid) if True else ''
</DeepExtract>
yield (standard + attr_list)
yield from text_lines[1:]
else:
uid = item.uid
if settings.ENABLE_HEADERS:
if item.header:
uid = '{h} <small>{u}</small>'.format(h=item.header, u=item.uid)
else:
uid = '{u}'.format(u=item.uid)
if settings.PUBLISH_BODY_LEVELS:
standard = '{h} {lev} {u}'.format(h=heading, lev=level, u=uid)
else:
standard = '{h} {u}'.format(h=heading, u=uid)
<DeepExtract>
attr_list = ' {{#{u} }}'.format(u=item.uid) if True else ''
</DeepExtract>
yield (standard + attr_list)
if item.text:
yield ''
yield from item.text.splitlines()
if item.ref:
yield ''
yield _format_md_ref(item)
if item.references:
yield ''
yield _format_md_references(item)
if item.links:
yield ''
items2 = item.parent_items
if settings.PUBLISH_CHILD_LINKS:
label = 'Parent links:'
else:
label = 'Links:'
<DeepExtract>
links = []
for item in items2:
if to_html:
link = _format_html_item_link(item, linkify=linkify)
else:
link = _format_md_item_link(item, linkify=linkify)
links.append(link)
links = ', '.join(links)
</DeepExtract>
<DeepExtract>
if linkify:
label_links = '*{lb}* {ls}'.format(lb=label, ls=links)
else:
label_links = '*{lb} {ls}*'.format(lb=label, ls=links)
</DeepExtract>
yield label_links
if settings.PUBLISH_CHILD_LINKS:
items2 = item.find_child_items()
if items2:
yield ''
label = 'Child links:'
<DeepExtract>
links = []
for item in items2:
if to_html:
link = _format_html_item_link(item, linkify=linkify)
else:
link = _format_md_item_link(item, linkify=linkify)
links.append(link)
links = ', '.join(links)
</DeepExtract>
<DeepExtract>
if linkify:
label_links = '*{lb}* {ls}'.format(lb=label, ls=links)
else:
label_links = '*{lb} {ls}*'.format(lb=label, ls=links)
</DeepExtract>
yield label_links
if item.document and item.document.publish:
header_printed = False
for attr in item.document.publish:
if not item.attribute(attr):
continue
if not header_printed:
header_printed = True
yield ''
yield '| Attribute | Value |'
yield '| --------- | ----- |'
yield '| {} | {} |'.format(attr, item.attribute(attr))
yield ''
yield ''
|
def _lines_markdown(obj, **kwargs):
"""Yield lines for a Markdown report.
:param obj: Item, list of Items, or Document to publish
:param linkify: turn links into hyperlinks (for conversion to HTML)
:return: iterator of lines of text
"""
linkify = kwargs.get('linkify', False)
to_html = kwargs.get('to_html', False)
for item in iter_items(obj):
heading = '#' * item.depth
text = str(item.level)
if text.endswith('.0') and len(text) > 3:
text = text[:-2]
item.level = text
if item.heading:
text_lines = item.text.splitlines()
if item.header:
text_lines.insert(0, item.header)
if settings.PUBLISH_HEADING_LEVELS:
standard = '{h} {lev} {t}'.format(h=heading, lev=level, t=text_lines[0] if text_lines else '')
else:
standard = '{h} {t}'.format(h=heading, t=text_lines[0] if text_lines else '')
attr_list = ' {{#{u} }}'.format(u=item.uid) if True else ''
yield (standard + attr_list)
yield from text_lines[1:]
else:
uid = item.uid
if settings.ENABLE_HEADERS:
if item.header:
uid = '{h} <small>{u}</small>'.format(h=item.header, u=item.uid)
else:
uid = '{u}'.format(u=item.uid)
if settings.PUBLISH_BODY_LEVELS:
standard = '{h} {lev} {u}'.format(h=heading, lev=level, u=uid)
else:
standard = '{h} {u}'.format(h=heading, u=uid)
attr_list = ' {{#{u} }}'.format(u=item.uid) if True else ''
yield (standard + attr_list)
if item.text:
yield ''
yield from item.text.splitlines()
if item.ref:
yield ''
yield _format_md_ref(item)
if item.references:
yield ''
yield _format_md_references(item)
if item.links:
yield ''
items2 = item.parent_items
if settings.PUBLISH_CHILD_LINKS:
label = 'Parent links:'
else:
label = 'Links:'
links = []
for item in items2:
if to_html:
link = _format_html_item_link(item, linkify=linkify)
else:
link = _format_md_item_link(item, linkify=linkify)
links.append(link)
links = ', '.join(links)
if linkify:
label_links = '*{lb}* {ls}'.format(lb=label, ls=links)
else:
label_links = '*{lb} {ls}*'.format(lb=label, ls=links)
yield label_links
if settings.PUBLISH_CHILD_LINKS:
items2 = item.find_child_items()
if items2:
yield ''
label = 'Child links:'
links = []
for item in items2:
if to_html:
link = _format_html_item_link(item, linkify=linkify)
else:
link = _format_md_item_link(item, linkify=linkify)
links.append(link)
links = ', '.join(links)
if linkify:
label_links = '*{lb}* {ls}'.format(lb=label, ls=links)
else:
label_links = '*{lb} {ls}*'.format(lb=label, ls=links)
yield label_links
if item.document and item.document.publish:
header_printed = False
for attr in item.document.publish:
if not item.attribute(attr):
continue
if not header_printed:
header_printed = True
yield ''
yield '| Attribute | Value |'
yield '| --------- | ----- |'
yield '| {} | {} |'.format(attr, item.attribute(attr))
yield ''
yield ''
|
doorstop
|
positive
|
def create_loss(self, features, labels, params, is_training=True):
"""Build the loss tensors for discriminator and generator.
This method will set self.d_loss and self.g_loss.
Args:
features: Optional dictionary with inputs to the model ("images" should
contain the real images and "z" the noise for the generator).
      labels: Tensor with labels. These are class indices. Use
self._get_one_hot_labels(labels) to get a one hot encoded tensor.
params: Dictionary with hyperparameters passed to TPUEstimator.
Additional TPUEstimator will set 3 keys: `batch_size`, `use_tpu`,
`tpu_context`. `batch_size` is the batch size for this core.
is_training: If True build the model in training mode. If False build the
model for inference mode (e.g. use trained averages for batch norm).
Raises:
ValueError: If set of meta/hyper parameters is not supported.
"""
real_images = features['images']
if self.conditional:
if self._use_soft_labels:
assert labels.shape[1] == self._dataset.num_classes, 'Need soft labels of dimension {} but got dimension {}'.format(self._dataset.num_classes, labels.shape[1])
real_labels = labels
else:
real_labels = self._get_one_hot_labels(labels)
fake_labels = self._get_one_hot_labels(features['sampled_labels'])
if self._experimental_joint_gen_for_disc:
assert 'generated' in features
fake_images = features['generated']
else:
logging.warning('Computing fake images for every sub step separately.')
fake_images = self.generator(features['z'], y=fake_labels, is_training=is_training)
bs = real_images.shape[0].value
if self._self_supervision:
        assert bs % self._rotated_batch_fraction == 0, ("Rotated batch fraction is invalid: %d doesn't divide %d" % (self._rotated_batch_fraction, bs))
rotated_bs = bs // self._rotated_batch_fraction
num_rot_examples = rotated_bs // NUM_ROTATIONS
logging.info('bs=%s, rotated_bs=%s, num_rot_examples=%s', bs, rotated_bs, num_rot_examples)
assert num_rot_examples > 0
if self._self_supervision == 'rotation':
assert num_rot_examples <= bs, (num_rot_examples, bs)
<DeepExtract>
(real_to_rot, fake_to_rot) = (real_images[-num_rot_examples:], fake_images[-num_rot_examples:])
real_rotated = utils.rotate_images(real_to_rot, rot90_scalars=(1, 2, 3))
fake_rotated = utils.rotate_images(fake_to_rot, rot90_scalars=(1, 2, 3))
all_features = tf.concat([real_images, real_rotated, fake_images, fake_rotated], 0)
all_labels = None
if self.conditional:
real_rotated_labels = tf.tile(real_labels[-num_rot_examples:], [3, 1])
fake_rotated_labels = tf.tile(fake_labels[-num_rot_examples:], [3, 1])
all_labels = tf.concat([real_labels, real_rotated_labels, fake_labels, fake_rotated_labels], 0)
(all_features, all_labels) = (all_features, all_labels)
</DeepExtract>
else:
all_features = tf.concat([real_images, fake_images], 0)
all_labels = None
if self.conditional:
all_labels = tf.concat([real_labels, fake_labels], axis=0)
<DeepExtract>
(d_probs, d_logits, x_rep) = self.discriminator(all_features, y=all_labels, is_training=is_training)
use_sn = self.discriminator._spectral_norm
is_label_available = tf.cast(tf.cast(tf.reduce_sum(all_labels, axis=1, keepdims=True), tf.float32) > 0.5, tf.float32)
assert x_rep.shape.ndims == 2, x_rep.shape
rotation_logits = None
if 'rotation' in self._self_supervision:
with tf.variable_scope('discriminator_rotation', reuse=tf.AUTO_REUSE):
rotation_logits = ops.linear(x_rep, NUM_ROTATIONS, scope='score_classify', use_sn=use_sn)
logging.info('[Discriminator] rotation head %s -> %s', x_rep.shape, rotation_logits)
if not self._project_y:
(d_predictions, d_logits, rot_logits, aux_logits, is_label_available) = (d_probs, d_logits, rotation_logits, None, is_label_available)
aux_logits = None
if self._use_predictor:
with tf.variable_scope('discriminator_predictor', reuse=tf.AUTO_REUSE):
aux_logits = ops.linear(x_rep, all_labels.shape[1], use_bias=True, scope='predictor_linear', use_sn=use_sn)
if self._use_soft_pred:
y_predicted = tf.nn.softmax(aux_logits)
else:
y_predicted = tf.one_hot(tf.arg_max(aux_logits, 1), aux_logits.shape[1])
all_labels = (1.0 - is_label_available) * y_predicted + is_label_available * all_labels
all_labels = tf.stop_gradient(all_labels)
logging.info('[Discriminator] %s -> aux_logits=%s, y_predicted=%s', aux_logits.shape, aux_logits.shape, y_predicted.shape)
class_embedding = self.get_class_embedding(y=all_labels, embedding_dim=x_rep.shape[-1].value, use_sn=use_sn)
d_logits += tf.reduce_sum(class_embedding * x_rep, axis=1, keepdims=True)
d_probs = tf.nn.sigmoid(d_logits)
(d_predictions, d_logits, rot_logits, aux_logits, is_label_available) = (d_probs, d_logits, rotation_logits, aux_logits, is_label_available)
</DeepExtract>
expected_batch_size = 2 * bs
if self._self_supervision == 'rotation':
expected_batch_size += 2 * (NUM_ROTATIONS - 1) * num_rot_examples
if d_logits.shape[0].value != expected_batch_size:
raise ValueError('Batch size unexpected: got %r expected %r' % (d_logits.shape[0].value, expected_batch_size))
(prob_real, prob_fake) = tf.split(d_predictions, 2)
(prob_real, prob_fake) = (prob_real[:bs], prob_fake[:bs])
(logits_real, logits_fake) = tf.split(d_logits, 2)
(logits_real, logits_fake) = (logits_real[:bs], logits_fake[:bs])
(self.d_loss, _, _, self.g_loss) = loss_lib.get_losses(d_real=prob_real, d_fake=prob_fake, d_real_logits=logits_real, d_fake_logits=logits_fake)
if self._self_supervision == 'rotation':
(rot_real_logits, rot_fake_logits) = tf.split(rot_logits, 2)
rot_real_logits = rot_real_logits[-rotated_bs:]
rot_fake_logits = rot_fake_logits[-rotated_bs:]
labels_rotated = tf.constant(np.repeat(np.arange(NUM_ROTATIONS, dtype=np.int32), num_rot_examples))
rot_onehot = tf.one_hot(labels_rotated, NUM_ROTATIONS)
rot_real_logp = tf.log(tf.nn.softmax(rot_real_logits) + 1e-10)
rot_fake_logp = tf.log(tf.nn.softmax(rot_fake_logits) + 1e-10)
real_loss = -tf.reduce_mean(tf.reduce_sum(rot_onehot * rot_real_logp, 1))
fake_loss = -tf.reduce_mean(tf.reduce_sum(rot_onehot * rot_fake_logp, 1))
self.d_loss += real_loss * self._weight_rotation_loss_d
self.g_loss += fake_loss * self._weight_rotation_loss_g
rot_real_labels = tf.one_hot(tf.arg_max(rot_real_logits, 1), NUM_ROTATIONS)
rot_fake_labels = tf.one_hot(tf.arg_max(rot_fake_logits, 1), NUM_ROTATIONS)
accuracy_real = tf.metrics.accuracy(rot_onehot, rot_real_labels)
accuracy_fake = tf.metrics.accuracy(rot_onehot, rot_fake_labels)
self._tpu_summary.scalar('loss/real_loss', real_loss)
self._tpu_summary.scalar('loss/fake_loss', fake_loss)
self._tpu_summary.scalar('accuracy/real', accuracy_real)
self._tpu_summary.scalar('accuracy/fake', accuracy_fake)
if self._use_predictor:
(real_aux_logits, _) = tf.split(aux_logits, 2)
real_aux_logits = real_aux_logits[:bs]
(is_label_available, _) = tf.split(is_label_available, 2)
is_label_available = tf.squeeze(is_label_available[:bs])
class_loss_real = tf.losses.softmax_cross_entropy(real_labels, real_aux_logits, weights=is_label_available)
self.d_loss += self._weight_class_loss * class_loss_real
self._tpu_summary.scalar('loss/class_loss_real', class_loss_real)
self._tpu_summary.scalar('label_frac', tf.reduce_mean(is_label_available))
|
def create_loss(self, features, labels, params, is_training=True):
"""Build the loss tensors for discriminator and generator.
This method will set self.d_loss and self.g_loss.
Args:
features: Optional dictionary with inputs to the model ("images" should
contain the real images and "z" the noise for the generator).
      labels: Tensor with labels. These are class indices. Use
self._get_one_hot_labels(labels) to get a one hot encoded tensor.
params: Dictionary with hyperparameters passed to TPUEstimator.
Additional TPUEstimator will set 3 keys: `batch_size`, `use_tpu`,
`tpu_context`. `batch_size` is the batch size for this core.
is_training: If True build the model in training mode. If False build the
model for inference mode (e.g. use trained averages for batch norm).
Raises:
ValueError: If set of meta/hyper parameters is not supported.
"""
real_images = features['images']
if self.conditional:
if self._use_soft_labels:
assert labels.shape[1] == self._dataset.num_classes, 'Need soft labels of dimension {} but got dimension {}'.format(self._dataset.num_classes, labels.shape[1])
real_labels = labels
else:
real_labels = self._get_one_hot_labels(labels)
fake_labels = self._get_one_hot_labels(features['sampled_labels'])
if self._experimental_joint_gen_for_disc:
assert 'generated' in features
fake_images = features['generated']
else:
logging.warning('Computing fake images for every sub step separately.')
fake_images = self.generator(features['z'], y=fake_labels, is_training=is_training)
bs = real_images.shape[0].value
if self._self_supervision:
        assert bs % self._rotated_batch_fraction == 0, ("Rotated batch fraction is invalid: %d doesn't divide %d" % (self._rotated_batch_fraction, bs))
rotated_bs = bs // self._rotated_batch_fraction
num_rot_examples = rotated_bs // NUM_ROTATIONS
logging.info('bs=%s, rotated_bs=%s, num_rot_examples=%s', bs, rotated_bs, num_rot_examples)
assert num_rot_examples > 0
if self._self_supervision == 'rotation':
assert num_rot_examples <= bs, (num_rot_examples, bs)
(real_to_rot, fake_to_rot) = (real_images[-num_rot_examples:], fake_images[-num_rot_examples:])
real_rotated = utils.rotate_images(real_to_rot, rot90_scalars=(1, 2, 3))
fake_rotated = utils.rotate_images(fake_to_rot, rot90_scalars=(1, 2, 3))
all_features = tf.concat([real_images, real_rotated, fake_images, fake_rotated], 0)
all_labels = None
if self.conditional:
real_rotated_labels = tf.tile(real_labels[-num_rot_examples:], [3, 1])
fake_rotated_labels = tf.tile(fake_labels[-num_rot_examples:], [3, 1])
all_labels = tf.concat([real_labels, real_rotated_labels, fake_labels, fake_rotated_labels], 0)
(all_features, all_labels) = (all_features, all_labels)
else:
all_features = tf.concat([real_images, fake_images], 0)
all_labels = None
if self.conditional:
all_labels = tf.concat([real_labels, fake_labels], axis=0)
(d_probs, d_logits, x_rep) = self.discriminator(all_features, y=all_labels, is_training=is_training)
use_sn = self.discriminator._spectral_norm
is_label_available = tf.cast(tf.cast(tf.reduce_sum(all_labels, axis=1, keepdims=True), tf.float32) > 0.5, tf.float32)
assert x_rep.shape.ndims == 2, x_rep.shape
rotation_logits = None
if 'rotation' in self._self_supervision:
with tf.variable_scope('discriminator_rotation', reuse=tf.AUTO_REUSE):
rotation_logits = ops.linear(x_rep, NUM_ROTATIONS, scope='score_classify', use_sn=use_sn)
logging.info('[Discriminator] rotation head %s -> %s', x_rep.shape, rotation_logits)
if not self._project_y:
(d_predictions, d_logits, rot_logits, aux_logits, is_label_available) = (d_probs, d_logits, rotation_logits, None, is_label_available)
aux_logits = None
if self._use_predictor:
with tf.variable_scope('discriminator_predictor', reuse=tf.AUTO_REUSE):
aux_logits = ops.linear(x_rep, all_labels.shape[1], use_bias=True, scope='predictor_linear', use_sn=use_sn)
if self._use_soft_pred:
y_predicted = tf.nn.softmax(aux_logits)
else:
y_predicted = tf.one_hot(tf.arg_max(aux_logits, 1), aux_logits.shape[1])
all_labels = (1.0 - is_label_available) * y_predicted + is_label_available * all_labels
all_labels = tf.stop_gradient(all_labels)
logging.info('[Discriminator] %s -> aux_logits=%s, y_predicted=%s', aux_logits.shape, aux_logits.shape, y_predicted.shape)
class_embedding = self.get_class_embedding(y=all_labels, embedding_dim=x_rep.shape[-1].value, use_sn=use_sn)
d_logits += tf.reduce_sum(class_embedding * x_rep, axis=1, keepdims=True)
d_probs = tf.nn.sigmoid(d_logits)
(d_predictions, d_logits, rot_logits, aux_logits, is_label_available) = (d_probs, d_logits, rotation_logits, aux_logits, is_label_available)
expected_batch_size = 2 * bs
if self._self_supervision == 'rotation':
expected_batch_size += 2 * (NUM_ROTATIONS - 1) * num_rot_examples
if d_logits.shape[0].value != expected_batch_size:
raise ValueError('Batch size unexpected: got %r expected %r' % (d_logits.shape[0].value, expected_batch_size))
(prob_real, prob_fake) = tf.split(d_predictions, 2)
(prob_real, prob_fake) = (prob_real[:bs], prob_fake[:bs])
(logits_real, logits_fake) = tf.split(d_logits, 2)
(logits_real, logits_fake) = (logits_real[:bs], logits_fake[:bs])
(self.d_loss, _, _, self.g_loss) = loss_lib.get_losses(d_real=prob_real, d_fake=prob_fake, d_real_logits=logits_real, d_fake_logits=logits_fake)
if self._self_supervision == 'rotation':
(rot_real_logits, rot_fake_logits) = tf.split(rot_logits, 2)
rot_real_logits = rot_real_logits[-rotated_bs:]
rot_fake_logits = rot_fake_logits[-rotated_bs:]
labels_rotated = tf.constant(np.repeat(np.arange(NUM_ROTATIONS, dtype=np.int32), num_rot_examples))
rot_onehot = tf.one_hot(labels_rotated, NUM_ROTATIONS)
rot_real_logp = tf.log(tf.nn.softmax(rot_real_logits) + 1e-10)
rot_fake_logp = tf.log(tf.nn.softmax(rot_fake_logits) + 1e-10)
real_loss = -tf.reduce_mean(tf.reduce_sum(rot_onehot * rot_real_logp, 1))
fake_loss = -tf.reduce_mean(tf.reduce_sum(rot_onehot * rot_fake_logp, 1))
self.d_loss += real_loss * self._weight_rotation_loss_d
self.g_loss += fake_loss * self._weight_rotation_loss_g
rot_real_labels = tf.one_hot(tf.arg_max(rot_real_logits, 1), NUM_ROTATIONS)
rot_fake_labels = tf.one_hot(tf.arg_max(rot_fake_logits, 1), NUM_ROTATIONS)
accuracy_real = tf.metrics.accuracy(rot_onehot, rot_real_labels)
accuracy_fake = tf.metrics.accuracy(rot_onehot, rot_fake_labels)
self._tpu_summary.scalar('loss/real_loss', real_loss)
self._tpu_summary.scalar('loss/fake_loss', fake_loss)
self._tpu_summary.scalar('accuracy/real', accuracy_real)
self._tpu_summary.scalar('accuracy/fake', accuracy_fake)
if self._use_predictor:
(real_aux_logits, _) = tf.split(aux_logits, 2)
real_aux_logits = real_aux_logits[:bs]
(is_label_available, _) = tf.split(is_label_available, 2)
is_label_available = tf.squeeze(is_label_available[:bs])
class_loss_real = tf.losses.softmax_cross_entropy(real_labels, real_aux_logits, weights=is_label_available)
self.d_loss += self._weight_class_loss * class_loss_real
self._tpu_summary.scalar('loss/class_loss_real', class_loss_real)
self._tpu_summary.scalar('label_frac', tf.reduce_mean(is_label_available))
|
compare_gan
|
positive
|
def get_symbol_rfcn(self, cfg, is_train=True):
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = 2 if cfg.CLASS_AGNOSTIC else num_classes
if is_train:
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
label = mx.symbol.Variable(name='label')
bbox_target = mx.symbol.Variable(name='bbox_target')
bbox_weight = mx.symbol.Variable(name='bbox_weight')
rois = mx.symbol.Reshape(data=rois, shape=(-1, 5), name='rois_reshape')
label = mx.symbol.Reshape(data=label, shape=(-1,), name='label_reshape')
bbox_target = mx.symbol.Reshape(data=bbox_target, shape=(-1, 4 * num_reg_classes), name='bbox_target_reshape')
bbox_weight = mx.symbol.Reshape(data=bbox_weight, shape=(-1, 4 * num_reg_classes), name='bbox_weight_reshape')
else:
data = mx.sym.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
rois = mx.symbol.Reshape(data=rois, shape=(-1, 5), name='rois_reshape')
<DeepExtract>
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(*[scale2a_branch1, scale2a_branch2c], name='res2a')
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(*[res2a_relu, scale2b_branch2c], name='res2b')
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(*[res2b_relu, scale2c_branch2c], name='res2c')
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(*[scale3a_branch1, scale3a_branch2c], name='res3a')
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(*[res3a_relu, scale3b1_branch2c], name='res3b1')
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(*[res3b1_relu, scale3b2_branch2c], name='res3b2')
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu')
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(*[res3b2_relu, scale3b3_branch2c], name='res3b3')
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(*[scale4a_branch1, scale4a_branch2c], name='res4a')
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(*[res4a_relu, scale4b1_branch2c], name='res4b1')
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(*[res4b1_relu, scale4b2_branch2c], name='res4b2')
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(*[res4b2_relu, scale4b3_branch2c], name='res4b3')
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(*[res4b3_relu, scale4b4_branch2c], name='res4b4')
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(*[res4b4_relu, scale4b5_branch2c], name='res4b5')
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(*[res4b5_relu, scale4b6_branch2c], name='res4b6')
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a, act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(*[res4b6_relu, scale4b7_branch2c], name='res4b7')
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(*[res4b7_relu, scale4b8_branch2c], name='res4b8')
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(*[res4b8_relu, scale4b9_branch2c], name='res4b9')
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(*[res4b9_relu, scale4b10_branch2c], name='res4b10')
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(*[res4b10_relu, scale4b11_branch2c], name='res4b11')
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(*[res4b11_relu, scale4b12_branch2c], name='res4b12')
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(*[res4b12_relu, scale4b13_branch2c], name='res4b13')
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(*[res4b13_relu, scale4b14_branch2c], name='res4b14')
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(*[res4b14_relu, scale4b15_branch2c], name='res4b15')
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(*[res4b15_relu, scale4b16_branch2c], name='res4b16')
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(*[res4b16_relu, scale4b17_branch2c], name='res4b17')
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b, act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(*[res4b17_relu, scale4b18_branch2c], name='res4b18')
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(*[res4b18_relu, scale4b19_branch2c], name='res4b19')
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(*[res4b19_relu, scale4b20_branch2c], name='res4b20')
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(*[res4b20_relu, scale4b21_branch2c], name='res4b21')
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu')
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(*[res4b21_relu, scale4b22_branch2c], name='res4b22')
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
conv_feat = res4b22_relu
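# conv_feat: stage-4 (res4b22) output at stride 16, fed to the dilated res5 head below.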
</DeepExtract>
<DeepExtract>
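# res5 stage (res5a-res5c): dilated (rate-2) 3x3 convolutions, so the feature stride stays at 16.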
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=conv_feat, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5a_branch1 = bn5a_branch1
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=conv_feat, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), no_bias=True, cudnn_off=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5a_branch2c = bn5a_branch2c
res5a = mx.symbol.broadcast_add(*[scale5a_branch1, scale5a_branch2c], name='res5a')
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), no_bias=True, cudnn_off=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(*[res5a_relu, scale5b_branch2c], name='res5b')
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), no_bias=True, cudnn_off=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(*[res5b_relu, scale5c_branch2c], name='res5c')
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
relu1 = res5c_relu
</DeepExtract>
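# R-FCN head: a new 1x1 conv (lr_mult=3.0) reduces the backbone output to 1024 channels.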
conv_new_1 = mx.sym.Convolution(data=relu1, kernel=(1, 1), num_filter=1024, name='conv_new_1', lr_mult=3.0)
relu_new_1 = mx.sym.Activation(data=conv_new_1, act_type='relu', name='relu1')
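# Position-sensitive score maps: 7x7 spatial bins per class (rfcn_cls) and per regression target (rfcn_bbox).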
rfcn_cls = mx.sym.Convolution(data=relu_new_1, kernel=(1, 1), num_filter=7 * 7 * num_classes, name='rfcn_cls')
rfcn_bbox = mx.sym.Convolution(data=relu_new_1, kernel=(1, 1), num_filter=7 * 7 * 4 * num_reg_classes, name='rfcn_bbox')
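# PSROIPooling at 1/16 scale, then global average pooling over the 7x7 bins gives per-ROI class scores and box deltas.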
psroipooled_cls_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_cls_rois', data=rfcn_cls, rois=rois, group_size=7, pooled_size=7, output_dim=num_classes, spatial_scale=0.0625)
psroipooled_loc_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_loc_rois', data=rfcn_bbox, rois=rois, group_size=7, pooled_size=7, output_dim=8, spatial_scale=0.0625)
cls_score = mx.sym.Pooling(name='ave_cls_scors_rois', data=psroipooled_cls_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
bbox_pred = mx.sym.Pooling(name='ave_bbox_pred_rois', data=psroipooled_loc_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score, shape=(-1, num_classes))
bbox_pred = mx.sym.Reshape(name='bbox_pred_reshape', data=bbox_pred, shape=(-1, 4 * num_reg_classes))
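# Training: per-ROI softmax and smooth-L1 losses (optionally restricted to OHEM-selected ROIs); testing: raw class probabilities and box deltas.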
if is_train:
if cfg.TRAIN.ENABLE_OHEM:
(labels_ohem, bbox_weights_ohem) = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1, grad_scale=1.0)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=bbox_pred - bbox_target)
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
label = labels_ohem
else:
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid', grad_scale=1.0)
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=bbox_pred - bbox_target)
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
group = mx.sym.Group([cls_prob, bbox_loss, mx.sym.BlockGrad(label)]) if cfg.TRAIN.ENABLE_OHEM else mx.sym.Group([cls_prob, bbox_loss])
else:
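# Inference: reshape per-ROI outputs to (BATCH_IMAGES, num_rois, ...) before grouping.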
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
group = mx.sym.Group([cls_prob, bbox_pred])
self.sym = group
return group
|
def get_symbol_rfcn(self, cfg, is_train=True):
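# Build the R-FCN symbol for externally supplied ROIs (no RPN branch); the ResNet-101 backbone is written out inline below.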
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = 2 if cfg.CLASS_AGNOSTIC else num_classes
if is_train:
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
label = mx.symbol.Variable(name='label')
bbox_target = mx.symbol.Variable(name='bbox_target')
bbox_weight = mx.symbol.Variable(name='bbox_weight')
rois = mx.symbol.Reshape(data=rois, shape=(-1, 5), name='rois_reshape')
label = mx.symbol.Reshape(data=label, shape=(-1,), name='label_reshape')
bbox_target = mx.symbol.Reshape(data=bbox_target, shape=(-1, 4 * num_reg_classes), name='bbox_target_reshape')
bbox_weight = mx.symbol.Reshape(data=bbox_weight, shape=(-1, 4 * num_reg_classes), name='bbox_weight_reshape')
else:
data = mx.sym.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
rois = mx.symbol.Reshape(data=rois, shape=(-1, 5), name='rois_reshape')
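# ResNet-101 stem: 7x7 stride-2 conv followed by 3x3 stride-2 max pooling (overall stride 4).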
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(*[scale2a_branch1, scale2a_branch2c], name='res2a')
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(*[res2a_relu, scale2b_branch2c], name='res2b')
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(*[res2b_relu, scale2c_branch2c], name='res2c')
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
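# Stage 3 (res3a-res3b3): stride 2, 512 output channels per block.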
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(*[scale3a_branch1, scale3a_branch2c], name='res3a')
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(*[res3a_relu, scale3b1_branch2c], name='res3b1')
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(*[res3b1_relu, scale3b2_branch2c], name='res3b2')
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu')
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(*[res3b2_relu, scale3b3_branch2c], name='res3b3')
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
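# Stage 4 (res4a onward): stride 2, 1024 output channels per block.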
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(*[scale4a_branch1, scale4a_branch2c], name='res4a')
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(*[res4a_relu, scale4b1_branch2c], name='res4b1')
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(*[res4b1_relu, scale4b2_branch2c], name='res4b2')
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(*[res4b2_relu, scale4b3_branch2c], name='res4b3')
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(*[res4b3_relu, scale4b4_branch2c], name='res4b4')
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(*[res4b4_relu, scale4b5_branch2c], name='res4b5')
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(*[res4b5_relu, scale4b6_branch2c], name='res4b6')
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a, act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(*[res4b6_relu, scale4b7_branch2c], name='res4b7')
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(*[res4b7_relu, scale4b8_branch2c], name='res4b8')
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(*[res4b8_relu, scale4b9_branch2c], name='res4b9')
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(*[res4b9_relu, scale4b10_branch2c], name='res4b10')
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(*[res4b10_relu, scale4b11_branch2c], name='res4b11')
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(*[res4b11_relu, scale4b12_branch2c], name='res4b12')
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(*[res4b12_relu, scale4b13_branch2c], name='res4b13')
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(*[res4b13_relu, scale4b14_branch2c], name='res4b14')
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(*[res4b14_relu, scale4b15_branch2c], name='res4b15')
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(*[res4b15_relu, scale4b16_branch2c], name='res4b16')
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(*[res4b16_relu, scale4b17_branch2c], name='res4b17')
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b, act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(*[res4b17_relu, scale4b18_branch2c], name='res4b18')
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(*[res4b18_relu, scale4b19_branch2c], name='res4b19')
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(*[res4b19_relu, scale4b20_branch2c], name='res4b20')
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(*[res4b20_relu, scale4b21_branch2c], name='res4b21')
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu')
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(*[res4b21_relu, scale4b22_branch2c], name='res4b22')
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
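# res4b22_relu is the conv4 output used as the convolutional feature map for the detection head.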
conv_feat = res4b22_relu
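# conv5 (res5a-res5c) keeps the feature stride at 16 by using 3x3 convolutions with dilation (2, 2)
# and stride 1; res5a adds a 1x1 projection shortcut up to 2048 channels.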
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=conv_feat, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5a_branch1 = bn5a_branch1
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=conv_feat, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), no_bias=True, cudnn_off=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5a_branch2c = bn5a_branch2c
res5a = mx.symbol.broadcast_add(*[scale5a_branch1, scale5a_branch2c], name='res5a')
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), no_bias=True, cudnn_off=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(*[res5a_relu, scale5b_branch2c], name='res5b')
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), no_bias=True, cudnn_off=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(*[res5b_relu, scale5c_branch2c], name='res5c')
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
relu1 = res5c_relu
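# R-FCN head: reduce to 1024 channels with a 1x1 convolution (lr_mult=3.0), then predict
# position-sensitive score maps: 7*7*num_classes channels for classification and
# 7*7*4*num_reg_classes channels for bounding-box regression.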
conv_new_1 = mx.sym.Convolution(data=relu1, kernel=(1, 1), num_filter=1024, name='conv_new_1', lr_mult=3.0)
relu_new_1 = mx.sym.Activation(data=conv_new_1, act_type='relu', name='relu1')
rfcn_cls = mx.sym.Convolution(data=relu_new_1, kernel=(1, 1), num_filter=7 * 7 * num_classes, name='rfcn_cls')
rfcn_bbox = mx.sym.Convolution(data=relu_new_1, kernel=(1, 1), num_filter=7 * 7 * 4 * num_reg_classes, name='rfcn_bbox')
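# Position-sensitive RoI pooling over a 7x7 grid (spatial_scale=0.0625, i.e. feature stride 16),
# followed by global average pooling and reshaping to per-RoI class scores and box offsets.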
psroipooled_cls_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_cls_rois', data=rfcn_cls, rois=rois, group_size=7, pooled_size=7, output_dim=num_classes, spatial_scale=0.0625)
psroipooled_loc_rois = mx.contrib.sym.PSROIPooling(name='psroipooled_loc_rois', data=rfcn_bbox, rois=rois, group_size=7, pooled_size=7, output_dim=8, spatial_scale=0.0625)
cls_score = mx.sym.Pooling(name='ave_cls_scors_rois', data=psroipooled_cls_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
bbox_pred = mx.sym.Pooling(name='ave_bbox_pred_rois', data=psroipooled_loc_rois, pool_type='avg', global_pool=True, kernel=(7, 7))
cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score, shape=(-1, num_classes))
bbox_pred = mx.sym.Reshape(name='bbox_pred_reshape', data=bbox_pred, shape=(-1, 4 * num_reg_classes))
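# Training: optional online hard example mining (OHEM) keeps cfg.TRAIN.BATCH_ROIS_OHEM RoIs per image;
# classification uses softmax with ignore_label=-1 and localization uses a smooth L1 loss.
# Testing: softmax probabilities and box predictions are reshaped per image and grouped as the output.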
if is_train:
if cfg.TRAIN.ENABLE_OHEM:
(labels_ohem, bbox_weights_ohem) = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1, grad_scale=1.0)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=bbox_pred - bbox_target)
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
label = labels_ohem
else:
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid', grad_scale=1.0)
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=bbox_pred - bbox_target)
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
group = mx.sym.Group([cls_prob, bbox_loss, mx.sym.BlockGrad(label)]) if cfg.TRAIN.ENABLE_OHEM else mx.sym.Group([cls_prob, bbox_loss])
else:
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
group = mx.sym.Group([cls_prob, bbox_pred])
self.sym = group
return group
|
Deformable-ConvNets
|
positive
|