| before (string, lengths 0-955k) | after (string, lengths 0-877k) | repo (string, lengths 1-74) | type (string, 1 class: "positive") |
|---|---|---|---|
def post_processing(output, label_list, threshold):
num_classes = 1
num_grids = 7
num_anchor_boxes = 5
original_results = output.astype(np.float32)
original_results = np.reshape(original_results, (num_anchor_boxes, 5 + num_classes, num_grids, num_grids))
reordered_results = np.transpose(original_results, (2, 3, 0, 1))
reordered_results = np.reshape(reordered_results, (num_grids * num_grids, num_anchor_boxes, 5 + num_classes))
index = 0
anchor_boxes = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828]
boxes = list()
for row in range(num_grids):
for col in range(num_grids):
for anchor_box_num in range(num_anchor_boxes):
box = list()
class_list = list()
current_score_total = 0
box_x = (col + sigmoid(reordered_results[row * num_grids + col][anchor_box_num][0])) / 7.0
box_y = (row + sigmoid(reordered_results[row * num_grids + col][anchor_box_num][1])) / 7.0
box_w = np.exp(reordered_results[row * num_grids + col][anchor_box_num][2]) * anchor_boxes[2 * anchor_box_num] / 7.0
box_h = np.exp(reordered_results[row * num_grids + col][anchor_box_num][3]) * anchor_boxes[2 * anchor_box_num + 1] / 7.0
for class_enum in range(num_classes):
class_list.append(reordered_results[row * num_grids + col][anchor_box_num][5 + class_enum])
current_score_total = sum(class_list)
for current_class in range(len(class_list)):
class_list[current_class] = class_list[current_class] * 1.0 / current_score_total
<DeepExtract>
object_confidence = 1.0 / (1 + np.exp(reordered_results[row * num_grids + col][anchor_box_num][4] * -1.0))
</DeepExtract>
highest_class_score = max(class_list)
class_w_highest_score = class_list.index(max(class_list)) + 1
final_object_score = object_confidence * highest_class_score
box.append(box_x)
box.append(box_y)
box.append(box_w)
box.append(box_h)
box.append(class_w_highest_score)
box.append(object_confidence)
box.append(highest_class_score)
box.append(final_object_score)
if final_object_score > threshold:
boxes.append(box)
<DeepExtract>
sorted_boxes = sorted(boxes, key=lambda d: d[7])[::-1]
high_iou_objs = dict()
for current_object in range(len(sorted_boxes)):
if current_object in high_iou_objs:
continue
truth = sorted_boxes[current_object]
for next_object in range(current_object + 1, len(sorted_boxes)):
if next_object in high_iou_objs:
continue
box = sorted_boxes[next_object]
iou = calculate_iou(box, truth)
if iou >= IOU_THRESHOLD:
high_iou_objs[next_object] = 1
filtered_result = list()
for current_object in range(len(sorted_boxes)):
if current_object not in high_iou_objs:
filtered_result.append(sorted_boxes[current_object])
results = filtered_result
</DeepExtract>
return results
|
def post_processing(output, label_list, threshold):
num_classes = 1
num_grids = 7
num_anchor_boxes = 5
original_results = output.astype(np.float32)
original_results = np.reshape(original_results, (num_anchor_boxes, 5 + num_classes, num_grids, num_grids))
reordered_results = np.transpose(original_results, (2, 3, 0, 1))
reordered_results = np.reshape(reordered_results, (num_grids * num_grids, num_anchor_boxes, 5 + num_classes))
index = 0
anchor_boxes = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828]
boxes = list()
for row in range(num_grids):
for col in range(num_grids):
for anchor_box_num in range(num_anchor_boxes):
box = list()
class_list = list()
current_score_total = 0
box_x = (col + sigmoid(reordered_results[row * num_grids + col][anchor_box_num][0])) / 7.0
box_y = (row + sigmoid(reordered_results[row * num_grids + col][anchor_box_num][1])) / 7.0
box_w = np.exp(reordered_results[row * num_grids + col][anchor_box_num][2]) * anchor_boxes[2 * anchor_box_num] / 7.0
box_h = np.exp(reordered_results[row * num_grids + col][anchor_box_num][3]) * anchor_boxes[2 * anchor_box_num + 1] / 7.0
for class_enum in range(num_classes):
class_list.append(reordered_results[row * num_grids + col][anchor_box_num][5 + class_enum])
current_score_total = sum(class_list)
for current_class in range(len(class_list)):
class_list[current_class] = class_list[current_class] * 1.0 / current_score_total
object_confidence = 1.0 / (1 + np.exp(reordered_results[row * num_grids + col][anchor_box_num][4] * -1.0))
highest_class_score = max(class_list)
class_w_highest_score = class_list.index(max(class_list)) + 1
final_object_score = object_confidence * highest_class_score
box.append(box_x)
box.append(box_y)
box.append(box_w)
box.append(box_h)
box.append(class_w_highest_score)
box.append(object_confidence)
box.append(highest_class_score)
box.append(final_object_score)
if final_object_score > threshold:
boxes.append(box)
sorted_boxes = sorted(boxes, key=lambda d: d[7])[::-1]
high_iou_objs = dict()
for current_object in range(len(sorted_boxes)):
if current_object in high_iou_objs:
continue
truth = sorted_boxes[current_object]
for next_object in range(current_object + 1, len(sorted_boxes)):
if next_object in high_iou_objs:
continue
box = sorted_boxes[next_object]
iou = calculate_iou(box, truth)
if iou >= IOU_THRESHOLD:
high_iou_objs[next_object] = 1
filtered_result = list()
for current_object in range(len(sorted_boxes)):
if current_object not in high_iou_objs:
filtered_result.append(sorted_boxes[current_object])
results = filtered_result
return results
|
aXeleRate
|
positive
|
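The `post_processing` sample in the row above calls `sigmoid`, `calculate_iou`, and `IOU_THRESHOLD` without defining them. Below is a minimal sketch of what such helpers typically look like; the centre-format box layout and the 0.5 threshold are assumptions for illustration, not code taken from the aXeleRate repo.

```python
import numpy as np

IOU_THRESHOLD = 0.5  # assumed value; the repo may use a different threshold

def sigmoid(x):
    # Matches the inlined object_confidence expression: 1 / (1 + e^(-x))
    return 1.0 / (1.0 + np.exp(-x))

def calculate_iou(box, truth):
    # Standard intersection-over-union for two boxes stored as
    # [x_center, y_center, width, height, ...], as in the boxes list above.
    def corners(b):
        x, y, w, h = b[0], b[1], b[2], b[3]
        return x - w / 2.0, y - h / 2.0, x + w / 2.0, y + h / 2.0

    ax1, ay1, ax2, ay2 = corners(box)
    bx1, by1, bx2, by2 = corners(truth)
    inter_w = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    inter_h = max(0.0, min(ay2, by2) - max(ay1, by1))
    inter = inter_w * inter_h
    union = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter
    return inter / union if union > 0 else 0.0
```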
def _Discard(self, t):
""" Node for when return value is ignored such as in "foo(a)".
"""
<DeepExtract>
if self._do_indent:
self._write('\n' + ' ' * self._indent + text)
else:
self._write(text)
</DeepExtract>
<DeepExtract>
if isinstance(t.expr, list):
for t in t.expr:
self._dispatch(t)
return
meth = getattr(self, '_' + t.expr.__class__.__name__)
if t.expr.__class__.__name__ == 'NoneType' and (not self._do_indent):
return
meth(t.expr)
</DeepExtract>
|
def _Discard(self, t):
""" Node for when return value is ignored such as in "foo(a)".
"""
if self._do_indent:
self._write('\n' + ' ' * self._indent + text)
else:
self._write(text)
if isinstance(t.expr, list):
for t in t.expr:
self._dispatch(t)
return
meth = getattr(self, '_' + t.expr.__class__.__name__)
if t.expr.__class__.__name__ == 'NoneType' and (not self._do_indent):
return
meth(t.expr)
|
biom-format
|
positive
|
def on_LoadInferenceVxNetPressed(self):
<DeepExtract>
self.inference_ctx = TorchInferenceContext()
vconfig_path = Path(self.w_vconfig_path.text())
self.inference_ctx.build(vconfig_path)
self.json_setting.set('latest_vxnet_cfg_path', str(vconfig_path))
self.info('Build VoxelNet ckpt succeed.')
</DeepExtract>
<DeepExtract>
ckpt_path = Path(self.w_vckpt_path.text())
self.json_setting.set('latest_vxnet_ckpt_path', self.w_vckpt_path.text())
self.inference_ctx.restore(ckpt_path)
self.info('load VoxelNet ckpt succeed.')
</DeepExtract>
<DeepExtract>
t = time.time()
inputs = self.inference_ctx.get_inference_input_dict(self.kitti_info, self.points)
self.info('input preparation time:', time.time() - t)
t = time.time()
with self.inference_ctx.ctx():
det_annos = self.inference_ctx.inference(inputs)
self.info('detection time:', time.time() - t)
self.draw_detection(det_annos[0])
</DeepExtract>
|
def on_LoadInferenceVxNetPressed(self):
self.inference_ctx = TorchInferenceContext()
vconfig_path = Path(self.w_vconfig_path.text())
self.inference_ctx.build(vconfig_path)
self.json_setting.set('latest_vxnet_cfg_path', str(vconfig_path))
self.info('Build VoxelNet ckpt succeed.')
ckpt_path = Path(self.w_vckpt_path.text())
self.json_setting.set('latest_vxnet_ckpt_path', self.w_vckpt_path.text())
self.inference_ctx.restore(ckpt_path)
self.info('load VoxelNet ckpt succeed.')
t = time.time()
inputs = self.inference_ctx.get_inference_input_dict(self.kitti_info, self.points)
self.info('input preparation time:', time.time() - t)
t = time.time()
with self.inference_ctx.ctx():
det_annos = self.inference_ctx.inference(inputs)
self.info('detection time:', time.time() - t)
self.draw_detection(det_annos[0])
|
CLOCs
|
positive
|
def _create_network(incoming, reuse=None, weight_decay=1e-08):
nonlinearity = tf.nn.elu
conv_weight_init = tf.truncated_normal_initializer(stddev=0.001)
conv_bias_init = tf.zeros_initializer()
conv_regularizer = slim.l2_regularizer(weight_decay)
fc_weight_init = tf.truncated_normal_initializer(stddev=0.001)
fc_bias_init = tf.zeros_initializer()
fc_regularizer = slim.l2_regularizer(weight_decay)
def batch_norm_fn(x):
return slim.batch_norm(x, scope=tf.get_variable_scope().name + '/bn')
network = incoming
network = slim.conv2d(network, 32, [3, 3], stride=1, activation_fn=nonlinearity, padding='SAME', normalizer_fn=batch_norm_fn, scope='conv1_1', weights_initializer=conv_weight_init, biases_initializer=conv_bias_init, weights_regularizer=conv_regularizer)
network = slim.conv2d(network, 32, [3, 3], stride=1, activation_fn=nonlinearity, padding='SAME', normalizer_fn=batch_norm_fn, scope='conv1_2', weights_initializer=conv_weight_init, biases_initializer=conv_bias_init, weights_regularizer=conv_regularizer)
network = slim.max_pool2d(network, [3, 3], [2, 2], scope='pool1')
<DeepExtract>
def network_builder(x, s):
network = create_inner_block(x, s, nonlinearity, conv_weight_init, conv_bias_init, conv_regularizer, False, summarize_activations)
network = create_link(network, network_builder, 'conv2_1', nonlinearity, conv_weight_init, conv_regularizer, True, summarize_activations)
</DeepExtract>
<DeepExtract>
def network_builder(x, s):
network = create_inner_block(x, s, nonlinearity, conv_weight_init, conv_bias_init, conv_regularizer, False, summarize_activations)
network = create_link(network, network_builder, 'conv2_3', nonlinearity, conv_weight_init, conv_regularizer, is_first, summarize_activations)
</DeepExtract>
<DeepExtract>
def network_builder(x, s):
network = create_inner_block(x, s, nonlinearity, conv_weight_init, conv_bias_init, conv_regularizer, True, summarize_activations)
network = create_link(network, network_builder, 'conv3_1', nonlinearity, conv_weight_init, conv_regularizer, is_first, summarize_activations)
</DeepExtract>
<DeepExtract>
def network_builder(x, s):
network = create_inner_block(x, s, nonlinearity, conv_weight_init, conv_bias_init, conv_regularizer, False, summarize_activations)
network = create_link(network, network_builder, 'conv3_3', nonlinearity, conv_weight_init, conv_regularizer, is_first, summarize_activations)
</DeepExtract>
<DeepExtract>
def network_builder(x, s):
network = create_inner_block(x, s, nonlinearity, conv_weight_init, conv_bias_init, conv_regularizer, True, summarize_activations)
network = create_link(network, network_builder, 'conv4_1', nonlinearity, conv_weight_init, conv_regularizer, is_first, summarize_activations)
</DeepExtract>
<DeepExtract>
def network_builder(x, s):
network = create_inner_block(x, s, nonlinearity, conv_weight_init, conv_bias_init, conv_regularizer, False, summarize_activations)
network = create_link(network, network_builder, 'conv4_3', nonlinearity, conv_weight_init, conv_regularizer, is_first, summarize_activations)
</DeepExtract>
feature_dim = network.get_shape().as_list()[-1]
network = slim.flatten(network)
network = slim.dropout(network, keep_prob=0.6)
network = slim.fully_connected(network, feature_dim, activation_fn=nonlinearity, normalizer_fn=batch_norm_fn, weights_regularizer=fc_regularizer, scope='fc1', weights_initializer=fc_weight_init, biases_initializer=fc_bias_init)
features = network
features = slim.batch_norm(features, scope='ball', reuse=reuse)
feature_norm = tf.sqrt(tf.constant(1e-08, tf.float32) + tf.reduce_sum(tf.square(features), [1], keepdims=True))
features = features / feature_norm
return (features, None)
|
def _create_network(incoming, reuse=None, weight_decay=1e-08):
nonlinearity = tf.nn.elu
conv_weight_init = tf.truncated_normal_initializer(stddev=0.001)
conv_bias_init = tf.zeros_initializer()
conv_regularizer = slim.l2_regularizer(weight_decay)
fc_weight_init = tf.truncated_normal_initializer(stddev=0.001)
fc_bias_init = tf.zeros_initializer()
fc_regularizer = slim.l2_regularizer(weight_decay)
def batch_norm_fn(x):
return slim.batch_norm(x, scope=tf.get_variable_scope().name + '/bn')
network = incoming
network = slim.conv2d(network, 32, [3, 3], stride=1, activation_fn=nonlinearity, padding='SAME', normalizer_fn=batch_norm_fn, scope='conv1_1', weights_initializer=conv_weight_init, biases_initializer=conv_bias_init, weights_regularizer=conv_regularizer)
network = slim.conv2d(network, 32, [3, 3], stride=1, activation_fn=nonlinearity, padding='SAME', normalizer_fn=batch_norm_fn, scope='conv1_2', weights_initializer=conv_weight_init, biases_initializer=conv_bias_init, weights_regularizer=conv_regularizer)
network = slim.max_pool2d(network, [3, 3], [2, 2], scope='pool1')
def network_builder(x, s):
network = create_inner_block(x, s, nonlinearity, conv_weight_init, conv_bias_init, conv_regularizer, False, summarize_activations)
network = create_link(network, network_builder, 'conv2_1', nonlinearity, conv_weight_init, conv_regularizer, True, summarize_activations)
def network_builder(x, s):
network = create_inner_block(x, s, nonlinearity, conv_weight_init, conv_bias_init, conv_regularizer, False, summarize_activations)
network = create_link(network, network_builder, 'conv2_3', nonlinearity, conv_weight_init, conv_regularizer, is_first, summarize_activations)
def network_builder(x, s):
network = create_inner_block(x, s, nonlinearity, conv_weight_init, conv_bias_init, conv_regularizer, True, summarize_activations)
network = create_link(network, network_builder, 'conv3_1', nonlinearity, conv_weight_init, conv_regularizer, is_first, summarize_activations)
def network_builder(x, s):
network = create_inner_block(x, s, nonlinearity, conv_weight_init, conv_bias_init, conv_regularizer, False, summarize_activations)
network = create_link(network, network_builder, 'conv3_3', nonlinearity, conv_weight_init, conv_regularizer, is_first, summarize_activations)
def network_builder(x, s):
network = create_inner_block(x, s, nonlinearity, conv_weight_init, conv_bias_init, conv_regularizer, True, summarize_activations)
network = create_link(network, network_builder, 'conv4_1', nonlinearity, conv_weight_init, conv_regularizer, is_first, summarize_activations)
def network_builder(x, s):
network = create_inner_block(x, s, nonlinearity, conv_weight_init, conv_bias_init, conv_regularizer, False, summarize_activations)
network = create_link(network, network_builder, 'conv4_3', nonlinearity, conv_weight_init, conv_regularizer, is_first, summarize_activations)
feature_dim = network.get_shape().as_list()[-1]
network = slim.flatten(network)
network = slim.dropout(network, keep_prob=0.6)
network = slim.fully_connected(network, feature_dim, activation_fn=nonlinearity, normalizer_fn=batch_norm_fn, weights_regularizer=fc_regularizer, scope='fc1', weights_initializer=fc_weight_init, biases_initializer=fc_bias_init)
features = network
features = slim.batch_norm(features, scope='ball', reuse=reuse)
feature_norm = tf.sqrt(tf.constant(1e-08, tf.float32) + tf.reduce_sum(tf.square(features), [1], keepdims=True))
features = features / feature_norm
return (features, None)
|
Deep-SORT-YOLOv4
|
positive
|
@requires_auth
def update_user(self, name=None, email=None, blog=None, company=None, location=None, hireable=False, bio=None):
"""If authenticated as this user, update the information with
the information provided in the parameters. All parameters are
optional.
:param str name: e.g., 'John Smith', not login name
:param str email: e.g., 'john.smith@example.com'
:param str blog: e.g., 'http://www.example.com/jsmith/blog'
:param str company: company name
:param str location: where you are located
:param bool hireable: defaults to False
:param str bio: GitHub flavored markdown
:returns: bool
"""
<DeepExtract>
if login:
url = self._build_url('users', login)
else:
url = self._build_url('user')
json = self._json(self._get(url), 200)
user = User(json, self._session) if json else None
</DeepExtract>
return user.update(name, email, blog, company, location, hireable, bio)
|
@requires_auth
def update_user(self, name=None, email=None, blog=None, company=None, location=None, hireable=False, bio=None):
"""If authenticated as this user, update the information with
the information provided in the parameters. All parameters are
optional.
:param str name: e.g., 'John Smith', not login name
:param str email: e.g., 'john.smith@example.com'
:param str blog: e.g., 'http://www.example.com/jsmith/blog'
:param str company: company name
:param str location: where you are located
:param bool hireable: defaults to False
:param str bio: GitHub flavored markdown
:returns: bool
"""
if login:
url = self._build_url('users', login)
else:
url = self._build_url('user')
json = self._json(self._get(url), 200)
user = User(json, self._session) if json else None
return user.update(name, email, blog, company, location, hireable, bio)
|
aegea
|
positive
|
def real_sph_harm(L, spherical_coordinates, zero_m_only=True):
"""
Computes formula strings of the real part of the spherical harmonics up to degree L (excluded).
Variables are either spherical coordinates phi and theta (or cartesian coordinates x,y,z) on the UNIT SPHERE.
Parameters
----------
L: int
Degree up to which to calculate the spherical harmonics (degree L is excluded).
spherical_coordinates: bool
- True: Expects the input of the formula strings to be phi and theta.
- False: Expects the input of the formula strings to be x, y and z.
zero_m_only: bool
If True only calculate the harmonics where m=0.
Returns
-------
Y_lm_real: list
Computes formula strings of the real part of the spherical harmonics up
to degree L (where degree L is not excluded).
In total L^2 many sph harm exist up to degree L (excluded). However, if zero_m_only is True then
the total count is reduced to be only L many.
"""
z = sym.symbols('z')
<DeepExtract>
z = sym.symbols('z')
P_l_m = [[0] * (2 * l + 1) for l in range(L)]
P_l_m[0][0] = 1
if L > 0:
if zero_m_only:
P_l_m[1][0] = z
for l in range(2, L):
P_l_m[l][0] = sym.simplify(((2 * l - 1) * z * P_l_m[l - 1][0] - (l - 1) * P_l_m[l - 2][0]) / l)
P_l_m = P_l_m
else:
for l in range(1, L):
P_l_m[l][l] = sym.simplify((1 - 2 * l) * (1 - z ** 2) ** 0.5 * P_l_m[l - 1][l - 1])
for m in range(0, L - 1):
P_l_m[m + 1][m] = sym.simplify((2 * m + 1) * z * P_l_m[m][m])
for l in range(2, L):
for m in range(l - 1):
P_l_m[l][m] = sym.simplify(((2 * l - 1) * z * P_l_m[l - 1][m] - (l + m - 1) * P_l_m[l - 2][m]) / (l - m))
if not pos_m_only:
for l in range(1, L):
for m in range(1, l + 1):
P_l_m[l][-m] = sym.simplify((-1) ** m * np.math.factorial(l - m) / np.math.factorial(l + m) * P_l_m[l][m])
P_l_m = P_l_m
</DeepExtract>
if zero_m_only:
Y_l_m = [[0] for l in range(L)]
else:
Y_l_m = [[0] * (2 * l + 1) for l in range(L)]
if spherical_coordinates:
theta = sym.symbols('theta')
for l in range(L):
for m in range(len(P_l_m[l])):
if not isinstance(P_l_m[l][m], int):
P_l_m[l][m] = P_l_m[l][m].subs(z, sym.cos(theta))
for l in range(L):
Y_l_m[l][0] = sym.simplify(sph_harm_prefactor(l, 0) * P_l_m[l][0])
if not zero_m_only:
phi = sym.symbols('phi')
for l in range(1, L):
for m in range(1, l + 1):
Y_l_m[l][m] = sym.simplify(2 ** 0.5 * (-1) ** m * sph_harm_prefactor(l, m) * P_l_m[l][m] * sym.cos(m * phi))
for m in range(1, l + 1):
Y_l_m[l][-m] = sym.simplify(2 ** 0.5 * (-1) ** m * sph_harm_prefactor(l, -m) * P_l_m[l][m] * sym.sin(m * phi))
if not spherical_coordinates:
x = sym.symbols('x')
y = sym.symbols('y')
for l in range(L):
for m in range(len(Y_l_m[l])):
Y_l_m[l][m] = sym.simplify(Y_l_m[l][m].subs(phi, sym.atan2(y, x)))
return Y_l_m
|
def real_sph_harm(L, spherical_coordinates, zero_m_only=True):
"""
Computes formula strings of the real part of the spherical harmonics up to degree L (excluded).
Variables are either spherical coordinates phi and theta (or cartesian coordinates x,y,z) on the UNIT SPHERE.
Parameters
----------
L: int
Degree up to which to calculate the spherical harmonics (degree L is excluded).
spherical_coordinates: bool
- True: Expects the input of the formula strings to be phi and theta.
- False: Expects the input of the formula strings to be x, y and z.
zero_m_only: bool
If True only calculate the harmonics where m=0.
Returns
-------
Y_lm_real: list
Computes formula strings of the real part of the spherical harmonics up
to degree L (where degree L is not excluded).
In total L^2 many sph harm exist up to degree L (excluded). However, if zero_m_only is True then
the total count is reduced to be only L many.
"""
z = sym.symbols('z')
z = sym.symbols('z')
P_l_m = [[0] * (2 * l + 1) for l in range(L)]
P_l_m[0][0] = 1
if L > 0:
if zero_m_only:
P_l_m[1][0] = z
for l in range(2, L):
P_l_m[l][0] = sym.simplify(((2 * l - 1) * z * P_l_m[l - 1][0] - (l - 1) * P_l_m[l - 2][0]) / l)
P_l_m = P_l_m
else:
for l in range(1, L):
P_l_m[l][l] = sym.simplify((1 - 2 * l) * (1 - z ** 2) ** 0.5 * P_l_m[l - 1][l - 1])
for m in range(0, L - 1):
P_l_m[m + 1][m] = sym.simplify((2 * m + 1) * z * P_l_m[m][m])
for l in range(2, L):
for m in range(l - 1):
P_l_m[l][m] = sym.simplify(((2 * l - 1) * z * P_l_m[l - 1][m] - (l + m - 1) * P_l_m[l - 2][m]) / (l - m))
if not pos_m_only:
for l in range(1, L):
for m in range(1, l + 1):
P_l_m[l][-m] = sym.simplify((-1) ** m * np.math.factorial(l - m) / np.math.factorial(l + m) * P_l_m[l][m])
P_l_m = P_l_m
if zero_m_only:
Y_l_m = [[0] for l in range(L)]
else:
Y_l_m = [[0] * (2 * l + 1) for l in range(L)]
if spherical_coordinates:
theta = sym.symbols('theta')
for l in range(L):
for m in range(len(P_l_m[l])):
if not isinstance(P_l_m[l][m], int):
P_l_m[l][m] = P_l_m[l][m].subs(z, sym.cos(theta))
for l in range(L):
Y_l_m[l][0] = sym.simplify(sph_harm_prefactor(l, 0) * P_l_m[l][0])
if not zero_m_only:
phi = sym.symbols('phi')
for l in range(1, L):
for m in range(1, l + 1):
Y_l_m[l][m] = sym.simplify(2 ** 0.5 * (-1) ** m * sph_harm_prefactor(l, m) * P_l_m[l][m] * sym.cos(m * phi))
for m in range(1, l + 1):
Y_l_m[l][-m] = sym.simplify(2 ** 0.5 * (-1) ** m * sph_harm_prefactor(l, -m) * P_l_m[l][m] * sym.sin(m * phi))
if not spherical_coordinates:
x = sym.symbols('x')
y = sym.symbols('y')
for l in range(L):
for m in range(len(Y_l_m[l])):
Y_l_m[l][m] = sym.simplify(Y_l_m[l][m].subs(phi, sym.atan2(y, x)))
return Y_l_m
|
DIG
|
positive
|
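The `real_sph_harm` sample above calls `sph_harm_prefactor(l, m)`, which is not shown in the row. A minimal sketch under the usual real spherical-harmonic normalization constant follows; returning a plain float (rather than a sympy expression) is an assumption.

```python
import math

def sph_harm_prefactor(l, m):
    # Standard normalization constant
    #   K_l^m = sqrt((2l + 1) / (4*pi) * (l - |m|)! / (l + |m|)!)
    return math.sqrt((2 * l + 1) * math.factorial(l - abs(m)) /
                     (4 * math.pi * math.factorial(l + abs(m))))
```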
def on_button_press(self, widget, event):
<DeepExtract>
pos = Pos(event.x, event.y)
pos.snap_to_grid()
pos = pos
</DeepExtract>
pub.sendMessage('POINTER_MOVED', pos=pos.grid_cr())
if not Preferences.values['SELECTION_DRAG'] and self._selection.item in (DRAW_RECT, ARROW, ERASER, RECT, LINE, MAG_LINE, DIR_LINE):
if self._selection.state == IDLE:
<DeepExtract>
if self._selection.state == IDLE and self._selection.item in (DRAW_RECT, ARROW, RECT, ERASER, LINE, MAG_LINE, DIR_LINE):
pass
else:
return
pos = self.calc_position(event.x, event.y)
self._drag_dir = None
self._drag_startpos = pos
self._drag_currentpos = pos
self._drag_prevpos = []
self._drag_prevpos.append(pos)
self._selection.state = SELECTING
</DeepExtract>
elif self._selection.state == SELECTING:
offset = Pos(event.x, event.y) - self._drag_startpos
<DeepExtract>
if self._selection.state == SELECTING and self._selection.item in (DRAW_RECT, ARROW, RECT, ERASER, LINE, MAG_LINE, DIR_LINE):
pass
else:
return
offset = self.calc_position(offset.x, offset.y)
self._drag_endpos = self._drag_startpos + offset
startpos = self._drag_startpos.grid_cr()
endpos = self._drag_endpos.grid_cr()
self._selection.state = SELECTED
if self._selection.item == DRAW_RECT:
pub.sendMessage('PASTE_RECT', startpos=startpos, endpos=endpos)
elif self._selection.item == ARROW:
pub.sendMessage('PASTE_ARROW', startpos=startpos, endpos=endpos)
elif self._selection.item == RECT:
pub.sendMessage('SELECTION_CHANGED', selected=True)
elif self._selection.item == ERASER:
size = offset.grid_cr().xy
pub.sendMessage('ERASE', startpos=startpos, size=size)
elif self._selection.item == LINE:
if self._drag_dir == HORIZONTAL:
endpos.y = startpos.y
elif self._drag_dir == VERTICAL:
endpos.x = startpos.x
pub.sendMessage('PASTE_LINE', startpos=startpos, endpos=endpos, type=self._symbol.type)
elif self._selection.item == MAG_LINE:
pub.sendMessage('PASTE_MAG_LINE', startpos=startpos, endpos=endpos)
elif self._selection.item == DIR_LINE:
pub.sendMessage('PASTE_DIR_LINE', startpos=startpos, endpos=endpos)
</DeepExtract>
elif self._selection.state == SELECTING:
<DeepExtract>
if self._selection.item == ROW:
row = pos.grid_cr().y
pub.sendMessage('GRID_ROW', row=row, action=self._selection.action)
elif self._selection.item == COL:
col = pos.grid_cr().x
pub.sendMessage('GRID_COL', col=col, action=self._selection.action)
elif self._selection.item in (TEXT, TEXT_BLOCK):
button = event.button
if button == 1:
self._selection.state = SELECTED
self._symbol.startpos = pos.grid_cr()
pub.sendMessage('PASTE_TEXT', symbol=self._symbol)
elif button == 3:
self._symbol.rotate()
pub.sendMessage('ORIENTATION_CHANGED', ori=self._symbol.ori_as_str)
elif self._selection.item == OBJECT:
self._selection.state = SELECTED
ul = pos
br = ul + Pos(Preferences.values['GRIDSIZE_W'], Preferences.values['GRIDSIZE_H'])
self._drag_startpos = ul
self._drag_endpos = br
self._selection.startpos = ul
self._selection.endpos = br
self._selection.maxpos = self.max_pos_grid
pub.sendMessage('SELECTION_CHANGED', selected=True)
</DeepExtract>
elif self._selection.state == SELECTED:
<DeepExtract>
pos = self._hover_pos
pos = pos.grid_cr()
if self._selection.item in (CHARACTER, COMPONENT, OBJECTS):
button = event.button
if button == 1:
pub.sendMessage('PASTE_OBJECTS', pos=pos)
elif button == 3:
pub.sendMessage('ROTATE_SYMBOL')
</DeepExtract>
widget.queue_resize()
|
def on_button_press(self, widget, event):
pos = Pos(event.x, event.y)
pos.snap_to_grid()
pos = pos
pub.sendMessage('POINTER_MOVED', pos=pos.grid_cr())
if not Preferences.values['SELECTION_DRAG'] and self._selection.item in (DRAW_RECT, ARROW, ERASER, RECT, LINE, MAG_LINE, DIR_LINE):
if self._selection.state == IDLE:
if self._selection.state == IDLE and self._selection.item in (DRAW_RECT, ARROW, RECT, ERASER, LINE, MAG_LINE, DIR_LINE):
pass
else:
return
pos = self.calc_position(event.x, event.y)
self._drag_dir = None
self._drag_startpos = pos
self._drag_currentpos = pos
self._drag_prevpos = []
self._drag_prevpos.append(pos)
self._selection.state = SELECTING
elif self._selection.state == SELECTING:
offset = Pos(event.x, event.y) - self._drag_startpos
if self._selection.state == SELECTING and self._selection.item in (DRAW_RECT, ARROW, RECT, ERASER, LINE, MAG_LINE, DIR_LINE):
pass
else:
return
offset = self.calc_position(offset.x, offset.y)
self._drag_endpos = self._drag_startpos + offset
startpos = self._drag_startpos.grid_cr()
endpos = self._drag_endpos.grid_cr()
self._selection.state = SELECTED
if self._selection.item == DRAW_RECT:
pub.sendMessage('PASTE_RECT', startpos=startpos, endpos=endpos)
elif self._selection.item == ARROW:
pub.sendMessage('PASTE_ARROW', startpos=startpos, endpos=endpos)
elif self._selection.item == RECT:
pub.sendMessage('SELECTION_CHANGED', selected=True)
elif self._selection.item == ERASER:
size = offset.grid_cr().xy
pub.sendMessage('ERASE', startpos=startpos, size=size)
elif self._selection.item == LINE:
if self._drag_dir == HORIZONTAL:
endpos.y = startpos.y
elif self._drag_dir == VERTICAL:
endpos.x = startpos.x
pub.sendMessage('PASTE_LINE', startpos=startpos, endpos=endpos, type=self._symbol.type)
elif self._selection.item == MAG_LINE:
pub.sendMessage('PASTE_MAG_LINE', startpos=startpos, endpos=endpos)
elif self._selection.item == DIR_LINE:
pub.sendMessage('PASTE_DIR_LINE', startpos=startpos, endpos=endpos)
elif self._selection.state == SELECTING:
if self._selection.item == ROW:
row = pos.grid_cr().y
pub.sendMessage('GRID_ROW', row=row, action=self._selection.action)
elif self._selection.item == COL:
col = pos.grid_cr().x
pub.sendMessage('GRID_COL', col=col, action=self._selection.action)
elif self._selection.item in (TEXT, TEXT_BLOCK):
button = event.button
if button == 1:
self._selection.state = SELECTED
self._symbol.startpos = pos.grid_cr()
pub.sendMessage('PASTE_TEXT', symbol=self._symbol)
elif button == 3:
self._symbol.rotate()
pub.sendMessage('ORIENTATION_CHANGED', ori=self._symbol.ori_as_str)
elif self._selection.item == OBJECT:
self._selection.state = SELECTED
ul = pos
br = ul + Pos(Preferences.values['GRIDSIZE_W'], Preferences.values['GRIDSIZE_H'])
self._drag_startpos = ul
self._drag_endpos = br
self._selection.startpos = ul
self._selection.endpos = br
self._selection.maxpos = self.max_pos_grid
pub.sendMessage('SELECTION_CHANGED', selected=True)
elif self._selection.state == SELECTED:
pos = self._hover_pos
pos = pos.grid_cr()
if self._selection.item in (CHARACTER, COMPONENT, OBJECTS):
button = event.button
if button == 1:
pub.sendMessage('PASTE_OBJECTS', pos=pos)
elif button == 3:
pub.sendMessage('ROTATE_SYMBOL')
widget.queue_resize()
|
AACircuit
|
positive
|
def method_wrapper(method, **kwargs):
try:
<DeepExtract>
assert set(SECRETS).issubset(set(environ)), 'Required secrets are not present in environment variables. ENVIRONMENT: %s' % str(environ)
</DeepExtract>
<DeepExtract>
kwargs['table'] = boto3.resource('dynamodb', aws_access_key_id=environ['DYNAMODB_AWS_ACCESS_KEY_ID'], aws_secret_access_key=environ['DYNAMODB_AWS_SECRET_ACCESS_KEY'], region_name=environ['TABLE_ARN'].split(':')[3]).Table(environ['TABLE_NAME'])
</DeepExtract>
if 'range_id' not in kwargs.keys() and 'item_id' in kwargs.keys():
kwargs['range_id'] = kwargs['item_id']
return {'success': True, 'response': method(**kwargs)}
except Exception as e:
tb = traceback.format_exc()
return {'success': False, 'error': '%s %s\n\n%s' % (str(e.__class__), str(e), tb)}
|
def method_wrapper(method, **kwargs):
try:
assert set(SECRETS).issubset(set(environ)), 'Required secrets are not present in environment variables. ENVIRONMENT: %s' % str(environ)
kwargs['table'] = boto3.resource('dynamodb', aws_access_key_id=environ['DYNAMODB_AWS_ACCESS_KEY_ID'], aws_secret_access_key=environ['DYNAMODB_AWS_SECRET_ACCESS_KEY'], region_name=environ['TABLE_ARN'].split(':')[3]).Table(environ['TABLE_NAME'])
if 'range_id' not in kwargs.keys() and 'item_id' in kwargs.keys():
kwargs['range_id'] = kwargs['item_id']
return {'success': True, 'response': method(**kwargs)}
except Exception as e:
tb = traceback.format_exc()
return {'success': False, 'error': '%s %s\n\n%s' % (str(e.__class__), str(e), tb)}
|
aws-servicebroker
|
positive
|
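The `method_wrapper` sample above injects a DynamoDB `table` resource into the wrapped call and normalizes the result into a `{'success': ..., ...}` dict. A hypothetical usage sketch is below; `get_item`, the key schema, and the `item_id` value are made up for illustration, and the required environment variables must be set for the wrapper to run.

```python
def get_item(table, item_id, range_id):
    # `table` is supplied by method_wrapper(); the key attribute names are assumed.
    return table.get_item(Key={'id': item_id, 'range_id': range_id}).get('Item')

result = method_wrapper(get_item, item_id='abc-123')
if result['success']:
    print(result['response'])
else:
    print(result['error'])
```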
def handle(self, *args, **options):
self.cache_dir = os.path.join(tempfile.gettempdir(), 'code-review-reports', options['environment'])
os.makedirs(self.cache_dir, exist_ok=True)
tasks = self.load_local_reports() if options['offline'] else self.load_tasks(options['environment'])
for (task_id, report) in tasks:
<DeepExtract>
try:
head_repository = Repository.objects.get(url=report['revision']['repository'])
except Repository.DoesNotExist:
logger.warning(f"No repository found with URL {report['revision']['repository']}, skipping.")
(revision, diff) = (None, None)
try:
base_repository = Repository.objects.get(url=report['revision']['target_repository'])
except Repository.DoesNotExist:
logger.warning(f"No repository found with URL {report['revision']['target_repository']}, skipping.")
(revision, diff) = (None, None)
(revision, _) = head_repository.head_revisions.get_or_create(id=report['revision']['id'], defaults={'phid': report['revision']['phid'], 'title': report['revision']['title'], 'bugzilla_id': int(report['revision']['bugzilla_id']) if report['revision']['bugzilla_id'] else None, 'base_repository': base_repository})
(diff, _) = revision.diffs.get_or_create(id=report['revision']['diff_id'], defaults={'repository': head_repository, 'phid': report['revision']['diff_phid'], 'review_task_id': task_id, 'mercurial_hash': report['revision']['mercurial_revision']})
(revision, diff) = (revision, diff)
</DeepExtract>
if not revision:
continue
try:
<DeepExtract>
diff.issues.all().delete()
created_issues = Issue.objects.bulk_create((Issue(path=i['path'], line=i['line'], nb_lines=i.get('nb_lines', 1), char=i.get('char'), level=i.get('level', 'warning'), analyzer_check=i.get('kind') or i.get('check'), message=i.get('message'), analyzer=i['analyzer'], hash=i['hash'], new_for_revision=detect_new_for_revision(diff, path=i['path'], hash=i['hash'])) for i in report['issues'] if i['hash']))
IssueLink.objects.bulk_create((IssueLink(issue=i, diff=diff, revision_id=diff.revision_id) for i in created_issues))
report['issues'] = created_issues
</DeepExtract>
logger.info(f'Imported task {task_id} - {len(issues)}')
except Exception as e:
logger.error(f'Failed to save issues for {task_id}: {e}', exc_info=True)
|
def handle(self, *args, **options):
self.cache_dir = os.path.join(tempfile.gettempdir(), 'code-review-reports', options['environment'])
os.makedirs(self.cache_dir, exist_ok=True)
tasks = self.load_local_reports() if options['offline'] else self.load_tasks(options['environment'])
for (task_id, report) in tasks:
try:
head_repository = Repository.objects.get(url=report['revision']['repository'])
except Repository.DoesNotExist:
logger.warning(f"No repository found with URL {report['revision']['repository']}, skipping.")
(revision, diff) = (None, None)
try:
base_repository = Repository.objects.get(url=report['revision']['target_repository'])
except Repository.DoesNotExist:
logger.warning(f"No repository found with URL {report['revision']['target_repository']}, skipping.")
(revision, diff) = (None, None)
(revision, _) = head_repository.head_revisions.get_or_create(id=report['revision']['id'], defaults={'phid': report['revision']['phid'], 'title': report['revision']['title'], 'bugzilla_id': int(report['revision']['bugzilla_id']) if report['revision']['bugzilla_id'] else None, 'base_repository': base_repository})
(diff, _) = revision.diffs.get_or_create(id=report['revision']['diff_id'], defaults={'repository': head_repository, 'phid': report['revision']['diff_phid'], 'review_task_id': task_id, 'mercurial_hash': report['revision']['mercurial_revision']})
(revision, diff) = (revision, diff)
if not revision:
continue
try:
diff.issues.all().delete()
created_issues = Issue.objects.bulk_create((Issue(path=i['path'], line=i['line'], nb_lines=i.get('nb_lines', 1), char=i.get('char'), level=i.get('level', 'warning'), analyzer_check=i.get('kind') or i.get('check'), message=i.get('message'), analyzer=i['analyzer'], hash=i['hash'], new_for_revision=detect_new_for_revision(diff, path=i['path'], hash=i['hash'])) for i in report['issues'] if i['hash']))
IssueLink.objects.bulk_create((IssueLink(issue=i, diff=diff, revision_id=diff.revision_id) for i in created_issues))
report['issues'] = created_issues
logger.info(f'Imported task {task_id} - {len(issues)}')
except Exception as e:
logger.error(f'Failed to save issues for {task_id}: {e}', exc_info=True)
|
code-review
|
positive
|
@staticmethod
def from_json(s):
"""Deserialize a TurnMessage from a JSON string
:type s: str
:rtype: TurnMessage
"""
assert type(s) is str, 'incorrect type of arg s: should be str, is {}'.format(type(s))
result = _lib.bc_TurnMessage_from_json(_ffi.new('char[]', s.encode()))
<DeepExtract>
if _lib.bc_has_err():
_lasterror = _ffi.new('char**')
err = _lib.bc_get_last_err(_lasterror)
errtext = _ffi.string(_lasterror[0])
_lib.bc_free_string(_lasterror[0])
raise Exception(errtext)
</DeepExtract>
_result = TurnMessage.__new__(TurnMessage)
if result != _ffi.NULL:
_result._ptr = result
result = _result
return result
|
@staticmethod
def from_json(s):
"""Deserialize a TurnMessage from a JSON string
:type s: str
:rtype: TurnMessage
"""
assert type(s) is str, 'incorrect type of arg s: should be str, is {}'.format(type(s))
result = _lib.bc_TurnMessage_from_json(_ffi.new('char[]', s.encode()))
if _lib.bc_has_err():
_lasterror = _ffi.new('char**')
err = _lib.bc_get_last_err(_lasterror)
errtext = _ffi.string(_lasterror[0])
_lib.bc_free_string(_lasterror[0])
raise Exception(errtext)
_result = TurnMessage.__new__(TurnMessage)
if result != _ffi.NULL:
_result._ptr = result
result = _result
return result
|
bc18-scaffold
|
positive
|
def create_temp_archive(archive_name, source_dir, filenames=None, recursive=False, require_complete=False):
"""
Create archive file with specified list of files
:param archive_name: the name of the archive to be created
:param source_dir: the root folder containing source files
:param filenames: the list of filenames, each of which can contain wildcards and/or specify subdirectories
:param recursive: flag to include sub directories recursively
:param require_complete: flag to indicate an exception should be raised if all filenames are not included
:return: full path of the created archive
"""
def tar_filter(tarinfo):
"""Filter files from the generated archive"""
if tarinfo.type == tarfile.DIRTYPE:
if any((dir.startswith('.') for dir in tarinfo.name.split('/'))):
return None
elif not tarinfo.name:
return tarinfo
elif recursive:
return tarinfo
elif not include_all and directory_in_list(tarinfo.name, filenames_set):
return tarinfo
return None
if include_all:
return tarinfo
for filename in filenames_set:
if not filename or filename in processed_filenames:
continue
if fnmatch.fnmatch(tarinfo.name, filename):
if not has_wildcards(filename) and (not recursive):
processed_filenames.append(filename)
matched_set.add(filename)
return tarinfo
if not directory_prefixed(filename) and has_wildcards(filename):
if fnmatch.fnmatch(os.path.basename(tarinfo.name), filename):
matched_set.add(filename)
return tarinfo
return None
filenames_set = set(filenames or [])
include_all = len({WILDCARDS[0]} & filenames_set) > 0
processed_filenames = []
matched_set = set()
<DeepExtract>
temp_dir = tempfile.gettempdir()
project_temp_dir = os.path.join(temp_dir, 'elyra')
if not os.path.exists(project_temp_dir):
os.mkdir(project_temp_dir)
temp_dir = project_temp_dir
</DeepExtract>
archive = os.path.join(temp_dir, archive_name)
with tarfile.open(archive, 'w:gz', dereference=True) as tar:
tar.add(source_dir, arcname='', filter=tar_filter)
dependencies_set = set([] if not filenames else filenames[1:])
wildcard_expression_list = [f'{WILDCARDS[0]}.py', f'{WILDCARDS[0]}.r']
wildcard_expression = len(dependencies_set) == 1 and next(iter(dependencies_set)) in wildcard_expression_list
if require_complete and (not include_all):
if len(filenames_set) > len(matched_set) and (not wildcard_expression):
raise FileNotFoundError(filenames_set - matched_set)
return archive
|
def create_temp_archive(archive_name, source_dir, filenames=None, recursive=False, require_complete=False):
"""
Create archive file with specified list of files
:param archive_name: the name of the archive to be created
:param source_dir: the root folder containing source files
:param filenames: the list of filenames, each of which can contain wildcards and/or specify subdirectories
:param recursive: flag to include sub directories recursively
:param require_complete: flag to indicate an exception should be raised if all filenames are not included
:return: full path of the created archive
"""
def tar_filter(tarinfo):
"""Filter files from the generated archive"""
if tarinfo.type == tarfile.DIRTYPE:
if any((dir.startswith('.') for dir in tarinfo.name.split('/'))):
return None
elif not tarinfo.name:
return tarinfo
elif recursive:
return tarinfo
elif not include_all and directory_in_list(tarinfo.name, filenames_set):
return tarinfo
return None
if include_all:
return tarinfo
for filename in filenames_set:
if not filename or filename in processed_filenames:
continue
if fnmatch.fnmatch(tarinfo.name, filename):
if not has_wildcards(filename) and (not recursive):
processed_filenames.append(filename)
matched_set.add(filename)
return tarinfo
if not directory_prefixed(filename) and has_wildcards(filename):
if fnmatch.fnmatch(os.path.basename(tarinfo.name), filename):
matched_set.add(filename)
return tarinfo
return None
filenames_set = set(filenames or [])
include_all = len({WILDCARDS[0]} & filenames_set) > 0
processed_filenames = []
matched_set = set()
temp_dir = tempfile.gettempdir()
project_temp_dir = os.path.join(temp_dir, 'elyra')
if not os.path.exists(project_temp_dir):
os.mkdir(project_temp_dir)
temp_dir = project_temp_dir
archive = os.path.join(temp_dir, archive_name)
with tarfile.open(archive, 'w:gz', dereference=True) as tar:
tar.add(source_dir, arcname='', filter=tar_filter)
dependencies_set = set([] if not filenames else filenames[1:])
wildcard_expression_list = [f'{WILDCARDS[0]}.py', f'{WILDCARDS[0]}.r']
wildcard_expression = len(dependencies_set) == 1 and next(iter(dependencies_set)) in wildcard_expression_list
if require_complete and (not include_all):
if len(filenames_set) > len(matched_set) and (not wildcard_expression):
raise FileNotFoundError(filenames_set - matched_set)
return archive
|
elyra
|
positive
|
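The `create_temp_archive` sample above references several module-level helpers that are not included in the row: `WILDCARDS`, `has_wildcards`, `directory_prefixed`, and `directory_in_list`. Hypothetical stand-ins are sketched below; only `WILDCARDS[0] == '*'` is directly implied by the sample (via `f'{WILDCARDS[0]}.py'`), the rest is a guess at plausible behavior rather than elyra's actual code.

```python
import os

WILDCARDS = ['*', '?', '[', ']']  # only the leading '*' is implied by the sample

def has_wildcards(filename):
    # True if the pattern contains any glob wildcard character
    return any(c in filename for c in WILDCARDS)

def directory_prefixed(filename):
    # True if the pattern names a path inside a subdirectory, e.g. 'data/train.csv'
    return '/' in filename or os.sep in filename

def directory_in_list(dirname, filenames):
    # True if any pattern in the set refers to something under the given directory
    return any(f.startswith(dirname + '/') for f in filenames if f)
```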
def __get_item_rough_length(item, parent='root'):
"""
Get the rough length of an item.
It is used as a part of calculating the rough distance between objects.
**parameters**
item: The item to calculate the rough length for
parent: It is only used for DeepHash reporting purposes. Not really useful here.
"""
if not hasattr(self, 'hashes'):
raise RuntimeError(DISTANCE_CALCS_NEEDS_CACHE)
length = DeepHash.get_key(self.hashes, key=item, default=None, extract_index=1)
if length is None:
<DeepExtract>
DeepHash(item, hashes=self.hashes, parent='root', apply_hash=True, **self.deephash_parameters)
</DeepExtract>
length = DeepHash.get_key(self.hashes, key=item, default=None, extract_index=1)
return length
|
def __get_item_rough_length(item, parent='root'):
"""
Get the rough length of an item.
It is used as a part of calculating the rough distance between objects.
**parameters**
item: The item to calculate the rough length for
parent: It is only used for DeepHash reporting purposes. Not really useful here.
"""
if not hasattr(self, 'hashes'):
raise RuntimeError(DISTANCE_CALCS_NEEDS_CACHE)
length = DeepHash.get_key(self.hashes, key=item, default=None, extract_index=1)
if length is None:
DeepHash(item, hashes=self.hashes, parent='root', apply_hash=True, **self.deephash_parameters)
length = DeepHash.get_key(self.hashes, key=item, default=None, extract_index=1)
return length
|
deepdiff
|
positive
|
def send_frame(self, opcode: Opcode, payload: Optional[Union[str, bytes, bytearray, memoryview]]=None, *, fin: bool=True, on_complete: Optional[Callable[[OnSendFrameCompleteData], None]]=None):
"""Send a WebSocket frame asynchronously.
See `RFC 6455 section 5 - Data Framing <https://www.rfc-editor.org/rfc/rfc6455#section-5>`_
for details on all frame types.
This is a low-level API, which requires you to send the appropriate payload for each type of opcode.
If you are not an expert, stick to sending :attr:`Opcode.TEXT` or :attr:`Opcode.BINARY` frames,
and don't touch the FIN bit.
See :ref:`flow-control-writing` to learn about limiting the amount of
unsent data buffered in memory.
Args:
opcode: :class:`Opcode` for this frame.
payload: Any `bytes-like object <https://docs.python.org/3/glossary.html#term-bytes-like-object>`_.
`str` will always be encoded as UTF-8. It is fine to pass a `str` for a BINARY frame.
None will result in an empty payload, the same as passing empty `bytes()`
fin: The FIN bit indicates that this is the final fragment in a message.
Do not set this False unless you understand
`WebSocket fragmentation <https://www.rfc-editor.org/rfc/rfc6455#section-5.4>`_
on_complete: Optional callback, invoked when the frame has finished sending.
Takes a single :class:`OnSendFrameCompleteData` argument.
If :attr:`OnSendFrameCompleteData.exception` is set, the connection
was lost before this frame could be completely sent.
But if `exception` is None, the frame was successfully written to the OS socket.
(This doesn't mean the other endpoint has received the data yet,
or even guarantee that the data has left the machine yet,
but it's on track to get there).
Be sure to read about :ref:`authoring-callbacks`.
"""
def _on_complete(error_code):
cbdata = OnSendFrameCompleteData()
if error_code:
cbdata.exception = awscrt.exceptions.from_code(error_code)
try:
if on_complete is not None:
on_complete(cbdata)
except BaseException:
print('Exception in WebSocket.send_frame on_complete callback', file=sys.stderr)
sys.excepthook(*sys.exc_info())
<DeepExtract>
_awscrt.websocket_close(self._binding)
</DeepExtract>
_awscrt.websocket_send_frame(self._binding, Opcode(opcode), payload, fin, _on_complete)
|
def send_frame(self, opcode: Opcode, payload: Optional[Union[str, bytes, bytearray, memoryview]]=None, *, fin: bool=True, on_complete: Optional[Callable[[OnSendFrameCompleteData], None]]=None):
"""Send a WebSocket frame asynchronously.
See `RFC 6455 section 5 - Data Framing <https://www.rfc-editor.org/rfc/rfc6455#section-5>`_
for details on all frame types.
This is a low-level API, which requires you to send the appropriate payload for each type of opcode.
If you are not an expert, stick to sending :attr:`Opcode.TEXT` or :attr:`Opcode.BINARY` frames,
and don't touch the FIN bit.
See :ref:`flow-control-writing` to learn about limiting the amount of
unsent data buffered in memory.
Args:
opcode: :class:`Opcode` for this frame.
payload: Any `bytes-like object <https://docs.python.org/3/glossary.html#term-bytes-like-object>`_.
`str` will always be encoded as UTF-8. It is fine to pass a `str` for a BINARY frame.
None will result in an empty payload, the same as passing empty `bytes()`
fin: The FIN bit indicates that this is the final fragment in a message.
Do not set this False unless you understand
`WebSocket fragmentation <https://www.rfc-editor.org/rfc/rfc6455#section-5.4>`_
on_complete: Optional callback, invoked when the frame has finished sending.
Takes a single :class:`OnSendFrameCompleteData` argument.
If :attr:`OnSendFrameCompleteData.exception` is set, the connection
was lost before this frame could be completely sent.
But if `exception` is None, the frame was successfully written to the OS socket.
(This doesn't mean the other endpoint has received the data yet,
or even guarantee that the data has left the machine yet,
but it's on track to get there).
Be sure to read about :ref:`authoring-callbacks`.
"""
def _on_complete(error_code):
cbdata = OnSendFrameCompleteData()
if error_code:
cbdata.exception = awscrt.exceptions.from_code(error_code)
try:
if on_complete is not None:
on_complete(cbdata)
except BaseException:
print('Exception in WebSocket.send_frame on_complete callback', file=sys.stderr)
sys.excepthook(*sys.exc_info())
_awscrt.websocket_close(self._binding)
_awscrt.websocket_send_frame(self._binding, Opcode(opcode), payload, fin, _on_complete)
|
aws-crt-python
|
positive
|
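The docstring above spells out the `send_frame` contract: pass an `Opcode`, an optional payload, and an optional `on_complete` callback that receives `OnSendFrameCompleteData`. A small usage sketch based only on that docstring follows; the helper name and the assumption that `websocket` is an already-connected `awscrt` WebSocket are mine.

```python
from awscrt.websocket import Opcode

def send_hello(websocket):
    # `websocket` is assumed to be an already-connected WebSocket instance.
    def on_sent(cbdata):
        if cbdata.exception is not None:
            print('frame was not completely sent:', cbdata.exception)
        else:
            print('frame written to the OS socket')

    websocket.send_frame(Opcode.TEXT, 'hello', on_complete=on_sent)
```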
@register_model
def efficientnet_b0(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
""" EfficientNet-B0 """
default_cfg = default_cfgs['efficientnet_b0']
<DeepExtract>
arch_def = [['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25']]
num_features = _round_channels(1280, 1.0, 8, None)
model = GenEfficientNet(_decode_arch_def(arch_def, 1.0), num_classes=num_classes, stem_size=32, channel_multiplier=1.0, num_features=num_features, bn_args=_resolve_bn_args(kwargs), act_fn=swish, **kwargs)
model = model
</DeepExtract>
model.default_cfg = default_cfg
if pretrained:
load_pretrained(model, default_cfg, num_classes, in_chans)
return model
|
@register_model
def efficientnet_b0(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
""" EfficientNet-B0 """
default_cfg = default_cfgs['efficientnet_b0']
arch_def = [['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25']]
num_features = _round_channels(1280, 1.0, 8, None)
model = GenEfficientNet(_decode_arch_def(arch_def, 1.0), num_classes=num_classes, stem_size=32, channel_multiplier=1.0, num_features=num_features, bn_args=_resolve_bn_args(kwargs), act_fn=swish, **kwargs)
model = model
model.default_cfg = default_cfg
if pretrained:
load_pretrained(model, default_cfg, num_classes, in_chans)
return model
|
DNA
|
positive
|
def select(self, label_index, unlabel_index, model=None, batch_size=1, n_jobs=None):
"""Select indexes from the unlabel_index for querying.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
model: object, optional (default=None)
Current classification model, should have the 'predict_proba' method for probabilistic output.
If not provided, LogisticRegression with default parameters implemented by sklearn will be used.
batch_size: int, optional (default=1)
Selection batch size.
n_jobs: int, optional (default=None)
How many threads will be used in training bagging.
Returns
-------
selected_idx: list
The selected indexes which is a subset of unlabel_index.
"""
assert batch_size > 0
assert isinstance(unlabel_index, collections.Iterable)
assert isinstance(label_index, collections.Iterable)
unlabel_index = np.asarray(unlabel_index)
label_index = np.asarray(label_index)
if len(unlabel_index) <= batch_size:
return unlabel_index
if self.X is None or self.y is None:
raise Exception('Data matrix is not provided, use select_by_prediction_mat() instead.')
if model is None:
model = LogisticRegression(solver='liblinear')
model.fit(self.X[label_index], self.y[label_index])
unlabel_x = self.X[unlabel_index]
label_x = self.X[label_index]
label_y = self.y[label_index]
if n_jobs is None:
bagging = BaggingClassifier(model)
else:
bagging = BaggingClassifier(model, n_jobs=n_jobs)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
bagging.fit(label_x, label_y)
est_arr = bagging.estimators_
if self._disagreement == 'vote_entropy':
<DeepExtract>
score = []
(input_shape, committee_size) = [estimator.predict(unlabel_x) for estimator in est_arr]()._check_committee_results(predict_matrices)
if len(input_shape) == 2:
ele_uni = np.unique(predict_matrices)
if not (len(ele_uni) == 2 and 0 in ele_uni and (1 in ele_uni)):
raise ValueError('The predicted label matrix must only contain 0 and 1')
for i in range(input_shape[0]):
instance_mat = np.array([X[i, :] for X in predict_matrices if X is not None])
voting = np.sum(instance_mat, axis=0)
tmp = 0
for vote in voting:
if vote != 0:
tmp += vote / len(predict_matrices) * np.log((vote + 1e-09) / len(predict_matrices))
score.append(-tmp)
else:
input_mat = np.array([X for X in predict_matrices if X is not None])
for i in range(input_shape[0]):
count_dict = collections.Counter(input_mat[:, i])
tmp = 0
for key in count_dict:
tmp += count_dict[key] / committee_size * np.log((count_dict[key] + 1e-09) / committee_size)
score.append(-tmp)
score = score
</DeepExtract>
else:
<DeepExtract>
score = []
(input_shape, committee_size) = [estimator.predict_proba(unlabel_x) for estimator in est_arr]()._check_committee_results(predict_matrices)
if len(input_shape) == 2:
label_num = input_shape[1]
for i in range(input_shape[0]):
instance_mat = np.array([X[i, :] for X in predict_matrices if X is not None])
tmp = 0
for lab in range(label_num):
committee_consensus = np.sum(instance_mat[:, lab]) / committee_size + 1e-09
for committee in range(committee_size):
tmp += instance_mat[committee, lab] * np.log((instance_mat[committee, lab] + 1e-09) / committee_consensus)
score.append(tmp)
else:
raise Exception('A 2D probabilistic prediction matrix must be provided, with the shape like [n_samples, n_class]')
score = score
</DeepExtract>
return unlabel_index[nlargestarg(score, batch_size)]
|
def select(self, label_index, unlabel_index, model=None, batch_size=1, n_jobs=None):
"""Select indexes from the unlabel_index for querying.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
model: object, optional (default=None)
Current classification model, should have the 'predict_proba' method for probabilistic output.
If not provided, LogisticRegression with default parameters implemented by sklearn will be used.
batch_size: int, optional (default=1)
Selection batch size.
n_jobs: int, optional (default=None)
How many threads will be used in training bagging.
Returns
-------
selected_idx: list
The selected indexes which is a subset of unlabel_index.
"""
assert batch_size > 0
assert isinstance(unlabel_index, collections.Iterable)
assert isinstance(label_index, collections.Iterable)
unlabel_index = np.asarray(unlabel_index)
label_index = np.asarray(label_index)
if len(unlabel_index) <= batch_size:
return unlabel_index
if self.X is None or self.y is None:
raise Exception('Data matrix is not provided, use select_by_prediction_mat() instead.')
if model is None:
model = LogisticRegression(solver='liblinear')
model.fit(self.X[label_index], self.y[label_index])
unlabel_x = self.X[unlabel_index]
label_x = self.X[label_index]
label_y = self.y[label_index]
if n_jobs is None:
bagging = BaggingClassifier(model)
else:
bagging = BaggingClassifier(model, n_jobs=n_jobs)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
bagging.fit(label_x, label_y)
est_arr = bagging.estimators_
if self._disagreement == 'vote_entropy':
score = []
(input_shape, committee_size) = [estimator.predict(unlabel_x) for estimator in est_arr]()._check_committee_results(predict_matrices)
if len(input_shape) == 2:
ele_uni = np.unique(predict_matrices)
if not (len(ele_uni) == 2 and 0 in ele_uni and (1 in ele_uni)):
raise ValueError('The predicted label matrix must only contain 0 and 1')
for i in range(input_shape[0]):
instance_mat = np.array([X[i, :] for X in predict_matrices if X is not None])
voting = np.sum(instance_mat, axis=0)
tmp = 0
for vote in voting:
if vote != 0:
tmp += vote / len(predict_matrices) * np.log((vote + 1e-09) / len(predict_matrices))
score.append(-tmp)
else:
input_mat = np.array([X for X in predict_matrices if X is not None])
for i in range(input_shape[0]):
count_dict = collections.Counter(input_mat[:, i])
tmp = 0
for key in count_dict:
tmp += count_dict[key] / committee_size * np.log((count_dict[key] + 1e-09) / committee_size)
score.append(-tmp)
score = score
else:
score = []
(input_shape, committee_size) = [estimator.predict_proba(unlabel_x) for estimator in est_arr]()._check_committee_results(predict_matrices)
if len(input_shape) == 2:
label_num = input_shape[1]
for i in range(input_shape[0]):
instance_mat = np.array([X[i, :] for X in predict_matrices if X is not None])
tmp = 0
for lab in range(label_num):
committee_consensus = np.sum(instance_mat[:, lab]) / committee_size + 1e-09
for committee in range(committee_size):
tmp += instance_mat[committee, lab] * np.log((instance_mat[committee, lab] + 1e-09) / committee_consensus)
score.append(tmp)
else:
raise Exception('A 2D probabilistic prediction matrix must be provided, with the shape like [n_samples, n_class]')
score = score
return unlabel_index[nlargestarg(score, batch_size)]
|
ALiPy
|
positive
|
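The query-by-committee `select` sample above scores unlabeled samples either by vote entropy or by average KL divergence over the bagged committee. The toy snippet below illustrates the vote-entropy part on a made-up prediction matrix (the committee labels are invented; the real code also applies 1e-9 smoothing).

```python
import numpy as np

committee_preds = np.array([
    [0, 1, 1],   # estimator 1's predicted labels for 3 unlabeled samples
    [0, 1, 0],   # estimator 2
    [0, 0, 1],   # estimator 3
])
committee_size = committee_preds.shape[0]

scores = []
for i in range(committee_preds.shape[1]):
    counts = np.bincount(committee_preds[:, i])
    probs = counts[counts > 0] / committee_size
    scores.append(float(-np.sum(probs * np.log(probs))))

# Samples with the most committee disagreement score highest and get queried first.
print(scores)  # -> [0.0, 0.636..., 0.636...]
```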
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8('Dialog'))
Dialog.resize(452, 139)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
Dialog.setSizePolicy(sizePolicy)
self.gridLayout = QtGui.QGridLayout(Dialog)
self.gridLayout.setObjectName(_fromUtf8('gridLayout'))
self.label = QtGui.QLabel(Dialog)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setWordWrap(True)
self.label.setObjectName(_fromUtf8('label'))
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.progressBar = QtGui.QProgressBar(Dialog)
self.progressBar.setMinimum(0)
self.progressBar.setMaximum(0)
self.progressBar.setProperty('value', -1)
self.progressBar.setObjectName(_fromUtf8('progressBar'))
self.gridLayout.addWidget(self.progressBar, 1, 0, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Help)
self.buttonBox.setObjectName(_fromUtf8('buttonBox'))
self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 1)
<DeepExtract>
Dialog.setWindowTitle(_translate('Dialog', 'Dialog', None))
self.label.setText(_translate('Dialog', 'Unable to register with master node [http://localhost:11311]: master may not be running yet. Will keep trying.', None))
</DeepExtract>
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8('accepted()')), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8('rejected()')), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
|
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8('Dialog'))
Dialog.resize(452, 139)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
Dialog.setSizePolicy(sizePolicy)
self.gridLayout = QtGui.QGridLayout(Dialog)
self.gridLayout.setObjectName(_fromUtf8('gridLayout'))
self.label = QtGui.QLabel(Dialog)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setWordWrap(True)
self.label.setObjectName(_fromUtf8('label'))
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.progressBar = QtGui.QProgressBar(Dialog)
self.progressBar.setMinimum(0)
self.progressBar.setMaximum(0)
self.progressBar.setProperty('value', -1)
self.progressBar.setObjectName(_fromUtf8('progressBar'))
self.gridLayout.addWidget(self.progressBar, 1, 0, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Help)
self.buttonBox.setObjectName(_fromUtf8('buttonBox'))
self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 1)
Dialog.setWindowTitle(_translate('Dialog', 'Dialog', None))
self.label.setText(_translate('Dialog', 'Unable to register with master node [http://localhost:11311]: master may not be running yet. Will keep trying.', None))
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8('accepted()')), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8('rejected()')), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
|
crazyflieROS
|
positive
|
def subscribe(self, topics: Set[str], listener=None):
""" Subscribe to a list (or tuple) of topics
Caller: Consumer.
Affects: SubscriptionState.subscription
"""
assert isinstance(topics, set)
assert listener is None or isinstance(listener, ConsumerRebalanceListener)
<DeepExtract>
if self._subscription_type == SubscriptionType.NONE or self._subscription_type == SubscriptionType.AUTO_TOPICS:
self._subscription_type = SubscriptionType.AUTO_TOPICS
else:
raise IllegalStateError('Subscription to topics, partitions and pattern are mutually exclusive')
</DeepExtract>
<DeepExtract>
log.info('Updating subscribed topics to: %s', Subscription(topics, loop=self._loop).topics)
if self._subscription is not None:
self._subscription._unsubscribe()
self._subscription = Subscription(topics, loop=self._loop)
self._notify_subscription_waiters()
</DeepExtract>
self._listener = listener
<DeepExtract>
for waiter in self._subscription_waiters:
if not waiter.done():
waiter.set_result(None)
self._subscription_waiters.clear()
</DeepExtract>
|
def subscribe(self, topics: Set[str], listener=None):
""" Subscribe to a list (or tuple) of topics
Caller: Consumer.
Affects: SubscriptionState.subscription
"""
assert isinstance(topics, set)
assert listener is None or isinstance(listener, ConsumerRebalanceListener)
if self._subscription_type == SubscriptionType.NONE or self._subscription_type == SubscriptionType.AUTO_TOPICS:
self._subscription_type = SubscriptionType.AUTO_TOPICS
else:
raise IllegalStateError('Subscription to topics, partitions and pattern are mutually exclusive')
log.info('Updating subscribed topics to: %s', Subscription(topics, loop=self._loop).topics)
if self._subscription is not None:
self._subscription._unsubscribe()
self._subscription = Subscription(topics, loop=self._loop)
self._notify_subscription_waiters()
self._listener = listener
for waiter in self._subscription_waiters:
if not waiter.done():
waiter.set_result(None)
self._subscription_waiters.clear()
|
aiokafka
|
positive
|
def idempotence_check(self, checkout_dir):
"""Verify that calling checkout_externals and
checkout_externals --status does not cause errors"""
cwd = os.getcwd()
os.chdir(checkout_dir)
<DeepExtract>
cwd = os.getcwd()
checkout_path = os.path.abspath('{0}/../../checkout_externals')
os.chdir(self._my_test_dir)
cmdline = ['--externals', CFG_NAME]
cmdline += self.checkout_args
repo_root = 'MANIC_TEST_BARE_REPO_ROOT={root}'.format(root=os.environ[MANIC_TEST_BARE_REPO_ROOT])
manual_cmd = 'Test cmd:\npushd {cwd}; {env} {checkout} {args}'.format(cwd=self._my_test_dir, env=repo_root, checkout=checkout_path, args=' '.join(cmdline))
printlog(manual_cmd)
options = checkout.commandline_arguments(cmdline)
(overall_status, tree_status) = checkout.main(options)
os.chdir(cwd)
(overall, _) = (overall_status, tree_status)
</DeepExtract>
self.assertTrue(overall == 0)
<DeepExtract>
cwd = os.getcwd()
checkout_path = os.path.abspath('{0}/../../checkout_externals')
os.chdir(self._my_test_dir)
cmdline = ['--externals', CFG_NAME]
cmdline += self.status_args
repo_root = 'MANIC_TEST_BARE_REPO_ROOT={root}'.format(root=os.environ[MANIC_TEST_BARE_REPO_ROOT])
manual_cmd = 'Test cmd:\npushd {cwd}; {env} {checkout} {args}'.format(cwd=self._my_test_dir, env=repo_root, checkout=checkout_path, args=' '.join(cmdline))
printlog(manual_cmd)
options = checkout.commandline_arguments(cmdline)
(overall_status, tree_status) = checkout.main(options)
os.chdir(cwd)
(overall, _) = (overall_status, tree_status)
</DeepExtract>
self.assertTrue(overall == 0)
os.chdir(cwd)
|
def idempotence_check(self, checkout_dir):
"""Verify that calling checkout_externals and
checkout_externals --status does not cause errors"""
cwd = os.getcwd()
os.chdir(checkout_dir)
cwd = os.getcwd()
checkout_path = os.path.abspath('{0}/../../checkout_externals')
os.chdir(self._my_test_dir)
cmdline = ['--externals', CFG_NAME]
cmdline += self.checkout_args
repo_root = 'MANIC_TEST_BARE_REPO_ROOT={root}'.format(root=os.environ[MANIC_TEST_BARE_REPO_ROOT])
manual_cmd = 'Test cmd:\npushd {cwd}; {env} {checkout} {args}'.format(cwd=self._my_test_dir, env=repo_root, checkout=checkout_path, args=' '.join(cmdline))
printlog(manual_cmd)
options = checkout.commandline_arguments(cmdline)
(overall_status, tree_status) = checkout.main(options)
os.chdir(cwd)
(overall, _) = (overall_status, tree_status)
self.assertTrue(overall == 0)
cwd = os.getcwd()
checkout_path = os.path.abspath('{0}/../../checkout_externals')
os.chdir(self._my_test_dir)
cmdline = ['--externals', CFG_NAME]
cmdline += self.status_args
repo_root = 'MANIC_TEST_BARE_REPO_ROOT={root}'.format(root=os.environ[MANIC_TEST_BARE_REPO_ROOT])
manual_cmd = 'Test cmd:\npushd {cwd}; {env} {checkout} {args}'.format(cwd=self._my_test_dir, env=repo_root, checkout=checkout_path, args=' '.join(cmdline))
printlog(manual_cmd)
options = checkout.commandline_arguments(cmdline)
(overall_status, tree_status) = checkout.main(options)
os.chdir(cwd)
(overall, _) = (overall_status, tree_status)
self.assertTrue(overall == 0)
os.chdir(cwd)
|
CESM
|
positive
|
def train(self, ob_no, ac_na, re_n, next_ob_no, terminal_n):
"""
1. Fit density model
if params["density_model"] == 'ex2':
the call to exploration.fit_density_model should return ll, kl, elbo
else:
the call to exploration.fit_density_model should return nothing
2. Modify the re_n with the reward bonus by calling exploration.modify_reward
"""
ex2_vars = None
old_re_n = re_n
if self.dm_type == 'none':
pass
else:
if self.dm_type == 'ex2':
raise NotImplementedError
elif self.dm_type == 'hist' or self.dm_type == 'rbf':
raise NotImplementedError
else:
assert False
raise NotImplementedError
print('average state', np.mean(ob_no, axis=0))
print('average action', np.mean(ac_na, axis=0))
loss = OrderedDict()
for critic_update in range(self.num_critic_updates_per_agent_update):
loss['Critic_Loss'] = self.critic.update(ob_no, next_ob_no, re_n, terminal_n)
<DeepExtract>
(ob, next_ob, rew, done) = map(lambda x: torch.from_numpy(x).to(self.device), [ob_no, next_ob_no, re_n, terminal_n])
value = self.critic.value_func(ob).squeeze()
next_value = self.critic.value_func(next_ob).squeeze() * (1 - done)
adv_n = rew + self.gamma * next_value - value
adv_n = adv_n.cpu().detach().numpy()
if self.standardize_advantages:
adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-08)
adv_n = adv_n
</DeepExtract>
for actor_update in range(self.num_actor_updates_per_agent_update):
loss['Actor_Loss'] = self.actor.update(ob_no, ac_na, adv_n)
return (loss, ex2_vars)
|
def train(self, ob_no, ac_na, re_n, next_ob_no, terminal_n):
"""
1. Fit density model
if params["density_model"] == 'ex2':
the call to exploration.fit_density_model should return ll, kl, elbo
else:
the call to exploration.fit_density_model should return nothing
2. Modify the re_n with the reward bonus by calling exploration.modify_reward
"""
ex2_vars = None
old_re_n = re_n
if self.dm_type == 'none':
pass
else:
if self.dm_type == 'ex2':
raise NotImplementedError
elif self.dm_type == 'hist' or self.dm_type == 'rbf':
raise NotImplementedError
else:
assert False
raise NotImplementedError
print('average state', np.mean(ob_no, axis=0))
print('average action', np.mean(ac_na, axis=0))
loss = OrderedDict()
for critic_update in range(self.num_critic_updates_per_agent_update):
loss['Critic_Loss'] = self.critic.update(ob_no, next_ob_no, re_n, terminal_n)
(ob, next_ob, rew, done) = map(lambda x: torch.from_numpy(x).to(self.device), [ob_no, next_ob_no, re_n, terminal_n])
value = self.critic.value_func(ob).squeeze()
next_value = self.critic.value_func(next_ob).squeeze() * (1 - done)
adv_n = rew + self.gamma * next_value - value
adv_n = adv_n.cpu().detach().numpy()
if self.standardize_advantages:
adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-08)
adv_n = adv_n
for actor_update in range(self.num_actor_updates_per_agent_update):
loss['Actor_Loss'] = self.actor.update(ob_no, ac_na, adv_n)
return (loss, ex2_vars)
|
berkeley-deep-RL-pytorch-starter
|
positive
|
def extract_from_file(thefile, name):
"""import a file and then extract a lookup set"""
<DeepExtract>
if thefile not in self._filecache.keys():
drop_gz = thefile.replace('.gz', '')
file_dots = os.path.basename(drop_gz).split('.')
theformat = file_dots[-1].strip()
thetype = 'default'
if len(file_dots) > 2:
thetype = file_dots[-2]
if 'pileup' in thefile and 'pileup' in file_converters[theformat]:
thetype = 'pileup'
if '_SF_' in thefile and 'jersf' in file_converters[theformat]:
thetype = 'jersf'
if '_L5Flavor_' in thefile and 'l5flavor' in file_converters[theformat]:
thetype = 'l5flavor'
self._filecache[thefile] = file_converters[theformat][thetype](thefile)
</DeepExtract>
weights = self._filecache[thefile]
names = {key[0]: key[1] for key in weights.keys()}
if name not in names.keys():
raise Exception(f'Weights named "{name}" not in {thefile}!')
return (weights[name, names[name]], names[name])
|
def extract_from_file(thefile, name):
"""import a file and then extract a lookup set"""
if thefile not in self._filecache.keys():
drop_gz = thefile.replace('.gz', '')
file_dots = os.path.basename(drop_gz).split('.')
theformat = file_dots[-1].strip()
thetype = 'default'
if len(file_dots) > 2:
thetype = file_dots[-2]
if 'pileup' in thefile and 'pileup' in file_converters[theformat]:
thetype = 'pileup'
if '_SF_' in thefile and 'jersf' in file_converters[theformat]:
thetype = 'jersf'
if '_L5Flavor_' in thefile and 'l5flavor' in file_converters[theformat]:
thetype = 'l5flavor'
self._filecache[thefile] = file_converters[theformat][thetype](thefile)
weights = self._filecache[thefile]
names = {key[0]: key[1] for key in weights.keys()}
if name not in names.keys():
raise Exception(f'Weights named "{name}" not in {thefile}!')
return (weights[name, names[name]], names[name])
|
coffea
|
positive
|
def metalearn(weights, opt, meta_steps):
"""Runs a MAML learning loop."""
ds = sin_dataset().repeat().batch(META_BATCH_SIZE).take(meta_steps)
for (inputs, outputs, _, _) in ds:
train_inputs = inputs[:, TRAIN_SAMPES_SLICE, :]
valid_inputs = inputs[:, VALID_SAMPES_SLICE, :]
train_outputs = outputs[:, TRAIN_SAMPES_SLICE, :]
valid_outputs = outputs[:, VALID_SAMPES_SLICE, :]
with tf.GradientTape() as tape:
tf.nest.map_structure(tape.watch, weights)
task_losses = tf.TensorArray(tf.float32, size=META_BATCH_SIZE)
for i in tf.range(META_BATCH_SIZE):
<DeepExtract>
learned_weights = tf.nest.map_structure(lambda w: w.read_value(), weights)
for _ in tf.range(LOCAL_LEARNING_STEPS):
with tf.GradientTape() as tape:
tf.nest.map_structure(tape.watch, learned_weights)
y_pred = model(train_inputs[i], learned_weights)
step_loss = mse(y_pred, train_outputs[i])
grads = tape.gradient(step_loss, learned_weights)
learned_weights = tf.nest.map_structure(lambda w, g: w - LOCAL_LEARNING_RATE * g, learned_weights, grads)
learned_weights = learned_weights
</DeepExtract>
<DeepExtract>
x = valid_inputs[i]
for i in range(1, len(HIDDEN_SIZES) + 1):
x = tf.nn.relu(tf.matmul(x, learned_weights[w(i)]) + learned_weights[b(i)])
i = len(HIDDEN_SIZES) + 1
learned_valid_outputs = tf.matmul(x, learned_weights[w(i)]) + learned_weights[b(i)]
</DeepExtract>
<DeepExtract>
task_loss = tf.reduce_mean(tf.square(learned_valid_outputs - valid_outputs[i]))
</DeepExtract>
task_losses = task_losses.write(i, task_loss)
meta_loss = tf.reduce_mean(task_losses.stack())
meta_grads = tape.gradient(meta_loss, weights)
grads_and_vars = zip(tf.nest.flatten(meta_grads), tf.nest.flatten(weights))
opt.apply_gradients(grads_and_vars)
|
def metalearn(weights, opt, meta_steps):
"""Runs a MAML learning loop."""
ds = sin_dataset().repeat().batch(META_BATCH_SIZE).take(meta_steps)
for (inputs, outputs, _, _) in ds:
train_inputs = inputs[:, TRAIN_SAMPES_SLICE, :]
valid_inputs = inputs[:, VALID_SAMPES_SLICE, :]
train_outputs = outputs[:, TRAIN_SAMPES_SLICE, :]
valid_outputs = outputs[:, VALID_SAMPES_SLICE, :]
with tf.GradientTape() as tape:
tf.nest.map_structure(tape.watch, weights)
task_losses = tf.TensorArray(tf.float32, size=META_BATCH_SIZE)
for i in tf.range(META_BATCH_SIZE):
learned_weights = tf.nest.map_structure(lambda w: w.read_value(), weights)
for _ in tf.range(LOCAL_LEARNING_STEPS):
with tf.GradientTape() as tape:
tf.nest.map_structure(tape.watch, learned_weights)
y_pred = model(train_inputs[i], learned_weights)
step_loss = mse(y_pred, train_outputs[i])
grads = tape.gradient(step_loss, learned_weights)
learned_weights = tf.nest.map_structure(lambda w, g: w - LOCAL_LEARNING_RATE * g, learned_weights, grads)
learned_weights = learned_weights
                x = valid_inputs[i]
                # use a separate index for the layer loop so it does not clobber the task index i
                for layer in range(1, len(HIDDEN_SIZES) + 1):
                    x = tf.nn.relu(tf.matmul(x, learned_weights[w(layer)]) + learned_weights[b(layer)])
                layer = len(HIDDEN_SIZES) + 1
                learned_valid_outputs = tf.matmul(x, learned_weights[w(layer)]) + learned_weights[b(layer)]
                task_loss = tf.reduce_mean(tf.square(learned_valid_outputs - valid_outputs[i]))
task_losses = task_losses.write(i, task_loss)
meta_loss = tf.reduce_mean(task_losses.stack())
meta_grads = tape.gradient(meta_loss, weights)
grads_and_vars = zip(tf.nest.flatten(meta_grads), tf.nest.flatten(weights))
opt.apply_gradients(grads_and_vars)
|
autograph
|
positive
|
def send_message(self, req, resp):
SendMessageValidator.validate(req)
req_inter = RequestInternal(req.method, uri='/%s/%s/%s' % (URISEC_QUEUE, req.queue_name, URISEC_MESSAGE))
req_inter.data = MessageEncoder.encode(req)
<DeepExtract>
if req.request_id is not None:
req_inter.header['x-mns-user-request-id'] = req.request_id
if self.http.is_keep_alive():
req_inter.header['Connection'] = 'Keep-Alive'
if req_inter.data != '':
req_inter.header['content-md5'] = base64.b64encode(hashlib.md5(req_inter.data).hexdigest().encode('utf-8')).decode('utf-8')
req_inter.header['content-type'] = 'text/xml;charset=UTF-8'
req_inter.header['x-mns-version'] = self.version
req_inter.header['host'] = self.host
req_inter.header['date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())
req_inter.header['user-agent'] = 'aliyun-sdk-python/%s(%s/%s;%s)' % (pkg_info.version, platform.system(), platform.release(), platform.python_version())
req_inter.header['Authorization'] = self.get_signature(req_inter.method, req_inter.header, req_inter.uri)
if self.security_token != '':
req_inter.header['security-token'] = self.security_token
</DeepExtract>
resp_inter = self.http.send_request(req_inter)
resp.status = resp_inter.status
resp.header = resp_inter.header
<DeepExtract>
if resp_inter.status >= 200 and resp_inter.status < 400:
resp.error_data = ''
else:
resp.error_data = resp_inter.data
if resp_inter.status >= 400 and resp_inter.status <= 600:
(excType, excMessage, reqId, hostId, subErr) = decoder.decodeError(resp.error_data, req_inter.get_req_id())
if reqId is None:
reqId = resp.header['x-mns-request-id']
raise MNSServerException(excType, excMessage, reqId, hostId, subErr)
else:
raise MNSClientNetworkException('UnkownError', resp_inter.data, req_inter.get_req_id())
</DeepExtract>
if resp.error_data == '':
(resp.message_id, resp.message_body_md5, resp.receipt_handle) = SendMessageDecoder.decode(resp_inter.data, req_inter.get_req_id())
if self.logger:
self.logger.info('SendMessage RequestId:%s QueueName:%s Priority:%s DelaySeconds:%s MessageId:%s MessageBodyMD5:%s' % (resp.get_requestid(), req.queue_name, req.priority, req.delay_seconds, resp.message_id, resp.message_body_md5))
|
def send_message(self, req, resp):
SendMessageValidator.validate(req)
req_inter = RequestInternal(req.method, uri='/%s/%s/%s' % (URISEC_QUEUE, req.queue_name, URISEC_MESSAGE))
req_inter.data = MessageEncoder.encode(req)
if req.request_id is not None:
req_inter.header['x-mns-user-request-id'] = req.request_id
if self.http.is_keep_alive():
req_inter.header['Connection'] = 'Keep-Alive'
if req_inter.data != '':
req_inter.header['content-md5'] = base64.b64encode(hashlib.md5(req_inter.data).hexdigest().encode('utf-8')).decode('utf-8')
req_inter.header['content-type'] = 'text/xml;charset=UTF-8'
req_inter.header['x-mns-version'] = self.version
req_inter.header['host'] = self.host
req_inter.header['date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())
req_inter.header['user-agent'] = 'aliyun-sdk-python/%s(%s/%s;%s)' % (pkg_info.version, platform.system(), platform.release(), platform.python_version())
req_inter.header['Authorization'] = self.get_signature(req_inter.method, req_inter.header, req_inter.uri)
if self.security_token != '':
req_inter.header['security-token'] = self.security_token
resp_inter = self.http.send_request(req_inter)
resp.status = resp_inter.status
resp.header = resp_inter.header
if resp_inter.status >= 200 and resp_inter.status < 400:
resp.error_data = ''
else:
resp.error_data = resp_inter.data
if resp_inter.status >= 400 and resp_inter.status <= 600:
(excType, excMessage, reqId, hostId, subErr) = decoder.decodeError(resp.error_data, req_inter.get_req_id())
if reqId is None:
reqId = resp.header['x-mns-request-id']
raise MNSServerException(excType, excMessage, reqId, hostId, subErr)
else:
raise MNSClientNetworkException('UnkownError', resp_inter.data, req_inter.get_req_id())
if resp.error_data == '':
(resp.message_id, resp.message_body_md5, resp.receipt_handle) = SendMessageDecoder.decode(resp_inter.data, req_inter.get_req_id())
if self.logger:
self.logger.info('SendMessage RequestId:%s QueueName:%s Priority:%s DelaySeconds:%s MessageId:%s MessageBodyMD5:%s' % (resp.get_requestid(), req.queue_name, req.priority, req.delay_seconds, resp.message_id, resp.message_body_md5))
|
AutomationTest
|
positive
|
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
self.mode = 'video'
(ret_val, img0) = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf:
raise StopIteration
else:
path = self.files[self.count]
<DeepExtract>
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
</DeepExtract>
(ret_val, img0) = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
self.count += 1
img0 = cv2.imread(path)
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
img = letterbox(img0, new_shape=self.img_size)[0]
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img)
return (path, img, img0, self.cap)
|
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
self.mode = 'video'
(ret_val, img0) = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf:
raise StopIteration
else:
path = self.files[self.count]
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
(ret_val, img0) = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
self.count += 1
img0 = cv2.imread(path)
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
img = letterbox(img0, new_shape=self.img_size)[0]
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img)
return (path, img, img0, self.cap)
|
Auto_maker
|
positive
|
def removeAttr(self, attr, itemList=None, *args):
""" Delete the given attribute and reload the removeAttrUI.
"""
<DeepExtract>
if not itemList:
itemList = cmds.ls(selection=True, type='transform')
itemList = itemList
</DeepExtract>
if itemList:
for item in itemList:
if cmds.objExists(item + '.' + attr):
cmds.setAttr(item + '.' + attr, edit=True, lock=False)
cmds.deleteAttr(item + '.' + attr)
if self.ui:
if cmds.button('removeButton' + attr, query=True, exists=True):
cmds.deleteUI('removeButton' + attr)
|
def removeAttr(self, attr, itemList=None, *args):
""" Delete the given attribute and reload the removeAttrUI.
"""
if not itemList:
itemList = cmds.ls(selection=True, type='transform')
itemList = itemList
if itemList:
for item in itemList:
if cmds.objExists(item + '.' + attr):
cmds.setAttr(item + '.' + attr, edit=True, lock=False)
cmds.deleteAttr(item + '.' + attr)
if self.ui:
if cmds.button('removeButton' + attr, query=True, exists=True):
cmds.deleteUI('removeButton' + attr)
|
dpAutoRigSystem
|
positive
|
def do_kill_unit_from(conf):
started = time.time()
doSendSIGKILL = conf.getbool('Service', 'SendSIGKILL', 'yes')
doSendSIGHUP = conf.getbool('Service', 'SendSIGHUP', 'no')
useKillMode = conf.get('Service', 'KillMode', 'control-group')
useKillSignal = conf.get('Service', 'KillSignal', 'SIGTERM')
kill_signal = getattr(signal, useKillSignal)
<DeepExtract>
timeout = conf.get('Service', 'TimeoutSec', DefaultTimeoutStartSec)
timeout = conf.get('Service', 'TimeoutStopSec', timeout)
timeout = time_to_seconds(timeout, DefaultMaximumTimeout)
</DeepExtract>
<DeepExtract>
if default is None:
default = self.default_status_file(conf)
if conf is None:
status_file = default
status_file = conf.get('Service', 'StatusFile', default)
status_file = self.expand_special(status_file, conf)
</DeepExtract>
size = os.path.exists(status_file) and os.path.getsize(status_file)
logg.info('STATUS %s %s', status_file, size)
<DeepExtract>
try:
mainpid = int(self.read_mainpid_from(conf, ''))
except:
mainpid = default
</DeepExtract>
<DeepExtract>
status_file = self.status_file_from(conf)
if os.path.exists(status_file):
os.remove(status_file)
conf.status = {}
</DeepExtract>
if not mainpid:
if useKillMode in ['control-group']:
logg.warning('no main PID [%s]', conf.filename())
logg.warning('and there is no control-group here')
else:
logg.info('no main PID [%s]', conf.filename())
return False
if not pid_exists(mainpid) or pid_zombie(mainpid):
logg.debug('ignoring children when mainpid is already dead')
return True
<DeepExtract>
try:
mainpid = int(mainpid)
except:
pidlist = []
pidlist = [mainpid]
pids = [mainpid]
for depth in xrange(ProcMaxDepth):
for mainpid in os.listdir('/proc'):
try:
mainpid = int(mainpid)
except:
continue
proc_status = '/proc/%s/status' % mainpid
if os.path.isfile(proc_status):
try:
for line in open(proc_status):
if line.startswith('PPid:'):
ppid = line[len('PPid:'):].strip()
try:
ppid = int(ppid)
except:
continue
if ppid in pidlist and mainpid not in pids:
pids += [mainpid]
except IOError as e:
logg.warning('%s : %s', proc_status, e)
continue
if len(pids) != len(pidlist):
pidlist = pids[:]
continue
pidlist = pids
</DeepExtract>
if pid_exists(mainpid):
logg.info('stop kill PID %s', mainpid)
<DeepExtract>
try:
sig = kill_signal or signal.SIGTERM
os.kill(mainpid, sig)
except OSError as e:
if e.errno == errno.ESRCH or e.errno == errno.ENOENT:
logg.debug('kill PID %s => No such process', mainpid)
return True
else:
logg.error('kill PID %s => %s', mainpid, str(e))
return False
return not pid_exists(mainpid) or pid_zombie(mainpid)
</DeepExtract>
if useKillMode in ['control-group']:
if len(pidlist) > 1:
logg.info('stop control-group PIDs %s', pidlist)
for pid in pidlist:
if pid != mainpid:
<DeepExtract>
try:
sig = kill_signal or signal.SIGTERM
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH or e.errno == errno.ENOENT:
logg.debug('kill PID %s => No such process', pid)
return True
else:
logg.error('kill PID %s => %s', pid, str(e))
return False
return not pid_exists(pid) or pid_zombie(pid)
</DeepExtract>
if doSendSIGHUP:
logg.info('stop SendSIGHUP to PIDs %s', pidlist)
for pid in pidlist:
<DeepExtract>
try:
sig = signal.SIGHUP or signal.SIGTERM
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH or e.errno == errno.ENOENT:
logg.debug('kill PID %s => No such process', pid)
return True
else:
logg.error('kill PID %s => %s', pid, str(e))
return False
return not pid_exists(pid) or pid_zombie(pid)
</DeepExtract>
while True:
dead = True
for pid in pidlist:
if pid_exists(pid) and (not pid_zombie(pid)):
dead = False
break
if dead:
break
if time.time() > started + timeout:
logg.info('service PIDs not stopped after %s', timeout)
break
time.sleep(1)
if dead or not doSendSIGKILL:
logg.info('done kill PID %s %s', mainpid, dead and 'OK')
return dead
if useKillMode in ['control-group', 'mixed']:
logg.info('hard kill PIDs %s', pidlist)
for pid in pidlist:
if pid != mainpid:
<DeepExtract>
try:
sig = signal.SIGKILL or signal.SIGTERM
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH or e.errno == errno.ENOENT:
logg.debug('kill PID %s => No such process', pid)
return True
else:
logg.error('kill PID %s => %s', pid, str(e))
return False
return not pid_exists(pid) or pid_zombie(pid)
</DeepExtract>
time.sleep(MinimumYield)
if pid_exists(mainpid):
logg.info('hard kill PID %s', mainpid)
<DeepExtract>
try:
sig = signal.SIGKILL or signal.SIGTERM
os.kill(mainpid, sig)
except OSError as e:
if e.errno == errno.ESRCH or e.errno == errno.ENOENT:
logg.debug('kill PID %s => No such process', mainpid)
return True
else:
logg.error('kill PID %s => %s', mainpid, str(e))
return False
return not pid_exists(mainpid) or pid_zombie(mainpid)
</DeepExtract>
time.sleep(MinimumYield)
dead = not pid_exists(mainpid) or pid_zombie(mainpid)
logg.info('done hard kill PID %s %s', mainpid, dead and 'OK')
return dead
|
def do_kill_unit_from(conf):
started = time.time()
doSendSIGKILL = conf.getbool('Service', 'SendSIGKILL', 'yes')
doSendSIGHUP = conf.getbool('Service', 'SendSIGHUP', 'no')
useKillMode = conf.get('Service', 'KillMode', 'control-group')
useKillSignal = conf.get('Service', 'KillSignal', 'SIGTERM')
kill_signal = getattr(signal, useKillSignal)
timeout = conf.get('Service', 'TimeoutSec', DefaultTimeoutStartSec)
timeout = conf.get('Service', 'TimeoutStopSec', timeout)
timeout = time_to_seconds(timeout, DefaultMaximumTimeout)
if default is None:
default = self.default_status_file(conf)
if conf is None:
status_file = default
status_file = conf.get('Service', 'StatusFile', default)
status_file = self.expand_special(status_file, conf)
size = os.path.exists(status_file) and os.path.getsize(status_file)
logg.info('STATUS %s %s', status_file, size)
try:
mainpid = int(self.read_mainpid_from(conf, ''))
except:
mainpid = default
status_file = self.status_file_from(conf)
if os.path.exists(status_file):
os.remove(status_file)
conf.status = {}
if not mainpid:
if useKillMode in ['control-group']:
logg.warning('no main PID [%s]', conf.filename())
logg.warning('and there is no control-group here')
else:
logg.info('no main PID [%s]', conf.filename())
return False
if not pid_exists(mainpid) or pid_zombie(mainpid):
logg.debug('ignoring children when mainpid is already dead')
return True
try:
mainpid = int(mainpid)
except:
pidlist = []
pidlist = [mainpid]
pids = [mainpid]
for depth in xrange(ProcMaxDepth):
for mainpid in os.listdir('/proc'):
try:
mainpid = int(mainpid)
except:
continue
proc_status = '/proc/%s/status' % mainpid
if os.path.isfile(proc_status):
try:
for line in open(proc_status):
if line.startswith('PPid:'):
ppid = line[len('PPid:'):].strip()
try:
ppid = int(ppid)
except:
continue
if ppid in pidlist and mainpid not in pids:
pids += [mainpid]
except IOError as e:
logg.warning('%s : %s', proc_status, e)
continue
if len(pids) != len(pidlist):
pidlist = pids[:]
continue
pidlist = pids
if pid_exists(mainpid):
logg.info('stop kill PID %s', mainpid)
try:
sig = kill_signal or signal.SIGTERM
os.kill(mainpid, sig)
except OSError as e:
if e.errno == errno.ESRCH or e.errno == errno.ENOENT:
logg.debug('kill PID %s => No such process', mainpid)
return True
else:
logg.error('kill PID %s => %s', mainpid, str(e))
return False
return not pid_exists(mainpid) or pid_zombie(mainpid)
if useKillMode in ['control-group']:
if len(pidlist) > 1:
logg.info('stop control-group PIDs %s', pidlist)
for pid in pidlist:
if pid != mainpid:
try:
sig = kill_signal or signal.SIGTERM
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH or e.errno == errno.ENOENT:
logg.debug('kill PID %s => No such process', pid)
return True
else:
logg.error('kill PID %s => %s', pid, str(e))
return False
return not pid_exists(pid) or pid_zombie(pid)
if doSendSIGHUP:
logg.info('stop SendSIGHUP to PIDs %s', pidlist)
for pid in pidlist:
try:
sig = signal.SIGHUP or signal.SIGTERM
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH or e.errno == errno.ENOENT:
logg.debug('kill PID %s => No such process', pid)
return True
else:
logg.error('kill PID %s => %s', pid, str(e))
return False
return not pid_exists(pid) or pid_zombie(pid)
while True:
dead = True
for pid in pidlist:
if pid_exists(pid) and (not pid_zombie(pid)):
dead = False
break
if dead:
break
if time.time() > started + timeout:
logg.info('service PIDs not stopped after %s', timeout)
break
time.sleep(1)
if dead or not doSendSIGKILL:
logg.info('done kill PID %s %s', mainpid, dead and 'OK')
return dead
if useKillMode in ['control-group', 'mixed']:
logg.info('hard kill PIDs %s', pidlist)
for pid in pidlist:
if pid != mainpid:
try:
sig = signal.SIGKILL or signal.SIGTERM
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH or e.errno == errno.ENOENT:
logg.debug('kill PID %s => No such process', pid)
return True
else:
logg.error('kill PID %s => %s', pid, str(e))
return False
return not pid_exists(pid) or pid_zombie(pid)
time.sleep(MinimumYield)
if pid_exists(mainpid):
logg.info('hard kill PID %s', mainpid)
try:
sig = signal.SIGKILL or signal.SIGTERM
os.kill(mainpid, sig)
except OSError as e:
if e.errno == errno.ESRCH or e.errno == errno.ENOENT:
logg.debug('kill PID %s => No such process', mainpid)
return True
else:
logg.error('kill PID %s => %s', mainpid, str(e))
return False
return not pid_exists(mainpid) or pid_zombie(mainpid)
time.sleep(MinimumYield)
dead = not pid_exists(mainpid) or pid_zombie(mainpid)
logg.info('done hard kill PID %s %s', mainpid, dead and 'OK')
return dead
|
deployment
|
positive
|
def objfunc(params, force_posterior=False):
global globdict
<DeepExtract>
tparams = transform_params(params[:globdict['n_params']], globdict['param_map'], globdict['minimizer_func'], globdict['model_prior'])
for j in range(len(params[:globdict['n_params']])):
if globdict['param_map'][j][1] in globdict['trial_model'].params[globdict['param_map'][j][0]].keys():
globdict['trial_model'].params[globdict['param_map'][j][0]][globdict['param_map'][j][1]] = tparams[j] * globdict['param_map'][j][2]
else:
if globdict['param_map'][j][1].find('cpol') != -1:
param_type = 'beta_list_cpol'
idx = int(globdict['param_map'][j][1].split('_')[0][8:])
elif globdict['param_map'][j][1].find('pol') != -1:
param_type = 'beta_list_pol'
idx = int(globdict['param_map'][j][1].split('_')[0][7:]) + (len(globdict['trial_model'].params[globdict['param_map'][j][0]][param_type]) - 1) // 2
elif globdict['param_map'][j][1].find('beta') != -1:
param_type = 'beta_list'
idx = int(globdict['param_map'][j][1].split('_')[0][4:]) - 1
else:
raise Exception('Unsure how to interpret ' + globdict['param_map'][j][1])
curval = globdict['trial_model'].params[globdict['param_map'][j][0]][param_type][idx]
if '_' not in globdict['param_map'][j][1]:
globdict['trial_model'].params[globdict['param_map'][j][0]][param_type][idx] = tparams[j] * globdict['param_map'][j][2]
elif globdict['param_map'][j][1][-2:] == 're':
globdict['trial_model'].params[globdict['param_map'][j][0]][param_type][idx] = tparams[j] * globdict['param_map'][j][2] + np.imag(curval) * 1j
elif globdict['param_map'][j][1][-2:] == 'im':
globdict['trial_model'].params[globdict['param_map'][j][0]][param_type][idx] = tparams[j] * globdict['param_map'][j][2] * 1j + np.real(curval)
elif globdict['param_map'][j][1][-3:] == 'abs':
globdict['trial_model'].params[globdict['param_map'][j][0]][param_type][idx] = tparams[j] * globdict['param_map'][j][2] * np.exp(1j * np.angle(curval))
elif globdict['param_map'][j][1][-3:] == 'arg':
globdict['trial_model'].params[globdict['param_map'][j][0]][param_type][idx] = np.abs(curval) * np.exp(1j * tparams[j] * globdict['param_map'][j][2])
else:
print('Parameter ' + globdict['param_map'][j][1] + ' not understood!')
</DeepExtract>
gains = params[globdict['n_params']:globdict['n_params'] + globdict['n_gains']]
leakage = params[globdict['n_params'] + globdict['n_gains']:]
<DeepExtract>
global globdict
if len(leakage) == 0:
return
station_leakages = globdict['station_leakages']
leakage_fit = globdict['leakage_fit']
for j in range(len(leakage) // 2):
station_leakages[leakage_fit[j][0]][leakage_fit[j][1]] = leakage[2 * j] + 1j * leakage[2 * j + 1]
for j in range(1, 4):
jonesdict = globdict['jonesdict' + str(j)]
if jonesdict is not None:
if type(jonesdict) is dict:
jonesdict['DR1'] = np.array([station_leakages[jonesdict['t1'][_]]['R'] for _ in range(len(jonesdict['t1']))])
jonesdict['DR2'] = np.array([station_leakages[jonesdict['t2'][_]]['R'] for _ in range(len(jonesdict['t1']))])
jonesdict['DL1'] = np.array([station_leakages[jonesdict['t1'][_]]['L'] for _ in range(len(jonesdict['t1']))])
jonesdict['DL2'] = np.array([station_leakages[jonesdict['t2'][_]]['L'] for _ in range(len(jonesdict['t1']))])
jonesdict['leakage_fit'] = globdict['leakage_fit']
else:
for jonesdict2 in jonesdict:
jonesdict2['DR1'] = np.array([station_leakages[jonesdict2['t1'][_]]['R'] for _ in range(len(jonesdict2['t1']))])
jonesdict2['DR2'] = np.array([station_leakages[jonesdict2['t2'][_]]['R'] for _ in range(len(jonesdict2['t1']))])
jonesdict2['DL1'] = np.array([station_leakages[jonesdict2['t1'][_]]['L'] for _ in range(len(jonesdict2['t1']))])
jonesdict2['DL2'] = np.array([station_leakages[jonesdict2['t2'][_]]['L'] for _ in range(len(jonesdict2['t1']))])
jonesdict2['leakage_fit'] = globdict['leakage_fit']
</DeepExtract>
if globdict['marginalize_gains']:
_globdict = globdict
<DeepExtract>
gains = [np.abs(selfcal(globdict['Obsdata'], globdict['trial_model'], gain_init=None, gain_prior=globdict['gain_prior'], msgtype='none').data[site]['rscale'][selfcal(globdict['Obsdata'], globdict['trial_model'], gain_init=None, gain_prior=globdict['gain_prior'], msgtype='none').data[site]['time'] == time][0]) - 1.0 for (time, site) in globdict['gain_list']]
_globdict['gain_init'] = gains
</DeepExtract>
globdict = _globdict
<DeepExtract>
global globdict
chi2_1 = chisq_wgain(globdict['trial_model'], globdict['d1'], globdict['data1'], globdict['uv1'], globdict['sigma1'], globdict['pol1'], globdict['jonesdict1'], gains, globdict['gains_t1'], globdict['gains_t2'], globdict['fit_gains'] + globdict['marginalize_gains'])
chi2_2 = chisq_wgain(globdict['trial_model'], globdict['d2'], globdict['data2'], globdict['uv2'], globdict['sigma2'], globdict['pol2'], globdict['jonesdict2'], gains, globdict['gains_t1'], globdict['gains_t2'], globdict['fit_gains'] + globdict['marginalize_gains'])
chi2_3 = chisq_wgain(globdict['trial_model'], globdict['d3'], globdict['data3'], globdict['uv3'], globdict['sigma3'], globdict['pol3'], globdict['jonesdict3'], gains, globdict['gains_t1'], globdict['gains_t2'], globdict['fit_gains'] + globdict['marginalize_gains'])
chi2_4 = chisq_wgain(globdict['trial_model'], globdict['d4'], globdict['data4'], globdict['uv4'], globdict['sigma4'], globdict['pol4'], globdict['jonesdict4'], gains, globdict['gains_t1'], globdict['gains_t2'], globdict['fit_gains'] + globdict['marginalize_gains'])
(chi2_1, chi2_2, chi2_3, chi2_4) = (chi2_1, chi2_2, chi2_3, chi2_4)
</DeepExtract>
datterm = globdict['alpha_d1'] * chi2_1 + globdict['alpha_d2'] * chi2_2 + globdict['alpha_d3'] * chi2_3 + globdict['alpha_d4'] * chi2_4
if globdict['marginalize_gains']:
<DeepExtract>
global globdict
l1 = laplace_approximation(globdict['trial_model'], globdict['d1'], globdict['data1'], globdict['uv1'], globdict['sigma1'], globdict['gains_t1'], globdict['gains_t2'])
l2 = laplace_approximation(globdict['trial_model'], globdict['d2'], globdict['data2'], globdict['uv2'], globdict['sigma2'], globdict['gains_t1'], globdict['gains_t2'])
l3 = laplace_approximation(globdict['trial_model'], globdict['d3'], globdict['data3'], globdict['uv3'], globdict['sigma3'], globdict['gains_t1'], globdict['gains_t2'])
l4 = laplace_approximation(globdict['trial_model'], globdict['d4'], globdict['data4'], globdict['uv4'], globdict['sigma4'], globdict['gains_t1'], globdict['gains_t2'])
(l1, l2, l3, l4) = (l1, l2, l3, l4)
</DeepExtract>
datterm += l1 + l2 + l3 + l4
if globdict['minimizer_func'] not in ['dynesty_static', 'dynesty_dynamic', 'pymc3'] or force_posterior:
<DeepExtract>
tparams = transform_params(params[:globdict['n_params']], globdict['param_map'], globdict['minimizer_func'], globdict['model_prior'])
priterm = np.sum([np.log(prior_func(tparams[j] * globdict['param_map'][j][2], globdict['model_prior'][globdict['param_map'][j][0]][globdict['param_map'][j][1]])) for j in range(len(params[:globdict['n_params']]))])
</DeepExtract>
priterm += prior_gain(params[globdict['n_params']:globdict['n_params'] + globdict['n_gains']], globdict['gain_list'], globdict['gain_prior'], globdict['fit_gains'])
priterm += prior_leakage(params[globdict['n_params'] + globdict['n_gains']:], globdict['leakage_fit'], globdict['leakage_prior'], globdict['fit_leakage'])
else:
priterm = 0.0
fluxterm = globdict['alpha_flux'] * flux_constraint(globdict['trial_model'], globdict['alpha_flux'], globdict['flux'])
return datterm - priterm + fluxterm - globdict['ln_norm']
|
def objfunc(params, force_posterior=False):
global globdict
tparams = transform_params(params[:globdict['n_params']], globdict['param_map'], globdict['minimizer_func'], globdict['model_prior'])
for j in range(len(params[:globdict['n_params']])):
if globdict['param_map'][j][1] in globdict['trial_model'].params[globdict['param_map'][j][0]].keys():
globdict['trial_model'].params[globdict['param_map'][j][0]][globdict['param_map'][j][1]] = tparams[j] * globdict['param_map'][j][2]
else:
if globdict['param_map'][j][1].find('cpol') != -1:
param_type = 'beta_list_cpol'
idx = int(globdict['param_map'][j][1].split('_')[0][8:])
elif globdict['param_map'][j][1].find('pol') != -1:
param_type = 'beta_list_pol'
idx = int(globdict['param_map'][j][1].split('_')[0][7:]) + (len(globdict['trial_model'].params[globdict['param_map'][j][0]][param_type]) - 1) // 2
elif globdict['param_map'][j][1].find('beta') != -1:
param_type = 'beta_list'
idx = int(globdict['param_map'][j][1].split('_')[0][4:]) - 1
else:
raise Exception('Unsure how to interpret ' + globdict['param_map'][j][1])
curval = globdict['trial_model'].params[globdict['param_map'][j][0]][param_type][idx]
if '_' not in globdict['param_map'][j][1]:
globdict['trial_model'].params[globdict['param_map'][j][0]][param_type][idx] = tparams[j] * globdict['param_map'][j][2]
elif globdict['param_map'][j][1][-2:] == 're':
globdict['trial_model'].params[globdict['param_map'][j][0]][param_type][idx] = tparams[j] * globdict['param_map'][j][2] + np.imag(curval) * 1j
elif globdict['param_map'][j][1][-2:] == 'im':
globdict['trial_model'].params[globdict['param_map'][j][0]][param_type][idx] = tparams[j] * globdict['param_map'][j][2] * 1j + np.real(curval)
elif globdict['param_map'][j][1][-3:] == 'abs':
globdict['trial_model'].params[globdict['param_map'][j][0]][param_type][idx] = tparams[j] * globdict['param_map'][j][2] * np.exp(1j * np.angle(curval))
elif globdict['param_map'][j][1][-3:] == 'arg':
globdict['trial_model'].params[globdict['param_map'][j][0]][param_type][idx] = np.abs(curval) * np.exp(1j * tparams[j] * globdict['param_map'][j][2])
else:
print('Parameter ' + globdict['param_map'][j][1] + ' not understood!')
gains = params[globdict['n_params']:globdict['n_params'] + globdict['n_gains']]
leakage = params[globdict['n_params'] + globdict['n_gains']:]
if len(leakage) == 0:
return
station_leakages = globdict['station_leakages']
leakage_fit = globdict['leakage_fit']
for j in range(len(leakage) // 2):
station_leakages[leakage_fit[j][0]][leakage_fit[j][1]] = leakage[2 * j] + 1j * leakage[2 * j + 1]
for j in range(1, 4):
jonesdict = globdict['jonesdict' + str(j)]
if jonesdict is not None:
if type(jonesdict) is dict:
jonesdict['DR1'] = np.array([station_leakages[jonesdict['t1'][_]]['R'] for _ in range(len(jonesdict['t1']))])
jonesdict['DR2'] = np.array([station_leakages[jonesdict['t2'][_]]['R'] for _ in range(len(jonesdict['t1']))])
jonesdict['DL1'] = np.array([station_leakages[jonesdict['t1'][_]]['L'] for _ in range(len(jonesdict['t1']))])
jonesdict['DL2'] = np.array([station_leakages[jonesdict['t2'][_]]['L'] for _ in range(len(jonesdict['t1']))])
jonesdict['leakage_fit'] = globdict['leakage_fit']
else:
for jonesdict2 in jonesdict:
jonesdict2['DR1'] = np.array([station_leakages[jonesdict2['t1'][_]]['R'] for _ in range(len(jonesdict2['t1']))])
jonesdict2['DR2'] = np.array([station_leakages[jonesdict2['t2'][_]]['R'] for _ in range(len(jonesdict2['t1']))])
jonesdict2['DL1'] = np.array([station_leakages[jonesdict2['t1'][_]]['L'] for _ in range(len(jonesdict2['t1']))])
jonesdict2['DL2'] = np.array([station_leakages[jonesdict2['t2'][_]]['L'] for _ in range(len(jonesdict2['t1']))])
jonesdict2['leakage_fit'] = globdict['leakage_fit']
if globdict['marginalize_gains']:
_globdict = globdict
gains = [np.abs(selfcal(globdict['Obsdata'], globdict['trial_model'], gain_init=None, gain_prior=globdict['gain_prior'], msgtype='none').data[site]['rscale'][selfcal(globdict['Obsdata'], globdict['trial_model'], gain_init=None, gain_prior=globdict['gain_prior'], msgtype='none').data[site]['time'] == time][0]) - 1.0 for (time, site) in globdict['gain_list']]
_globdict['gain_init'] = gains
globdict = _globdict
chi2_1 = chisq_wgain(globdict['trial_model'], globdict['d1'], globdict['data1'], globdict['uv1'], globdict['sigma1'], globdict['pol1'], globdict['jonesdict1'], gains, globdict['gains_t1'], globdict['gains_t2'], globdict['fit_gains'] + globdict['marginalize_gains'])
chi2_2 = chisq_wgain(globdict['trial_model'], globdict['d2'], globdict['data2'], globdict['uv2'], globdict['sigma2'], globdict['pol2'], globdict['jonesdict2'], gains, globdict['gains_t1'], globdict['gains_t2'], globdict['fit_gains'] + globdict['marginalize_gains'])
chi2_3 = chisq_wgain(globdict['trial_model'], globdict['d3'], globdict['data3'], globdict['uv3'], globdict['sigma3'], globdict['pol3'], globdict['jonesdict3'], gains, globdict['gains_t1'], globdict['gains_t2'], globdict['fit_gains'] + globdict['marginalize_gains'])
chi2_4 = chisq_wgain(globdict['trial_model'], globdict['d4'], globdict['data4'], globdict['uv4'], globdict['sigma4'], globdict['pol4'], globdict['jonesdict4'], gains, globdict['gains_t1'], globdict['gains_t2'], globdict['fit_gains'] + globdict['marginalize_gains'])
(chi2_1, chi2_2, chi2_3, chi2_4) = (chi2_1, chi2_2, chi2_3, chi2_4)
datterm = globdict['alpha_d1'] * chi2_1 + globdict['alpha_d2'] * chi2_2 + globdict['alpha_d3'] * chi2_3 + globdict['alpha_d4'] * chi2_4
if globdict['marginalize_gains']:
l1 = laplace_approximation(globdict['trial_model'], globdict['d1'], globdict['data1'], globdict['uv1'], globdict['sigma1'], globdict['gains_t1'], globdict['gains_t2'])
l2 = laplace_approximation(globdict['trial_model'], globdict['d2'], globdict['data2'], globdict['uv2'], globdict['sigma2'], globdict['gains_t1'], globdict['gains_t2'])
l3 = laplace_approximation(globdict['trial_model'], globdict['d3'], globdict['data3'], globdict['uv3'], globdict['sigma3'], globdict['gains_t1'], globdict['gains_t2'])
l4 = laplace_approximation(globdict['trial_model'], globdict['d4'], globdict['data4'], globdict['uv4'], globdict['sigma4'], globdict['gains_t1'], globdict['gains_t2'])
(l1, l2, l3, l4) = (l1, l2, l3, l4)
datterm += l1 + l2 + l3 + l4
if globdict['minimizer_func'] not in ['dynesty_static', 'dynesty_dynamic', 'pymc3'] or force_posterior:
tparams = transform_params(params[:globdict['n_params']], globdict['param_map'], globdict['minimizer_func'], globdict['model_prior'])
priterm = np.sum([np.log(prior_func(tparams[j] * globdict['param_map'][j][2], globdict['model_prior'][globdict['param_map'][j][0]][globdict['param_map'][j][1]])) for j in range(len(params[:globdict['n_params']]))])
priterm += prior_gain(params[globdict['n_params']:globdict['n_params'] + globdict['n_gains']], globdict['gain_list'], globdict['gain_prior'], globdict['fit_gains'])
priterm += prior_leakage(params[globdict['n_params'] + globdict['n_gains']:], globdict['leakage_fit'], globdict['leakage_prior'], globdict['fit_leakage'])
else:
priterm = 0.0
fluxterm = globdict['alpha_flux'] * flux_constraint(globdict['trial_model'], globdict['alpha_flux'], globdict['flux'])
return datterm - priterm + fluxterm - globdict['ln_norm']
|
eht-imaging
|
positive
|
def test_join_sync_leader(self):
"""
Successfully join, assign as leader, and sync
"""
<DeepExtract>
if topic_partitions is None:
topic_partitions = {'topic1': [0, 1]}
def _load_topic_partitions(*topics):
result = {topic: topic_partitions[topic] for topic in topics}
client = defer.succeed(result)
client = Mock()
client.reactor = task.Clock()
client._send_request_to_coordinator.side_effect = [self.join_response(), self.sync_response()]
client._load_topic_partitions.side_effect = _load_topic_partitions
client = client
</DeepExtract>
<DeepExtract>
coord = Coordinator(client, 'group_id', ['topic1'])
</DeepExtract>
de = coord.join_and_sync()
self.successResultOf(de)
self.assertEqual(coord._rejoin_needed, False)
self.assertEqual(coord.member_id, 'm1')
self.assertEqual(coord.leader_id, 'm1')
self.assertIn('joined', repr(coord))
|
def test_join_sync_leader(self):
"""
Successfully join, assign as leader, and sync
"""
if topic_partitions is None:
topic_partitions = {'topic1': [0, 1]}
def _load_topic_partitions(*topics):
result = {topic: topic_partitions[topic] for topic in topics}
client = defer.succeed(result)
client = Mock()
client.reactor = task.Clock()
client._send_request_to_coordinator.side_effect = [self.join_response(), self.sync_response()]
client._load_topic_partitions.side_effect = _load_topic_partitions
client = client
coord = Coordinator(client, 'group_id', ['topic1'])
de = coord.join_and_sync()
self.successResultOf(de)
self.assertEqual(coord._rejoin_needed, False)
self.assertEqual(coord.member_id, 'm1')
self.assertEqual(coord.leader_id, 'm1')
self.assertIn('joined', repr(coord))
|
afkak
|
positive
|
def add_resource(self, transaction, parent_resource, lp):
"""
Render a POST on a new resource.
:param transaction: the transaction
:param parent_resource: the parent of the resource
:param lp: the location_path attribute of the resource
:return: the response
"""
method = getattr(parent_resource, 'render_POST', None)
try:
resource = method(request=transaction.request)
except NotImplementedError:
try:
method = getattr(parent_resource, 'render_POST_advanced', None)
ret = method(request=transaction.request, response=transaction.response)
if isinstance(ret, tuple) and len(ret) == 2 and isinstance(ret[1], Response) and isinstance(ret[0], Resource):
(resource, response) = ret
resource.path = lp
resource.changed = True
self._parent.root[resource.path] = resource
transaction.resource = resource
transaction.response = response
if transaction.response.code is None:
transaction.response.code = defines.Codes.CREATED.number
return transaction
elif isinstance(ret, tuple) and len(ret) == 3 and isinstance(ret[1], Response) and isinstance(ret[0], Resource):
(resource, response, callback) = ret
<DeepExtract>
if not transaction.request.acknowledged:
self._parent._send_ack(transaction)
transaction.request.acknowledged = True
ret = callback(request=transaction.request, response=transaction.response)
</DeepExtract>
if not isinstance(ret, tuple) or not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(resource, response) = ret
resource.path = lp
resource.changed = True
self._parent.root[resource.path] = resource
transaction.resource = resource
transaction.response = response
if transaction.response.code is None:
transaction.response.code = defines.Codes.CREATED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif isinstance(resource, tuple) and len(resource) == 2:
(resource, callback) = resource
<DeepExtract>
if not transaction.request.acknowledged:
self._parent._send_ack(transaction)
transaction.request.acknowledged = True
resource = callback(request=transaction.request)
resource = resource
</DeepExtract>
if not isinstance(resource, Resource):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
resource.path = lp
if resource.etag is not None:
transaction.response.etag = resource.etag
transaction.response.location_path = resource.path
if resource.location_query is not None and len(resource.location_query) > 0:
transaction.response.location_query = resource.location_query
transaction.response.code = defines.Codes.CREATED.number
transaction.response.payload = None
assert isinstance(resource, Resource)
if resource.etag is not None:
transaction.response.etag = resource.etag
if resource.max_age is not None:
transaction.response.max_age = resource.max_age
resource.changed = True
transaction.resource = resource
self._parent.root[resource.path] = resource
return transaction
|
def add_resource(self, transaction, parent_resource, lp):
"""
Render a POST on a new resource.
:param transaction: the transaction
:param parent_resource: the parent of the resource
:param lp: the location_path attribute of the resource
:return: the response
"""
method = getattr(parent_resource, 'render_POST', None)
try:
resource = method(request=transaction.request)
except NotImplementedError:
try:
method = getattr(parent_resource, 'render_POST_advanced', None)
ret = method(request=transaction.request, response=transaction.response)
if isinstance(ret, tuple) and len(ret) == 2 and isinstance(ret[1], Response) and isinstance(ret[0], Resource):
(resource, response) = ret
resource.path = lp
resource.changed = True
self._parent.root[resource.path] = resource
transaction.resource = resource
transaction.response = response
if transaction.response.code is None:
transaction.response.code = defines.Codes.CREATED.number
return transaction
elif isinstance(ret, tuple) and len(ret) == 3 and isinstance(ret[1], Response) and isinstance(ret[0], Resource):
(resource, response, callback) = ret
if not transaction.request.acknowledged:
self._parent._send_ack(transaction)
transaction.request.acknowledged = True
ret = callback(request=transaction.request, response=transaction.response)
if not isinstance(ret, tuple) or not (isinstance(ret[0], Resource) and isinstance(ret[1], Response)):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
(resource, response) = ret
resource.path = lp
resource.changed = True
self._parent.root[resource.path] = resource
transaction.resource = resource
transaction.response = response
if transaction.response.code is None:
transaction.response.code = defines.Codes.CREATED.number
return transaction
else:
raise NotImplementedError
except NotImplementedError:
transaction.response.code = defines.Codes.METHOD_NOT_ALLOWED.number
return transaction
if isinstance(resource, Resource):
pass
elif isinstance(resource, tuple) and len(resource) == 2:
(resource, callback) = resource
if not transaction.request.acknowledged:
self._parent._send_ack(transaction)
transaction.request.acknowledged = True
resource = callback(request=transaction.request)
resource = resource
if not isinstance(resource, Resource):
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
else:
transaction.response.code = defines.Codes.INTERNAL_SERVER_ERROR.number
return transaction
resource.path = lp
if resource.etag is not None:
transaction.response.etag = resource.etag
transaction.response.location_path = resource.path
if resource.location_query is not None and len(resource.location_query) > 0:
transaction.response.location_query = resource.location_query
transaction.response.code = defines.Codes.CREATED.number
transaction.response.payload = None
assert isinstance(resource, Resource)
if resource.etag is not None:
transaction.response.etag = resource.etag
if resource.max_age is not None:
transaction.response.max_age = resource.max_age
resource.changed = True
transaction.resource = resource
self._parent.root[resource.path] = resource
return transaction
|
CoAPthon
|
positive
|
def expand_ou(original_account, client, manifest):
expanded = []
exclusions = original_account.get('exclude', {}).get('accounts', [])
ou_exclusions = original_account.get('exclude', {}).get('ous', [])
for ou_exclusion in ou_exclusions:
if ou_exclusion.startswith('/'):
ou_id = client.convert_path_to_ou(ou_exclusion)
else:
ou_id = ou_exclusion
children = client.list_children_nested(ParentId=ou_id, ChildType='ACCOUNT')
for child in children:
logger.info(f"Adding {child.get('Id')} to the exclusion list as it was in the ou {ou_exclusion}")
exclusions.append(child.get('Id'))
response = client.list_children_nested(ParentId=original_account.get('ou'), ChildType='ACCOUNT')
for result in response:
new_account_id = result.get('Id')
if new_account_id in exclusions:
logger.info(f'Skipping {new_account_id} as it is in the exclusion list')
continue
<DeepExtract>
response = client.describe_account(AccountId=new_account_id)
new_account = deepcopy(manifest.get('defaults', {}).get('accounts', {}))
new_account.update(deepcopy(original_account))
ou_from_parent = None
if 'ou' in new_account:
ou_from_parent = new_account['ou']
del new_account['ou']
account_details = response.get('Account')
if account_details.get('Status') == 'ACTIVE':
if account_details.get('Name') is not None:
new_account['name'] = account_details.get('Name')
new_account['email'] = account_details.get('Email')
if ou_from_parent is not None:
new_account['expanded_from'] = ou_from_parent
new_account['account_id'] = new_account_id
new_account['organization'] = account_details.get('Arn').split(':')[5].split('/')[1]
new_account = new_account
else:
logger.info(f'Skipping account as it is not ACTIVE: {json.dumps(account_details, default=str)}')
new_account = None
</DeepExtract>
if new_account:
expanded.append(new_account)
return expanded
|
def expand_ou(original_account, client, manifest):
expanded = []
exclusions = original_account.get('exclude', {}).get('accounts', [])
ou_exclusions = original_account.get('exclude', {}).get('ous', [])
for ou_exclusion in ou_exclusions:
if ou_exclusion.startswith('/'):
ou_id = client.convert_path_to_ou(ou_exclusion)
else:
ou_id = ou_exclusion
children = client.list_children_nested(ParentId=ou_id, ChildType='ACCOUNT')
for child in children:
logger.info(f"Adding {child.get('Id')} to the exclusion list as it was in the ou {ou_exclusion}")
exclusions.append(child.get('Id'))
response = client.list_children_nested(ParentId=original_account.get('ou'), ChildType='ACCOUNT')
for result in response:
new_account_id = result.get('Id')
if new_account_id in exclusions:
logger.info(f'Skipping {new_account_id} as it is in the exclusion list')
continue
response = client.describe_account(AccountId=new_account_id)
new_account = deepcopy(manifest.get('defaults', {}).get('accounts', {}))
new_account.update(deepcopy(original_account))
ou_from_parent = None
if 'ou' in new_account:
ou_from_parent = new_account['ou']
del new_account['ou']
account_details = response.get('Account')
if account_details.get('Status') == 'ACTIVE':
if account_details.get('Name') is not None:
new_account['name'] = account_details.get('Name')
new_account['email'] = account_details.get('Email')
if ou_from_parent is not None:
new_account['expanded_from'] = ou_from_parent
new_account['account_id'] = new_account_id
new_account['organization'] = account_details.get('Arn').split(':')[5].split('/')[1]
new_account = new_account
else:
logger.info(f'Skipping account as it is not ACTIVE: {json.dumps(account_details, default=str)}')
new_account = None
if new_account:
expanded.append(new_account)
return expanded
|
aws-service-catalog-puppet
|
positive
|
def _log_large_files(self):
<DeepExtract>
large_files = {f.path: f.size for f in self._get_git_files() if f.size > get_config().get('locker.large_file_threshold', LF_DEFAULT)}
</DeepExtract>
if large_files:
msg = ['LARGE FILES (Hosting service may reject due to size):\n']
for (fpath, size) in large_files.items():
formatted_size = f'{size / MB:.1f} MB'
if formatted_size == '0.0 MB':
formatted_size = f'{str(size)} Bytes'
msg.append(f' {fpath} is {formatted_size}')
self.logger.info('\n'.join(msg) + '\n')
|
def _log_large_files(self):
large_files = {f.path: f.size for f in self._get_git_files() if f.size > get_config().get('locker.large_file_threshold', LF_DEFAULT)}
if large_files:
msg = ['LARGE FILES (Hosting service may reject due to size):\n']
for (fpath, size) in large_files.items():
formatted_size = f'{size / MB:.1f} MB'
if formatted_size == '0.0 MB':
formatted_size = f'{str(size)} Bytes'
msg.append(f' {fpath} is {formatted_size}')
self.logger.info('\n'.join(msg) + '\n')
|
auditree-framework
|
positive
|
def correct_ips(connection, ip_list, module, eni_id):
<DeepExtract>
try:
eni_result = connection.describe_network_interfaces(aws_retry=True, NetworkInterfaceIds=[eni_id])
if eni_result['NetworkInterfaces']:
eni = eni_result['NetworkInterfaces'][0]
else:
eni = None
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, 'Failed to describe eni with id: {0}'.format(eni_id))
</DeepExtract>
private_addresses = set()
if 'PrivateIpAddresses' in eni:
for ip in eni['PrivateIpAddresses']:
private_addresses.add(ip['PrivateIpAddress'])
ip_set = set(ip_list)
return ip_set.issubset(private_addresses)
|
def correct_ips(connection, ip_list, module, eni_id):
try:
eni_result = connection.describe_network_interfaces(aws_retry=True, NetworkInterfaceIds=[eni_id])
if eni_result['NetworkInterfaces']:
eni = eni_result['NetworkInterfaces'][0]
else:
eni = None
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, 'Failed to describe eni with id: {0}'.format(eni_id))
private_addresses = set()
if 'PrivateIpAddresses' in eni:
for ip in eni['PrivateIpAddresses']:
private_addresses.add(ip['PrivateIpAddress'])
ip_set = set(ip_list)
return ip_set.issubset(private_addresses)
|
amazon.aws
|
positive
|
def history_form_view(self, request, object_id, version_id, extra_context=None):
request.current_app = self.admin_site.name
original_opts = self.model._meta
model = getattr(self.model, self.model._meta.simple_history_manager_attribute).model
obj = get_object_or_404(model, **{original_opts.pk.attname: object_id, 'history_id': version_id}).instance
obj._state.adding = False
if not self.has_change_permission(request, obj):
raise PermissionDenied
if SIMPLE_HISTORY_EDIT:
change_history = True
else:
change_history = False
if '_change_history' in request.POST and SIMPLE_HISTORY_EDIT:
history = utils.get_history_manager_for_model(obj)
obj = history.get(pk=version_id).instance
formsets = []
form_class = self.get_form(request, obj)
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=obj)
if form.is_valid():
new_object = self.save_form(request, form, change=True)
<DeepExtract>
new_object._history_user = request.user
super().save_model(request, new_object, form, True)
</DeepExtract>
form.save_m2m()
self.log_change(request, new_object, self.construct_change_message(request, form, formsets))
return self.response_change(request, new_object)
else:
form = form_class(instance=obj)
admin_form = helpers.AdminForm(form, self.get_fieldsets(request, obj), self.prepopulated_fields, self.get_readonly_fields(request, obj), model_admin=self)
model_name = original_opts.model_name
url_triplet = (self.admin_site.name, original_opts.app_label, model_name)
context = {'title': self.history_form_view_title(obj), 'adminform': admin_form, 'object_id': object_id, 'original': obj, 'is_popup': False, 'media': mark_safe(self.media + admin_form.media), 'errors': helpers.AdminErrorList(form, formsets), 'app_label': original_opts.app_label, 'original_opts': original_opts, 'changelist_url': reverse('%s:%s_%s_changelist' % url_triplet), 'change_url': reverse('%s:%s_%s_change' % url_triplet, args=(obj.pk,)), 'history_url': reverse('%s:%s_%s_history' % url_triplet, args=(obj.pk,)), 'change_history': change_history, 'revert_disabled': self.revert_disabled, 'add': False, 'change': True, 'has_add_permission': self.has_add_permission(request), 'has_change_permission': self.has_change_permission(request, obj), 'has_delete_permission': self.has_delete_permission(request, obj), 'has_file_field': True, 'has_absolute_url': False, 'form_url': '', 'opts': model._meta, 'content_type_id': self.content_type_model_cls.objects.get_for_model(self.model).id, 'save_as': self.save_as, 'save_on_top': self.save_on_top, 'root_path': getattr(self.admin_site, 'root_path', None)}
context.update(self.admin_site.each_context(request))
context.update(extra_context or {})
extra_kwargs = {}
return self.render_history_view(request, self.object_history_form_template, context, **extra_kwargs)
|
def history_form_view(self, request, object_id, version_id, extra_context=None):
request.current_app = self.admin_site.name
original_opts = self.model._meta
model = getattr(self.model, self.model._meta.simple_history_manager_attribute).model
obj = get_object_or_404(model, **{original_opts.pk.attname: object_id, 'history_id': version_id}).instance
obj._state.adding = False
if not self.has_change_permission(request, obj):
raise PermissionDenied
if SIMPLE_HISTORY_EDIT:
change_history = True
else:
change_history = False
if '_change_history' in request.POST and SIMPLE_HISTORY_EDIT:
history = utils.get_history_manager_for_model(obj)
obj = history.get(pk=version_id).instance
formsets = []
form_class = self.get_form(request, obj)
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=obj)
if form.is_valid():
new_object = self.save_form(request, form, change=True)
new_object._history_user = request.user
super().save_model(request, new_object, form, True)
form.save_m2m()
self.log_change(request, new_object, self.construct_change_message(request, form, formsets))
return self.response_change(request, new_object)
else:
form = form_class(instance=obj)
admin_form = helpers.AdminForm(form, self.get_fieldsets(request, obj), self.prepopulated_fields, self.get_readonly_fields(request, obj), model_admin=self)
model_name = original_opts.model_name
url_triplet = (self.admin_site.name, original_opts.app_label, model_name)
context = {'title': self.history_form_view_title(obj), 'adminform': admin_form, 'object_id': object_id, 'original': obj, 'is_popup': False, 'media': mark_safe(self.media + admin_form.media), 'errors': helpers.AdminErrorList(form, formsets), 'app_label': original_opts.app_label, 'original_opts': original_opts, 'changelist_url': reverse('%s:%s_%s_changelist' % url_triplet), 'change_url': reverse('%s:%s_%s_change' % url_triplet, args=(obj.pk,)), 'history_url': reverse('%s:%s_%s_history' % url_triplet, args=(obj.pk,)), 'change_history': change_history, 'revert_disabled': self.revert_disabled, 'add': False, 'change': True, 'has_add_permission': self.has_add_permission(request), 'has_change_permission': self.has_change_permission(request, obj), 'has_delete_permission': self.has_delete_permission(request, obj), 'has_file_field': True, 'has_absolute_url': False, 'form_url': '', 'opts': model._meta, 'content_type_id': self.content_type_model_cls.objects.get_for_model(self.model).id, 'save_as': self.save_as, 'save_on_top': self.save_on_top, 'root_path': getattr(self.admin_site, 'root_path', None)}
context.update(self.admin_site.each_context(request))
context.update(extra_context or {})
extra_kwargs = {}
return self.render_history_view(request, self.object_history_form_template, context, **extra_kwargs)
|
django-simple-history
|
positive
|
def __call__(self, results):
if 'flip' not in results:
flip = True if np.random.rand() < self.flip_ratio else False
results['flip'] = flip
if 'flip_direction' not in results:
results['flip_direction'] = self.direction
if results['flip']:
results['img'] = mmcv.imflip(results['img'], direction=results['flip_direction'])
for key in results.get('bbox_fields', []):
<DeepExtract>
assert results[key].shape[-1] % 4 == 0
flipped = results[key].copy()
if results['flip_direction'] == 'horizontal':
w = results['img_shape'][1]
flipped[..., 0::4] = w - results[key][..., 2::4] - 1
flipped[..., 2::4] = w - results[key][..., 0::4] - 1
elif results['flip_direction'] == 'vertical':
h = results['img_shape'][0]
flipped[..., 1::4] = h - results[key][..., 3::4] - 1
flipped[..., 3::4] = h - results[key][..., 1::4] - 1
else:
raise ValueError('Invalid flipping direction "{}"'.format(results['flip_direction']))
results[key] = flipped
</DeepExtract>
for key in results.get('mask_fields', []):
masks = [mmcv.imflip(mask, direction=results['flip_direction']) for mask in results[key]]
if masks:
results[key] = np.stack(masks)
else:
results[key] = np.empty((0,) + results['img_shape'], dtype=np.uint8)
for key in results.get('seg_fields', []):
results[key] = mmcv.imflip(results[key], direction=results['flip_direction'])
return results
|
def __call__(self, results):
if 'flip' not in results:
flip = True if np.random.rand() < self.flip_ratio else False
results['flip'] = flip
if 'flip_direction' not in results:
results['flip_direction'] = self.direction
if results['flip']:
results['img'] = mmcv.imflip(results['img'], direction=results['flip_direction'])
for key in results.get('bbox_fields', []):
assert results[key].shape[-1] % 4 == 0
flipped = results[key].copy()
if results['flip_direction'] == 'horizontal':
w = results['img_shape'][1]
flipped[..., 0::4] = w - results[key][..., 2::4] - 1
flipped[..., 2::4] = w - results[key][..., 0::4] - 1
elif results['flip_direction'] == 'vertical':
h = results['img_shape'][0]
flipped[..., 1::4] = h - results[key][..., 3::4] - 1
flipped[..., 3::4] = h - results[key][..., 1::4] - 1
else:
raise ValueError('Invalid flipping direction "{}"'.format(results['flip_direction']))
results[key] = flipped
for key in results.get('mask_fields', []):
masks = [mmcv.imflip(mask, direction=results['flip_direction']) for mask in results[key]]
if masks:
results[key] = np.stack(masks)
else:
results[key] = np.empty((0,) + results['img_shape'], dtype=np.uint8)
for key in results.get('seg_fields', []):
results[key] = mmcv.imflip(results[key], direction=results['flip_direction'])
return results
|
DetectoRS
|
positive
|
@staticmethod
@provide_session
def update(dag_ids=None, dirty_only=True, session=None):
"""
Updates the stats for dirty/out-of-sync dags
:param dag_ids: dag_ids to be updated
:type dag_ids: list
:param dirty_only: only updated for marked dirty, defaults to True
:type dirty_only: bool
:param session: db session to use
:type session: Session
"""
try:
qry = session.query(DagStat)
if dag_ids:
qry = qry.filter(DagStat.dag_id.in_(set(dag_ids)))
if dirty_only:
qry = qry.filter(DagStat.dirty == True)
qry = qry.with_for_update().all()
<DeepExtract>
if serialize_json:
stored_value = json.dumps(value)
else:
stored_value = value
session.query([dag_stat.dag_id for dag_stat in qry]).filter([dag_stat.dag_id for dag_stat in qry].key == key).delete()
session.add(Variable(key=key, val=stored_value))
session.flush()
</DeepExtract>
if len(ids) == 0:
session.commit()
return
<DeepExtract>
if serialize_json:
stored_value = json.dumps(value)
else:
stored_value = value
session.query(itertools.product(ids, State.dag_states)).filter(itertools.product(ids, State.dag_states).key == key).delete()
session.add(Variable(key=key, val=stored_value))
session.flush()
</DeepExtract>
qry = session.query(DagRun.dag_id, DagRun.state, func.count('*')).filter(DagRun.dag_id.in_(ids)).group_by(DagRun.dag_id, DagRun.state)
counts = {(dag_id, state): count for (dag_id, state, count) in qry}
for (dag_id, state) in dagstat_states:
count = 0
if (dag_id, state) in counts:
count = counts[dag_id, state]
session.merge(DagStat(dag_id=dag_id, state=state, count=count, dirty=False))
session.commit()
except Exception as e:
session.rollback()
log = LoggingMixin().log
log.warning('Could not update dag stat table')
log.exception(e)
|
@staticmethod
@provide_session
def update(dag_ids=None, dirty_only=True, session=None):
"""
Updates the stats for dirty/out-of-sync dags
:param dag_ids: dag_ids to be updated
:type dag_ids: list
:param dirty_only: only updated for marked dirty, defaults to True
:type dirty_only: bool
:param session: db session to use
:type session: Session
"""
try:
qry = session.query(DagStat)
if dag_ids:
qry = qry.filter(DagStat.dag_id.in_(set(dag_ids)))
if dirty_only:
qry = qry.filter(DagStat.dirty == True)
qry = qry.with_for_update().all()
if serialize_json:
stored_value = json.dumps(value)
else:
stored_value = value
session.query([dag_stat.dag_id for dag_stat in qry]).filter([dag_stat.dag_id for dag_stat in qry].key == key).delete()
session.add(Variable(key=key, val=stored_value))
session.flush()
if len(ids) == 0:
session.commit()
return
if serialize_json:
stored_value = json.dumps(value)
else:
stored_value = value
session.query(itertools.product(ids, State.dag_states)).filter(itertools.product(ids, State.dag_states).key == key).delete()
session.add(Variable(key=key, val=stored_value))
session.flush()
qry = session.query(DagRun.dag_id, DagRun.state, func.count('*')).filter(DagRun.dag_id.in_(ids)).group_by(DagRun.dag_id, DagRun.state)
counts = {(dag_id, state): count for (dag_id, state, count) in qry}
for (dag_id, state) in dagstat_states:
count = 0
if (dag_id, state) in counts:
count = counts[dag_id, state]
session.merge(DagStat(dag_id=dag_id, state=state, count=count, dirty=False))
session.commit()
except Exception as e:
session.rollback()
log = LoggingMixin().log
log.warning('Could not update dag stat table')
log.exception(e)
|
docker-airflow
|
positive
|
def test(python=PYTHON):
"""Run tests on a single version of Python."""
<DeepExtract>
clean()
local('LIBRARY_PATH={library_path} CPATH={include_path} {python} setup.py build_ext --inplace'.format(library_path=LIBRARY_PATH, include_path=INCLUDE_PATH, python=python))
</DeepExtract>
<DeepExtract>
unittest = '{python} -m unittest'.format(python=python)
local('{unittest} discover -v .'.format(unittest=unittest))
</DeepExtract>
|
def test(python=PYTHON):
"""Run tests on a single version of Python."""
clean()
local('LIBRARY_PATH={library_path} CPATH={include_path} {python} setup.py build_ext --inplace'.format(library_path=LIBRARY_PATH, include_path=INCLUDE_PATH, python=python))
unittest = '{python} -m unittest'.format(python=python)
local('{unittest} discover -v .'.format(unittest=unittest))
|
bigfloat
|
positive
|
def download_to_stream(url, stream, silent=False, headers=None):
""" Download url and writes response to stream.
"""
source = None
try:
try:
<DeepExtract>
logger.info('Request to: %s' % url)
source = requests.get(url, headers=headers)
status_error = 'Request on %s failed (status=%s)' % (url, source.status_code)
assert source.status_code == 200, status_error
content_error = 'Request on %s returned empty content' % url
assert len(source.content) > 0, content_error
source = source
</DeepExtract>
except requests.exceptions.ConnectionError:
time.sleep(1)
<DeepExtract>
logger.info('Request to: %s' % url)
source = requests.get(url, headers=headers)
status_error = 'Request on %s failed (status=%s)' % (url, source.status_code)
assert source.status_code == 200, status_error
content_error = 'Request on %s returned empty content' % url
assert len(source.content) > 0, content_error
source = source
</DeepExtract>
except (AssertionError, requests.exceptions.RequestException) as e:
logger.exception(e)
logger.info('Headers sent: %s' % headers)
if hasattr(source, 'text'):
logger.info('Response: %s' % source.text[:150])
if not silent:
raise
if source is None:
return source
try:
stream.write(source.content)
stream.flush()
except IOError as e:
logger.exception(e)
if not silent:
raise
if isinstance(stream, HttpResponse):
stream.status_code = source.status_code
for (header, value) in source.headers.items():
stream[header] = value
return source
|
def download_to_stream(url, stream, silent=False, headers=None):
""" Download url and writes response to stream.
"""
source = None
try:
try:
logger.info('Request to: %s' % url)
source = requests.get(url, headers=headers)
status_error = 'Request on %s failed (status=%s)' % (url, source.status_code)
assert source.status_code == 200, status_error
content_error = 'Request on %s returned empty content' % url
assert len(source.content) > 0, content_error
source = source
except requests.exceptions.ConnectionError:
time.sleep(1)
logger.info('Request to: %s' % url)
source = requests.get(url, headers=headers)
status_error = 'Request on %s failed (status=%s)' % (url, source.status_code)
assert source.status_code == 200, status_error
content_error = 'Request on %s returned empty content' % url
assert len(source.content) > 0, content_error
source = source
except (AssertionError, requests.exceptions.RequestException) as e:
logger.exception(e)
logger.info('Headers sent: %s' % headers)
if hasattr(source, 'text'):
logger.info('Response: %s' % source.text[:150])
if not silent:
raise
if source is None:
return source
try:
stream.write(source.content)
stream.flush()
except IOError as e:
logger.exception(e)
if not silent:
raise
if isinstance(stream, HttpResponse):
stream.status_code = source.status_code
for (header, value) in source.headers.items():
stream[header] = value
return source
|
django-mapentity
|
positive
|
def convert_object_to_pattern(obj, obs_id):
related_objects = obj.related_objects
prop = obj.properties
expression = None
is_custom_object = False
if prop:
if isinstance(prop, Address):
<DeepExtract>
if prop.address_value is None:
if prop.object_reference is None:
expression = None
else:
expression = handle_object_reference_for_pattern(prop.object_reference)
cond = prop.address_value.condition
if prop.category == prop.CAT_IPV4:
expression = create_term('ipv4-addr:value', cond, make_constant(prop.address_value.value))
elif prop.category == prop.CAT_IPV6:
expression = create_term('ipv6-addr:value', cond, make_constant(prop.address_value.value))
elif prop.category == prop.CAT_MAC:
expression = create_term('mac-addr:value', cond, make_constant(prop.address_value.value))
elif prop.category == prop.CAT_EMAIL:
expression = create_term('email-addr:value', cond, make_constant(prop.address_value.value))
else:
warn('The address type %s is not part of Cybox 3.0', 421, prop.category)
</DeepExtract>
elif isinstance(prop, Artifact):
<DeepExtract>
expressions = []
if prop.content_type:
expressions.append(create_term('artifact:mime_type', prop.content_type.condition, prop.content_type))
if prop.raw_artifact:
expressions.append(create_term('artifact:payload_bin', prop.raw_artifact.condition, prop.raw_artifact.value))
if prop.raw_artifact_reference:
expressions.append(create_term('artifact:url', prop.raw_artifact_reference.condition, prop.raw_artifact_reference.value))
if prop.hashes:
expressions.append(convert_hashes_to_pattern(prop.hashes))
if expressions:
expression = create_boolean_expression('AND', expressions)
</DeepExtract>
elif isinstance(prop, AutonomousSystem):
<DeepExtract>
expressions = []
if prop.number:
expressions.append(add_comparison_expression(prop.number, 'autonomous-system:number'))
if prop.name:
expressions.append(add_comparison_expression(prop.name, 'autonomous-system:name'))
if prop.regional_internet_registry:
expressions.append(add_comparison_expression(prop.regional_internet_registry, 'autonomous-system:rir'))
if expressions:
expression = create_boolean_expression('AND', expressions)
</DeepExtract>
elif isinstance(prop, URI):
<DeepExtract>
expression = create_term('url:value', prop.value.condition, make_constant(prop.value.value))
</DeepExtract>
elif isinstance(prop, EmailMessage):
<DeepExtract>
expressions = []
if prop.header is not None:
expressions.append(convert_email_header_to_pattern(prop.header, _EMAIL_HEADER_PROPERTIES))
message_id_term = handle_message_id_property(prop.header)
if message_id_term:
expressions.append(message_id_term)
add_headers = convert_email_header_to_pattern(prop.header, _EMAIL_ADDITIONAL_HEADERS_PROPERTIES)
if add_headers:
expressions.append(add_headers)
if prop.attachments is not None:
for attachment in prop.attachments:
new_pattern = convert_attachment_to_ref(attachment)
if isinstance(new_pattern, IdrefPlaceHolder):
expressions.append(ComparisonExpressionForElevator('=', 'email-message:body_multipart[*].body_raw_ref', new_pattern))
else:
expressions.append(new_pattern.collapse_reference(ObjectPathForElevator.make_object_path('email-message:body_multipart[*].body_raw_ref')))
if prop.raw_body is not None:
if not prop.raw_body.value:
warn('%s contains no value', 621, 'Email raw body')
else:
warn('Email raw body not handled yet', 806)
if prop.links is not None:
if get_option_value('spec_version') == '2.1':
lhs = generate_lhs_for_missing_property('email-message:', None, 'link_refs[*].value', 'email-message')
if lhs:
for link in prop.links:
if id_in_observable_mappings(link.object_reference):
referenced_obs = get_obs_from_mapping(link.object_reference)
exp = convert_observable_to_pattern(referenced_obs)
rhs = exp.rhs
else:
rhs = IdrefPlaceHolder(link.object_reference)
expressions.append(ComparisonExpressionForElevator('=', lhs, rhs))
else:
warn('Email links not handled yet', 806)
else:
warn('Observed Data objects cannot refer to other external objects (in STIX 2.0): %s in %s', 434, 'links', 'email-message')
if expressions:
expression = create_boolean_expression('AND', expressions)
</DeepExtract>
elif isinstance(prop, File):
<DeepExtract>
expressions = []
if prop.hashes is not None:
hash_expression = convert_hashes_to_pattern(prop.hashes)
if hash_expression:
expressions.append(hash_expression)
file_name_and_path_expression = convert_file_name_and_path_to_pattern(prop)
if file_name_and_path_expression:
expressions.append(file_name_and_path_expression)
properties_expressions = []
for prop_spec in select_file_properties():
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(prop, prop_1x) and getattr(prop, prop_1x):
term = add_comparison_expression(getattr(prop, prop_1x), object_path)
if term:
properties_expressions.append(term)
if properties_expressions:
expressions.extend(properties_expressions)
if isinstance(prop, WinExecutableFile):
windows_executable_file_expression = convert_windows_executable_file_to_pattern(prop)
if windows_executable_file_expression:
expressions.append(windows_executable_file_expression)
else:
warn('No WinExecutableFile properties found in %s', 613, str(prop))
if isinstance(prop, ArchiveFile):
archive_file_expressions = convert_archive_file_to_pattern(prop)
if archive_file_expressions:
expressions.append(archive_file_expressions)
else:
warn('No ArchiveFile properties found in %s', 613, str(prop))
if isinstance(prop, ImageFile):
image_file_expressions = convert_image_file_to_pattern(prop)
if image_file_expressions:
expressions.append(image_file_expressions)
else:
warn('No ImageFile properties found in %s', 613, str(prop))
if isinstance(prop, PDFFile):
pdf_file_expressions = convert_pdf_file_to_pattern(prop)
if pdf_file_expressions:
expressions.append(pdf_file_expressions)
else:
warn('No PDFFile properties found in %s', 613, str(prop))
if expressions:
expression = create_boolean_expression('AND', expressions)
</DeepExtract>
elif isinstance(prop, WinRegistryKey):
<DeepExtract>
expressions = []
if prop.key or prop.hive:
key_value_term = ''
if prop.hive:
if prop.hive.condition is None or is_equal_condition(prop.hive.condition):
key_value_term += prop.hive.value + '\\'
else:
warn('Condition %s on a hive property not handled', 812, prop.hive.condition)
if prop.key and prop.key.value.startswith(prop.hive.value):
warn('Hive property, %s, is already a prefix of the key property, %s', 623, prop.hive.value, prop.key.value)
key_value_term = prop.key.value
elif prop.key:
key_value_term += prop.key.value
else:
key_value_term = prop.key.value
expressions.append(create_term('windows-registry-key:key', prop.key.condition if prop.key else 'Equals', make_constant(key_value_term)))
if prop.values:
values_expressions = []
for v in prop.values:
value_expressions = []
for prop_spec in _REGISTRY_KEY_VALUES_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(v, prop_1x) and getattr(v, prop_1x):
term = add_comparison_expression(getattr(v, prop_1x), object_path)
if term:
value_expressions.append(term)
if value_expressions:
values_expressions.append(create_boolean_expression('OR', value_expressions))
expressions.extend(values_expressions)
if expressions:
expression = create_boolean_expression('AND', expressions)
</DeepExtract>
elif isinstance(prop, Process):
<DeepExtract>
expressions = []
for prop_spec in select_process_properties():
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(prop, prop_1x) and getattr(prop, prop_1x):
term = add_comparison_expression(getattr(prop, prop_1x), object_path)
if term:
expressions.append(term)
if prop.image_info:
process_info = convert_image_info_to_pattern(prop.image_info)
if process_info:
expressions.append(process_info)
if hasattr(prop, 'argument_list') and prop.argument_list:
argument_expressions = []
if get_option_value('spec_version') == '2.0':
for a in prop.argument_list:
argument_expressions.append(create_term('process:arguments[*]', a.condition, stix2.StringConstant(a.value)))
if argument_expressions:
expressions.append(create_boolean_expression('AND', argument_expressions))
else:
warn('The argument_list property of ProcessObj is not part of STIX 2.1', 418)
lhs = generate_lhs_for_missing_property('process:', None, 'argument_list[*]', 'process')
if lhs:
for a in prop.argument_list:
argument_expressions.append(create_term(lhs, a.condition, stix2.StringConstant(a.value)))
if argument_expressions:
expressions.append(create_boolean_expression('AND', argument_expressions))
elif not check_for_missing_policy('ignore'):
expressions.append(UnconvertedTerm('ProcessObj.argument_list', 'process'))
if hasattr(prop, 'environment_variable_list') and prop.environment_variable_list:
ev_expressions = []
for ev in prop.environment_variable_list:
ev_expressions.append(create_term('process:environment_variables[*].' + str(ev.name), ev.value.condition, stix2.StringConstant(str(ev.value))))
if ev_expressions:
expressions.append(create_boolean_expression('AND', ev_expressions))
if hasattr(prop, 'child_pid_list') and prop.child_pid_list:
child_pids_expressions = []
for cp in prop.child_pid_list:
child_pids_expressions.append(create_term('process:child_refs[*].pid', cp.condition, stix2.IntegerConstant(cp.value)))
if child_pids_expressions:
expressions.append(create_boolean_expression('AND', child_pids_expressions))
if hasattr(prop, 'network_connection_list') and prop.network_connection_list:
network_connection_expressions = []
for nc in prop.network_connection_list:
new_pattern = convert_network_connection_to_pattern(nc)
network_connection_expressions.append(new_pattern.collapse_reference(ObjectPathForElevator.make_object_path('process:opened_connection_refs[*]')))
if network_connection_expressions:
expressions.append(create_boolean_expression('AND', network_connection_expressions))
if isinstance(prop, WinProcess):
win_process_expression = convert_windows_process_to_pattern(prop)
if win_process_expression:
expressions.append(win_process_expression)
else:
warn('No WinProcess properties found in %s', 615, str(prop))
if isinstance(prop, WinService):
service_expression = convert_windows_service_to_pattern(prop)
if service_expression:
expressions.append(service_expression)
else:
warn('No WinService properties found in %s', 616, str(prop))
if expressions:
expression = create_boolean_expression('AND', expressions)
</DeepExtract>
elif isinstance(prop, Product):
<DeepExtract>
expressions = []
if prop.product:
expressions.append(add_comparison_expression(prop.product, 'software:name'))
if prop.vendor:
expressions.append(add_comparison_expression(prop.vendor, 'software:vendor'))
if prop.version:
expressions.append(add_comparison_expression(prop.version, 'software:version'))
if prop.language:
expressions.append(add_comparison_expression(prop.language, 'software:languages[*]'))
if expressions:
expression = create_boolean_expression('AND', expressions)
</DeepExtract>
elif isinstance(prop, DomainName):
<DeepExtract>
pattern = [create_term('domain-name:value', prop.value.condition, make_constant(prop.value.value))]
if related_objects:
for ro in related_objects:
if ro.relationship == 'Resolved_To':
new_pattern = convert_related_object_to_pattern(ro)
if new_pattern:
if isinstance(new_pattern, IdrefPlaceHolder):
pattern.append(ComparisonExpressionForElevator('=', 'domain-name:resolves_to_refs[*]', new_pattern))
else:
pattern.append(new_pattern.collapse_reference(ObjectPathForElevator.make_object_path('domain-name:resolves_to_refs[*]')))
else:
warn('The %s relationship involving %s is not explicitly supported in STIX 2.x', 427, ro.relationship, identifying_info(ro))
expression = create_boolean_expression('AND', pattern)
</DeepExtract>
elif isinstance(prop, Hostname):
<DeepExtract>
pattern = [create_term('domain-name:value', prop.hostname_value.condition, make_constant(prop.hostname_value.value))]
if related_objects:
for ro in related_objects:
if ro.relationship == 'Resolved_To':
new_pattern = convert_related_object_to_pattern(ro)
if new_pattern:
if isinstance(new_pattern, IdrefPlaceHolder):
pattern.append(ComparisonExpressionForElevator('=', 'domain-name:resolves_to_refs[*]', new_pattern))
else:
pattern.append(new_pattern.collapse_reference(ObjectPathForElevator.make_object_path('domain-name:resolves_to_refs[*]')))
else:
warn('The %s relationship involving %s is not explicitly supported in STIX 2.x', 427, ro.relationship, identifying_info(ro))
expression = create_boolean_expression('AND', pattern)
</DeepExtract>
elif isinstance(prop, Mutex):
<DeepExtract>
if prop.name:
expression = create_term('mutex:name', prop.name.condition, make_constant(prop.name.value))
else:
expression = None
</DeepExtract>
elif isinstance(prop, NetworkConnection):
<DeepExtract>
expressions = []
if prop.layer3_protocol is not None:
expressions.append(create_term('network-traffic:protocols[*]', prop.layer3_protocol.condition, make_constant(prop.layer3_protocol.value.lower())))
if prop.layer4_protocol is not None:
expressions.append(create_term('network-traffic:protocols[*]', prop.layer4_protocol.condition, make_constant(prop.layer4_protocol.value.lower())))
if prop.layer7_protocol is not None:
expressions.append(create_term('network-traffic:protocols[*]', prop.layer7_protocol.condition, make_constant(prop.layer7_protocol.value.lower())))
if prop.source_socket_address is not None:
expressions.extend(convert_socket_address_to_pattern(prop.source_socket_address, 'src'))
if prop.destination_socket_address is not None:
expressions.extend(convert_socket_address_to_pattern(prop.destination_socket_address, 'dst'))
if prop.layer7_connections is not None:
if prop.layer7_connections.http_session is not None:
extension_expressions = convert_http_session_to_pattern(prop.layer7_connections.http_session)
if extension_expressions:
expressions.append(extension_expressions)
expression = create_boolean_expression('AND', expressions)
</DeepExtract>
elif isinstance(prop, Account):
<DeepExtract>
expressions = []
if hasattr(prop, 'disabled') and prop.disabled:
expressions.append(create_term('user-account:is_disabled', 'Equals', stix2.BooleanConstant(prop.disabled)))
for prop_spec in _ACCOUNT_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(prop, prop_1x) and getattr(prop, prop_1x):
term = add_comparison_expression(getattr(prop, prop_1x), object_path)
if term:
expressions.append(term)
if prop.authentication and get_option_value('spec_version') == '2.1':
if prop.authentication.authentication_data:
expressions.append(create_term('user-account:credential', 'Equals', stix2.StringConstant(prop.authentication.authentication_data)))
if isinstance(prop, UnixUserAccount):
win_process_expression = convert_unix_user_to_pattern(prop)
if win_process_expression:
expressions.append(win_process_expression)
else:
warn('No UnixUserAccount properties found in %s', 615, str(prop))
elif isinstance(prop, WinComputerAccount):
expressions.append(create_term('user-account:account_type', 'Equals', stix2.StringConstant('windows-domain' if prop.domain else 'windows-local')))
if expressions:
expression = create_boolean_expression('AND', expressions)
</DeepExtract>
elif isinstance(prop, Port):
<DeepExtract>
expressions = []
if prop.port_value:
warn('port number is assumed to be a destination port', 725)
expressions.append(create_term('network-traffic:dst_port', prop.port_value.condition, make_constant(prop.port_value.value)))
if prop.layer4_protocol:
if prop.layer4_protocol.value is not None:
expressions.append(create_term('network-traffic:protocols[*]', prop.layer4_protocol.condition, make_constant(prop.layer4_protocol.value)))
if len(expressions) > 1:
expression = create_boolean_expression('AND', expressions)
else:
expression = expressions[0]
</DeepExtract>
elif isinstance(prop, HTTPSession):
<DeepExtract>
if prop.http_request_response:
(requests, responses) = split_into_requests_and_responses(prop.http_request_response)
if len(responses) != 0:
warn('HTTPServerResponse type is not supported in STIX 2.x', 429)
if len(requests) >= 1:
expression = convert_http_client_request_to_pattern(requests[0])
if len(requests) > 1:
warn('Only HTTP_Request_Response used for http-request-ext, using first value', 512)
expression = expression
</DeepExtract>
elif isinstance(prop, NetworkPacket):
<DeepExtract>
if prop.internet_layer:
internet_layer = prop.internet_layer
if internet_layer.ipv4 or internet_layer.ipv6:
warn('Internet_Layer/IP_Packet content not supported in STIX 2.x', 424)
else:
if internet_layer.icmpv4:
icmp_header = internet_layer.icmpv4.icmpv4_header
elif internet_layer.icmpv6:
icmp_header = internet_layer.icmpv6.icmpv6_header
else:
expression = None
expressions = []
if icmp_header.type_:
expressions.append(create_term("network-traffic:extensions.'icmp-ext'.icmp_type_hex", icmp_header.type_.condition, stix2.HexConstant(str(icmp_header.type_))))
if icmp_header.code:
expressions.append(create_term("network-traffic:extensions.'icmp-ext'.icmp_type_code", icmp_header.code.condition, stix2.HexConstant(str(icmp_header.code))))
handle_missing_properties_in_expression_for_icmp_header(expressions, icmp_header)
expression = create_boolean_expression('AND', expressions)
</DeepExtract>
elif isinstance(prop, NetworkSocket):
<DeepExtract>
expressions = []
for prop_spec in _SOCKET_MAP:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(prop, prop_1x) and getattr(prop, prop_1x):
value = getattr(prop, prop_1x)
if isinstance(value, bool):
value = 1 if value else 0
term = add_comparison_expression(value, object_path)
if term:
expressions.append(term)
if prop.address_family:
if prop.address_family in ADDRESS_FAMILY_ENUMERATION:
expressions.append(add_comparison_expression(prop.address_family, "network-traffic:extensions.'socket-ext'.address_family"))
else:
warn('%s in is not a member of the %s enumeration', 627, prop.address_family, 'address family')
if prop.options:
expressions.append(convert_socket_options_to_pattern(prop.options))
if prop.protocol:
expressions.append(add_comparison_expression(prop.protocol, 'network-traffic:protocols[*]'))
handle_missing_properties_in_expression_for_network_socket(expressions, prop)
expression = create_boolean_expression('AND', expressions)
</DeepExtract>
elif isinstance(prop, X509Certificate):
<DeepExtract>
expressions = []
if prop.certificate:
cert = prop.certificate
for prop_spec in _X509_PROPERTY_MAP:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(cert, prop_1x) and getattr(cert, prop_1x):
term = add_comparison_expression(getattr(cert, prop_1x), object_path)
if term:
expressions.append(term)
if cert.validity:
if cert.validity.not_before:
expressions.append(add_comparison_expression(cert.validity.not_before, 'x509-certificate:validity_not_before'))
if cert.validity.not_after:
expressions.append(add_comparison_expression(cert.validity.not_after, 'x509-certificate:validity_not_after'))
if cert.subject_public_key:
if cert.subject_public_key.public_key_algorithm:
add_comparison_expression(cert.subject_public_key.public_key_algorithm, 'x509-certificate:subject_public_key_algorithm')
if cert.subject_public_key.rsa_public_key:
rsa_key = cert.subject_public_key.rsa_public_key
if rsa_key.modulus:
add_comparison_expression(rsa_key.modulus, 'x509-certificate:subject_public_key_modulus')
if rsa_key.exponent:
add_comparison_expression(rsa_key.exponent, 'x509-certificate:subject_public_key_exponent')
if cert.standard_extensions:
v3_expressions = convert_v3_extension_to_pattern(cert.standard_extensions)
if v3_expressions:
expressions.append(v3_expressions)
if expressions:
expression = create_boolean_expression('AND', expressions)
</DeepExtract>
elif isinstance(prop, SocketAddress):
<DeepExtract>
expressions = list()
if prop.port is not None:
if prop.port.port_value is not None:
expressions.append(create_term('network-traffic:' + determine_socket_address_direction(prop, obs_id) + '_port', prop.port.port_value.condition, stix2.IntegerConstant(int(prop.port.port_value))))
if prop.port.layer4_protocol is not None:
expressions.append(create_term('network-traffic:protocols[*]', prop.port.layer4_protocol.condition, make_constant(prop.port.layer4_protocol.value.lower())))
if prop.ip_address is not None:
if prop.ip_address.address_value:
expressions.append(create_term('network-traffic:' + determine_socket_address_direction(prop, obs_id) + '_ref.value', prop.ip_address.address_value.condition, make_constant(prop.ip_address.address_value.value)))
elif prop.ip_address.object_reference:
new_pattern = handle_object_reference_for_pattern(prop.ip_address.object_reference)
if isinstance(new_pattern, IdrefPlaceHolder):
expressions.append(new_pattern)
else:
expressions.append(new_pattern.collapse_reference(ObjectPathForElevator.make_object_path('network-traffic:' + determine_socket_address_direction(prop, obs_id) + '_ref')))
if prop.hostname is not None:
if prop.ip_address is not None:
(warn('Only one of the properties: Hostname and IP_Address is allowed. Dropping Hostname %s', 645, prop.hostname.hostname_value),)
elif prop.hostname.is_domain_name and prop.hostname.hostname_value is not None:
expressions.append(create_term('network-traffic:' + determine_socket_address_direction(prop, obs_id) + '_ref.value', prop.hostname.hostname_value.condition, make_constant(prop.hostname.hostname_value.value)))
elif prop.hostname.naming_system is not None and any((x.value == 'DNS' for x in prop.hostname.naming_system)):
expressions.append(create_term('network-traffic:' + determine_socket_address_direction(prop, obs_id) + '_ref.value', prop.hostname.hostname_value.condition, make_constant(prop.hostname.hostname_value.value)))
expression = expressions
</DeepExtract>
if expression:
<DeepExtract>
if len(expression) == 1:
expression = expression[0]
elif len(expression) == 0:
expression = None
exp = BooleanExpressionForElevator('AND', [])
exp.root_types = set()
for arg in expression:
if not isinstance(arg, IdrefPlaceHolder):
if exp.operator == 'AND':
if not exp.root_types:
exp.root_types = arg.root_types.copy()
else:
exp.root_types &= arg.root_types
else:
exp.root_types |= arg.root_types
exp.add_operand(arg)
if use_parens:
pexp = ParentheticalExpressionForElevator(exp)
if hasattr(exp, 'root_types'):
pexp.root_types = exp.root_types.copy()
expression = pexp
else:
expression = exp
</DeepExtract>
elif isinstance(prop, Custom):
is_custom_object = True
if check_for_missing_policy('use-custom-properties') or check_for_missing_policy('use-extensions'):
if prop.custom_name:
if check_for_missing_policy('use-custom-properties'):
object_path_root = convert_to_custom_name(prop.custom_name, separator='-')
else:
if re.search('[A-Z]', prop.custom_name):
warn('Custom name %s has been converted to all lower case', 727, prop.custom_name)
object_path_root = prop.custom_name.lower()
<DeepExtract>
expressions = []
for cp in prop.custom_properties.property_:
if not re.match('[a-z0-9_]+', cp.name):
warn('The custom property name %s does not adhere to the specification rules', 617, cp.name)
if ' ' in cp.name:
info('The custom property name %s contains whitespace, replacing it with underscores', 624, cp.name)
custom_name = cp.name.replace(' ', '_')
if False:
custom_name = convert_to_custom_name(cp.name.replace(' ', '_'))
expressions.append(create_term(object_path_root + ':' + custom_name, cp.condition, make_constant(cp.value)))
term = create_boolean_expression('AND', expressions)
</DeepExtract>
if expression:
<DeepExtract>
if len([expression, term]) == 1:
expression = [expression, term][0]
elif len([expression, term]) == 0:
expression = None
exp = BooleanExpressionForElevator('AND', [])
exp.root_types = set()
for arg in [expression, term]:
if not isinstance(arg, IdrefPlaceHolder):
if exp.operator == 'AND':
if not exp.root_types:
exp.root_types = arg.root_types.copy()
else:
exp.root_types &= arg.root_types
else:
exp.root_types |= arg.root_types
exp.add_operand(arg)
if use_parens:
pexp = ParentheticalExpressionForElevator(exp)
if hasattr(exp, 'root_types'):
pexp.root_types = exp.root_types.copy()
expression = pexp
else:
expression = exp
</DeepExtract>
else:
expression = term
else:
warn('Custom object with no name cannot be handled yet', 811)
if not check_for_missing_policy('ignore'):
expression = UnconvertedTerm(obs_id)
else:
warn('Pattern expression with STIX 1.x custom objects in %s is ignored', 817, obs_id)
else:
warn('%s found in %s cannot be converted to a pattern, yet.', 808, str(obj.properties), obs_id)
if not check_for_missing_policy('ignore'):
expression = UnconvertedTerm(obs_id)
if prop.custom_properties is not None and (not is_custom_object):
if check_for_missing_policy('use-custom-properties') or check_for_missing_policy('use-extensions'):
<DeepExtract>
class_name = prop.__class__.__name__
if class_name in _CLASS_NAME_MAPPING:
object_type_name = _CLASS_NAME_MAPPING[class_name]
elif class_name == 'Address' and prop.category in _ADDRESS_NAME_MAPPING:
object_type_name = _ADDRESS_NAME_MAPPING[class_name]
else:
error('Cannot convert CybOX 2.x class name %s to an object_path_root_name', 813, class_name)
object_type_name = None
</DeepExtract>
if check_for_missing_policy('use-custom-properties'):
object_path_root = object_type_name
else:
extension_definition_id = get_extension_definition_id(object_type_name)
if extension_definition_id:
object_path_root = object_type_name + 'extensions.' + extension_definition_id
warn('Used %s for extension property for %s', 317, object_path_root, object_type_name)
else:
object_path_root = None
warn('No extension-definition was found for STIX 1 type %s', 312, object_type_name)
if object_path_root:
<DeepExtract>
expressions = []
for cp in prop.custom_properties.property_:
if not re.match('[a-z0-9_]+', cp.name):
warn('The custom property name %s does not adhere to the specification rules', 617, cp.name)
if ' ' in cp.name:
info('The custom property name %s contains whitespace, replacing it with underscores', 624, cp.name)
custom_name = cp.name.replace(' ', '_')
if use_custom_prefix:
custom_name = convert_to_custom_name(cp.name.replace(' ', '_'))
expressions.append(create_term(object_path_root + ':' + custom_name, cp.condition, make_constant(cp.value)))
term = create_boolean_expression('AND', expressions)
</DeepExtract>
if expression:
<DeepExtract>
if len([expression, term]) == 1:
expression = [expression, term][0]
elif len([expression, term]) == 0:
expression = None
exp = BooleanExpressionForElevator('AND', [])
exp.root_types = set()
for arg in [expression, term]:
if not isinstance(arg, IdrefPlaceHolder):
if exp.operator == 'AND':
if not exp.root_types:
exp.root_types = arg.root_types.copy()
else:
exp.root_types &= arg.root_types
else:
exp.root_types |= arg.root_types
exp.add_operand(arg)
if use_parens:
pexp = ParentheticalExpressionForElevator(exp)
if hasattr(exp, 'root_types'):
pexp.root_types = exp.root_types.copy()
expression = pexp
else:
expression = exp
</DeepExtract>
else:
expression = term
else:
warn('Pattern expression with STIX 1.x custom properties in %s is ignored', 818, obs_id)
if not expression:
warn('No pattern term was created from %s', 422, obs_id)
expression = UnconvertedTerm(obs_id, determine_term_type(prop))
elif obj.id_:
add_id_value(obj.id_, obs_id)
<DeepExtract>
global _PATTERN_CACHE
if expression:
_PATTERN_CACHE[obj.id_] = expression
</DeepExtract>
return expression
|
def convert_object_to_pattern(obj, obs_id):
related_objects = obj.related_objects
prop = obj.properties
expression = None
is_custom_object = False
if prop:
if isinstance(prop, Address):
if prop.address_value is None:
if prop.object_reference is None:
expression = None
else:
expression = handle_object_reference_for_pattern(prop.object_reference)
cond = prop.address_value.condition
if prop.category == prop.CAT_IPV4:
expression = create_term('ipv4-addr:value', cond, make_constant(prop.address_value.value))
elif prop.category == prop.CAT_IPV6:
expression = create_term('ipv6-addr:value', cond, make_constant(prop.address_value.value))
elif prop.category == prop.CAT_MAC:
expression = create_term('mac-addr:value', cond, make_constant(prop.address_value.value))
elif prop.category == prop.CAT_EMAIL:
expression = create_term('email-addr:value', cond, make_constant(prop.address_value.value))
else:
warn('The address type %s is not part of Cybox 3.0', 421, prop.category)
elif isinstance(prop, Artifact):
expressions = []
if prop.content_type:
expressions.append(create_term('artifact:mime_type', prop.content_type.condition, prop.content_type))
if prop.raw_artifact:
expressions.append(create_term('artifact:payload_bin', prop.raw_artifact.condition, prop.raw_artifact.value))
if prop.raw_artifact_reference:
expressions.append(create_term('artifact:url', prop.raw_artifact_reference.condition, prop.raw_artifact_reference.value))
if prop.hashes:
expressions.append(convert_hashes_to_pattern(prop.hashes))
if expressions:
expression = create_boolean_expression('AND', expressions)
elif isinstance(prop, AutonomousSystem):
expressions = []
if prop.number:
expressions.append(add_comparison_expression(prop.number, 'autonomous-system:number'))
if prop.name:
expressions.append(add_comparison_expression(prop.name, 'autonomous-system:name'))
if prop.regional_internet_registry:
expressions.append(add_comparison_expression(prop.regional_internet_registry, 'autonomous-system:rir'))
if expressions:
expression = create_boolean_expression('AND', expressions)
elif isinstance(prop, URI):
expression = create_term('url:value', prop.value.condition, make_constant(prop.value.value))
elif isinstance(prop, EmailMessage):
expressions = []
if prop.header is not None:
expressions.append(convert_email_header_to_pattern(prop.header, _EMAIL_HEADER_PROPERTIES))
message_id_term = handle_message_id_property(prop.header)
if message_id_term:
expressions.append(message_id_term)
add_headers = convert_email_header_to_pattern(prop.header, _EMAIL_ADDITIONAL_HEADERS_PROPERTIES)
if add_headers:
expressions.append(add_headers)
if prop.attachments is not None:
for attachment in prop.attachments:
new_pattern = convert_attachment_to_ref(attachment)
if isinstance(new_pattern, IdrefPlaceHolder):
expressions.append(ComparisonExpressionForElevator('=', 'email-message:body_multipart[*].body_raw_ref', new_pattern))
else:
expressions.append(new_pattern.collapse_reference(ObjectPathForElevator.make_object_path('email-message:body_multipart[*].body_raw_ref')))
if prop.raw_body is not None:
if not prop.raw_body.value:
warn('%s contains no value', 621, 'Email raw body')
else:
warn('Email raw body not handled yet', 806)
if prop.links is not None:
if get_option_value('spec_version') == '2.1':
lhs = generate_lhs_for_missing_property('email-message:', None, 'link_refs[*].value', 'email-message')
if lhs:
for link in prop.links:
if id_in_observable_mappings(link.object_reference):
referenced_obs = get_obs_from_mapping(link.object_reference)
exp = convert_observable_to_pattern(referenced_obs)
rhs = exp.rhs
else:
rhs = IdrefPlaceHolder(link.object_reference)
expressions.append(ComparisonExpressionForElevator('=', lhs, rhs))
else:
warn('Email links not handled yet', 806)
else:
warn('Observed Data objects cannot refer to other external objects (in STIX 2.0): %s in %s', 434, 'links', 'email-message')
if expressions:
expression = create_boolean_expression('AND', expressions)
elif isinstance(prop, File):
expressions = []
if prop.hashes is not None:
hash_expression = convert_hashes_to_pattern(prop.hashes)
if hash_expression:
expressions.append(hash_expression)
file_name_and_path_expression = convert_file_name_and_path_to_pattern(prop)
if file_name_and_path_expression:
expressions.append(file_name_and_path_expression)
properties_expressions = []
for prop_spec in select_file_properties():
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(prop, prop_1x) and getattr(prop, prop_1x):
term = add_comparison_expression(getattr(prop, prop_1x), object_path)
if term:
properties_expressions.append(term)
if properties_expressions:
expressions.extend(properties_expressions)
if isinstance(prop, WinExecutableFile):
windows_executable_file_expression = convert_windows_executable_file_to_pattern(prop)
if windows_executable_file_expression:
expressions.append(windows_executable_file_expression)
else:
warn('No WinExecutableFile properties found in %s', 613, str(prop))
if isinstance(prop, ArchiveFile):
archive_file_expressions = convert_archive_file_to_pattern(prop)
if archive_file_expressions:
expressions.append(archive_file_expressions)
else:
warn('No ArchiveFile properties found in %s', 613, str(prop))
if isinstance(prop, ImageFile):
image_file_expressions = convert_image_file_to_pattern(prop)
if image_file_expressions:
expressions.append(image_file_expressions)
else:
warn('No ImageFile properties found in %s', 613, str(prop))
if isinstance(prop, PDFFile):
pdf_file_expressions = convert_pdf_file_to_pattern(prop)
if pdf_file_expressions:
expressions.append(pdf_file_expressions)
else:
warn('No PDFFile properties found in %s', 613, str(prop))
if expressions:
expression = create_boolean_expression('AND', expressions)
elif isinstance(prop, WinRegistryKey):
expressions = []
if prop.key or prop.hive:
key_value_term = ''
if prop.hive:
if prop.hive.condition is None or is_equal_condition(prop.hive.condition):
key_value_term += prop.hive.value + '\\'
else:
warn('Condition %s on a hive property not handled', 812, prop.hive.condition)
if prop.key and prop.key.value.startswith(prop.hive.value):
warn('Hive property, %s, is already a prefix of the key property, %s', 623, prop.hive.value, prop.key.value)
key_value_term = prop.key.value
elif prop.key:
key_value_term += prop.key.value
else:
key_value_term = prop.key.value
expressions.append(create_term('windows-registry-key:key', prop.key.condition if prop.key else 'Equals', make_constant(key_value_term)))
if prop.values:
values_expressions = []
for v in prop.values:
value_expressions = []
for prop_spec in _REGISTRY_KEY_VALUES_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(v, prop_1x) and getattr(v, prop_1x):
term = add_comparison_expression(getattr(v, prop_1x), object_path)
if term:
value_expressions.append(term)
if value_expressions:
values_expressions.append(create_boolean_expression('OR', value_expressions))
expressions.extend(values_expressions)
if expressions:
expression = create_boolean_expression('AND', expressions)
elif isinstance(prop, Process):
expressions = []
for prop_spec in select_process_properties():
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(prop, prop_1x) and getattr(prop, prop_1x):
term = add_comparison_expression(getattr(prop, prop_1x), object_path)
if term:
expressions.append(term)
if prop.image_info:
process_info = convert_image_info_to_pattern(prop.image_info)
if process_info:
expressions.append(process_info)
if hasattr(prop, 'argument_list') and prop.argument_list:
argument_expressions = []
if get_option_value('spec_version') == '2.0':
for a in prop.argument_list:
argument_expressions.append(create_term('process:arguments[*]', a.condition, stix2.StringConstant(a.value)))
if argument_expressions:
expressions.append(create_boolean_expression('AND', argument_expressions))
else:
warn('The argument_list property of ProcessObj is not part of STIX 2.1', 418)
lhs = generate_lhs_for_missing_property('process:', None, 'argument_list[*]', 'process')
if lhs:
for a in prop.argument_list:
argument_expressions.append(create_term(lhs, a.condition, stix2.StringConstant(a.value)))
if argument_expressions:
expressions.append(create_boolean_expression('AND', argument_expressions))
elif not check_for_missing_policy('ignore'):
expressions.append(UnconvertedTerm('ProcessObj.argument_list', 'process'))
if hasattr(prop, 'environment_variable_list') and prop.environment_variable_list:
ev_expressions = []
for ev in prop.environment_variable_list:
ev_expressions.append(create_term('process:environment_variables[*].' + str(ev.name), ev.value.condition, stix2.StringConstant(str(ev.value))))
if ev_expressions:
expressions.append(create_boolean_expression('AND', ev_expressions))
if hasattr(prop, 'child_pid_list') and prop.child_pid_list:
child_pids_expressions = []
for cp in prop.child_pid_list:
child_pids_expressions.append(create_term('process:child_refs[*].pid', cp.condition, stix2.IntegerConstant(cp.value)))
if child_pids_expressions:
expressions.append(create_boolean_expression('AND', child_pids_expressions))
if hasattr(prop, 'network_connection_list') and prop.network_connection_list:
network_connection_expressions = []
for nc in prop.network_connection_list:
new_pattern = convert_network_connection_to_pattern(nc)
network_connection_expressions.append(new_pattern.collapse_reference(ObjectPathForElevator.make_object_path('process:opened_connection_refs[*]')))
if network_connection_expressions:
expressions.append(create_boolean_expression('AND', network_connection_expressions))
if isinstance(prop, WinProcess):
win_process_expression = convert_windows_process_to_pattern(prop)
if win_process_expression:
expressions.append(win_process_expression)
else:
warn('No WinProcess properties found in %s', 615, str(prop))
if isinstance(prop, WinService):
service_expression = convert_windows_service_to_pattern(prop)
if service_expression:
expressions.append(service_expression)
else:
warn('No WinService properties found in %s', 616, str(prop))
if expressions:
expression = create_boolean_expression('AND', expressions)
elif isinstance(prop, Product):
expressions = []
if prop.product:
expressions.append(add_comparison_expression(prop.product, 'software:name'))
if prop.vendor:
expressions.append(add_comparison_expression(prop.vendor, 'software:vendor'))
if prop.version:
expressions.append(add_comparison_expression(prop.version, 'software:version'))
if prop.language:
expressions.append(add_comparison_expression(prop.language, 'software:languages[*]'))
if expressions:
expression = create_boolean_expression('AND', expressions)
elif isinstance(prop, DomainName):
pattern = [create_term('domain-name:value', prop.value.condition, make_constant(prop.value.value))]
if related_objects:
for ro in related_objects:
if ro.relationship == 'Resolved_To':
new_pattern = convert_related_object_to_pattern(ro)
if new_pattern:
if isinstance(new_pattern, IdrefPlaceHolder):
pattern.append(ComparisonExpressionForElevator('=', 'domain-name:resolves_to_refs[*]', new_pattern))
else:
pattern.append(new_pattern.collapse_reference(ObjectPathForElevator.make_object_path('domain-name:resolves_to_refs[*]')))
else:
warn('The %s relationship involving %s is not explicitly supported in STIX 2.x', 427, ro.relationship, identifying_info(ro))
expression = create_boolean_expression('AND', pattern)
elif isinstance(prop, Hostname):
pattern = [create_term('domain-name:value', prop.hostname_value.condition, make_constant(prop.hostname_value.value))]
if related_objects:
for ro in related_objects:
if ro.relationship == 'Resolved_To':
new_pattern = convert_related_object_to_pattern(ro)
if new_pattern:
if isinstance(new_pattern, IdrefPlaceHolder):
pattern.append(ComparisonExpressionForElevator('=', 'domain-name:resolves_to_refs[*]', new_pattern))
else:
pattern.append(new_pattern.collapse_reference(ObjectPathForElevator.make_object_path('domain-name:resolves_to_refs[*]')))
else:
warn('The %s relationship involving %s is not explicitly supported in STIX 2.x', 427, ro.relationship, identifying_info(ro))
expression = create_boolean_expression('AND', pattern)
elif isinstance(prop, Mutex):
if prop.name:
expression = create_term('mutex:name', prop.name.condition, make_constant(prop.name.value))
else:
expression = None
elif isinstance(prop, NetworkConnection):
expressions = []
if prop.layer3_protocol is not None:
expressions.append(create_term('network-traffic:protocols[*]', prop.layer3_protocol.condition, make_constant(prop.layer3_protocol.value.lower())))
if prop.layer4_protocol is not None:
expressions.append(create_term('network-traffic:protocols[*]', prop.layer4_protocol.condition, make_constant(prop.layer4_protocol.value.lower())))
if prop.layer7_protocol is not None:
expressions.append(create_term('network-traffic:protocols[*]', prop.layer7_protocol.condition, make_constant(prop.layer7_protocol.value.lower())))
if prop.source_socket_address is not None:
expressions.extend(convert_socket_address_to_pattern(prop.source_socket_address, 'src'))
if prop.destination_socket_address is not None:
expressions.extend(convert_socket_address_to_pattern(prop.destination_socket_address, 'dst'))
if prop.layer7_connections is not None:
if prop.layer7_connections.http_session is not None:
extension_expressions = convert_http_session_to_pattern(prop.layer7_connections.http_session)
if extension_expressions:
expressions.append(extension_expressions)
expression = create_boolean_expression('AND', expressions)
elif isinstance(prop, Account):
expressions = []
if hasattr(prop, 'disabled') and prop.disabled:
expressions.append(create_term('user-account:is_disabled', 'Equals', stix2.BooleanConstant(prop.disabled)))
for prop_spec in _ACCOUNT_PROPERTIES:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(prop, prop_1x) and getattr(prop, prop_1x):
term = add_comparison_expression(getattr(prop, prop_1x), object_path)
if term:
expressions.append(term)
if prop.authentication and get_option_value('spec_version') == '2.1':
if prop.authentication.authentication_data:
expressions.append(create_term('user-account:credential', 'Equals', stix2.StringConstant(prop.authentication.authentication_data)))
if isinstance(prop, UnixUserAccount):
win_process_expression = convert_unix_user_to_pattern(prop)
if win_process_expression:
expressions.append(win_process_expression)
else:
warn('No UnixUserAccount properties found in %s', 615, str(prop))
elif isinstance(prop, WinComputerAccount):
expressions.append(create_term('user-account:account_type', 'Equals', stix2.StringConstant('windows-domain' if prop.domain else 'windows-local')))
if expressions:
expression = create_boolean_expression('AND', expressions)
elif isinstance(prop, Port):
expressions = []
if prop.port_value:
warn('port number is assumed to be a destination port', 725)
expressions.append(create_term('network-traffic:dst_port', prop.port_value.condition, make_constant(prop.port_value.value)))
if prop.layer4_protocol:
if prop.layer4_protocol.value is not None:
expressions.append(create_term('network-traffic:protocols[*]', prop.layer4_protocol.condition, make_constant(prop.layer4_protocol.value)))
if len(expressions) > 1:
expression = create_boolean_expression('AND', expressions)
else:
expression = expressions[0]
elif isinstance(prop, HTTPSession):
if prop.http_request_response:
(requests, responses) = split_into_requests_and_responses(prop.http_request_response)
if len(responses) != 0:
warn('HTTPServerResponse type is not supported in STIX 2.x', 429)
if len(requests) >= 1:
expression = convert_http_client_request_to_pattern(requests[0])
if len(requests) > 1:
warn('Only HTTP_Request_Response used for http-request-ext, using first value', 512)
expression = expression
elif isinstance(prop, NetworkPacket):
if prop.internet_layer:
internet_layer = prop.internet_layer
if internet_layer.ipv4 or internet_layer.ipv6:
warn('Internet_Layer/IP_Packet content not supported in STIX 2.x', 424)
else:
if internet_layer.icmpv4:
icmp_header = internet_layer.icmpv4.icmpv4_header
elif internet_layer.icmpv6:
icmp_header = internet_layer.icmpv6.icmpv6_header
else:
expression = None
expressions = []
if icmp_header.type_:
expressions.append(create_term("network-traffic:extensions.'icmp-ext'.icmp_type_hex", icmp_header.type_.condition, stix2.HexConstant(str(icmp_header.type_))))
if icmp_header.code:
expressions.append(create_term("network-traffic:extensions.'icmp-ext'.icmp_type_code", icmp_header.code.condition, stix2.HexConstant(str(icmp_header.code))))
handle_missing_properties_in_expression_for_icmp_header(expressions, icmp_header)
expression = create_boolean_expression('AND', expressions)
elif isinstance(prop, NetworkSocket):
expressions = []
for prop_spec in _SOCKET_MAP:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(prop, prop_1x) and getattr(prop, prop_1x):
value = getattr(prop, prop_1x)
if isinstance(value, bool):
value = 1 if value else 0
term = add_comparison_expression(value, object_path)
if term:
expressions.append(term)
if prop.address_family:
if prop.address_family in ADDRESS_FAMILY_ENUMERATION:
expressions.append(add_comparison_expression(prop.address_family, "network-traffic:extensions.'socket-ext'.address_family"))
else:
warn('%s is not a member of the %s enumeration', 627, prop.address_family, 'address family')
if prop.options:
expressions.append(convert_socket_options_to_pattern(prop.options))
if prop.protocol:
expressions.append(add_comparison_expression(prop.protocol, 'network-traffic:protocols[*]'))
handle_missing_properties_in_expression_for_network_socket(expressions, prop)
expression = create_boolean_expression('AND', expressions)
elif isinstance(prop, X509Certificate):
expressions = []
if prop.certificate:
cert = prop.certificate
for prop_spec in _X509_PROPERTY_MAP:
prop_1x = prop_spec[0]
object_path = prop_spec[1]
if hasattr(cert, prop_1x) and getattr(cert, prop_1x):
term = add_comparison_expression(getattr(cert, prop_1x), object_path)
if term:
expressions.append(term)
if cert.validity:
if cert.validity.not_before:
expressions.append(add_comparison_expression(cert.validity.not_before, 'x509-certificate:validity_not_before'))
if cert.validity.not_after:
expressions.append(add_comparison_expression(cert.validity.not_after, 'x509-certificate:validity_not_after'))
if cert.subject_public_key:
if cert.subject_public_key.public_key_algorithm:
add_comparison_expression(cert.subject_public_key.public_key_algorithm, 'x509-certificate:subject_public_key_algorithm')
if cert.subject_public_key.rsa_public_key:
rsa_key = cert.subject_public_key.rsa_public_key
if rsa_key.modulus:
add_comparison_expression(rsa_key.modulus, 'x509-certificate:subject_public_key_modulus')
if rsa_key.exponent:
add_comparison_expression(rsa_key.exponent, 'x509-certificate:subject_public_key_exponent')
if cert.standard_extensions:
v3_expressions = convert_v3_extension_to_pattern(cert.standard_extensions)
if v3_expressions:
expressions.append(v3_expressions)
if expressions:
expression = create_boolean_expression('AND', expressions)
elif isinstance(prop, SocketAddress):
expressions = list()
if prop.port is not None:
if prop.port.port_value is not None:
expressions.append(create_term('network-traffic:' + determine_socket_address_direction(prop, obs_id) + '_port', prop.port.port_value.condition, stix2.IntegerConstant(int(prop.port.port_value))))
if prop.port.layer4_protocol is not None:
expressions.append(create_term('network-traffic:protocols[*]', prop.port.layer4_protocol.condition, make_constant(prop.port.layer4_protocol.value.lower())))
if prop.ip_address is not None:
if prop.ip_address.address_value:
expressions.append(create_term('network-traffic:' + determine_socket_address_direction(prop, obs_id) + '_ref.value', prop.ip_address.address_value.condition, make_constant(prop.ip_address.address_value.value)))
elif prop.ip_address.object_reference:
new_pattern = handle_object_reference_for_pattern(prop.ip_address.object_reference)
if isinstance(new_pattern, IdrefPlaceHolder):
expressions.append(new_pattern)
else:
expressions.append(new_pattern.collapse_reference(ObjectPathForElevator.make_object_path('network-traffic:' + determine_socket_address_direction(prop, obs_id) + '_ref')))
if prop.hostname is not None:
if prop.ip_address is not None:
(warn('Only one of the properties: Hostname and IP_Address is allowed. Dropping Hostname %s', 645, prop.hostname.hostname_value),)
elif prop.hostname.is_domain_name and prop.hostname.hostname_value is not None:
expressions.append(create_term('network-traffic:' + determine_socket_address_direction(prop, obs_id) + '_ref.value', prop.hostname.hostname_value.condition, make_constant(prop.hostname.hostname_value.value)))
elif prop.hostname.naming_system is not None and any((x.value == 'DNS' for x in prop.hostname.naming_system)):
expressions.append(create_term('network-traffic:' + determine_socket_address_direction(prop, obs_id) + '_ref.value', prop.hostname.hostname_value.condition, make_constant(prop.hostname.hostname_value.value)))
expression = expressions
if expression:
if len(expression) == 1:
expression = expression[0]
elif len(expression) == 0:
expression = None
exp = BooleanExpressionForElevator('AND', [])
exp.root_types = set()
for arg in expression:
if not isinstance(arg, IdrefPlaceHolder):
if exp.operator == 'AND':
if not exp.root_types:
exp.root_types = arg.root_types.copy()
else:
exp.root_types &= arg.root_types
else:
exp.root_types |= arg.root_types
exp.add_operand(arg)
if use_parens:
pexp = ParentheticalExpressionForElevator(exp)
if hasattr(exp, 'root_types'):
pexp.root_types = exp.root_types.copy()
expression = pexp
else:
expression = exp
elif isinstance(prop, Custom):
is_custom_object = True
if check_for_missing_policy('use-custom-properties') or check_for_missing_policy('use-extensions'):
if prop.custom_name:
if check_for_missing_policy('use-custom-properties'):
object_path_root = convert_to_custom_name(prop.custom_name, separator='-')
else:
if re.search('[A-Z]', prop.custom_name):
warn('Custom name %s has been converted to all lower case', 727, prop.custom_name)
object_path_root = prop.custom_name.lower()
expressions = []
for cp in prop.custom_properties.property_:
if not re.match('[a-z0-9_]+', cp.name):
warn('The custom property name %s does not adhere to the specification rules', 617, cp.name)
if ' ' in cp.name:
info('The custom property name %s contains whitespace, replacing it with underscores', 624, cp.name)
custom_name = cp.name.replace(' ', '_')
if False:
custom_name = convert_to_custom_name(cp.name.replace(' ', '_'))
expressions.append(create_term(object_path_root + ':' + custom_name, cp.condition, make_constant(cp.value)))
term = create_boolean_expression('AND', expressions)
if expression:
if len([expression, term]) == 1:
expression = [expression, term][0]
elif len([expression, term]) == 0:
expression = None
exp = BooleanExpressionForElevator('AND', [])
exp.root_types = set()
for arg in [expression, term]:
if not isinstance(arg, IdrefPlaceHolder):
if exp.operator == 'AND':
if not exp.root_types:
exp.root_types = arg.root_types.copy()
else:
exp.root_types &= arg.root_types
else:
exp.root_types |= arg.root_types
exp.add_operand(arg)
if use_parens:
pexp = ParentheticalExpressionForElevator(exp)
if hasattr(exp, 'root_types'):
pexp.root_types = exp.root_types.copy()
expression = pexp
else:
expression = exp
else:
expression = term
else:
warn('Custom object with no name cannot be handled yet', 811)
if not check_for_missing_policy('ignore'):
expression = UnconvertedTerm(obs_id)
else:
warn('Pattern expression with STIX 1.x custom objects in %s is ignored', 817, obs_id)
else:
warn('%s found in %s cannot be converted to a pattern, yet.', 808, str(obj.properties), obs_id)
if not check_for_missing_policy('ignore'):
expression = UnconvertedTerm(obs_id)
if prop.custom_properties is not None and (not is_custom_object):
if check_for_missing_policy('use-custom-properties') or check_for_missing_policy('use-extensions'):
class_name = prop.__class__.__name__
if class_name in _CLASS_NAME_MAPPING:
object_type_name = _CLASS_NAME_MAPPING[class_name]
elif class_name == 'Address' and prop.category in _ADDRESS_NAME_MAPPING:
object_type_name = _ADDRESS_NAME_MAPPING[class_name]
else:
error('Cannot convert CybOX 2.x class name %s to an object_path_root_name', 813, class_name)
object_type_name = None
if check_for_missing_policy('use-custom-properties'):
object_path_root = object_type_name
else:
extension_definition_id = get_extension_definition_id(object_type_name)
if extension_definition_id:
object_path_root = object_type_name + 'extensions.' + extension_definition_id
warn('Used %s for extension property for %s', 317, object_path_root, object_type_name)
else:
object_path_root = None
warn('No extension-definition was found for STIX 1 type %s', 312, object_type_name)
if object_path_root:
expressions = []
for cp in prop.custom_properties.property_:
if not re.match('[a-z0-9_]+', cp.name):
warn('The custom property name %s does not adhere to the specification rules', 617, cp.name)
if ' ' in cp.name:
info('The custom property name %s contains whitespace, replacing it with underscores', 624, cp.name)
custom_name = cp.name.replace(' ', '_')
if use_custom_prefix:
custom_name = convert_to_custom_name(cp.name.replace(' ', '_'))
expressions.append(create_term(object_path_root + ':' + custom_name, cp.condition, make_constant(cp.value)))
term = create_boolean_expression('AND', expressions)
if expression:
if len([expression, term]) == 1:
expression = [expression, term][0]
elif len([expression, term]) == 0:
expression = None
exp = BooleanExpressionForElevator('AND', [])
exp.root_types = set()
for arg in [expression, term]:
if not isinstance(arg, IdrefPlaceHolder):
if exp.operator == 'AND':
if not exp.root_types:
exp.root_types = arg.root_types.copy()
else:
exp.root_types &= arg.root_types
else:
exp.root_types |= arg.root_types
exp.add_operand(arg)
if use_parens:
pexp = ParentheticalExpressionForElevator(exp)
if hasattr(exp, 'root_types'):
pexp.root_types = exp.root_types.copy()
expression = pexp
else:
expression = exp
else:
expression = term
else:
warn('Pattern expression with STIX 1.x custom properties in %s is ignored', 818, obs_id)
if not expression:
warn('No pattern term was created from %s', 422, obs_id)
expression = UnconvertedTerm(obs_id, determine_term_type(prop))
elif obj.id_:
add_id_value(obj.id_, obs_id)
global _PATTERN_CACHE
if expression:
_PATTERN_CACHE[obj.id_] = expression
return expression
|
cti-stix-elevator
|
positive
|
@app.route('/', methods=['GET', 'POST'])
@app.route('/index/', methods=['GET', 'POST'])
def index():
form = c_NetSelectForm()
output = None
form.netname.choices = []
for org in ORG_LIST:
for net in org.nets:
form.netname.choices.append(('%s|%s|%s|%s|%s' % (org.id, org.shard, net.id, net.mxsn1, net.mxsn2), '%s [%s]' % (net.name, org.name)))
if request.method == 'POST':
output = c_Output()
netparams = form.netname.data.split('|')
<DeepExtract>
orgid = netparams[0]
orgshard = netparams[1]
netid = netparams[2]
mxserial1 = netparams[3]
mxserial2 = netparams[4]
print('INFO: Running report for net "%s": MX1 "%s", MX2 "%s"' % (netid, mxserial1, mxserial2))
clientlists = []
clist = getclientlist(orgshard, mxserial1, TIMERANGE_SHORT_MINUTES * 60)
if not clist is None:
clientlists.append(clist)
if not mxserial2 is None:
clist = getclientlist(orgshard, mxserial2, TIMERANGE_SHORT_MINUTES * 60)
if not clist is None:
clientlists.append(clist)
db = sqlite3.connect(':memory:')
dbcursor = db.cursor()
dbcursor.execute('CREATE TABLE clients\n (UsageSent real, UsageRecv real, UsageTotal real, id text, description text, dhcpHostName text, \n mac text, ip text, vlan text)')
db.commit()
for cl in clientlists:
for client in cl:
dbcursor.execute('INSERT INTO clients VALUES (?,?,?,?,?,?,?,?,?)', (client['usage']['sent'], client['usage']['recv'], client['usage']['sent'] + client['usage']['recv'], client['id'], client['description'], client['dhcpHostname'], client['mac'], client['ip'], client['vlan']))
db.commit()
dbcursor = db.cursor()
dbcursor.execute('SELECT UsageTotal, \n UsageSent, \n UsageRecv, \n description, \n dhcpHostName, \n mac, \n ip, \n vlan \n FROM clients ORDER BY UsageTotal DESC LIMIT 10')
retvalue = dbcursor.fetchall()
db.close()
output.short = retvalue
</DeepExtract>
<DeepExtract>
orgid = netparams[0]
orgshard = netparams[1]
netid = netparams[2]
mxserial1 = netparams[3]
mxserial2 = netparams[4]
print('INFO: Running report for net "%s": MX1 "%s", MX2 "%s"' % (netid, mxserial1, mxserial2))
clientlists = []
clist = getclientlist(orgshard, mxserial1, TIMERANGE_MEDIUM_MINUTES * 60)
if not clist is None:
clientlists.append(clist)
if not mxserial2 is None:
clist = getclientlist(orgshard, mxserial2, TIMERANGE_MEDIUM_MINUTES * 60)
if not clist is None:
clientlists.append(clist)
db = sqlite3.connect(':memory:')
dbcursor = db.cursor()
dbcursor.execute('CREATE TABLE clients\n (UsageSent real, UsageRecv real, UsageTotal real, id text, description text, dhcpHostName text, \n mac text, ip text, vlan text)')
db.commit()
for cl in clientlists:
for client in cl:
dbcursor.execute('INSERT INTO clients VALUES (?,?,?,?,?,?,?,?,?)', (client['usage']['sent'], client['usage']['recv'], client['usage']['sent'] + client['usage']['recv'], client['id'], client['description'], client['dhcpHostname'], client['mac'], client['ip'], client['vlan']))
db.commit()
dbcursor = db.cursor()
dbcursor.execute('SELECT UsageTotal, \n UsageSent, \n UsageRecv, \n description, \n dhcpHostName, \n mac, \n ip, \n vlan \n FROM clients ORDER BY UsageTotal DESC LIMIT 10')
retvalue = dbcursor.fetchall()
db.close()
output.mid = retvalue
</DeepExtract>
<DeepExtract>
orgid = netparams[0]
orgshard = netparams[1]
netid = netparams[2]
mxserial1 = netparams[3]
mxserial2 = netparams[4]
print('INFO: Running report for net "%s": MX1 "%s", MX2 "%s"' % (netid, mxserial1, mxserial2))
clientlists = []
clist = getclientlist(orgshard, mxserial1, TIMERANGE_LONG_MINUTES * 60)
if not clist is None:
clientlists.append(clist)
if not mxserial2 is None:
clist = getclientlist(orgshard, mxserial2, TIMERANGE_LONG_MINUTES * 60)
if not clist is None:
clientlists.append(clist)
db = sqlite3.connect(':memory:')
dbcursor = db.cursor()
dbcursor.execute('CREATE TABLE clients\n (UsageSent real, UsageRecv real, UsageTotal real, id text, description text, dhcpHostName text, \n mac text, ip text, vlan text)')
db.commit()
for cl in clientlists:
for client in cl:
dbcursor.execute('INSERT INTO clients VALUES (?,?,?,?,?,?,?,?,?)', (client['usage']['sent'], client['usage']['recv'], client['usage']['sent'] + client['usage']['recv'], client['id'], client['description'], client['dhcpHostname'], client['mac'], client['ip'], client['vlan']))
db.commit()
dbcursor = db.cursor()
dbcursor.execute('SELECT UsageTotal, \n UsageSent, \n UsageRecv, \n description, \n dhcpHostName, \n mac, \n ip, \n vlan \n FROM clients ORDER BY UsageTotal DESC LIMIT 10')
retvalue = dbcursor.fetchall()
db.close()
output.long = retvalue
</DeepExtract>
output.timestamp = str(datetime.datetime.now())
return render_template('index.html', form=form, tshort=TIMERANGE_SHORT_MINUTES, tmid=TIMERANGE_MEDIUM_MINUTES, tlong=TIMERANGE_LONG_MINUTES, output=output)
|
@app.route('/', methods=['GET', 'POST'])
@app.route('/index/', methods=['GET', 'POST'])
def index():
form = c_NetSelectForm()
output = None
form.netname.choices = []
for org in ORG_LIST:
for net in org.nets:
form.netname.choices.append(('%s|%s|%s|%s|%s' % (org.id, org.shard, net.id, net.mxsn1, net.mxsn2), '%s [%s]' % (net.name, org.name)))
if request.method == 'POST':
output = c_Output()
netparams = form.netname.data.split('|')
orgid = netparams[0]
orgshard = netparams[1]
netid = netparams[2]
mxserial1 = netparams[3]
mxserial2 = netparams[4]
print('INFO: Running report for net "%s": MX1 "%s", MX2 "%s"' % (netid, mxserial1, mxserial2))
clientlists = []
clist = getclientlist(orgshard, mxserial1, TIMERANGE_SHORT_MINUTES * 60)
if not clist is None:
clientlists.append(clist)
if not mxserial2 is None:
clist = getclientlist(orgshard, mxserial2, TIMERANGE_SHORT_MINUTES * 60)
if not clist is None:
clientlists.append(clist)
db = sqlite3.connect(':memory:')
dbcursor = db.cursor()
dbcursor.execute('CREATE TABLE clients\n (UsageSent real, UsageRecv real, UsageTotal real, id text, description text, dhcpHostName text, \n mac text, ip text, vlan text)')
db.commit()
for cl in clientlists:
for client in cl:
dbcursor.execute('INSERT INTO clients VALUES (?,?,?,?,?,?,?,?,?)', (client['usage']['sent'], client['usage']['recv'], client['usage']['sent'] + client['usage']['recv'], client['id'], client['description'], client['dhcpHostname'], client['mac'], client['ip'], client['vlan']))
db.commit()
dbcursor = db.cursor()
dbcursor.execute('SELECT UsageTotal, \n UsageSent, \n UsageRecv, \n description, \n dhcpHostName, \n mac, \n ip, \n vlan \n FROM clients ORDER BY UsageTotal DESC LIMIT 10')
retvalue = dbcursor.fetchall()
db.close()
output.short = retvalue
orgid = netparams[0]
orgshard = netparams[1]
netid = netparams[2]
mxserial1 = netparams[3]
mxserial2 = netparams[4]
print('INFO: Running report for net "%s": MX1 "%s", MX2 "%s"' % (netid, mxserial1, mxserial2))
clientlists = []
clist = getclientlist(orgshard, mxserial1, TIMERANGE_MEDIUM_MINUTES * 60)
if not clist is None:
clientlists.append(clist)
if not mxserial2 is None:
clist = getclientlist(orgshard, mxserial2, TIMERANGE_MEDIUM_MINUTES * 60)
if not clist is None:
clientlists.append(clist)
db = sqlite3.connect(':memory:')
dbcursor = db.cursor()
dbcursor.execute('CREATE TABLE clients\n (UsageSent real, UsageRecv real, UsageTotal real, id text, description text, dhcpHostName text, \n mac text, ip text, vlan text)')
db.commit()
for cl in clientlists:
for client in cl:
dbcursor.execute('INSERT INTO clients VALUES (?,?,?,?,?,?,?,?,?)', (client['usage']['sent'], client['usage']['recv'], client['usage']['sent'] + client['usage']['recv'], client['id'], client['description'], client['dhcpHostname'], client['mac'], client['ip'], client['vlan']))
db.commit()
dbcursor = db.cursor()
dbcursor.execute('SELECT UsageTotal, \n UsageSent, \n UsageRecv, \n description, \n dhcpHostName, \n mac, \n ip, \n vlan \n FROM clients ORDER BY UsageTotal DESC LIMIT 10')
retvalue = dbcursor.fetchall()
db.close()
output.mid = retvalue
orgid = netparams[0]
orgshard = netparams[1]
netid = netparams[2]
mxserial1 = netparams[3]
mxserial2 = netparams[4]
print('INFO: Running report for net "%s": MX1 "%s", MX2 "%s"' % (netid, mxserial1, mxserial2))
clientlists = []
clist = getclientlist(orgshard, mxserial1, TIMERANGE_LONG_MINUTES * 60)
if not clist is None:
clientlists.append(clist)
if not mxserial2 is None:
clist = getclientlist(orgshard, mxserial2, TIMERANGE_LONG_MINUTES * 60)
if not clist is None:
clientlists.append(clist)
db = sqlite3.connect(':memory:')
dbcursor = db.cursor()
dbcursor.execute('CREATE TABLE clients\n (UsageSent real, UsageRecv real, UsageTotal real, id text, description text, dhcpHostName text, \n mac text, ip text, vlan text)')
db.commit()
for cl in clientlists:
for client in cl:
dbcursor.execute('INSERT INTO clients VALUES (?,?,?,?,?,?,?,?,?)', (client['usage']['sent'], client['usage']['recv'], client['usage']['sent'] + client['usage']['recv'], client['id'], client['description'], client['dhcpHostname'], client['mac'], client['ip'], client['vlan']))
db.commit()
dbcursor = db.cursor()
dbcursor.execute('SELECT UsageTotal, \n UsageSent, \n UsageRecv, \n description, \n dhcpHostName, \n mac, \n ip, \n vlan \n FROM clients ORDER BY UsageTotal DESC LIMIT 10')
retvalue = dbcursor.fetchall()
db.close()
output.long = retvalue
output.timestamp = str(datetime.datetime.now())
return render_template('index.html', form=form, tshort=TIMERANGE_SHORT_MINUTES, tmid=TIMERANGE_MEDIUM_MINUTES, tlong=TIMERANGE_LONG_MINUTES, output=output)
|
automation-scripts
|
positive
|
@log_call
def data(self, datasets_by_query, skip_corrections=False):
data = None
for (pbq, datasets) in datasets_by_query.items():
if data is not None and len(data.time) == 0:
continue
measurements = pbq.products[0].lookup_measurements(pbq.bands)
fuse_func = pbq.fuse_func
if pbq.manual_merge:
<DeepExtract>
if self.style:
flag_bands = set(filter(lambda b: b in self.style.flag_bands, pbq.bands))
non_flag_bands = set(filter(lambda b: b not in self.style.flag_bands, pbq.bands))
else:
non_flag_bands = pbq.bands
flag_bands = set()
time_slices = []
for dt in datasets.time.values:
tds = datasets.sel(time=dt)
merged = None
for ds in tds.values.item():
d = self.read_data_for_single_dataset(ds, measurements, self._geobox, fuse_func=fuse_func)
extent_mask = None
for band in non_flag_bands:
for f in self._product.extent_mask_func:
if extent_mask is None:
extent_mask = f(d, band)
else:
extent_mask &= f(d, band)
if extent_mask is not None:
d = d.where(extent_mask)
if self._product.solar_correction and (not skip_corrections):
for band in non_flag_bands:
d[band] = solar_correct_data(d[band], ds)
if merged is None:
merged = d
else:
merged = merged.combine_first(d)
if merged is None:
continue
for band in flag_bands:
merged[band] = merged[band].astype('uint16', copy=True)
merged[band].attrs = d[band].attrs
time_slices.append(merged)
if not time_slices:
qry_result = None
result = xarray.concat(time_slices, datasets.time)
qry_result = result
</DeepExtract>
else:
<DeepExtract>
CredentialManager.check_cred()
try:
qry_result = datacube.Datacube.load_data(datasets, self._geobox, measurements=measurements, fuse_func=fuse_func, patch_url=self._product.patch_url)
except Exception as e:
_LOG.error('Error (%s) in load_data: %s', e.__class__.__name__, str(e))
raise
</DeepExtract>
if data is None:
data = qry_result
continue
if len(data.time) == 0:
continue
if pbq.ignore_time:
if len(qry_result.time) > 1:
raise WMSException('Cannot ignore time on PQ (flag) bands from a time-aware product')
elif len(qry_result.time) == len(data.time):
qry_result['time'] = data.time
else:
if len(qry_result.time) == 0:
<DeepExtract>
var = None
for var in data.data_vars.variables.keys():
break
if var is None:
raise WMSException('Cannot add default flag data as there is no non-flag data available')
template = getattr(data, var)
data_new_bands = {}
for band in pbq.bands:
default_value = pbq.products[0].measurements[band].nodata
new_data = numpy.ndarray(template.shape, dtype='uint8')
new_data.fill(default_value)
qry_result = template.copy(data=new_data)
data_new_bands[band] = qry_result
data = data.assign(data_new_bands)
for band in pbq.bands:
data[band].attrs['flags_definition'] = pbq.products[0].measurements[band].flags_definition
data = data
</DeepExtract>
continue
else:
data_new_bands = {}
for band in pbq.bands:
band_data = qry_result[band]
timeless_band_data = band_data.sel(time=qry_result.time.values[0])
band_time_slices = []
for dt in data.time.values:
band_time_slices.append(timeless_band_data)
timed_band_data = xarray.concat(band_time_slices, data.time)
data_new_bands[band] = timed_band_data
data = data.assign(data_new_bands)
continue
elif len(qry_result.time) == 0:
<DeepExtract>
var = None
for var in data.data_vars.variables.keys():
break
if var is None:
raise WMSException('Cannot add default flag data as there is no non-flag data available')
template = getattr(data, var)
data_new_bands = {}
for band in pbq.bands:
default_value = pbq.products[0].measurements[band].nodata
new_data = numpy.ndarray(template.shape, dtype='uint8')
new_data.fill(default_value)
qry_result = template.copy(data=new_data)
data_new_bands[band] = qry_result
data = data.assign(data_new_bands)
for band in pbq.bands:
data[band].attrs['flags_definition'] = pbq.products[0].measurements[band].flags_definition
data = data
</DeepExtract>
continue
qry_result.coords['time'] = data.coords['time']
data = xarray.combine_by_coords([data, qry_result], join='exact')
return data
|
@log_call
def data(self, datasets_by_query, skip_corrections=False):
data = None
for (pbq, datasets) in datasets_by_query.items():
if data is not None and len(data.time) == 0:
continue
measurements = pbq.products[0].lookup_measurements(pbq.bands)
fuse_func = pbq.fuse_func
if pbq.manual_merge:
if self.style:
flag_bands = set(filter(lambda b: b in self.style.flag_bands, pbq.bands))
non_flag_bands = set(filter(lambda b: b not in self.style.flag_bands, pbq.bands))
else:
non_flag_bands = pbq.bands
flag_bands = set()
time_slices = []
for dt in datasets.time.values:
tds = datasets.sel(time=dt)
merged = None
for ds in tds.values.item():
d = self.read_data_for_single_dataset(ds, measurements, self._geobox, fuse_func=fuse_func)
extent_mask = None
for band in non_flag_bands:
for f in self._product.extent_mask_func:
if extent_mask is None:
extent_mask = f(d, band)
else:
extent_mask &= f(d, band)
if extent_mask is not None:
d = d.where(extent_mask)
if self._product.solar_correction and (not skip_corrections):
for band in non_flag_bands:
d[band] = solar_correct_data(d[band], ds)
if merged is None:
merged = d
else:
merged = merged.combine_first(d)
if merged is None:
continue
for band in flag_bands:
merged[band] = merged[band].astype('uint16', copy=True)
merged[band].attrs = d[band].attrs
time_slices.append(merged)
if not time_slices:
qry_result = None
result = xarray.concat(time_slices, datasets.time)
qry_result = result
else:
CredentialManager.check_cred()
try:
qry_result = datacube.Datacube.load_data(datasets, self._geobox, measurements=measurements, fuse_func=fuse_func, patch_url=self._product.patch_url)
except Exception as e:
_LOG.error('Error (%s) in load_data: %s', e.__class__.__name__, str(e))
raise
if data is None:
data = qry_result
continue
if len(data.time) == 0:
continue
if pbq.ignore_time:
if len(qry_result.time) > 1:
raise WMSException('Cannot ignore time on PQ (flag) bands from a time-aware product')
elif len(qry_result.time) == len(data.time):
qry_result['time'] = data.time
else:
if len(qry_result.time) == 0:
var = None
for var in data.data_vars.variables.keys():
break
if var is None:
raise WMSException('Cannot add default flag data as there is no non-flag data available')
template = getattr(data, var)
data_new_bands = {}
for band in pbq.bands:
default_value = pbq.products[0].measurements[band].nodata
new_data = numpy.ndarray(template.shape, dtype='uint8')
new_data.fill(default_value)
qry_result = template.copy(data=new_data)
data_new_bands[band] = qry_result
data = data.assign(data_new_bands)
for band in pbq.bands:
data[band].attrs['flags_definition'] = pbq.products[0].measurements[band].flags_definition
data = data
continue
else:
data_new_bands = {}
for band in pbq.bands:
band_data = qry_result[band]
timeless_band_data = band_data.sel(time=qry_result.time.values[0])
band_time_slices = []
for dt in data.time.values:
band_time_slices.append(timeless_band_data)
timed_band_data = xarray.concat(band_time_slices, data.time)
data_new_bands[band] = timed_band_data
data = data.assign(data_new_bands)
continue
elif len(qry_result.time) == 0:
var = None
for var in data.data_vars.variables.keys():
break
if var is None:
raise WMSException('Cannot add default flag data as there is no non-flag data available')
template = getattr(data, var)
data_new_bands = {}
for band in pbq.bands:
default_value = pbq.products[0].measurements[band].nodata
new_data = numpy.ndarray(template.shape, dtype='uint8')
new_data.fill(default_value)
qry_result = template.copy(data=new_data)
data_new_bands[band] = qry_result
data = data.assign(data_new_bands)
for band in pbq.bands:
data[band].attrs['flags_definition'] = pbq.products[0].measurements[band].flags_definition
data = data
continue
qry_result.coords['time'] = data.coords['time']
data = xarray.combine_by_coords([data, qry_result], join='exact')
return data
|
datacube-ows
|
positive
|
def cumulative_sum(t):
"""Mutates t so that each node's label becomes the sum of all labels in
the corresponding subtree rooted at t.
>>> t = Tree(1, [Tree(3, [Tree(5)]), Tree(7)])
>>> cumulative_sum(t)
>>> t
Tree(16, [Tree(8, [Tree(5)]), Tree(7)])
"""
if t.is_leaf():
return t
for b in t.branches:
<DeepExtract>
if b.is_leaf():
return b
for b in b.branches:
cumulative_sum(b)
b.label += b.label
</DeepExtract>
t.label += b.label
|
def cumulative_sum(t):
"""Mutates t so that each node's label becomes the sum of all labels in
the corresponding subtree rooted at t.
>>> t = Tree(1, [Tree(3, [Tree(5)]), Tree(7)])
>>> cumulative_sum(t)
>>> t
Tree(16, [Tree(8, [Tree(5)]), Tree(7)])
"""
if t.is_leaf():
return t
for b in t.branches:
if b.is_leaf():
return b
for b in b.branches:
cumulative_sum(b)
b.label += b.label
t.label += b.label
|
cs61a
|
positive
|
def render_throughput_breakdown(metadata, output_dir):
throughput_metadata = {}
def get_throughput_metadata(model, batch_size, dtr_dict, baseline_dict, output_dir):
if model not in throughput_metadata:
throughput_metadata[model] = {'dtr': {}, 'baseline': {}}
throughput_metadata[model]['dtr'][batch_size] = []
for datum in dtr_dict[batch_size]['param_sweep']:
throughput_metadata[model]['dtr'][batch_size].append({'memory_budget': datum.get('memory_budget', -1), 'error': datum['error'], **{key: datum.get(key) for key in used_keys}})
if batch_size in baseline_dict:
throughput_metadata[model]['baseline'][batch_size] = {key: baseline_dict[batch_size][key] for key in used_keys}
else:
throughput_metadata[model]['baseline'][batch_size] = {key: 0 for key in used_keys}
return (True, 'success')
<DeepExtract>
for model in metadata.keys():
dtr_dict = metadata[model]['dtr']
baseline_dict = metadata[model]['baseline']
for batch_size in sorted(dtr_dict.keys()):
for exp_kind in dtr_dict[batch_size]:
if exp_kind == 'param_sweep':
(success, msg) = get_throughput_metadata(model, batch_size, dtr_dict, baseline_dict, output_dir)
if not success:
return (False, msg)
return (True, 'success')
</DeepExtract>
flip = lambda f: lambda x: lambda y: f(y, x)
def plot_model(model):
filename = prepare_out_file(output_dir, f'throughput-comparison-{model}.png')
plt.clf()
plt.grid(True)
plt.title(f'Throughput Comparison of {NAME_DICT.get(model, model)}')
plt.xlabel('Batch Size', fontsize=15, labelpad=10)
plt.ylabel('Throughput (Batch Size / Avg GPU Time (s))')
num_batch_size = len(throughput_metadata[model]['dtr'].keys())
baseline_data = metadata[model]['baseline']
width = 0.15
ind = np.arange(num_batch_size)
x_axis = list(sorted(throughput_metadata[model]['dtr'].keys()))
baseline_data = list(map(flip(throughput_metadata[model]['baseline'].get)(0), x_axis))
plt.bar(ind, [datum['throughput'] for datum in baseline_data], width, label='Baseline')
dtr_data = {'throughput': {}, 'breakdown': {}}
for x in x_axis:
for datum in throughput_metadata[model]['dtr'][x]:
if datum['memory_budget'] not in dtr_data['throughput']:
dtr_data['throughput'][datum['memory_budget']] = []
dtr_data['breakdown'][datum['memory_budget']] = []
dtr_data['throughput'][datum['memory_budget']].append(datum['throughput'] if not datum['error'] else 0)
dtr_data['breakdown'][datum['memory_budget']].append(dict(filter(lambda x: x[0] != 'throughput', datum.items())) if not datum['error'] else None)
num_budget = len(dtr_data['throughput'].keys())
plt.xticks(ind + width * (num_budget / 2), map(str, x_axis))
for (i, (budget, throughput)) in enumerate(sorted(dtr_data['throughput'].items(), key=lambda x: -x[0])):
plt.bar(ind + width * (i + 1), throughput, width, label=f'{round(budget * 1e-09, 1)} GiB')
plt.legend(loc='best')
plt.tight_layout()
plt.savefig(filename, bbox_inches='tight')
filename = prepare_out_file(output_dir, f'time-breakdown-{model}.png')
plt.clf()
plt.title(f'Runtime Breakdown of {NAME_DICT.get(model, model)}')
plt.xlabel('Batch Size')
plt.ylabel('Time / Batch (ms)')
x_ticks_loc = {ind[i] + width * (num_budget / 2): '\n\n' + str(x_axis[i]) for i in range(num_batch_size)}
plt.grid(True, axis='y')
for (i, (budget, datum)) in enumerate(sorted(dtr_data['breakdown'].items(), key=lambda x: -x[0])):
locs = ind + width * (i + 1)
for loc in locs:
x_tick = f'{round(budget * 1e-09, 1)}\nGiB'
if loc in x_ticks_loc.keys():
x_tick += f'\n{x_ticks_loc[loc]}'
x_ticks_loc[loc] = x_tick
if datum is None:
continue
gathered_data = {key: [] for key in timed_keys + ['cpu_time']}
gathered_data['dispatch_overhead'] = []
for e in datum:
time_acc = 0
for key in gathered_data.keys():
if key != 'dispatch_overhead':
if e is None:
gathered_data[key].append(0)
else:
gathered_data[key].append(e[key])
if key != 'cpu_time' and e is not None:
time_acc += e[key]
if e is not None:
gathered_data['dispatch_overhead'].append(gathered_data['cpu_time'][-1] - time_acc)
else:
gathered_data['dispatch_overhead'].append(0)
height_acc = np.zeros(len(datum))
for key in timed_keys:
if i == 0:
plt.bar(ind + width * (i + 1), gathered_data[key], width=width, label=breakdown_namedict[key], color=breakdown_color_scheme[key], bottom=height_acc)
else:
plt.bar(ind + width * (i + 1), gathered_data[key], width=width, color=breakdown_color_scheme[key], bottom=height_acc)
height_acc += gathered_data[key]
xticks_data = list(sorted(x_ticks_loc.items(), key=lambda x: -x[0]))
ticks = list(map(lambda x: x[0], xticks_data))
labels = list(map(lambda x: x[1], xticks_data))
plt.xticks(ticks, labels)
plt.legend(loc='best')
plt.tight_layout()
plt.savefig(filename, bbox_inches='tight')
try:
for model in throughput_metadata.keys():
<DeepExtract>
filename = prepare_out_file(output_dir, f'throughput-comparison-{model}.png')
plt.clf()
plt.grid(True)
plt.title(f'Throughput Comparison of {NAME_DICT.get(model, model)}')
plt.xlabel('Batch Size', fontsize=15, labelpad=10)
plt.ylabel('Throughput (Batch Size / Avg GPU Time (s))')
num_batch_size = len(throughput_metadata[model]['dtr'].keys())
baseline_data = metadata[model]['baseline']
width = 0.15
ind = np.arange(num_batch_size)
x_axis = list(sorted(throughput_metadata[model]['dtr'].keys()))
baseline_data = list(map(flip(throughput_metadata[model]['baseline'].get)(0), x_axis))
plt.bar(ind, [datum['throughput'] for datum in baseline_data], width, label='Baseline')
dtr_data = {'throughput': {}, 'breakdown': {}}
for x in x_axis:
for datum in throughput_metadata[model]['dtr'][x]:
if datum['memory_budget'] not in dtr_data['throughput']:
dtr_data['throughput'][datum['memory_budget']] = []
dtr_data['breakdown'][datum['memory_budget']] = []
dtr_data['throughput'][datum['memory_budget']].append(datum['throughput'] if not datum['error'] else 0)
dtr_data['breakdown'][datum['memory_budget']].append(dict(filter(lambda x: x[0] != 'throughput', datum.items())) if not datum['error'] else None)
num_budget = len(dtr_data['throughput'].keys())
plt.xticks(ind + width * (num_budget / 2), map(str, x_axis))
for (i, (budget, throughput)) in enumerate(sorted(dtr_data['throughput'].items(), key=lambda x: -x[0])):
plt.bar(ind + width * (i + 1), throughput, width, label=f'{round(budget * 1e-09, 1)} GiB')
plt.legend(loc='best')
plt.tight_layout()
plt.savefig(filename, bbox_inches='tight')
filename = prepare_out_file(output_dir, f'time-breakdown-{model}.png')
plt.clf()
plt.title(f'Runtime Breakdown of {NAME_DICT.get(model, model)}')
plt.xlabel('Batch Size')
plt.ylabel('Time / Batch (ms)')
x_ticks_loc = {ind[i] + width * (num_budget / 2): '\n\n' + str(x_axis[i]) for i in range(num_batch_size)}
plt.grid(True, axis='y')
for (i, (budget, datum)) in enumerate(sorted(dtr_data['breakdown'].items(), key=lambda x: -x[0])):
locs = ind + width * (i + 1)
for loc in locs:
x_tick = f'{round(budget * 1e-09, 1)}\nGiB'
if loc in x_ticks_loc.keys():
x_tick += f'\n{x_ticks_loc[loc]}'
x_ticks_loc[loc] = x_tick
if datum is None:
continue
gathered_data = {key: [] for key in timed_keys + ['cpu_time']}
gathered_data['dispatch_overhead'] = []
for e in datum:
time_acc = 0
for key in gathered_data.keys():
if key != 'dispatch_overhead':
if e is None:
gathered_data[key].append(0)
else:
gathered_data[key].append(e[key])
if key != 'cpu_time' and e is not None:
time_acc += e[key]
if e is not None:
gathered_data['dispatch_overhead'].append(gathered_data['cpu_time'][-1] - time_acc)
else:
gathered_data['dispatch_overhead'].append(0)
height_acc = np.zeros(len(datum))
for key in timed_keys:
if i == 0:
plt.bar(ind + width * (i + 1), gathered_data[key], width=width, label=breakdown_namedict[key], color=breakdown_color_scheme[key], bottom=height_acc)
else:
plt.bar(ind + width * (i + 1), gathered_data[key], width=width, color=breakdown_color_scheme[key], bottom=height_acc)
height_acc += gathered_data[key]
xticks_data = list(sorted(x_ticks_loc.items(), key=lambda x: -x[0]))
ticks = list(map(lambda x: x[0], xticks_data))
labels = list(map(lambda x: x[1], xticks_data))
plt.xticks(ticks, labels)
plt.legend(loc='best')
plt.tight_layout()
plt.savefig(filename, bbox_inches='tight')
</DeepExtract>
except Exception as e:
return (False, render_exception(e))
return (True, 'success')
|
def render_throughput_breakdown(metadata, output_dir):
throughput_metadata = {}
def get_throughput_metadata(model, batch_size, dtr_dict, baseline_dict, output_dir):
if model not in throughput_metadata:
throughput_metadata[model] = {'dtr': {}, 'baseline': {}}
throughput_metadata[model]['dtr'][batch_size] = []
for datum in dtr_dict[batch_size]['param_sweep']:
throughput_metadata[model]['dtr'][batch_size].append({'memory_budget': datum.get('memory_budget', -1), 'error': datum['error'], **{key: datum.get(key) for key in used_keys}})
if batch_size in baseline_dict:
throughput_metadata[model]['baseline'][batch_size] = {key: baseline_dict[batch_size][key] for key in used_keys}
else:
throughput_metadata[model]['baseline'][batch_size] = {key: 0 for key in used_keys}
return (True, 'success')
for model in metadata.keys():
dtr_dict = metadata[model]['dtr']
baseline_dict = metadata[model]['baseline']
for batch_size in sorted(dtr_dict.keys()):
for exp_kind in dtr_dict[batch_size]:
if exp_kind == 'param_sweep':
(success, msg) = get_throughput_metadata(model, batch_size, dtr_dict, baseline_dict, output_dir)
if not success:
return (False, msg)
return (True, 'success')
flip = lambda f: lambda x: lambda y: f(y, x)
def plot_model(model):
filename = prepare_out_file(output_dir, f'throughput-comparison-{model}.png')
plt.clf()
plt.grid(True)
plt.title(f'Throughput Comparison of {NAME_DICT.get(model, model)}')
plt.xlabel('Batch Size', fontsize=15, labelpad=10)
plt.ylabel('Throughput (Batch Size / Avg GPU Time (s))')
num_batch_size = len(throughput_metadata[model]['dtr'].keys())
baseline_data = metadata[model]['baseline']
width = 0.15
ind = np.arange(num_batch_size)
x_axis = list(sorted(throughput_metadata[model]['dtr'].keys()))
baseline_data = list(map(flip(throughput_metadata[model]['baseline'].get)(0), x_axis))
plt.bar(ind, [datum['throughput'] for datum in baseline_data], width, label='Baseline')
dtr_data = {'throughput': {}, 'breakdown': {}}
for x in x_axis:
for datum in throughput_metadata[model]['dtr'][x]:
if datum['memory_budget'] not in dtr_data['throughput']:
dtr_data['throughput'][datum['memory_budget']] = []
dtr_data['breakdown'][datum['memory_budget']] = []
dtr_data['throughput'][datum['memory_budget']].append(datum['throughput'] if not datum['error'] else 0)
dtr_data['breakdown'][datum['memory_budget']].append(dict(filter(lambda x: x[0] != 'throughput', datum.items())) if not datum['error'] else None)
num_budget = len(dtr_data['throughput'].keys())
plt.xticks(ind + width * (num_budget / 2), map(str, x_axis))
for (i, (budget, throughput)) in enumerate(sorted(dtr_data['throughput'].items(), key=lambda x: -x[0])):
plt.bar(ind + width * (i + 1), throughput, width, label=f'{round(budget * 1e-09, 1)} GiB')
plt.legend(loc='best')
plt.tight_layout()
plt.savefig(filename, bbox_inches='tight')
filename = prepare_out_file(output_dir, f'time-breakdown-{model}.png')
plt.clf()
plt.title(f'Runtime Breakdown of {NAME_DICT.get(model, model)}')
plt.xlabel('Batch Size')
plt.ylabel('Time / Batch (ms)')
x_ticks_loc = {ind[i] + width * (num_budget / 2): '\n\n' + str(x_axis[i]) for i in range(num_batch_size)}
plt.grid(True, axis='y')
for (i, (budget, datum)) in enumerate(sorted(dtr_data['breakdown'].items(), key=lambda x: -x[0])):
locs = ind + width * (i + 1)
for loc in locs:
x_tick = f'{round(budget * 1e-09, 1)}\nGiB'
if loc in x_ticks_loc.keys():
x_tick += f'\n{x_ticks_loc[loc]}'
x_ticks_loc[loc] = x_tick
if datum is None:
continue
gathered_data = {key: [] for key in timed_keys + ['cpu_time']}
gathered_data['dispatch_overhead'] = []
for e in datum:
time_acc = 0
for key in gathered_data.keys():
if key != 'dispatch_overhead':
if e is None:
gathered_data[key].append(0)
else:
gathered_data[key].append(e[key])
if key != 'cpu_time' and e is not None:
time_acc += e[key]
if e is not None:
gathered_data['dispatch_overhead'].append(gathered_data['cpu_time'][-1] - time_acc)
else:
gathered_data['dispatch_overhead'].append(0)
height_acc = np.zeros(len(datum))
for key in timed_keys:
if i == 0:
plt.bar(ind + width * (i + 1), gathered_data[key], width=width, label=breakdown_namedict[key], color=breakdown_color_scheme[key], bottom=height_acc)
else:
plt.bar(ind + width * (i + 1), gathered_data[key], width=width, color=breakdown_color_scheme[key], bottom=height_acc)
height_acc += gathered_data[key]
xticks_data = list(sorted(x_ticks_loc.items(), key=lambda x: -x[0]))
ticks = list(map(lambda x: x[0], xticks_data))
labels = list(map(lambda x: x[1], xticks_data))
plt.xticks(ticks, labels)
plt.legend(loc='best')
plt.tight_layout()
plt.savefig(filename, bbox_inches='tight')
try:
for model in throughput_metadata.keys():
filename = prepare_out_file(output_dir, f'throughput-comparison-{model}.png')
plt.clf()
plt.grid(True)
plt.title(f'Throughput Comparison of {NAME_DICT.get(model, model)}')
plt.xlabel('Batch Size', fontsize=15, labelpad=10)
plt.ylabel('Throughput (Batch Size / Avg GPU Time (s))')
num_batch_size = len(throughput_metadata[model]['dtr'].keys())
baseline_data = metadata[model]['baseline']
width = 0.15
ind = np.arange(num_batch_size)
x_axis = list(sorted(throughput_metadata[model]['dtr'].keys()))
baseline_data = list(map(flip(throughput_metadata[model]['baseline'].get)(0), x_axis))
plt.bar(ind, [datum['throughput'] for datum in baseline_data], width, label='Baseline')
dtr_data = {'throughput': {}, 'breakdown': {}}
for x in x_axis:
for datum in throughput_metadata[model]['dtr'][x]:
if datum['memory_budget'] not in dtr_data['throughput']:
dtr_data['throughput'][datum['memory_budget']] = []
dtr_data['breakdown'][datum['memory_budget']] = []
dtr_data['throughput'][datum['memory_budget']].append(datum['throughput'] if not datum['error'] else 0)
dtr_data['breakdown'][datum['memory_budget']].append(dict(filter(lambda x: x[0] != 'throughput', datum.items())) if not datum['error'] else None)
num_budget = len(dtr_data['throughput'].keys())
plt.xticks(ind + width * (num_budget / 2), map(str, x_axis))
for (i, (budget, throughput)) in enumerate(sorted(dtr_data['throughput'].items(), key=lambda x: -x[0])):
plt.bar(ind + width * (i + 1), throughput, width, label=f'{round(budget * 1e-09, 1)} GiB')
plt.legend(loc='best')
plt.tight_layout()
plt.savefig(filename, bbox_inches='tight')
filename = prepare_out_file(output_dir, f'time-breakdown-{model}.png')
plt.clf()
plt.title(f'Runtime Breakdown of {NAME_DICT.get(model, model)}')
plt.xlabel('Batch Size')
plt.ylabel('Time / Batch (ms)')
x_ticks_loc = {ind[i] + width * (num_budget / 2): '\n\n' + str(x_axis[i]) for i in range(num_batch_size)}
plt.grid(True, axis='y')
for (i, (budget, datum)) in enumerate(sorted(dtr_data['breakdown'].items(), key=lambda x: -x[0])):
locs = ind + width * (i + 1)
for loc in locs:
x_tick = f'{round(budget * 1e-09, 1)}\nGiB'
if loc in x_ticks_loc.keys():
x_tick += f'\n{x_ticks_loc[loc]}'
x_ticks_loc[loc] = x_tick
if datum is None:
continue
gathered_data = {key: [] for key in timed_keys + ['cpu_time']}
gathered_data['dispatch_overhead'] = []
for e in datum:
time_acc = 0
for key in gathered_data.keys():
if key != 'dispatch_overhead':
if e is None:
gathered_data[key].append(0)
else:
gathered_data[key].append(e[key])
if key != 'cpu_time' and e is not None:
time_acc += e[key]
if e is not None:
gathered_data['dispatch_overhead'].append(gathered_data['cpu_time'][-1] - time_acc)
else:
gathered_data['dispatch_overhead'].append(0)
height_acc = np.zeros(len(datum))
for key in timed_keys:
if i == 0:
plt.bar(ind + width * (i + 1), gathered_data[key], width=width, label=breakdown_namedict[key], color=breakdown_color_scheme[key], bottom=height_acc)
else:
plt.bar(ind + width * (i + 1), gathered_data[key], width=width, color=breakdown_color_scheme[key], bottom=height_acc)
height_acc += gathered_data[key]
xticks_data = list(sorted(x_ticks_loc.items(), key=lambda x: -x[0]))
ticks = list(map(lambda x: x[0], xticks_data))
labels = list(map(lambda x: x[1], xticks_data))
plt.xticks(ticks, labels)
plt.legend(loc='best')
plt.tight_layout()
plt.savefig(filename, bbox_inches='tight')
except Exception as e:
return (False, render_exception(e))
return (True, 'success')
|
dtr-prototype
|
positive
|
def is_valid_uploaded_hashfile(self, uploaded_hashfile):
<DeepExtract>
files = self.filesystem.get_files(self.uploaded_hashes_path)
</DeepExtract>
return uploaded_hashfile in files
|
def is_valid_uploaded_hashfile(self, uploaded_hashfile):
files = self.filesystem.get_files(self.uploaded_hashes_path)
return uploaded_hashfile in files
|
crackerjack
|
positive
|
def get_quarter(self) -> int:
quarter = self.quarter
if quarter is None:
try:
quarter = self.kwargs[self.quarter_url_kwarg]
except KeyError:
try:
quarter = self.request.GET[self.quarter_url_kwarg]
except KeyError:
raise Http404(_('No quarter specified'))
<DeepExtract>
try:
if not isinstance(quarter, int):
quarter = int(quarter)
try:
self.validate_quarter(quarter)
except ValidationError:
raise Http404(_('Invalid quarter number'))
except ValueError:
raise Http404(_(f'Invalid quarter format. Cannot parse {quarter} into integer.'))
quarter = quarter
</DeepExtract>
return quarter
|
def get_quarter(self) -> int:
quarter = self.quarter
if quarter is None:
try:
quarter = self.kwargs[self.quarter_url_kwarg]
except KeyError:
try:
quarter = self.request.GET[self.quarter_url_kwarg]
except KeyError:
raise Http404(_('No quarter specified'))
try:
if not isinstance(quarter, int):
quarter = int(quarter)
try:
self.validate_quarter(quarter)
except ValidationError:
raise Http404(_('Invalid quarter number'))
except ValueError:
raise Http404(_(f'Invalid quarter format. Cannot parse {quarter} into integer.'))
quarter = quarter
return quarter
|
django-ledger
|
positive
|
def main():
<DeepExtract>
peak_spatial_frequency_bins = np.array([0.35, 0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8.0, 11.2, 16.0, 22.4])
peak_spatial_frequency_simple_foveal_hist = np.array([0, 4, 4, 8, 25, 33, 26, 28, 12, 5, 2, 1])
peak_spatial_frequency_complex_foveal_hist = np.array([0, 0, 0, 9, 9, 7, 10, 23, 12, 8, 3, 3])
peak_spatial_frequency_simple_parafoveal_hist = np.array([2, 4, 10, 12, 18, 7, 18, 3, 4, 0, 0, 0])
peak_spatial_frequency_complex_parafoveal_hist = np.array([1, 2, 1, 2, 5, 15, 13, 9, 3, 2, 0, 0])
peak_spatial_frequency_hist = peak_spatial_frequency_simple_foveal_hist + peak_spatial_frequency_complex_foveal_hist + peak_spatial_frequency_simple_parafoveal_hist + peak_spatial_frequency_complex_parafoveal_hist
peak_spatial_frequency = gen_sample(peak_spatial_frequency_hist, peak_spatial_frequency_bins, scale='log2')
assembly = DataAssembly(peak_spatial_frequency, coords={'neuroid_id': ('neuroid', range(peak_spatial_frequency.shape[0])), 'region': ('neuroid', ['V1'] * peak_spatial_frequency.shape[0]), 'neuronal_property': ['peak_spatial_frequency']}, dims=['neuroid', 'neuronal_property'])
assembly.attrs['number_of_trials'] = 20
for p in assembly.coords['neuronal_property'].values:
assembly.attrs[p + '_bins'] = eval(p + '_bins')
assembly = assembly
</DeepExtract>
assembly.name = ASSEMBLY_NAME
print('Packaging assembly')
package_data_assembly(xarray.DataArray(assembly), assembly_identifier=assembly.name, stimulus_set_identifier=SPATIAL_FREQUENCY_STIM_NAME, assembly_class='PropertyAssembly', bucket_name='brainio.contrib')
|
def main():
peak_spatial_frequency_bins = np.array([0.35, 0.5, 0.7, 1.0, 1.4, 2.0, 2.8, 4.0, 5.6, 8.0, 11.2, 16.0, 22.4])
peak_spatial_frequency_simple_foveal_hist = np.array([0, 4, 4, 8, 25, 33, 26, 28, 12, 5, 2, 1])
peak_spatial_frequency_complex_foveal_hist = np.array([0, 0, 0, 9, 9, 7, 10, 23, 12, 8, 3, 3])
peak_spatial_frequency_simple_parafoveal_hist = np.array([2, 4, 10, 12, 18, 7, 18, 3, 4, 0, 0, 0])
peak_spatial_frequency_complex_parafoveal_hist = np.array([1, 2, 1, 2, 5, 15, 13, 9, 3, 2, 0, 0])
peak_spatial_frequency_hist = peak_spatial_frequency_simple_foveal_hist + peak_spatial_frequency_complex_foveal_hist + peak_spatial_frequency_simple_parafoveal_hist + peak_spatial_frequency_complex_parafoveal_hist
peak_spatial_frequency = gen_sample(peak_spatial_frequency_hist, peak_spatial_frequency_bins, scale='log2')
assembly = DataAssembly(peak_spatial_frequency, coords={'neuroid_id': ('neuroid', range(peak_spatial_frequency.shape[0])), 'region': ('neuroid', ['V1'] * peak_spatial_frequency.shape[0]), 'neuronal_property': ['peak_spatial_frequency']}, dims=['neuroid', 'neuronal_property'])
assembly.attrs['number_of_trials'] = 20
for p in assembly.coords['neuronal_property'].values:
assembly.attrs[p + '_bins'] = eval(p + '_bins')
assembly = assembly
assembly.name = ASSEMBLY_NAME
print('Packaging assembly')
package_data_assembly(xarray.DataArray(assembly), assembly_identifier=assembly.name, stimulus_set_identifier=SPATIAL_FREQUENCY_STIM_NAME, assembly_class='PropertyAssembly', bucket_name='brainio.contrib')
|
brain-score
|
positive
|
def insert(self, table_name: str, records: tp.Union[tp.List[dict], Path]):
<DeepExtract>
table_name_without_partition = table_name.split('$')[0]
table_id = table_name.replace(table_name_without_partition, self.create_full_table_id(table_name_without_partition))
</DeepExtract>
return self.dataset_manager.insert(table_id, records)
|
def insert(self, table_name: str, records: tp.Union[tp.List[dict], Path]):
table_name_without_partition = table_name.split('$')[0]
table_id = table_name.replace(table_name_without_partition, self.create_full_table_id(table_name_without_partition))
return self.dataset_manager.insert(table_id, records)
|
bigflow
|
positive
|
def main(_):
hvd.init()
FLAGS.output_dir = FLAGS.output_dir if hvd.rank() == 0 else os.path.join(FLAGS.output_dir, str(hvd.rank()))
tf.logging.set_verbosity(tf.logging.INFO)
processors = {'cola': ColaProcessor, 'mnli': MnliProcessor, 'mrpc': MrpcProcessor, 'xnli': XnliProcessor}
bert.tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint)
if not FLAGS.do_train and (not FLAGS.do_eval) and (not FLAGS.do_predict):
raise ValueError("At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = bert.modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError('Cannot use sequence length %d because the BERT model was only trained up to sequence length %d' % (FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError('Task not found: %s' % task_name)
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = bert.tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
config = tf.ConfigProto()
config.gpu_options.visible_device_list = str(hvd.local_rank())
run_config = tf.contrib.tpu.RunConfig(cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host), log_step_count_steps=25, session_config=config)
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
num_train_steps = num_train_steps // hvd.size()
num_warmup_steps = num_warmup_steps // hvd.size()
<DeepExtract>
def model_fn(features, labels, mode, params):
"""The `model_fn` for TPUEstimator."""
tf.logging.info('*** Features ***')
for name in sorted(features.keys()):
tf.logging.info(' name = %s, shape = %s' % (name, features[name].shape))
input_ids = features['input_ids']
input_mask = features['input_mask']
segment_ids = features['segment_ids']
label_ids = features['label_ids']
is_real_example = None
if 'is_real_example' in features:
is_real_example = tf.cast(features['is_real_example'], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = mode == tf.estimator.ModeKeys.TRAIN
(total_loss, per_example_loss, logits, probabilities) = create_model(bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, len(label_list), FLAGS.use_tpu)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if FLAGS.init_checkpoint:
(assignment_map, initialized_variable_names) = bert.modeling.get_assignment_map_from_checkpoint(tvars, FLAGS.init_checkpoint)
if FLAGS.use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(FLAGS.init_checkpoint, assignment_map)
                    return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(FLAGS.init_checkpoint, assignment_map)
tf.logging.info('**** Trainable Variables ****')
for var in tvars:
init_string = ''
if var.name in initialized_variable_names:
init_string = ', *INIT_FROM_CKPT*'
tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization_distributed.create_optimizer(total_loss, FLAGS.learning_rate, num_train_steps, num_warmup_steps, FLAGS.use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
                return {'eval_accuracy': accuracy, 'eval_loss': loss}
eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions={'probabilities': probabilities}, scaffold_fn=scaffold_fn)
        return output_spec
model_fn = model_fn
</DeepExtract>
estimator = tf.contrib.tpu.TPUEstimator(use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, 'train.tf_record')
<DeepExtract>
writer = tf.python_io.TFRecordWriter(train_file)
for (ex_index, example) in enumerate(train_examples):
if ex_index % 10000 == 0:
tf.logging.info('Writing example %d of %d' % (ex_index, len(train_examples)))
feature = convert_single_example(ex_index, example, label_list, FLAGS.max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features['input_ids'] = create_int_feature(feature.input_ids)
features['input_mask'] = create_int_feature(feature.input_mask)
features['segment_ids'] = create_int_feature(feature.segment_ids)
features['label_ids'] = create_int_feature([feature.label_id])
features['is_real_example'] = create_int_feature([int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
</DeepExtract>
tf.logging.info('***** Running training *****')
tf.logging.info(' Num examples = %d', len(train_examples))
tf.logging.info(' Batch size = %d', FLAGS.train_batch_size)
tf.logging.info(' Num steps = %d', num_train_steps)
<DeepExtract>
name_to_features = {'input_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'input_mask': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'segment_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'label_ids': tf.FixedLenFeature([], tf.int64), 'is_real_example': tf.FixedLenFeature([], tf.int64)}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
            return example
def input_fn(params):
"""The actual input function."""
batch_size = params['batch_size']
d = tf.data.TFRecordDataset(train_file)
if True:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(tf.contrib.data.map_and_batch(lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=True))
            return d
train_input_fn = input_fn
</DeepExtract>
hooks = [hvd.BroadcastGlobalVariablesHook(0)]
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps, hooks=hooks)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, 'eval.tf_record')
<DeepExtract>
writer = tf.python_io.TFRecordWriter(eval_file)
for (ex_index, example) in enumerate(eval_examples):
if ex_index % 10000 == 0:
tf.logging.info('Writing example %d of %d' % (ex_index, len(eval_examples)))
feature = convert_single_example(ex_index, example, label_list, FLAGS.max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features['input_ids'] = create_int_feature(feature.input_ids)
features['input_mask'] = create_int_feature(feature.input_mask)
features['segment_ids'] = create_int_feature(feature.segment_ids)
features['label_ids'] = create_int_feature([feature.label_id])
features['is_real_example'] = create_int_feature([int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
</DeepExtract>
tf.logging.info('***** Running evaluation *****')
tf.logging.info(' Num examples = %d (%d actual, %d padding)', len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples)
tf.logging.info(' Batch size = %d', FLAGS.eval_batch_size)
eval_steps = None
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
<DeepExtract>
name_to_features = {'input_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'input_mask': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'segment_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'label_ids': tf.FixedLenFeature([], tf.int64), 'is_real_example': tf.FixedLenFeature([], tf.int64)}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
            return example
def input_fn(params):
"""The actual input function."""
batch_size = params['batch_size']
d = tf.data.TFRecordDataset(eval_file)
if False:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(tf.contrib.data.map_and_batch(lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=eval_drop_remainder))
            return d
eval_input_fn = input_fn
</DeepExtract>
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, 'eval_results.txt')
with tf.gfile.GFile(output_eval_file, 'w') as writer:
tf.logging.info('***** Eval results *****')
for key in sorted(result.keys()):
tf.logging.info(' %s = %s', key, str(result[key]))
writer.write('%s = %s\n' % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, 'predict.tf_record')
<DeepExtract>
writer = tf.python_io.TFRecordWriter(predict_file)
for (ex_index, example) in enumerate(predict_examples):
if ex_index % 10000 == 0:
tf.logging.info('Writing example %d of %d' % (ex_index, len(predict_examples)))
feature = convert_single_example(ex_index, example, label_list, FLAGS.max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features['input_ids'] = create_int_feature(feature.input_ids)
features['input_mask'] = create_int_feature(feature.input_mask)
features['segment_ids'] = create_int_feature(feature.segment_ids)
features['label_ids'] = create_int_feature([feature.label_id])
features['is_real_example'] = create_int_feature([int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
</DeepExtract>
tf.logging.info('***** Running prediction*****')
tf.logging.info(' Num examples = %d (%d actual, %d padding)', len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples)
tf.logging.info(' Batch size = %d', FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
<DeepExtract>
name_to_features = {'input_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'input_mask': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'segment_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'label_ids': tf.FixedLenFeature([], tf.int64), 'is_real_example': tf.FixedLenFeature([], tf.int64)}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
            return example
def input_fn(params):
"""The actual input function."""
batch_size = params['batch_size']
d = tf.data.TFRecordDataset(predict_file)
if False:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(tf.contrib.data.map_and_batch(lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=predict_drop_remainder))
            return d
predict_input_fn = input_fn
</DeepExtract>
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, 'test_results.tsv')
with tf.gfile.GFile(output_predict_file, 'w') as writer:
num_written_lines = 0
tf.logging.info('***** Predict results *****')
for (i, prediction) in enumerate(result):
probabilities = prediction['probabilities']
if i >= num_actual_predict_examples:
break
output_line = '\t'.join((str(class_probability) for class_probability in probabilities)) + '\n'
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
|
def main(_):
hvd.init()
FLAGS.output_dir = FLAGS.output_dir if hvd.rank() == 0 else os.path.join(FLAGS.output_dir, str(hvd.rank()))
tf.logging.set_verbosity(tf.logging.INFO)
processors = {'cola': ColaProcessor, 'mnli': MnliProcessor, 'mrpc': MrpcProcessor, 'xnli': XnliProcessor}
bert.tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint)
if not FLAGS.do_train and (not FLAGS.do_eval) and (not FLAGS.do_predict):
raise ValueError("At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = bert.modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError('Cannot use sequence length %d because the BERT model was only trained up to sequence length %d' % (FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError('Task not found: %s' % task_name)
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = bert.tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
config = tf.ConfigProto()
config.gpu_options.visible_device_list = str(hvd.local_rank())
run_config = tf.contrib.tpu.RunConfig(cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host), log_step_count_steps=25, session_config=config)
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
num_train_steps = num_train_steps // hvd.size()
num_warmup_steps = num_warmup_steps // hvd.size()
def model_fn(features, labels, mode, params):
"""The `model_fn` for TPUEstimator."""
tf.logging.info('*** Features ***')
for name in sorted(features.keys()):
tf.logging.info(' name = %s, shape = %s' % (name, features[name].shape))
input_ids = features['input_ids']
input_mask = features['input_mask']
segment_ids = features['segment_ids']
label_ids = features['label_ids']
is_real_example = None
if 'is_real_example' in features:
is_real_example = tf.cast(features['is_real_example'], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = mode == tf.estimator.ModeKeys.TRAIN
(total_loss, per_example_loss, logits, probabilities) = create_model(bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, len(label_list), FLAGS.use_tpu)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if FLAGS.init_checkpoint:
(assignment_map, initialized_variable_names) = bert.modeling.get_assignment_map_from_checkpoint(tvars, FLAGS.init_checkpoint)
if FLAGS.use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(FLAGS.init_checkpoint, assignment_map)
                    return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(FLAGS.init_checkpoint, assignment_map)
tf.logging.info('**** Trainable Variables ****')
for var in tvars:
init_string = ''
if var.name in initialized_variable_names:
init_string = ', *INIT_FROM_CKPT*'
tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization_distributed.create_optimizer(total_loss, FLAGS.learning_rate, num_train_steps, num_warmup_steps, FLAGS.use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
                return {'eval_accuracy': accuracy, 'eval_loss': loss}
eval_metrics = (metric_fn, [per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions={'probabilities': probabilities}, scaffold_fn=scaffold_fn)
        return output_spec
model_fn = model_fn
estimator = tf.contrib.tpu.TPUEstimator(use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, 'train.tf_record')
writer = tf.python_io.TFRecordWriter(train_file)
for (ex_index, example) in enumerate(train_examples):
if ex_index % 10000 == 0:
tf.logging.info('Writing example %d of %d' % (ex_index, len(train_examples)))
feature = convert_single_example(ex_index, example, label_list, FLAGS.max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features['input_ids'] = create_int_feature(feature.input_ids)
features['input_mask'] = create_int_feature(feature.input_mask)
features['segment_ids'] = create_int_feature(feature.segment_ids)
features['label_ids'] = create_int_feature([feature.label_id])
features['is_real_example'] = create_int_feature([int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
tf.logging.info('***** Running training *****')
tf.logging.info(' Num examples = %d', len(train_examples))
tf.logging.info(' Batch size = %d', FLAGS.train_batch_size)
tf.logging.info(' Num steps = %d', num_train_steps)
name_to_features = {'input_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'input_mask': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'segment_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'label_ids': tf.FixedLenFeature([], tf.int64), 'is_real_example': tf.FixedLenFeature([], tf.int64)}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
            return example
def input_fn(params):
"""The actual input function."""
batch_size = params['batch_size']
d = tf.data.TFRecordDataset(train_file)
if True:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(tf.contrib.data.map_and_batch(lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=True))
            return d
train_input_fn = input_fn
hooks = [hvd.BroadcastGlobalVariablesHook(0)]
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps, hooks=hooks)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, 'eval.tf_record')
writer = tf.python_io.TFRecordWriter(eval_file)
for (ex_index, example) in enumerate(eval_examples):
if ex_index % 10000 == 0:
tf.logging.info('Writing example %d of %d' % (ex_index, len(eval_examples)))
feature = convert_single_example(ex_index, example, label_list, FLAGS.max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features['input_ids'] = create_int_feature(feature.input_ids)
features['input_mask'] = create_int_feature(feature.input_mask)
features['segment_ids'] = create_int_feature(feature.segment_ids)
features['label_ids'] = create_int_feature([feature.label_id])
features['is_real_example'] = create_int_feature([int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
tf.logging.info('***** Running evaluation *****')
tf.logging.info(' Num examples = %d (%d actual, %d padding)', len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples)
tf.logging.info(' Batch size = %d', FLAGS.eval_batch_size)
eval_steps = None
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
name_to_features = {'input_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'input_mask': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'segment_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'label_ids': tf.FixedLenFeature([], tf.int64), 'is_real_example': tf.FixedLenFeature([], tf.int64)}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
            return example
def input_fn(params):
"""The actual input function."""
batch_size = params['batch_size']
d = tf.data.TFRecordDataset(eval_file)
if False:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(tf.contrib.data.map_and_batch(lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=eval_drop_remainder))
            return d
eval_input_fn = input_fn
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, 'eval_results.txt')
with tf.gfile.GFile(output_eval_file, 'w') as writer:
tf.logging.info('***** Eval results *****')
for key in sorted(result.keys()):
tf.logging.info(' %s = %s', key, str(result[key]))
writer.write('%s = %s\n' % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, 'predict.tf_record')
writer = tf.python_io.TFRecordWriter(predict_file)
for (ex_index, example) in enumerate(predict_examples):
if ex_index % 10000 == 0:
tf.logging.info('Writing example %d of %d' % (ex_index, len(predict_examples)))
feature = convert_single_example(ex_index, example, label_list, FLAGS.max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features['input_ids'] = create_int_feature(feature.input_ids)
features['input_mask'] = create_int_feature(feature.input_mask)
features['segment_ids'] = create_int_feature(feature.segment_ids)
features['label_ids'] = create_int_feature([feature.label_id])
features['is_real_example'] = create_int_feature([int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
tf.logging.info('***** Running prediction*****')
tf.logging.info(' Num examples = %d (%d actual, %d padding)', len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples)
tf.logging.info(' Batch size = %d', FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
name_to_features = {'input_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'input_mask': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'segment_ids': tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), 'label_ids': tf.FixedLenFeature([], tf.int64), 'is_real_example': tf.FixedLenFeature([], tf.int64)}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
            return example
def input_fn(params):
"""The actual input function."""
batch_size = params['batch_size']
d = tf.data.TFRecordDataset(predict_file)
if False:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(tf.contrib.data.map_and_batch(lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=predict_drop_remainder))
            return d
predict_input_fn = input_fn
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, 'test_results.tsv')
with tf.gfile.GFile(output_predict_file, 'w') as writer:
num_written_lines = 0
tf.logging.info('***** Predict results *****')
for (i, prediction) in enumerate(result):
probabilities = prediction['probabilities']
if i >= num_actual_predict_examples:
break
output_line = '\t'.join((str(class_probability) for class_probability in probabilities)) + '\n'
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
|
DistillBERT
|
positive
|
def get_history_manager_from_history(history_instance):
"""
Return the history manager, based on an existing history instance.
"""
<DeepExtract>
if isinstance(history_instance.instance_type._meta.pk, ForeignKey):
key_name = history_instance.instance_type._meta.pk.name + '_id'
    else:
        key_name = history_instance.instance_type._meta.pk.name
</DeepExtract>
return get_history_manager_for_model(history_instance.instance_type).filter(**{key_name: getattr(history_instance, key_name)})
|
def get_history_manager_from_history(history_instance):
"""
Return the history manager, based on an existing history instance.
"""
if isinstance(history_instance.instance_type._meta.pk, ForeignKey):
key_name = history_instance.instance_type._meta.pk.name + '_id'
    else:
        key_name = history_instance.instance_type._meta.pk.name
return get_history_manager_for_model(history_instance.instance_type).filter(**{key_name: getattr(history_instance, key_name)})
|
django-simple-history
|
positive
|
def create_relationships(relationship_type: str, created_by: stix2.Identity, sources: List[_DomainObject], targets: List[_DomainObject], confidence: int, object_markings: List[stix2.MarkingDefinition], start_time: Optional[datetime]=None, stop_time: Optional[datetime]=None) -> List[stix2.Relationship]:
"""Create relationships."""
relationships = []
for source in sources:
for target in targets:
<DeepExtract>
relationship = stix2.Relationship(id=StixCoreRelationship.generate_id(relationship_type, source.id, target.id, start_time, stop_time), created_by_ref=created_by, relationship_type=relationship_type, source_ref=source, target_ref=target, start_time=start_time, stop_time=stop_time, confidence=confidence, object_marking_refs=object_markings, allow_custom=True)
</DeepExtract>
relationships.append(relationship)
return relationships
|
def create_relationships(relationship_type: str, created_by: stix2.Identity, sources: List[_DomainObject], targets: List[_DomainObject], confidence: int, object_markings: List[stix2.MarkingDefinition], start_time: Optional[datetime]=None, stop_time: Optional[datetime]=None) -> List[stix2.Relationship]:
"""Create relationships."""
relationships = []
for source in sources:
for target in targets:
relationship = stix2.Relationship(id=StixCoreRelationship.generate_id(relationship_type, source.id, target.id, start_time, stop_time), created_by_ref=created_by, relationship_type=relationship_type, source_ref=source, target_ref=target, start_time=start_time, stop_time=stop_time, confidence=confidence, object_marking_refs=object_markings, allow_custom=True)
relationships.append(relationship)
return relationships
|
connectors
|
positive
|
def prepare_targets(self, proposals, targets):
labels = []
keypoints = []
for (proposals_per_image, targets_per_image) in zip(proposals, targets):
<DeepExtract>
match_quality_matrix = boxlist_iou(targets_per_image, proposals_per_image)
matched_idxs = self.proposal_matcher(match_quality_matrix)
targets_per_image = targets_per_image.copy_with_fields(['labels', 'keypoints'])
matched_targets = targets_per_image[matched_idxs.clamp(min=0)]
matched_targets.add_field('matched_idxs', matched_idxs)
matched_targets = matched_targets
</DeepExtract>
matched_idxs = matched_targets.get_field('matched_idxs')
labels_per_image = matched_targets.get_field('labels')
labels_per_image = labels_per_image.to(dtype=torch.int64)
neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[neg_inds] = 0
keypoints_per_image = matched_targets.get_field('keypoints')
<DeepExtract>
x_within = (keypoints_per_image.keypoints[..., 0] >= matched_targets.bbox[:, 0, None]) & (keypoints_per_image.keypoints[..., 0] <= matched_targets.bbox[:, 2, None])
y_within = (keypoints_per_image.keypoints[..., 1] >= matched_targets.bbox[:, 1, None]) & (keypoints_per_image.keypoints[..., 1] <= matched_targets.bbox[:, 3, None])
within_box = x_within & y_within
</DeepExtract>
vis_kp = keypoints_per_image.keypoints[..., 2] > 0
is_visible = (within_box & vis_kp).sum(1) > 0
labels_per_image[~is_visible] = -1
labels.append(labels_per_image)
keypoints.append(keypoints_per_image)
return (labels, keypoints)
|
def prepare_targets(self, proposals, targets):
labels = []
keypoints = []
for (proposals_per_image, targets_per_image) in zip(proposals, targets):
match_quality_matrix = boxlist_iou(targets_per_image, proposals_per_image)
matched_idxs = self.proposal_matcher(match_quality_matrix)
targets_per_image = targets_per_image.copy_with_fields(['labels', 'keypoints'])
matched_targets = targets_per_image[matched_idxs.clamp(min=0)]
matched_targets.add_field('matched_idxs', matched_idxs)
matched_targets = matched_targets
matched_idxs = matched_targets.get_field('matched_idxs')
labels_per_image = matched_targets.get_field('labels')
labels_per_image = labels_per_image.to(dtype=torch.int64)
neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[neg_inds] = 0
keypoints_per_image = matched_targets.get_field('keypoints')
x_within = (keypoints_per_image.keypoints[..., 0] >= matched_targets.bbox[:, 0, None]) & (keypoints_per_image.keypoints[..., 0] <= matched_targets.bbox[:, 2, None])
y_within = (keypoints_per_image.keypoints[..., 1] >= matched_targets.bbox[:, 1, None]) & (keypoints_per_image.keypoints[..., 1] <= matched_targets.bbox[:, 3, None])
within_box = x_within & y_within
vis_kp = keypoints_per_image.keypoints[..., 2] > 0
is_visible = (within_box & vis_kp).sum(1) > 0
labels_per_image[~is_visible] = -1
labels.append(labels_per_image)
keypoints.append(keypoints_per_image)
return (labels, keypoints)
|
DF-Traffic-Sign-Identification
|
positive
|
def __call__(self, r):
<DeepExtract>
authstr = 'Basic ' + to_native_string(b64encode(('%s:%s' % (self.username, self.password)).encode('latin1')).strip())
r.headers['Proxy-Authorization'] = authstr
</DeepExtract>
return r
|
def __call__(self, r):
authstr = 'Basic ' + to_native_string(b64encode(('%s:%s' % (self.username, self.password)).encode('latin1')).strip())
r.headers['Proxy-Authorization'] = authstr
return r
|
Crunchyroll-XML-Decoder
|
positive
|
def _sample_single(field):
<DeepExtract>
(self._h, self._c) = self.lstm(self._inputs, (self._h, self._c))
</DeepExtract>
logit = self.soft[field.name](self._h[-1])
if self.temperature is not None:
logit /= self.temperature
if self.tanh_constant is not None:
logit = self.tanh_constant * torch.tanh(logit)
if field.choose_one:
sampled = torch.multinomial(F.softmax(logit, dim=-1), 1).view(-1)
log_prob = self.cross_entropy_loss(logit, sampled)
self._inputs = self.embedding[field.name](sampled)
else:
logit = logit.view(-1, 1)
logit = torch.cat([-logit, logit], 1)
sampled = torch.multinomial(F.softmax(logit, dim=-1), 1).view(-1)
skip_prob = torch.sigmoid(logit)
kl = torch.sum(skip_prob * torch.log(skip_prob / self.skip_targets))
self.sample_skip_penalty += kl
log_prob = self.cross_entropy_loss(logit, sampled)
sampled = sampled.nonzero().view(-1)
if sampled.sum().item():
self._inputs = (torch.sum(self.embedding[field.name](sampled.view(-1)), 0) / (1.0 + torch.sum(sampled))).unsqueeze(0)
else:
self._inputs = torch.zeros(1, self.lstm_size, device=self.embedding[field.name].weight.device)
sampled = sampled.detach().numpy().tolist()
self.sample_log_prob += self.entropy_reduction(log_prob)
entropy = (log_prob * torch.exp(-log_prob)).detach()
self.sample_entropy += self.entropy_reduction(entropy)
if len(sampled) == 1:
sampled = sampled[0]
return sampled
|
def _sample_single(field):
(self._h, self._c) = self.lstm(self._inputs, (self._h, self._c))
logit = self.soft[field.name](self._h[-1])
if self.temperature is not None:
logit /= self.temperature
if self.tanh_constant is not None:
logit = self.tanh_constant * torch.tanh(logit)
if field.choose_one:
sampled = torch.multinomial(F.softmax(logit, dim=-1), 1).view(-1)
log_prob = self.cross_entropy_loss(logit, sampled)
self._inputs = self.embedding[field.name](sampled)
else:
logit = logit.view(-1, 1)
logit = torch.cat([-logit, logit], 1)
sampled = torch.multinomial(F.softmax(logit, dim=-1), 1).view(-1)
skip_prob = torch.sigmoid(logit)
kl = torch.sum(skip_prob * torch.log(skip_prob / self.skip_targets))
self.sample_skip_penalty += kl
log_prob = self.cross_entropy_loss(logit, sampled)
sampled = sampled.nonzero().view(-1)
if sampled.sum().item():
self._inputs = (torch.sum(self.embedding[field.name](sampled.view(-1)), 0) / (1.0 + torch.sum(sampled))).unsqueeze(0)
else:
self._inputs = torch.zeros(1, self.lstm_size, device=self.embedding[field.name].weight.device)
sampled = sampled.detach().numpy().tolist()
self.sample_log_prob += self.entropy_reduction(log_prob)
entropy = (log_prob * torch.exp(-log_prob)).detach()
self.sample_entropy += self.entropy_reduction(entropy)
if len(sampled) == 1:
sampled = sampled[0]
return sampled
|
AutoGL
|
positive
|
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
<DeepExtract>
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
</DeepExtract>
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
<DeepExtract>
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
</DeepExtract>
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
|
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
|
centerpose
|
positive
|
def main():
parser = OptionParser()
parser.add_option('-t', '--host', dest='host', help='host name', metavar='HOST', default='localhost')
parser.add_option('-p', '--port', dest='port', type=int, help='port number', metavar='PORT', default=5672)
parser.add_option('-u', '--user', dest='username', help='username', metavar='USERNAME', default='guest')
parser.add_option('-w', '--password', dest='password', help='password', metavar='PASSWORD', default='guest')
parser.add_option('-V', '--vhost', dest='vhost', help='vhost', metavar='VHOST', default='/')
parser.add_option('-e', '--exchange', dest='exchange', help='exchange', metavar='EXCHANGE', default='graphite')
parser.add_option('-v', '--verbose', dest='verbose', help='verbose', default=False, action='store_true')
(options, args) = parser.parse_args()
<DeepExtract>
factory = createAMQPListener(options.username, options.password, options.vhost, options.exchange, spec=spec, channel=channel, verbose=options.verbose)
reactor.connectTCP(options.host, options.port, factory)
</DeepExtract>
reactor.run()
|
def main():
parser = OptionParser()
parser.add_option('-t', '--host', dest='host', help='host name', metavar='HOST', default='localhost')
parser.add_option('-p', '--port', dest='port', type=int, help='port number', metavar='PORT', default=5672)
parser.add_option('-u', '--user', dest='username', help='username', metavar='USERNAME', default='guest')
parser.add_option('-w', '--password', dest='password', help='password', metavar='PASSWORD', default='guest')
parser.add_option('-V', '--vhost', dest='vhost', help='vhost', metavar='VHOST', default='/')
parser.add_option('-e', '--exchange', dest='exchange', help='exchange', metavar='EXCHANGE', default='graphite')
parser.add_option('-v', '--verbose', dest='verbose', help='verbose', default=False, action='store_true')
(options, args) = parser.parse_args()
factory = createAMQPListener(options.username, options.password, options.vhost, options.exchange, spec=spec, channel=channel, verbose=options.verbose)
reactor.connectTCP(options.host, options.port, factory)
reactor.run()
|
carbon
|
positive
|
def setupUi(self, ChooseComputerDialog):
ChooseComputerDialog.setObjectName('ChooseComputerDialog')
ChooseComputerDialog.resize(400, 150)
ChooseComputerDialog.setMinimumSize(QtCore.QSize(400, 150))
self.verticalLayout = QtWidgets.QVBoxLayout(ChooseComputerDialog)
self.verticalLayout.setObjectName('verticalLayout')
self.heading_label = QtWidgets.QLabel(ChooseComputerDialog)
self.heading_label.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeading | QtCore.Qt.AlignmentFlag.AlignLeft | QtCore.Qt.AlignmentFlag.AlignVCenter)
self.heading_label.setObjectName('heading_label')
self.verticalLayout.addWidget(self.heading_label)
self.computers_combo_box = QtWidgets.QComboBox(ChooseComputerDialog)
self.computers_combo_box.setObjectName('computers_combo_box')
self.verticalLayout.addWidget(self.computers_combo_box)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName('horizontalLayout_2')
self.continue_button = QtWidgets.QPushButton(ChooseComputerDialog)
self.continue_button.setFocusPolicy(QtCore.Qt.FocusPolicy.NoFocus)
self.continue_button.setObjectName('continue_button')
self.horizontalLayout_2.addWidget(self.continue_button)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout_2)
<DeepExtract>
_translate = QtCore.QCoreApplication.translate
ChooseComputerDialog.setWindowTitle(_translate('ChooseComputerDialog', 'Choose Computer - Blobbackup'))
self.heading_label.setText(_translate('ChooseComputerDialog', 'Choose the computer you want to restore from.'))
self.continue_button.setText(_translate('ChooseComputerDialog', 'Continue'))
</DeepExtract>
QtCore.QMetaObject.connectSlotsByName(ChooseComputerDialog)
|
def setupUi(self, ChooseComputerDialog):
ChooseComputerDialog.setObjectName('ChooseComputerDialog')
ChooseComputerDialog.resize(400, 150)
ChooseComputerDialog.setMinimumSize(QtCore.QSize(400, 150))
self.verticalLayout = QtWidgets.QVBoxLayout(ChooseComputerDialog)
self.verticalLayout.setObjectName('verticalLayout')
self.heading_label = QtWidgets.QLabel(ChooseComputerDialog)
self.heading_label.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeading | QtCore.Qt.AlignmentFlag.AlignLeft | QtCore.Qt.AlignmentFlag.AlignVCenter)
self.heading_label.setObjectName('heading_label')
self.verticalLayout.addWidget(self.heading_label)
self.computers_combo_box = QtWidgets.QComboBox(ChooseComputerDialog)
self.computers_combo_box.setObjectName('computers_combo_box')
self.verticalLayout.addWidget(self.computers_combo_box)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName('horizontalLayout_2')
self.continue_button = QtWidgets.QPushButton(ChooseComputerDialog)
self.continue_button.setFocusPolicy(QtCore.Qt.FocusPolicy.NoFocus)
self.continue_button.setObjectName('continue_button')
self.horizontalLayout_2.addWidget(self.continue_button)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout_2)
_translate = QtCore.QCoreApplication.translate
ChooseComputerDialog.setWindowTitle(_translate('ChooseComputerDialog', 'Choose Computer - Blobbackup'))
self.heading_label.setText(_translate('ChooseComputerDialog', 'Choose the computer you want to restore from.'))
self.continue_button.setText(_translate('ChooseComputerDialog', 'Continue'))
QtCore.QMetaObject.connectSlotsByName(ChooseComputerDialog)
|
BlobBackup
|
positive
|
def forward(self, p, out):
ASFF = False
if ASFF:
(i, n) = (self.index, self.nl)
p = out[self.layers[i]]
(bs, _, ny, nx) = p.shape
if (self.nx, self.ny) != (nx, ny):
<DeepExtract>
(self.nx, self.ny) = (nx, ny)
self.ng = torch.tensor((nx, ny), dtype=torch.float)
if not self.training:
(yv, xv) = torch.meshgrid([torch.arange(self.ny, device=p.device), torch.arange(self.nx, device=p.device)])
self.grid = torch.stack((xv, yv), 2).view((1, 1, self.ny, self.nx, 2)).float()
if self.anchor_vec.device != p.device:
self.anchor_vec = self.anchor_vec.to(p.device)
self.anchor_wh = self.anchor_wh.to(p.device)
</DeepExtract>
w = torch.sigmoid(p[:, -n:]) * (2 / n)
p = out[self.layers[i]][:, :-n] * w[:, i:i + 1]
for j in range(n):
if j != i:
p += w[:, j:j + 1] * F.interpolate(out[self.layers[j]][:, :-n], size=[ny, nx], mode='bilinear', align_corners=False)
elif ONNX_EXPORT:
bs = 1
else:
(bs, _, ny, nx) = p.shape
if (self.nx, self.ny) != (nx, ny):
<DeepExtract>
(self.nx, self.ny) = (nx, ny)
self.ng = torch.tensor((nx, ny), dtype=torch.float)
if not self.training:
(yv, xv) = torch.meshgrid([torch.arange(self.ny, device=p.device), torch.arange(self.nx, device=p.device)])
self.grid = torch.stack((xv, yv), 2).view((1, 1, self.ny, self.nx, 2)).float()
if self.anchor_vec.device != p.device:
self.anchor_vec = self.anchor_vec.to(p.device)
self.anchor_wh = self.anchor_wh.to(p.device)
</DeepExtract>
p = p.view(bs, self.na, self.no, self.ny, self.nx).permute(0, 1, 3, 4, 2).contiguous()
if self.training:
return p
elif ONNX_EXPORT:
m = self.na * self.nx * self.ny
ng = 1.0 / self.ng.repeat(m, 1)
grid = self.grid.repeat(1, self.na, 1, 1, 1).view(m, 2)
anchor_wh = self.anchor_wh.repeat(1, 1, self.nx, self.ny, 1).view(m, 2) * ng
p = p.view(m, self.no)
xy = torch.sigmoid(p[:, 0:2]) + grid
wh = torch.exp(p[:, 2:4]) * anchor_wh
p_cls = torch.sigmoid(p[:, 4:5]) if self.nc == 1 else torch.sigmoid(p[:, 5:self.no]) * torch.sigmoid(p[:, 4:5])
return (p_cls, xy * ng, wh)
else:
io = p.clone()
io[..., :2] = torch.sigmoid(io[..., :2]) + self.grid
io[..., 2:4] = torch.exp(io[..., 2:4]) * self.anchor_wh
io[..., :4] *= self.stride
torch.sigmoid_(io[..., 4:])
return (io.view(bs, -1, self.no), p)
|
def forward(self, p, out):
ASFF = False
if ASFF:
(i, n) = (self.index, self.nl)
p = out[self.layers[i]]
(bs, _, ny, nx) = p.shape
if (self.nx, self.ny) != (nx, ny):
(self.nx, self.ny) = (nx, ny)
self.ng = torch.tensor((nx, ny), dtype=torch.float)
if not self.training:
(yv, xv) = torch.meshgrid([torch.arange(self.ny, device=p.device), torch.arange(self.nx, device=p.device)])
self.grid = torch.stack((xv, yv), 2).view((1, 1, self.ny, self.nx, 2)).float()
if self.anchor_vec.device != p.device:
self.anchor_vec = self.anchor_vec.to(p.device)
self.anchor_wh = self.anchor_wh.to(p.device)
w = torch.sigmoid(p[:, -n:]) * (2 / n)
p = out[self.layers[i]][:, :-n] * w[:, i:i + 1]
for j in range(n):
if j != i:
p += w[:, j:j + 1] * F.interpolate(out[self.layers[j]][:, :-n], size=[ny, nx], mode='bilinear', align_corners=False)
elif ONNX_EXPORT:
bs = 1
else:
(bs, _, ny, nx) = p.shape
if (self.nx, self.ny) != (nx, ny):
(self.nx, self.ny) = (nx, ny)
self.ng = torch.tensor((nx, ny), dtype=torch.float)
if not self.training:
(yv, xv) = torch.meshgrid([torch.arange(self.ny, device=p.device), torch.arange(self.nx, device=p.device)])
self.grid = torch.stack((xv, yv), 2).view((1, 1, self.ny, self.nx, 2)).float()
if self.anchor_vec.device != p.device:
self.anchor_vec = self.anchor_vec.to(p.device)
self.anchor_wh = self.anchor_wh.to(p.device)
p = p.view(bs, self.na, self.no, self.ny, self.nx).permute(0, 1, 3, 4, 2).contiguous()
if self.training:
return p
elif ONNX_EXPORT:
m = self.na * self.nx * self.ny
ng = 1.0 / self.ng.repeat(m, 1)
grid = self.grid.repeat(1, self.na, 1, 1, 1).view(m, 2)
anchor_wh = self.anchor_wh.repeat(1, 1, self.nx, self.ny, 1).view(m, 2) * ng
p = p.view(m, self.no)
xy = torch.sigmoid(p[:, 0:2]) + grid
wh = torch.exp(p[:, 2:4]) * anchor_wh
p_cls = torch.sigmoid(p[:, 4:5]) if self.nc == 1 else torch.sigmoid(p[:, 5:self.no]) * torch.sigmoid(p[:, 4:5])
return (p_cls, xy * ng, wh)
else:
io = p.clone()
io[..., :2] = torch.sigmoid(io[..., :2]) + self.grid
io[..., 2:4] = torch.exp(io[..., 2:4]) * self.anchor_wh
io[..., :4] *= self.stride
torch.sigmoid_(io[..., 4:])
return (io.view(bs, -1, self.no), p)
|
convert_yolo_weights
|
positive
|
def get_sample_size(risk_limit: int, contest: Contest, sample_results: Optional[BALLOT_POLLING_SAMPLE_RESULTS], round_sizes: Optional[BALLOT_POLLING_ROUND_SIZES]) -> Dict[str, SampleSizeOption]:
"""
Computes initial sample size parameterized by likelihood that the
initial sample will confirm the election result, assuming no
discrepancies.
Inputs:
risk_limit - the risk-limit for this audit
contest - a sampler_contest object of the contest being audited
sample_results - mapping of candidates to votes in the (cumulative)
sample:
{
candidate1: sampled_votes,
candidate2: sampled_votes,
...
}
Outputs:
samples - dictionary mapping confirmation likelihood to sample size:
{
likelihood1: sample_size,
likelihood2: sample_size,
...
asn: {
"size": sample_size,
"prob": prob # the probability the asn terminates
# in one round
}
}
"""
logging.debug(f'bravo::get_sample_size(risk_limit={risk_limit!r}, contest={contest!r}, sample_results={sample_results!r})')
alpha = Decimal(risk_limit) / 100
assert alpha < 1, 'The risk-limit must be less than one!'
if alpha == 0:
return {'all-ballots': {'type': 'all-ballots', 'size': contest.ballots, 'prob': None}}
quants = [0.7, 0.8, 0.9]
if round_sizes:
num_sampled = sum([round_info.round_size for round_info in round_sizes.values()])
if num_sampled >= contest.ballots:
raise ValueError('All ballots have already been audited!')
winners = contest.margins['winners']
losers = contest.margins['losers']
if not losers:
raise ValueError('Contest must have candidates who did not win!')
if is_tie(contest):
return {'all-ballots': {'type': 'all-ballots', 'size': contest.ballots, 'prob': None}}
if is_landslide(contest):
return {'asn': {'type': 'ASN', 'size': 1, 'prob': 1}}
cumulative_sample = {}
if sample_results:
<DeepExtract>
cumulative_sample = defaultdict(int)
for rd in sample_results:
for cand in sample_results[rd]:
cumulative_sample[cand] += sample_results[rd][cand]
cumulative_sample = cumulative_sample
</DeepExtract>
else:
for candidate in contest.candidates:
cumulative_sample[candidate] = 0
samples: Dict[str, SampleSizeOption] = {}
<DeepExtract>
if is_tie(contest):
raise ValueError('Cannot compute ASN for a tied contest')
winners = contest.margins['winners']
losers = contest.margins['losers']
if not losers:
raise ValueError('Cannot compute ASN for a contest with no losers')
T = get_test_statistics(contest.margins, cumulative_sample)
class SampleSizeWinnerLoserStats(TypedDict):
p_w: Decimal
p_l: Decimal
sample_w: int
sample_l: int
sample_size = 0
sample_size_winner_loser_stats: Optional[SampleSizeWinnerLoserStats] = None
for (winner_name, winner_stats) in winners.items():
for (loser_name, loser_stats) in losers.items():
weighted_alpha = Decimal(1) / alpha / T[winner_name, loser_name]
p_w = Decimal(winner_stats['p_w'])
p_l = Decimal(loser_stats['p_l'])
if p_l == 0:
continue
s_w = p_w / (p_w + p_l)
z_w = (2 * s_w).ln()
z_l = (2 - 2 * s_w).ln()
possible_sample_size = math.ceil((weighted_alpha.ln() + z_w / Decimal(2)) / (p_w * z_w + p_l * z_l))
if possible_sample_size > sample_size:
sample_size = possible_sample_size
sample_size_winner_loser_stats = {'p_w': p_w, 'p_l': p_l, 'sample_w': cumulative_sample[winner_name], 'sample_l': cumulative_sample[loser_name]}
if sample_size == 0:
raise ValueError('Sample indicates the audit is over')
probability_of_completion = expected_prob(alpha, sample_size_winner_loser_stats['p_w'], sample_size_winner_loser_stats['p_l'], sample_size_winner_loser_stats['sample_w'], sample_size_winner_loser_stats['sample_l'], sample_size) if sample_size_winner_loser_stats is not None and contest.num_winners == 1 else None
samples['asn'] = {'type': 'ASN', 'size': sample_size, 'prob': probability_of_completion}
</DeepExtract>
if contest.num_winners != 1:
return samples
for quant in quants:
sample_size = 0
for (winner_name, winner_stats) in winners.items():
for (loser_name, loser_stats) in losers.items():
p_w = Decimal(winner_stats['p_w'])
p_l = Decimal(loser_stats['p_l'])
if p_l == 0:
continue
sample_w = cumulative_sample[winner_name]
sample_l = cumulative_sample[loser_name]
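                # inlined BRAVO sample-size computation: ballots needed to confirm this winner/loser pair with probability `quant`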
<DeepExtract>
p_wr = p_w + p_l
p_w2 = p_w / p_wr
p_r2 = 1 - p_w2
plus = (p_w2 / Decimal(0.5)).ln()
minus = (p_r2 / Decimal(0.5)).ln()
threshold = (1 / alpha).ln() - (sample_w * plus + sample_l * minus)
if threshold <= 0:
                    continue  # a zero sample size can never exceed sample_size, so skip this pair
z = -stats.norm.ppf(quant)
d = p_w2 * p_r2
f = threshold / (plus - minus)
g = minus / (plus - minus) + p_w2
q_a = g ** 2
q_b = -(Decimal(z) ** 2 * d + 2 * f * g)
q_c = f ** 2
radical = Decimal(0).max(q_b ** 2 - 4 * q_a * q_c).sqrt()
if quant > 0.5:
size = math.floor((-q_b + radical) / (2 * q_a))
else:
size = math.floor((-q_b - radical) / (2 * q_a))
test_stat = Decimal(0)
while test_stat.is_nan() or (test_stat < threshold and size < contest.ballots):
x_c = Decimal(stats.binom.ppf(1.0 - quant, size, float(p_w2)))
test_stat = x_c * plus + (size - x_c) * minus
if test_stat.is_nan() or test_stat < threshold:
size += 1
size_adj = math.ceil(size / p_wr)
possible_sample_size = size_adj
</DeepExtract>
if possible_sample_size > sample_size:
sample_size = possible_sample_size
if sample_size != 0:
samples[str(quant)] = {'type': None, 'size': sample_size, 'prob': quant}
large_election_threshold = 100000
all_ballots_threshold = contest.ballots * 0.25
if contest.ballots > large_election_threshold and '0.9' in samples and (samples['0.9']['size'] >= all_ballots_threshold):
return {'all-ballots': {'type': 'all-ballots', 'size': contest.ballots, 'prob': None}}
logging.debug(f'bravo::get_sample_size => samples={samples!r}')
return samples
|
def get_sample_size(risk_limit: int, contest: Contest, sample_results: Optional[BALLOT_POLLING_SAMPLE_RESULTS], round_sizes: Optional[BALLOT_POLLING_ROUND_SIZES]) -> Dict[str, SampleSizeOption]:
"""
Computes initial sample size parameterized by likelihood that the
initial sample will confirm the election result, assuming no
discrepancies.
Inputs:
risk_limit - the risk-limit for this audit
contest - a sampler_contest object of the contest being audited
sample_results - mapping of candidates to votes in the (cumulative)
sample:
{
candidate1: sampled_votes,
candidate2: sampled_votes,
...
}
Outputs:
samples - dictionary mapping confirmation likelihood to sample size:
{
likelihood1: sample_size,
likelihood2: sample_size,
...
asn: {
"size": sample_size,
"prob": prob # the probability the asn terminates
# in one round
}
}
"""
logging.debug(f'bravo::get_sample_size(risk_limit={risk_limit!r}, contest={contest!r}, sample_results={sample_results!r})')
alpha = Decimal(risk_limit) / 100
assert alpha < 1, 'The risk-limit must be less than one!'
if alpha == 0:
return {'all-ballots': {'type': 'all-ballots', 'size': contest.ballots, 'prob': None}}
quants = [0.7, 0.8, 0.9]
if round_sizes:
num_sampled = sum([round_info.round_size for round_info in round_sizes.values()])
if num_sampled >= contest.ballots:
raise ValueError('All ballots have already been audited!')
winners = contest.margins['winners']
losers = contest.margins['losers']
if not losers:
raise ValueError('Contest must have candidates who did not win!')
if is_tie(contest):
return {'all-ballots': {'type': 'all-ballots', 'size': contest.ballots, 'prob': None}}
if is_landslide(contest):
return {'asn': {'type': 'ASN', 'size': 1, 'prob': 1}}
cumulative_sample = {}
if sample_results:
cumulative_sample = defaultdict(int)
for rd in sample_results:
for cand in sample_results[rd]:
cumulative_sample[cand] += sample_results[rd][cand]
cumulative_sample = cumulative_sample
else:
for candidate in contest.candidates:
cumulative_sample[candidate] = 0
samples: Dict[str, SampleSizeOption] = {}
if is_tie(contest):
raise ValueError('Cannot compute ASN for a tied contest')
winners = contest.margins['winners']
losers = contest.margins['losers']
if not losers:
raise ValueError('Cannot compute ASN for a contest with no losers')
T = get_test_statistics(contest.margins, cumulative_sample)
class SampleSizeWinnerLoserStats(TypedDict):
p_w: Decimal
p_l: Decimal
sample_w: int
sample_l: int
sample_size = 0
sample_size_winner_loser_stats: Optional[SampleSizeWinnerLoserStats] = None
for (winner_name, winner_stats) in winners.items():
for (loser_name, loser_stats) in losers.items():
weighted_alpha = Decimal(1) / alpha / T[winner_name, loser_name]
p_w = Decimal(winner_stats['p_w'])
p_l = Decimal(loser_stats['p_l'])
if p_l == 0:
continue
s_w = p_w / (p_w + p_l)
z_w = (2 * s_w).ln()
z_l = (2 - 2 * s_w).ln()
possible_sample_size = math.ceil((weighted_alpha.ln() + z_w / Decimal(2)) / (p_w * z_w + p_l * z_l))
if possible_sample_size > sample_size:
sample_size = possible_sample_size
sample_size_winner_loser_stats = {'p_w': p_w, 'p_l': p_l, 'sample_w': cumulative_sample[winner_name], 'sample_l': cumulative_sample[loser_name]}
if sample_size == 0:
raise ValueError('Sample indicates the audit is over')
probability_of_completion = expected_prob(alpha, sample_size_winner_loser_stats['p_w'], sample_size_winner_loser_stats['p_l'], sample_size_winner_loser_stats['sample_w'], sample_size_winner_loser_stats['sample_l'], sample_size) if sample_size_winner_loser_stats is not None and contest.num_winners == 1 else None
samples['asn'] = {'type': 'ASN', 'size': sample_size, 'prob': probability_of_completion}
if contest.num_winners != 1:
return samples
for quant in quants:
sample_size = 0
for (winner_name, winner_stats) in winners.items():
for (loser_name, loser_stats) in losers.items():
p_w = Decimal(winner_stats['p_w'])
p_l = Decimal(loser_stats['p_l'])
if p_l == 0:
continue
sample_w = cumulative_sample[winner_name]
sample_l = cumulative_sample[loser_name]
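                # inlined BRAVO sample-size computation: ballots needed to confirm this winner/loser pair with probability `quant`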
p_wr = p_w + p_l
p_w2 = p_w / p_wr
p_r2 = 1 - p_w2
plus = (p_w2 / Decimal(0.5)).ln()
minus = (p_r2 / Decimal(0.5)).ln()
threshold = (1 / alpha).ln() - (sample_w * plus + sample_l * minus)
if threshold <= 0:
                    continue  # a zero sample size can never exceed sample_size, so skip this pair
z = -stats.norm.ppf(quant)
d = p_w2 * p_r2
f = threshold / (plus - minus)
g = minus / (plus - minus) + p_w2
q_a = g ** 2
q_b = -(Decimal(z) ** 2 * d + 2 * f * g)
q_c = f ** 2
radical = Decimal(0).max(q_b ** 2 - 4 * q_a * q_c).sqrt()
if quant > 0.5:
size = math.floor((-q_b + radical) / (2 * q_a))
else:
size = math.floor((-q_b - radical) / (2 * q_a))
test_stat = Decimal(0)
while test_stat.is_nan() or (test_stat < threshold and size < contest.ballots):
x_c = Decimal(stats.binom.ppf(1.0 - quant, size, float(p_w2)))
test_stat = x_c * plus + (size - x_c) * minus
if test_stat.is_nan() or test_stat < threshold:
size += 1
size_adj = math.ceil(size / p_wr)
possible_sample_size = size_adj
if possible_sample_size > sample_size:
sample_size = possible_sample_size
if sample_size != 0:
samples[str(quant)] = {'type': None, 'size': sample_size, 'prob': quant}
large_election_threshold = 100000
all_ballots_threshold = contest.ballots * 0.25
if contest.ballots > large_election_threshold and '0.9' in samples and (samples['0.9']['size'] >= all_ballots_threshold):
return {'all-ballots': {'type': 'all-ballots', 'size': contest.ballots, 'prob': None}}
logging.debug(f'bravo::get_sample_size => samples={samples!r}')
return samples
|
arlo
|
positive
|
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
result = ssov32(d_a - d_b, self.max_pos, self.max_neg)
c = 0
v = overflow(result)
av = advanced_overflow(result)
<DeepExtract>
psw = self.get('psw', Type.int_32)
</DeepExtract>
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
<DeepExtract>
offset = self._lookup_register(self.irsb_c.irsb.arch, 'psw')
self.irsb_c.put(psw, offset)
</DeepExtract>
return result
|
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
result = ssov32(d_a - d_b, self.max_pos, self.max_neg)
c = 0
v = overflow(result)
av = advanced_overflow(result)
psw = self.get('psw', Type.int_32)
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
offset = self._lookup_register(self.irsb_c.irsb.arch, 'psw')
self.irsb_c.put(psw, offset)
return result
|
angr-platforms
|
positive
|
@wraps(f)
def decorator(*args, **kwargs):
if current_app.testing:
return f(*args, **kwargs)
if 'Mdm-Signature' not in request.headers:
raise TypeError('Client did not supply an Mdm-Signature header but signature is required.')
detached_signature = b64decode(request.headers['Mdm-Signature'])
try:
<DeepExtract>
ci = cms.ContentInfo.load(detached_signature)
assert ci['content_type'].native == 'signed_data'
signed: cms.SignedData = ci['content']
current_app.logger.debug('CMS request contains %d certificate(s)', len(signed['certificates']))
signers = []
for signer in signed['signer_infos']:
asn_certificate = _certificate_by_signer_identifier(signed['certificates'], signer['sid'])
assert asn_certificate is not None
certificate = x509.load_der_x509_certificate(asn_certificate.dump(), default_backend())
digest_algorithm = signer['digest_algorithm']
signature_algorithm = signer['signature_algorithm']
hash_function = _cryptography_hash_function(digest_algorithm)
pad_function = _cryptography_pad_function(signature_algorithm)
if hash_function is None or pad_function is None:
raise ValueError('Unsupported signature algorithm: {}'.format(signature_algorithm))
else:
current_app.logger.debug('Using signature algorithm: %s', signature_algorithm.native)
assert signed['encap_content_info']['content_type'].native == 'data'
if True:
data = request.data
else:
data = signed['encap_content_info']['content'].native
if 'signed_attrs' in signer and len(signer['signed_attrs']) > 0:
for i in range(0, len(signer['signed_attrs'])):
signed_attr: CMSAttribute = signer['signed_attrs'][i]
if signed_attr['type'].native == 'message_digest':
current_app.logger.debug('SignerInfo digest: %s', b64encode(signed_attr['values'][0].native))
certificate.public_key().verify(signer['signature'].native, signer['signed_attrs'].dump(), pad_function(), hash_function())
else:
certificate.public_key().verify(signer['signature'].native, data, pad_function(), hash_function())
signers.append(certificate)
if True:
(signers, detached_signature) = (signers, request.data)
else:
(signers, detached_signature) = (signers, signed['encap_content_info']['content'].native)
</DeepExtract>
g.signers = signers
g.signed_data = signed_data
except InvalidSignature as e:
current_app.logger.warn('Invalid Signature in Mdm-Signature header')
if not current_app.config.get('DEBUG', False):
return abort(403)
return f(*args, **kwargs)
|
@wraps(f)
def decorator(*args, **kwargs):
if current_app.testing:
return f(*args, **kwargs)
if 'Mdm-Signature' not in request.headers:
raise TypeError('Client did not supply an Mdm-Signature header but signature is required.')
detached_signature = b64decode(request.headers['Mdm-Signature'])
try:
ci = cms.ContentInfo.load(detached_signature)
assert ci['content_type'].native == 'signed_data'
signed: cms.SignedData = ci['content']
current_app.logger.debug('CMS request contains %d certificate(s)', len(signed['certificates']))
signers = []
for signer in signed['signer_infos']:
asn_certificate = _certificate_by_signer_identifier(signed['certificates'], signer['sid'])
assert asn_certificate is not None
certificate = x509.load_der_x509_certificate(asn_certificate.dump(), default_backend())
digest_algorithm = signer['digest_algorithm']
signature_algorithm = signer['signature_algorithm']
hash_function = _cryptography_hash_function(digest_algorithm)
pad_function = _cryptography_pad_function(signature_algorithm)
if hash_function is None or pad_function is None:
raise ValueError('Unsupported signature algorithm: {}'.format(signature_algorithm))
else:
current_app.logger.debug('Using signature algorithm: %s', signature_algorithm.native)
assert signed['encap_content_info']['content_type'].native == 'data'
if True:
data = request.data
else:
data = signed['encap_content_info']['content'].native
if 'signed_attrs' in signer and len(signer['signed_attrs']) > 0:
for i in range(0, len(signer['signed_attrs'])):
signed_attr: CMSAttribute = signer['signed_attrs'][i]
if signed_attr['type'].native == 'message_digest':
current_app.logger.debug('SignerInfo digest: %s', b64encode(signed_attr['values'][0].native))
certificate.public_key().verify(signer['signature'].native, signer['signed_attrs'].dump(), pad_function(), hash_function())
else:
certificate.public_key().verify(signer['signature'].native, data, pad_function(), hash_function())
signers.append(certificate)
if True:
(signers, detached_signature) = (signers, request.data)
else:
(signers, detached_signature) = (signers, signed['encap_content_info']['content'].native)
g.signers = signers
g.signed_data = signed_data
except InvalidSignature as e:
current_app.logger.warn('Invalid Signature in Mdm-Signature header')
if not current_app.config.get('DEBUG', False):
return abort(403)
return f(*args, **kwargs)
|
commandment
|
positive
|
def renderNames_gl(self, selection=None):
cells = self.sim.cellStates.values()
if len(cells) != self.ncells_names_list:
<DeepExtract>
if self.dlist_names:
glDeleteLists(self.dlist_names, 1)
index = glGenLists(1)
glNewList(index, GL_COMPILE)
self.render_cell_names()
glEndList()
self.dlist_names = index
</DeepExtract>
self.ncells_names_list = len(cells)
glCallList(self.dlist_names)
|
def renderNames_gl(self, selection=None):
cells = self.sim.cellStates.values()
if len(cells) != self.ncells_names_list:
if self.dlist_names:
glDeleteLists(self.dlist_names, 1)
index = glGenLists(1)
glNewList(index, GL_COMPILE)
self.render_cell_names()
glEndList()
self.dlist_names = index
self.ncells_names_list = len(cells)
glCallList(self.dlist_names)
|
CellModeller
|
positive
|
def parse_kraken_trades(data_row, parser, **_kwargs):
row_dict = data_row.row_dict
data_row.timestamp = DataParser.parse_timestamp(row_dict['time'])
<DeepExtract>
for quote_asset in sorted(QUOTE_ASSETS, reverse=True):
if row_dict['pair'].endswith(quote_asset):
base_asset = row_dict['pair'][:-len(quote_asset)]
if len(base_asset) < 3:
if base_asset in ASSETS_SHORT:
(base_asset, quote_asset) = (base_asset, quote_asset)
else:
(base_asset, quote_asset) = (base_asset, quote_asset)
(base_asset, quote_asset) = (None, None)
</DeepExtract>
if base_asset is None or quote_asset is None:
raise UnexpectedTradingPairError(parser.in_header.index('pair'), 'pair', row_dict['pair'])
if row_dict['type'] == 'buy':
data_row.t_record = TransactionOutRecord(TransactionOutRecord.TYPE_TRADE, data_row.timestamp, buy_quantity=row_dict['vol'], buy_asset=_normalise_asset(base_asset), sell_quantity=row_dict['cost'], sell_asset=_normalise_asset(quote_asset), fee_quantity=row_dict['fee'], fee_asset=_normalise_asset(quote_asset), wallet=WALLET)
elif row_dict['type'] == 'sell':
data_row.t_record = TransactionOutRecord(TransactionOutRecord.TYPE_TRADE, data_row.timestamp, buy_quantity=row_dict['cost'], buy_asset=_normalise_asset(quote_asset), sell_quantity=row_dict['vol'], sell_asset=_normalise_asset(base_asset), fee_quantity=row_dict['fee'], fee_asset=_normalise_asset(quote_asset), wallet=WALLET)
else:
raise UnexpectedTypeError(parser.in_header.index('type'), 'type', row_dict['type'])
|
def parse_kraken_trades(data_row, parser, **_kwargs):
row_dict = data_row.row_dict
data_row.timestamp = DataParser.parse_timestamp(row_dict['time'])
for quote_asset in sorted(QUOTE_ASSETS, reverse=True):
if row_dict['pair'].endswith(quote_asset):
base_asset = row_dict['pair'][:-len(quote_asset)]
if len(base_asset) < 3:
if base_asset in ASSETS_SHORT:
(base_asset, quote_asset) = (base_asset, quote_asset)
else:
(base_asset, quote_asset) = (base_asset, quote_asset)
(base_asset, quote_asset) = (None, None)
if base_asset is None or quote_asset is None:
raise UnexpectedTradingPairError(parser.in_header.index('pair'), 'pair', row_dict['pair'])
if row_dict['type'] == 'buy':
data_row.t_record = TransactionOutRecord(TransactionOutRecord.TYPE_TRADE, data_row.timestamp, buy_quantity=row_dict['vol'], buy_asset=_normalise_asset(base_asset), sell_quantity=row_dict['cost'], sell_asset=_normalise_asset(quote_asset), fee_quantity=row_dict['fee'], fee_asset=_normalise_asset(quote_asset), wallet=WALLET)
elif row_dict['type'] == 'sell':
data_row.t_record = TransactionOutRecord(TransactionOutRecord.TYPE_TRADE, data_row.timestamp, buy_quantity=row_dict['cost'], buy_asset=_normalise_asset(quote_asset), sell_quantity=row_dict['vol'], sell_asset=_normalise_asset(base_asset), fee_quantity=row_dict['fee'], fee_asset=_normalise_asset(quote_asset), wallet=WALLET)
else:
raise UnexpectedTypeError(parser.in_header.index('type'), 'type', row_dict['type'])
|
BittyTax
|
positive
|
def _on_sub_tick(self, process_id):
running = self._running
if running:
if self._batch_size is None:
print('Unable to start TrainingGenerator: batch_size must be set')
running = False
if self._resolution is None:
print('Unable to start TrainingGenerator: resolution must be set')
running = False
if self._random_warp is None:
print('Unable to start TrainingGenerator: random_warp must be set')
running = False
if running:
if self._sent_buffers_count < 2:
batch_size = self._batch_size
resolution = self._resolution
face_coverage = 1.0
rw_grid_cell_range = [3, 7]
rw_grid_rot_deg_range = [-180, 180]
rw_grid_scale_range = [-0.25, 2.5]
rw_grid_tx_range = [-0.5, 0.5]
rw_grid_ty_range = [-0.5, 0.5]
align_rot_deg_range = [-180, 180]
align_scale_range = [0.0, 2.5]
align_tx_range = [-0.5, 0.5]
align_ty_range = [-0.5, 0.5]
random_mask_complexity = 3
sharpen_chance = 25
motion_blur_chance = 25
gaussian_blur_chance = 25
reresize_chance = 25
recompress_chance = 25
img_aligned_list = []
img_aligned_shifted_list = []
shift_mat_list = []
if self._n_batch < batch_size:
while True:
<DeepExtract>
if len(self._ufm_uuid_indexes) == 0:
self._ufm_uuid_indexes = [*range(len(self._ufm_uuids))]
np.random.shuffle(self._ufm_uuid_indexes)
idx = self._ufm_uuid_indexes.pop()
uuid1 = self._ufm_uuids[idx]
</DeepExtract>
ufm1 = self._fs.get_UFaceMark_by_uuid(uuid1)
flmrks1 = ufm1.get_FLandmarks2D_best()
if flmrks1 is None:
print(f'Corrupted faceset, no FLandmarks2D for UFaceMark {ufm1.get_uuid()}')
continue
uimg1 = self._fs.get_UImage_by_uuid(ufm1.get_UImage_uuid())
if uimg1 is None:
print(f'Corrupted faceset, no UImage for UFaceMark {ufm1.get_uuid()}')
continue
img1 = uimg1.get_image()
if img1 is None:
print(f'Corrupted faceset, no image in UImage {uimg1.get_uuid()}')
continue
(img_aligned, _) = flmrks1.cut(img1, face_coverage, resolution)
img_aligned = img_aligned.astype(np.float32) / 255.0
(_, img_to_face_uni_mat1) = flmrks1.calc_cut(img1.shape[0:2], face_coverage, resolution)
fw1 = lib_face.FaceWarper(img_to_face_uni_mat1, align_rot_deg=align_rot_deg_range, align_scale=align_scale_range, align_tx=align_tx_range, align_ty=align_ty_range, rw_grid_cell_count=rw_grid_cell_range, rw_grid_rot_deg=rw_grid_rot_deg_range, rw_grid_scale=rw_grid_scale_range, rw_grid_tx=rw_grid_tx_range, rw_grid_ty=rw_grid_ty_range)
img_aligned_shifted = fw1.transform(img1, resolution, random_warp=self._random_warp).astype(np.float32) / 255.0
ip = lib_img.ImageProcessor(img_aligned_shifted)
rnd = np.random
if rnd.randint(2) == 0:
ip.hsv(rnd.uniform(0, 1), rnd.uniform(-0.5, 0.5), rnd.uniform(-0.5, 0.5), mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
else:
ip.levels([[rnd.uniform(0, 0.25), rnd.uniform(0.75, 1.0), rnd.uniform(0.5, 1.5), rnd.uniform(0, 0.25), rnd.uniform(0.75, 1.0)], [rnd.uniform(0, 0.25), rnd.uniform(0.75, 1.0), rnd.uniform(0.5, 1.5), rnd.uniform(0, 0.25), rnd.uniform(0.75, 1.0)], [rnd.uniform(0, 0.25), rnd.uniform(0.75, 1.0), rnd.uniform(0.5, 1.5), rnd.uniform(0, 0.25), rnd.uniform(0.75, 1.0)]], mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
if rnd.randint(2) == 0:
if rnd.randint(100) < sharpen_chance:
if rnd.randint(2) == 0:
ip.box_sharpen(size=rnd.randint(1, 11), power=rnd.uniform(0.5, 5.0), mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
else:
ip.gaussian_sharpen(sigma=1.0, power=rnd.uniform(0.5, 5.0), mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
else:
if rnd.randint(100) < motion_blur_chance:
ip.motion_blur(size=rnd.randint(1, 11), angle=rnd.randint(360), mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
if rnd.randint(100) < gaussian_blur_chance:
ip.gaussian_blur(sigma=rnd.uniform(0.5, 3.0), mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
if np.random.randint(2) == 0:
if rnd.randint(100) < reresize_chance:
ip.reresize(rnd.uniform(0.0, 0.75), interpolation=ip.Interpolation.NEAREST, mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
if np.random.randint(2) == 0:
if rnd.randint(100) < reresize_chance:
ip.reresize(rnd.uniform(0.0, 0.75), interpolation=ip.Interpolation.LINEAR, mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
if rnd.randint(100) < recompress_chance:
ip.jpeg_recompress(quality=rnd.randint(10, 75), mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
img_aligned_shifted = ip.get_image('HWC')
self._img_aligned_list.append(img_aligned)
self._img_aligned_shifted_list.append(img_aligned_shifted)
self._shift_mat_list.append(fw1.get_aligned_random_transform_mat())
self._n_batch += 1
break
if self._n_batch == batch_size:
data = Data()
data.batch_size = batch_size
data.resolution = resolution
data.img_aligned = np.array(self._img_aligned_list).transpose((0, 3, 1, 2))
data.img_aligned_shifted = np.array(self._img_aligned_shifted_list).transpose((0, 3, 1, 2))
data.shift_uni_mats = np.array(self._shift_mat_list)
self._send_msg('data', data)
self._sent_buffers_count += 1
self._n_batch = 0
self._img_aligned_list = []
self._img_aligned_shifted_list = []
self._shift_mat_list = []
|
def _on_sub_tick(self, process_id):
running = self._running
if running:
if self._batch_size is None:
print('Unable to start TrainingGenerator: batch_size must be set')
running = False
if self._resolution is None:
print('Unable to start TrainingGenerator: resolution must be set')
running = False
if self._random_warp is None:
print('Unable to start TrainingGenerator: random_warp must be set')
running = False
if running:
if self._sent_buffers_count < 2:
batch_size = self._batch_size
resolution = self._resolution
face_coverage = 1.0
rw_grid_cell_range = [3, 7]
rw_grid_rot_deg_range = [-180, 180]
rw_grid_scale_range = [-0.25, 2.5]
rw_grid_tx_range = [-0.5, 0.5]
rw_grid_ty_range = [-0.5, 0.5]
align_rot_deg_range = [-180, 180]
align_scale_range = [0.0, 2.5]
align_tx_range = [-0.5, 0.5]
align_ty_range = [-0.5, 0.5]
random_mask_complexity = 3
sharpen_chance = 25
motion_blur_chance = 25
gaussian_blur_chance = 25
reresize_chance = 25
recompress_chance = 25
img_aligned_list = []
img_aligned_shifted_list = []
shift_mat_list = []
if self._n_batch < batch_size:
while True:
if len(self._ufm_uuid_indexes) == 0:
self._ufm_uuid_indexes = [*range(len(self._ufm_uuids))]
np.random.shuffle(self._ufm_uuid_indexes)
idx = self._ufm_uuid_indexes.pop()
uuid1 = self._ufm_uuids[idx]
ufm1 = self._fs.get_UFaceMark_by_uuid(uuid1)
flmrks1 = ufm1.get_FLandmarks2D_best()
if flmrks1 is None:
print(f'Corrupted faceset, no FLandmarks2D for UFaceMark {ufm1.get_uuid()}')
continue
uimg1 = self._fs.get_UImage_by_uuid(ufm1.get_UImage_uuid())
if uimg1 is None:
print(f'Corrupted faceset, no UImage for UFaceMark {ufm1.get_uuid()}')
continue
img1 = uimg1.get_image()
if img1 is None:
print(f'Corrupted faceset, no image in UImage {uimg1.get_uuid()}')
continue
(img_aligned, _) = flmrks1.cut(img1, face_coverage, resolution)
img_aligned = img_aligned.astype(np.float32) / 255.0
(_, img_to_face_uni_mat1) = flmrks1.calc_cut(img1.shape[0:2], face_coverage, resolution)
fw1 = lib_face.FaceWarper(img_to_face_uni_mat1, align_rot_deg=align_rot_deg_range, align_scale=align_scale_range, align_tx=align_tx_range, align_ty=align_ty_range, rw_grid_cell_count=rw_grid_cell_range, rw_grid_rot_deg=rw_grid_rot_deg_range, rw_grid_scale=rw_grid_scale_range, rw_grid_tx=rw_grid_tx_range, rw_grid_ty=rw_grid_ty_range)
img_aligned_shifted = fw1.transform(img1, resolution, random_warp=self._random_warp).astype(np.float32) / 255.0
ip = lib_img.ImageProcessor(img_aligned_shifted)
rnd = np.random
if rnd.randint(2) == 0:
ip.hsv(rnd.uniform(0, 1), rnd.uniform(-0.5, 0.5), rnd.uniform(-0.5, 0.5), mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
else:
ip.levels([[rnd.uniform(0, 0.25), rnd.uniform(0.75, 1.0), rnd.uniform(0.5, 1.5), rnd.uniform(0, 0.25), rnd.uniform(0.75, 1.0)], [rnd.uniform(0, 0.25), rnd.uniform(0.75, 1.0), rnd.uniform(0.5, 1.5), rnd.uniform(0, 0.25), rnd.uniform(0.75, 1.0)], [rnd.uniform(0, 0.25), rnd.uniform(0.75, 1.0), rnd.uniform(0.5, 1.5), rnd.uniform(0, 0.25), rnd.uniform(0.75, 1.0)]], mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
if rnd.randint(2) == 0:
if rnd.randint(100) < sharpen_chance:
if rnd.randint(2) == 0:
ip.box_sharpen(size=rnd.randint(1, 11), power=rnd.uniform(0.5, 5.0), mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
else:
ip.gaussian_sharpen(sigma=1.0, power=rnd.uniform(0.5, 5.0), mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
else:
if rnd.randint(100) < motion_blur_chance:
ip.motion_blur(size=rnd.randint(1, 11), angle=rnd.randint(360), mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
if rnd.randint(100) < gaussian_blur_chance:
ip.gaussian_blur(sigma=rnd.uniform(0.5, 3.0), mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
if np.random.randint(2) == 0:
if rnd.randint(100) < reresize_chance:
ip.reresize(rnd.uniform(0.0, 0.75), interpolation=ip.Interpolation.NEAREST, mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
if np.random.randint(2) == 0:
if rnd.randint(100) < reresize_chance:
ip.reresize(rnd.uniform(0.0, 0.75), interpolation=ip.Interpolation.LINEAR, mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
if rnd.randint(100) < recompress_chance:
ip.jpeg_recompress(quality=rnd.randint(10, 75), mask=lib_sd.random_circle_faded_multi((resolution, resolution), complexity=random_mask_complexity))
img_aligned_shifted = ip.get_image('HWC')
self._img_aligned_list.append(img_aligned)
self._img_aligned_shifted_list.append(img_aligned_shifted)
self._shift_mat_list.append(fw1.get_aligned_random_transform_mat())
self._n_batch += 1
break
if self._n_batch == batch_size:
data = Data()
data.batch_size = batch_size
data.resolution = resolution
data.img_aligned = np.array(self._img_aligned_list).transpose((0, 3, 1, 2))
data.img_aligned_shifted = np.array(self._img_aligned_shifted_list).transpose((0, 3, 1, 2))
data.shift_uni_mats = np.array(self._shift_mat_list)
self._send_msg('data', data)
self._sent_buffers_count += 1
self._n_batch = 0
self._img_aligned_list = []
self._img_aligned_shifted_list = []
self._shift_mat_list = []
|
DeepFaceLive
|
positive
|
def side_length_for(id):
if id == 1:
return 1
side_length = 1
while side_length * side_length < id:
<DeepExtract>
side_length = side_length + 2
</DeepExtract>
return side_length
|
def side_length_for(id):
if id == 1:
return 1
side_length = 1
while side_length * side_length < id:
side_length = side_length + 2
return side_length
|
advent_of_code_2017
|
positive
|
def getColor(self, row, column):
if self.need_recalc_colors:
<DeepExtract>
self._calc_modes()
self._calc_modes()
self._calc_modes()
self._calc_colors()
self.need_recalc_colors = False
</DeepExtract>
try:
return self.slots[row][column].color
except IndexError:
return Color(255, 255, 255)
|
def getColor(self, row, column):
if self.need_recalc_colors:
self._calc_modes()
self._calc_modes()
self._calc_modes()
self._calc_colors()
self.need_recalc_colors = False
try:
return self.slots[row][column].color
except IndexError:
return Color(255, 255, 255)
|
color-palette
|
positive
|
def add_joint_connections_to_image(img_demo, joints, joint_pairs, joint_names):
for joint_pair in joint_pairs:
ind_1 = joint_names.index(joint_pair[0])
ind_2 = joint_names.index(joint_pair[1])
if flag_color_sticks is True:
<DeepExtract>
color_dict = {'purple': (255, 0, 255), 'yellow': (0, 255, 255), 'blue': (255, 0, 0), 'green': (0, 255, 0), 'red': (0, 0, 255), 'skyblue': (235, 206, 135)}
color_scalar = color_dict[joint_pair[2]]
color = color_scalar
</DeepExtract>
else:
<DeepExtract>
color_dict = {'purple': (255, 0, 255), 'yellow': (0, 255, 255), 'blue': (255, 0, 0), 'green': (0, 255, 0), 'red': (0, 0, 255), 'skyblue': (235, 206, 135)}
color_scalar = color_dict['red']
color = color_scalar
</DeepExtract>
(y1, x1, sure1) = joints[ind_1]
(y2, x2, sure2) = joints[ind_2]
if flag_only_draw_sure is False:
sure1 = sure2 = 1
if sure1 == 1 and sure2 == 1:
cv2.line(img_demo, (x1, y1), (x2, y2), color, 8)
return img_demo
|
def add_joint_connections_to_image(img_demo, joints, joint_pairs, joint_names):
for joint_pair in joint_pairs:
ind_1 = joint_names.index(joint_pair[0])
ind_2 = joint_names.index(joint_pair[1])
if flag_color_sticks is True:
color_dict = {'purple': (255, 0, 255), 'yellow': (0, 255, 255), 'blue': (255, 0, 0), 'green': (0, 255, 0), 'red': (0, 0, 255), 'skyblue': (235, 206, 135)}
color_scalar = color_dict[joint_pair[2]]
color = color_scalar
else:
color_dict = {'purple': (255, 0, 255), 'yellow': (0, 255, 255), 'blue': (255, 0, 0), 'green': (0, 255, 0), 'red': (0, 0, 255), 'skyblue': (235, 206, 135)}
color_scalar = color_dict['red']
color = color_scalar
(y1, x1, sure1) = joints[ind_1]
(y2, x2, sure2) = joints[ind_2]
if flag_only_draw_sure is False:
sure1 = sure2 = 1
if sure1 == 1 and sure2 == 1:
cv2.line(img_demo, (x1, y1), (x2, y2), color, 8)
return img_demo
|
cvToolkit
|
positive
|
def project(self, servers, bailiwick_map, default_bailiwick):
if servers.difference(self.responses):
raise ValueError('A DNSQuery can only project responses from servers that have been queried.')
<DeepExtract>
clone = DNSQuery(self.qname, self.rdtype, self.rdclass, self.flags, self.edns, self.edns_max_udp_payload, self.edns_flags, self.edns_options, self.tcp)
if False:
for server in self.responses:
bailiwick = bailiwick_map.get(server, default_bailiwick)
for (client, response) in self.responses[server].items():
response_clone = response.copy()
response_clone.query = clone
clone.add_response(server, client, response_clone, bailiwick)
clone = clone
</DeepExtract>
for server in servers:
bailiwick = bailiwick_map.get(server, default_bailiwick)
for (client, response) in self.responses[server].items():
response_clone = response.copy()
response_clone.query = clone
clone.add_response(server, client, response_clone, bailiwick)
return clone
|
def project(self, servers, bailiwick_map, default_bailiwick):
if servers.difference(self.responses):
raise ValueError('A DNSQuery can only project responses from servers that have been queried.')
clone = DNSQuery(self.qname, self.rdtype, self.rdclass, self.flags, self.edns, self.edns_max_udp_payload, self.edns_flags, self.edns_options, self.tcp)
if False:
for server in self.responses:
bailiwick = bailiwick_map.get(server, default_bailiwick)
for (client, response) in self.responses[server].items():
response_clone = response.copy()
response_clone.query = clone
clone.add_response(server, client, response_clone, bailiwick)
clone = clone
for server in servers:
bailiwick = bailiwick_map.get(server, default_bailiwick)
for (client, response) in self.responses[server].items():
response_clone = response.copy()
response_clone.query = clone
clone.add_response(server, client, response_clone, bailiwick)
return clone
|
dnsviz
|
positive
|
def main():
<DeepExtract>
p3b4Bmk = bmk.BenchmarkP3B4(bmk.file_path, default_model, 'keras', prog='p3b4_baseline', desc='Hierarchical Self-Attention Network for data extraction - Pilot 3 Benchmark 4')
gParameters = candle.finalize_parameters(p3b4Bmk)
gParameters = gParameters
</DeepExtract>
<DeepExtract>
fpath = fetch_data(gParameters)
learning_rate = gParameters['learning_rate']
batch_size = gParameters['batch_size']
epochs = gParameters['epochs']
dropout = gParameters['dropout']
embed_train = gParameters['embed_train']
optimizer = gParameters['optimizer']
wv_len = gParameters['wv_len']
attention_size = gParameters['attention_size']
attention_heads = gParameters['attention_heads']
max_words = gParameters['max_words']
max_lines = gParameters['max_lines']
train_x = np.load(fpath + '/train_X.npy')
train_y = np.load(fpath + '/train_Y.npy')
test_x = np.load(fpath + '/test_X.npy')
test_y = np.load(fpath + '/test_Y.npy')
num_classes = []
for task in range(len(train_y[0, :])):
cat = np.unique(train_y[:, task])
num_classes.append(len(cat))
train_y[:, task] = [np.where(cat == x)[0][0] for x in train_y[:, task]]
test_y[:, task] = [np.where(cat == x)[0][0] for x in test_y[:, task]]
num_tasks = len(num_classes)
max_vocab = np.max(train_x)
max_vocab2 = np.max(test_x)
if max_vocab2 > max_vocab:
max_vocab = max_vocab2
vocab_size = max_vocab + 1
vocab = np.random.rand(vocab_size, wv_len)
train_x = train_x.reshape((train_x.shape[0], max_lines, max_words))
test_x = test_x.reshape((test_x.shape[0], max_lines, max_words))
X_train = train_x
X_test = test_x
y_trains = []
y_tests = []
for k in range(num_tasks):
y_trains.append(train_y[:, k])
y_tests.append(test_y[:, k])
model = mthisan(vocab, num_classes, max_lines, max_words, attention_heads=attention_heads, attention_size=attention_size)
ret = model.train(X_train, y_trains, batch_size=batch_size, epochs=epochs, validation_data=[X_test, y_tests])
avg_loss = ret
</DeepExtract>
print('Return: ', avg_loss.history['val_loss'][-1])
|
def main():
p3b4Bmk = bmk.BenchmarkP3B4(bmk.file_path, default_model, 'keras', prog='p3b4_baseline', desc='Hierarchical Self-Attention Network for data extraction - Pilot 3 Benchmark 4')
gParameters = candle.finalize_parameters(p3b4Bmk)
gParameters = gParameters
fpath = fetch_data(gParameters)
learning_rate = gParameters['learning_rate']
batch_size = gParameters['batch_size']
epochs = gParameters['epochs']
dropout = gParameters['dropout']
embed_train = gParameters['embed_train']
optimizer = gParameters['optimizer']
wv_len = gParameters['wv_len']
attention_size = gParameters['attention_size']
attention_heads = gParameters['attention_heads']
max_words = gParameters['max_words']
max_lines = gParameters['max_lines']
train_x = np.load(fpath + '/train_X.npy')
train_y = np.load(fpath + '/train_Y.npy')
test_x = np.load(fpath + '/test_X.npy')
test_y = np.load(fpath + '/test_Y.npy')
num_classes = []
for task in range(len(train_y[0, :])):
cat = np.unique(train_y[:, task])
num_classes.append(len(cat))
train_y[:, task] = [np.where(cat == x)[0][0] for x in train_y[:, task]]
test_y[:, task] = [np.where(cat == x)[0][0] for x in test_y[:, task]]
num_tasks = len(num_classes)
max_vocab = np.max(train_x)
max_vocab2 = np.max(test_x)
if max_vocab2 > max_vocab:
max_vocab = max_vocab2
vocab_size = max_vocab + 1
vocab = np.random.rand(vocab_size, wv_len)
train_x = train_x.reshape((train_x.shape[0], max_lines, max_words))
test_x = test_x.reshape((test_x.shape[0], max_lines, max_words))
X_train = train_x
X_test = test_x
y_trains = []
y_tests = []
for k in range(num_tasks):
y_trains.append(train_y[:, k])
y_tests.append(test_y[:, k])
model = mthisan(vocab, num_classes, max_lines, max_words, attention_heads=attention_heads, attention_size=attention_size)
ret = model.train(X_train, y_trains, batch_size=batch_size, epochs=epochs, validation_data=[X_test, y_tests])
avg_loss = ret
print('Return: ', avg_loss.history['val_loss'][-1])
|
Benchmarks
|
positive
|
def command_debug(buffer, command, args):
<DeepExtract>
hdata = weechat.hdata_get('buffer')
buffer = weechat.hdata_get_list(hdata, 'gui_buffers')
result = []
while buffer:
number = weechat.hdata_integer(hdata, buffer, 'number')
result.append((number, buffer))
buffer = weechat.hdata_pointer(hdata, buffer, 'next_buffer')
(hdata, buffers) = (hdata, result)
</DeepExtract>
<DeepExtract>
if not buffers:
buffers = []
result = {}
for (number, buffer) in buffers:
if number not in result:
result[number] = MergedBuffers(number)
result[number].append(buffer)
buffers = result.values()
</DeepExtract>
<DeepExtract>
weechat.prnt(buffer, 'autosort: {0}'.format('Individual evaluation results:'))
</DeepExtract>
start = perf_counter()
<DeepExtract>
def key(buffer):
extra_vars = {}
for (helper_name, helper) in sorted(config.helpers.items()):
expanded = weechat.string_eval_expression(helper, {'buffer': buffer}, {}, {})
extra_vars[helper_name] = expanded if config.case_sensitive else casefold(expanded)
result = []
for rule in config.rules:
expanded = weechat.string_eval_expression(rule, {'buffer': buffer}, extra_vars, {})
result.append(expanded if config.case_sensitive else casefold(expanded))
key = result
key = key
</DeepExtract>
results = []
for merged in buffers:
for buffer in merged:
fullname = weechat.hdata_string(hdata, buffer, 'full_name')
results.append((fullname, key(buffer)))
elapsed = perf_counter() - start
for (fullname, result) in results:
<DeepExtract>
if not isinstance(fullname, str):
if isinstance(fullname, bytes):
fullname = fullname.encode('utf-8')
if isinstance(fullname, unicode):
fullname = fullname.decode('utf-8')
fullname = fullname
</DeepExtract>
result = [ensure_str(x) for x in result]
<DeepExtract>
weechat.prnt(buffer, 'autosort: {0}'.format('{0}: {1}'.format(fullname, result)))
</DeepExtract>
<DeepExtract>
weechat.prnt(buffer, 'autosort: {0}'.format('Computing evaluation results took {0:.4f} seconds.'.format(elapsed)))
</DeepExtract>
return weechat.WEECHAT_RC_OK
|
def command_debug(buffer, command, args):
hdata = weechat.hdata_get('buffer')
buffer = weechat.hdata_get_list(hdata, 'gui_buffers')
result = []
while buffer:
number = weechat.hdata_integer(hdata, buffer, 'number')
result.append((number, buffer))
buffer = weechat.hdata_pointer(hdata, buffer, 'next_buffer')
(hdata, buffers) = (hdata, result)
if not buffers:
buffers = []
result = {}
for (number, buffer) in buffers:
if number not in result:
result[number] = MergedBuffers(number)
result[number].append(buffer)
buffers = result.values()
weechat.prnt(buffer, 'autosort: {0}'.format('Individual evaluation results:'))
start = perf_counter()
def key(buffer):
extra_vars = {}
for (helper_name, helper) in sorted(config.helpers.items()):
expanded = weechat.string_eval_expression(helper, {'buffer': buffer}, {}, {})
extra_vars[helper_name] = expanded if config.case_sensitive else casefold(expanded)
result = []
for rule in config.rules:
expanded = weechat.string_eval_expression(rule, {'buffer': buffer}, extra_vars, {})
result.append(expanded if config.case_sensitive else casefold(expanded))
key = result
key = key
results = []
for merged in buffers:
for buffer in merged:
fullname = weechat.hdata_string(hdata, buffer, 'full_name')
results.append((fullname, key(buffer)))
elapsed = perf_counter() - start
for (fullname, result) in results:
if not isinstance(fullname, str):
if isinstance(fullname, bytes):
fullname = fullname.encode('utf-8')
if isinstance(fullname, unicode):
fullname = fullname.decode('utf-8')
fullname = fullname
result = [ensure_str(x) for x in result]
weechat.prnt(buffer, 'autosort: {0}'.format('{0}: {1}'.format(fullname, result)))
weechat.prnt(buffer, 'autosort: {0}'.format('Computing evaluation results took {0:.4f} seconds.'.format(elapsed)))
return weechat.WEECHAT_RC_OK
|
awesome-dots
|
positive
|
def notify_show(data, bufferp, uber_empty, tagsn, isdisplayed, ishilight, prefix, message):
mynick = weechat.buffer_get_string(bufferp, 'localvar_nick')
if weechat.buffer_get_string(bufferp, 'localvar_type') == 'private' and prefix != mynick:
<DeepExtract>
API_TOKEN = weechat.config_get_plugin('api_token')
if API_TOKEN != '':
url = 'https://irssinotifier.appspot.com/API/Message'
postdata = urllib.urlencode({'apiToken': API_TOKEN, 'nick': encrypt(prefix), 'channel': encrypt(prefix), 'message': encrypt(message), 'version': 13})
version = weechat.info_get('version_number', '') or 0
hook1 = weechat.hook_process_hashtable('url:' + url, {'postfields': postdata}, 2000, '', '')
</DeepExtract>
elif ishilight == '1':
buffer = weechat.buffer_get_string(bufferp, 'short_name') or weechat.buffer_get_string(bufferp, 'name')
<DeepExtract>
API_TOKEN = weechat.config_get_plugin('api_token')
if API_TOKEN != '':
url = 'https://irssinotifier.appspot.com/API/Message'
postdata = urllib.urlencode({'apiToken': API_TOKEN, 'nick': encrypt(prefix), 'channel': encrypt(buffer), 'message': encrypt(message), 'version': 13})
version = weechat.info_get('version_number', '') or 0
hook1 = weechat.hook_process_hashtable('url:' + url, {'postfields': postdata}, 2000, '', '')
</DeepExtract>
return weechat.WEECHAT_RC_OK
|
def notify_show(data, bufferp, uber_empty, tagsn, isdisplayed, ishilight, prefix, message):
mynick = weechat.buffer_get_string(bufferp, 'localvar_nick')
if weechat.buffer_get_string(bufferp, 'localvar_type') == 'private' and prefix != mynick:
API_TOKEN = weechat.config_get_plugin('api_token')
if API_TOKEN != '':
url = 'https://irssinotifier.appspot.com/API/Message'
postdata = urllib.urlencode({'apiToken': API_TOKEN, 'nick': encrypt(prefix), 'channel': encrypt(prefix), 'message': encrypt(message), 'version': 13})
version = weechat.info_get('version_number', '') or 0
hook1 = weechat.hook_process_hashtable('url:' + url, {'postfields': postdata}, 2000, '', '')
elif ishilight == '1':
buffer = weechat.buffer_get_string(bufferp, 'short_name') or weechat.buffer_get_string(bufferp, 'name')
API_TOKEN = weechat.config_get_plugin('api_token')
if API_TOKEN != '':
url = 'https://irssinotifier.appspot.com/API/Message'
postdata = urllib.urlencode({'apiToken': API_TOKEN, 'nick': encrypt(prefix), 'channel': encrypt(buffer), 'message': encrypt(message), 'version': 13})
version = weechat.info_get('version_number', '') or 0
hook1 = weechat.hook_process_hashtable('url:' + url, {'postfields': postdata}, 2000, '', '')
return weechat.WEECHAT_RC_OK
|
dotfiles
|
positive
|
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
"""Train a model (defined in Chapter 3).
Defined in :numref:`sec_softmax_scratch`"""
animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9], legend=['train loss', 'train acc', 'test acc'])
for epoch in range(num_epochs):
<DeepExtract>
metric = Accumulator(3)
for (X, y) in train_iter:
with tf.GradientTape() as tape:
y_hat = net(X)
if isinstance(loss, tf.keras.losses.Loss):
l = loss(y, y_hat)
else:
l = loss(y_hat, y)
if isinstance(updater, tf.keras.optimizers.Optimizer):
params = net.trainable_variables
grads = tape.gradient(l, params)
updater.apply_gradients(zip(grads, params))
else:
updater(X.shape[0], tape.gradient(l, updater.params))
l_sum = l * float(tf.size(y)) if isinstance(loss, tf.keras.losses.Loss) else tf.reduce_sum(l)
metric.add(l_sum, accuracy(y_hat, y), tf.size(y))
train_metrics = (metric[0] / metric[2], metric[1] / metric[2])
</DeepExtract>
<DeepExtract>
metric = Accumulator(2)
for (X, y) in test_iter:
metric.add(accuracy(net(X), y), d2l.size(y))
test_acc = metric[0] / metric[1]
</DeepExtract>
animator.add(epoch + 1, train_metrics + (test_acc,))
(train_loss, train_acc) = train_metrics
assert train_loss < 0.5, train_loss
assert train_acc <= 1 and train_acc > 0.7, train_acc
assert test_acc <= 1 and test_acc > 0.7, test_acc
|
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater):
"""Train a model (defined in Chapter 3).
Defined in :numref:`sec_softmax_scratch`"""
animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9], legend=['train loss', 'train acc', 'test acc'])
for epoch in range(num_epochs):
metric = Accumulator(3)
for (X, y) in train_iter:
with tf.GradientTape() as tape:
y_hat = net(X)
if isinstance(loss, tf.keras.losses.Loss):
l = loss(y, y_hat)
else:
l = loss(y_hat, y)
if isinstance(updater, tf.keras.optimizers.Optimizer):
params = net.trainable_variables
grads = tape.gradient(l, params)
updater.apply_gradients(zip(grads, params))
else:
updater(X.shape[0], tape.gradient(l, updater.params))
l_sum = l * float(tf.size(y)) if isinstance(loss, tf.keras.losses.Loss) else tf.reduce_sum(l)
metric.add(l_sum, accuracy(y_hat, y), tf.size(y))
train_metrics = (metric[0] / metric[2], metric[1] / metric[2])
metric = Accumulator(2)
for (X, y) in test_iter:
metric.add(accuracy(net(X), y), d2l.size(y))
test_acc = metric[0] / metric[1]
animator.add(epoch + 1, train_metrics + (test_acc,))
(train_loss, train_acc) = train_metrics
assert train_loss < 0.5, train_loss
assert train_acc <= 1 and train_acc > 0.7, train_acc
assert test_acc <= 1 and test_acc > 0.7, test_acc
|
d2l-vn
|
positive
|
def monkey_patch(state_path: Optional[str]) -> None:
"""
Apply all monkey patches to swap in high performance implementations.
This function must be called before any of the ethereum modules are
imported anywhere.
"""
<DeepExtract>
import ethereum.tangerine_whistle.state as slow_state
from . import state_db as fast_state
optimized_state_db_patches = {'State': fast_state.State, 'get_account_optional': fast_state.get_account_optional, 'set_account': fast_state.set_account, 'destroy_storage': fast_state.destroy_storage, 'get_storage': fast_state.get_storage, 'get_storage_original': fast_state.get_storage_original, 'set_storage': fast_state.set_storage, 'state_root': fast_state.state_root, 'storage_root': fast_state.storage_root, 'begin_transaction': fast_state.begin_transaction, 'rollback_transaction': fast_state.rollback_transaction, 'commit_transaction': fast_state.commit_transaction, 'close_state': fast_state.close_state}
for (name, value) in optimized_state_db_patches.items():
setattr(slow_state, name, value)
if state_path is not None:
fast_state.State.default_path = state_path
</DeepExtract>
<DeepExtract>
import ethereum.tangerine_whistle.fork as slow_spec
from . import fork as fast_spec
slow_spec.validate_proof_of_work = fast_spec.validate_proof_of_work
</DeepExtract>
|
def monkey_patch(state_path: Optional[str]) -> None:
"""
Apply all monkey patches to swap in high performance implementations.
This function must be called before any of the ethereum modules are
imported anywhere.
"""
import ethereum.tangerine_whistle.state as slow_state
from . import state_db as fast_state
optimized_state_db_patches = {'State': fast_state.State, 'get_account_optional': fast_state.get_account_optional, 'set_account': fast_state.set_account, 'destroy_storage': fast_state.destroy_storage, 'get_storage': fast_state.get_storage, 'get_storage_original': fast_state.get_storage_original, 'set_storage': fast_state.set_storage, 'state_root': fast_state.state_root, 'storage_root': fast_state.storage_root, 'begin_transaction': fast_state.begin_transaction, 'rollback_transaction': fast_state.rollback_transaction, 'commit_transaction': fast_state.commit_transaction, 'close_state': fast_state.close_state}
for (name, value) in optimized_state_db_patches.items():
setattr(slow_state, name, value)
if state_path is not None:
fast_state.State.default_path = state_path
import ethereum.tangerine_whistle.fork as slow_spec
from . import fork as fast_spec
slow_spec.validate_proof_of_work = fast_spec.validate_proof_of_work
|
eth1.0-specs
|
positive
|
def call(self, harm_amp, harm_amp_target, harm_dist, harm_dist_target, f0_hz, f0_hz_target):
"""Add losses to the model."""
self.built = True
losses_dict = {}
weights = tf.cast(harm_amp_target >= self.amp_threshold, tf.float32)
<DeepExtract>
if log:
harm_amp = core.log10(tf.maximum(amin, harm_amp))
harm_amp_target = core.log10(tf.maximum(amin, harm_amp_target))
harm_amp_loss = mean_difference(harm_amp, harm_amp_target, loss_type, weights)
</DeepExtract>
losses_dict['harm_amp_loss'] = self.amp_weight * harm_amp_loss
<DeepExtract>
if log:
harm_dist = core.log10(tf.maximum(amin, harm_dist))
harm_dist_target = core.log10(tf.maximum(amin, harm_dist_target))
harm_dist_loss = mean_difference(harm_dist, harm_dist_target, loss_type, weights)
</DeepExtract>
losses_dict['harm_dist_loss'] = self.dist_weight * harm_dist_loss
<DeepExtract>
f_midi = hz_to_midi(f0_hz)
f_midi_target = hz_to_midi(f0_hz_target)
f0_hz_loss = mean_difference(f_midi, f_midi_target, loss_type, weights)
</DeepExtract>
losses_dict['f0_hz_loss'] = self.f0_weight * f0_hz_loss
return losses_dict
|
def call(self, harm_amp, harm_amp_target, harm_dist, harm_dist_target, f0_hz, f0_hz_target):
"""Add losses to the model."""
self.built = True
losses_dict = {}
weights = tf.cast(harm_amp_target >= self.amp_threshold, tf.float32)
if log:
harm_amp = core.log10(tf.maximum(amin, harm_amp))
harm_amp_target = core.log10(tf.maximum(amin, harm_amp_target))
harm_amp_loss = mean_difference(harm_amp, harm_amp_target, loss_type, weights)
losses_dict['harm_amp_loss'] = self.amp_weight * harm_amp_loss
if log:
harm_dist = core.log10(tf.maximum(amin, harm_dist))
harm_dist_target = core.log10(tf.maximum(amin, harm_dist_target))
harm_dist_loss = mean_difference(harm_dist, harm_dist_target, loss_type, weights)
losses_dict['harm_dist_loss'] = self.dist_weight * harm_dist_loss
f_midi = hz_to_midi(f0_hz)
f_midi_target = hz_to_midi(f0_hz_target)
f0_hz_loss = mean_difference(f_midi, f_midi_target, loss_type, weights)
losses_dict['f0_hz_loss'] = self.f0_weight * f0_hz_loss
return losses_dict
|
ddsp
|
positive
|
def train(self):
tf.global_variables_initializer().run()
self.saver = tf.train.Saver()
self.writer = tf.summary.FileWriter(self.log_dir + '/' + self.model_dir, self.sess.graph)
<DeepExtract>
print(' [*] Reading checkpoints...')
self.checkpoint_dir = os.path.join(self.checkpoint_dir, self.model_dir)
ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(self.checkpoint_dir, ckpt_name))
counter = int(ckpt_name.split('-')[-1])
print(' [*] Success to read {}'.format(ckpt_name))
(could_load, checkpoint_counter) = (True, counter)
else:
print(' [*] Failed to find a checkpoint')
(could_load, checkpoint_counter) = (False, 0)
</DeepExtract>
if could_load:
epoch_lr = self.init_lr
start_epoch = int(checkpoint_counter / self.iteration)
start_batch_id = checkpoint_counter - start_epoch * self.iteration
counter = checkpoint_counter
if start_epoch >= int(self.epoch * 0.75):
epoch_lr = epoch_lr * 0.01
elif start_epoch >= int(self.epoch * 0.5) and start_epoch < int(self.epoch * 0.75):
epoch_lr = epoch_lr * 0.1
print(' [*] Load SUCCESS')
else:
epoch_lr = self.init_lr
start_epoch = 0
start_batch_id = 0
counter = 1
print(' [!] Load failed...')
start_time = time.time()
for epoch in range(start_epoch, self.epoch):
if epoch == int(self.epoch * 0.5) or epoch == int(self.epoch * 0.75):
epoch_lr = epoch_lr * 0.1
for idx in range(start_batch_id, self.iteration):
idxs = np.random.choice(np.arange(len(self.train_x)), self.batch_size)
batch_x = self.train_x[idxs]
batch_y = self.train_y[idxs]
batch_u = self.train_u[idxs]
batch_x = data_augmentation(batch_x, self.img_size, self.dataset_name)
train_feed_dict = {self.train_inptus: batch_x, self.train_labels: batch_y, self.train_domains: batch_u, self.lr: epoch_lr}
(_, summary_str, train_loss, train_accuracy) = self.sess.run([self.optim, self.train_summary, self.train_loss, self.train_accuracy], feed_dict=train_feed_dict)
self.writer.add_summary(summary_str, counter)
dvars = [var for var in tf.trainable_variables() if var.name.find('common_specialized_wt') >= 0 or var.name.find('emb_mat') >= 0]
if idx == self.iteration - 1:
(test_loss, test_accuracy) = (0, 0)
nsteps = len(self.test_x) // self.batch_size
for _ in range(nsteps):
batch_test_x = self.test_x[_ * self.batch_size:(_ + 1) * self.batch_size]
batch_test_y = self.test_y[_ * self.batch_size:(_ + 1) * self.batch_size]
batch_test_u = self.test_u[_ * self.batch_size:(_ + 1) * self.batch_size]
test_feed_dict = {self.test_inptus: batch_test_x, self.test_labels: batch_test_y, self.test_domains: batch_test_u}
(summary_str, _test_loss, _test_accuracy) = self.sess.run([self.test_summary, self.test_loss, self.test_accuracy], feed_dict=test_feed_dict)
test_loss += _test_loss
test_accuracy += _test_accuracy
test_loss /= nsteps
test_accuracy /= nsteps
self.writer.add_summary(summary_str, counter)
counter += 1
print('Epoch: [%2d] [%5d/%5d] time: %4.4f, train_accuracy: %.2f, test_accuracy: %.2f, learning_rate : %.4f loss: %.2f' % (epoch, idx, self.iteration, time.time() - start_time, train_accuracy, test_accuracy, epoch_lr, train_loss))
print('Debug vars: ', self.sess.run(dvars))
start_batch_id = 0
<DeepExtract>
self.checkpoint_dir = os.path.join(self.checkpoint_dir, self.model_dir)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
self.saver.save(self.sess, os.path.join(self.checkpoint_dir, self.model_name + '.model'), global_step=counter)
</DeepExtract>
<DeepExtract>
self.checkpoint_dir = os.path.join(self.checkpoint_dir, self.model_dir)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
self.saver.save(self.sess, os.path.join(self.checkpoint_dir, self.model_name + '.model'), global_step=counter)
</DeepExtract>
if self.args.classifier == 'mos':
print('EMB mat: ', self.sess.run(dvars))
classifier = [var for var in tf.trainable_variables() if var.name.find('sm_matrices') >= 0][0]
np_classifier = self.sess.run(classifier)
(train_x, train_y, train_u) = (self.train_x, self.train_y, self.original_train_u)
nsteps = len(train_x) // self.batch_size
np_train_reprs = []
for _ in range(nsteps):
idxs = range(_ * self.batch_size, (_ + 1) * self.batch_size)
(batch_x, batch_y, batch_u) = (train_x[idxs], train_y[idxs], train_u[idxs])
feed_dict = {self.test_inptus: batch_x, self.test_labels: batch_y, self.test_domains: batch_u}
_train_reprs = self.sess.run(self.test_reprs, feed_dict=feed_dict)
np_train_reprs += _train_reprs.tolist()
np_train_reprs = np.array(np_train_reprs)
with open('logs/dataset=%s_seed=%d_supervised-debug.pkl' % (self.dataset_name, self.args.seed), 'wb') as f:
pickle.dump([np_train_reprs, train_y, train_u, np_classifier, train_u], f)
|
def train(self):
tf.global_variables_initializer().run()
self.saver = tf.train.Saver()
self.writer = tf.summary.FileWriter(self.log_dir + '/' + self.model_dir, self.sess.graph)
print(' [*] Reading checkpoints...')
self.checkpoint_dir = os.path.join(self.checkpoint_dir, self.model_dir)
ckpt = tf.train.get_checkpoint_state(self.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(self.checkpoint_dir, ckpt_name))
counter = int(ckpt_name.split('-')[-1])
print(' [*] Success to read {}'.format(ckpt_name))
(could_load, checkpoint_counter) = (True, counter)
else:
print(' [*] Failed to find a checkpoint')
(could_load, checkpoint_counter) = (False, 0)
if could_load:
epoch_lr = self.init_lr
start_epoch = int(checkpoint_counter / self.iteration)
start_batch_id = checkpoint_counter - start_epoch * self.iteration
counter = checkpoint_counter
if start_epoch >= int(self.epoch * 0.75):
epoch_lr = epoch_lr * 0.01
elif start_epoch >= int(self.epoch * 0.5) and start_epoch < int(self.epoch * 0.75):
epoch_lr = epoch_lr * 0.1
print(' [*] Load SUCCESS')
else:
epoch_lr = self.init_lr
start_epoch = 0
start_batch_id = 0
counter = 1
print(' [!] Load failed...')
start_time = time.time()
for epoch in range(start_epoch, self.epoch):
if epoch == int(self.epoch * 0.5) or epoch == int(self.epoch * 0.75):
epoch_lr = epoch_lr * 0.1
for idx in range(start_batch_id, self.iteration):
idxs = np.random.choice(np.arange(len(self.train_x)), self.batch_size)
batch_x = self.train_x[idxs]
batch_y = self.train_y[idxs]
batch_u = self.train_u[idxs]
batch_x = data_augmentation(batch_x, self.img_size, self.dataset_name)
train_feed_dict = {self.train_inptus: batch_x, self.train_labels: batch_y, self.train_domains: batch_u, self.lr: epoch_lr}
(_, summary_str, train_loss, train_accuracy) = self.sess.run([self.optim, self.train_summary, self.train_loss, self.train_accuracy], feed_dict=train_feed_dict)
self.writer.add_summary(summary_str, counter)
dvars = [var for var in tf.trainable_variables() if var.name.find('common_specialized_wt') >= 0 or var.name.find('emb_mat') >= 0]
if idx == self.iteration - 1:
(test_loss, test_accuracy) = (0, 0)
nsteps = len(self.test_x) // self.batch_size
for _ in range(nsteps):
batch_test_x = self.test_x[_ * self.batch_size:(_ + 1) * self.batch_size]
batch_test_y = self.test_y[_ * self.batch_size:(_ + 1) * self.batch_size]
batch_test_u = self.test_u[_ * self.batch_size:(_ + 1) * self.batch_size]
test_feed_dict = {self.test_inptus: batch_test_x, self.test_labels: batch_test_y, self.test_domains: batch_test_u}
(summary_str, _test_loss, _test_accuracy) = self.sess.run([self.test_summary, self.test_loss, self.test_accuracy], feed_dict=test_feed_dict)
test_loss += _test_loss
test_accuracy += _test_accuracy
test_loss /= nsteps
test_accuracy /= nsteps
self.writer.add_summary(summary_str, counter)
counter += 1
print('Epoch: [%2d] [%5d/%5d] time: %4.4f, train_accuracy: %.2f, test_accuracy: %.2f, learning_rate : %.4f loss: %.2f' % (epoch, idx, self.iteration, time.time() - start_time, train_accuracy, test_accuracy, epoch_lr, train_loss))
print('Debug vars: ', self.sess.run(dvars))
start_batch_id = 0
self.checkpoint_dir = os.path.join(self.checkpoint_dir, self.model_dir)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
self.saver.save(self.sess, os.path.join(self.checkpoint_dir, self.model_name + '.model'), global_step=counter)
self.checkpoint_dir = os.path.join(self.checkpoint_dir, self.model_dir)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
self.saver.save(self.sess, os.path.join(self.checkpoint_dir, self.model_name + '.model'), global_step=counter)
if self.args.classifier == 'mos':
print('EMB mat: ', self.sess.run(dvars))
classifier = [var for var in tf.trainable_variables() if var.name.find('sm_matrices') >= 0][0]
np_classifier = self.sess.run(classifier)
(train_x, train_y, train_u) = (self.train_x, self.train_y, self.original_train_u)
nsteps = len(train_x) // self.batch_size
np_train_reprs = []
for _ in range(nsteps):
idxs = range(_ * self.batch_size, (_ + 1) * self.batch_size)
(batch_x, batch_y, batch_u) = (train_x[idxs], train_y[idxs], train_u[idxs])
feed_dict = {self.test_inptus: batch_x, self.test_labels: batch_y, self.test_domains: batch_u}
_train_reprs = self.sess.run(self.test_reprs, feed_dict=feed_dict)
np_train_reprs += _train_reprs.tolist()
np_train_reprs = np.array(np_train_reprs)
with open('logs/dataset=%s_seed=%d_supervised-debug.pkl' % (self.dataset_name, self.args.seed), 'wb') as f:
pickle.dump([np_train_reprs, train_y, train_u, np_classifier, train_u], f)
|
CSD
|
positive
|
def test_user_signup_existing_user(self):
user_dict = dict(username=self.super_username, password=self.super_password, email=self.super_email)
<DeepExtract>
resp = self.api_client.post('/api/detective/common/v1/user/signup/', format='json', data=user_dict)
</DeepExtract>
self.assertHttpForbidden(resp)
|
def test_user_signup_existing_user(self):
user_dict = dict(username=self.super_username, password=self.super_password, email=self.super_email)
resp = self.api_client.post('/api/detective/common/v1/user/signup/', format='json', data=user_dict)
self.assertHttpForbidden(resp)
|
detective.io
|
positive
|
def __init__(self, ui, filtfun=lambda x: x):
self.filtfun = filtfun
self.ui = ui
self.isinitialized = False
<DeepExtract>
if self.isinitialized:
focusposition = self.bufferlist.get_focus()[1]
else:
focusposition = 0
self.isinitialized = True
lines = list()
displayedbuffers = [b for b in self.ui.buffers if self.filtfun(b)]
for (num, b) in enumerate(displayedbuffers):
line = BufferlineWidget(b)
if num % 2 == 0:
attr = settings.get_theming_attribute('bufferlist', 'line_even')
else:
attr = settings.get_theming_attribute('bufferlist', 'line_odd')
focus_att = settings.get_theming_attribute('bufferlist', 'line_focus')
buf = urwid.AttrMap(line, attr, focus_att)
num = urwid.Text('%3d:' % self.index_of(b))
lines.append(urwid.Columns([('fixed', 4, num), buf]))
self.bufferlist = urwid.ListBox(urwid.SimpleListWalker(lines))
num_buffers = len(displayedbuffers)
if focusposition is not None and num_buffers > 0:
self.bufferlist.set_focus(focusposition % num_buffers)
self.body = self.bufferlist
</DeepExtract>
Buffer.__init__(self, ui, self.body)
|
def __init__(self, ui, filtfun=lambda x: x):
self.filtfun = filtfun
self.ui = ui
self.isinitialized = False
if self.isinitialized:
focusposition = self.bufferlist.get_focus()[1]
else:
focusposition = 0
self.isinitialized = True
lines = list()
displayedbuffers = [b for b in self.ui.buffers if self.filtfun(b)]
for (num, b) in enumerate(displayedbuffers):
line = BufferlineWidget(b)
if num % 2 == 0:
attr = settings.get_theming_attribute('bufferlist', 'line_even')
else:
attr = settings.get_theming_attribute('bufferlist', 'line_odd')
focus_att = settings.get_theming_attribute('bufferlist', 'line_focus')
buf = urwid.AttrMap(line, attr, focus_att)
num = urwid.Text('%3d:' % self.index_of(b))
lines.append(urwid.Columns([('fixed', 4, num), buf]))
self.bufferlist = urwid.ListBox(urwid.SimpleListWalker(lines))
num_buffers = len(displayedbuffers)
if focusposition is not None and num_buffers > 0:
self.bufferlist.set_focus(focusposition % num_buffers)
self.body = self.bufferlist
Buffer.__init__(self, ui, self.body)
|
alot
|
positive
|
def recursive_simplification(model, tokenizer, ranker, sentence, tokens, positions, max_seq_length, tokenized, threshold=0.5, num_selections=10, ignore_list=[]):
sentence_object = Sentence(tokenized, threshold, ignore_list)
if len(sentence_object.complex_words) > 0:
((index, complexity), *tail) = sentence_object.complex_words
word_object = Word(sentence_object, index)
print('original word---------', sentence_object.tokenized[index])
<DeepExtract>
mask_position = positions[index]
if isinstance(mask_position, list):
feature = convert_whole_word_to_feature(tokens, mask_position, max_seq_length, tokenizer)
else:
feature = convert_token_to_feature(tokens, mask_position, max_seq_length, tokenizer)
tokens_tensor = torch.tensor([feature.input_ids])
token_type_ids = torch.tensor([feature.input_type_ids])
attention_mask = torch.tensor([feature.input_mask])
tokens_tensor = tokens_tensor.to('cuda')
token_type_ids = token_type_ids.to('cuda')
attention_mask = attention_mask.to('cuda')
with torch.no_grad():
(all_attentions, prediction_scores) = model(tokens_tensor, token_type_ids, attention_mask)
if isinstance(mask_position, list):
predicted_top = prediction_scores[0, mask_position[0]].topk(80)
else:
predicted_top = prediction_scores[0, mask_position].topk(80)
pre_tokens = tokenizer.convert_ids_to_tokens(predicted_top[1].cpu().numpy())
cgBERT = BERT_candidate_generation(tokenized[index], pre_tokens, predicted_top[0].cpu().numpy(), ranker.ps, num_selections)
cgBERT = cgBERT
</DeepExtract>
print(cgBERT[:10])
<DeepExtract>
length = len(tokenized)
half = int(11 / 2)
assert index >= 0 and index < length
context = ''
if length <= 11:
context = tokenized
elif index < length - half and index >= half:
context = tokenized[index - half:index + half + 1]
elif index < half:
context = tokenized[0:11]
elif index >= length - half:
context = tokenized[length - 11:length]
else:
print('Wrong!')
mask_context = context
</DeepExtract>
words_tag = nltk.pos_tag(tokenized)
complex_word_tag = words_tag[index][1]
<DeepExtract>
if complex_word_tag[0] == 'V' or complex_word_tag[0] == 'N':
complex_word_tag = complex_word_tag
if complex_word_tag[0] == 'R':
complex_word_tag = 'r'
if complex_word_tag[0] == 'J' or complex_word_tag[0] == 'I':
complex_word_tag = 'a'
else:
complex_word_tag = 's'
</DeepExtract>
cgPPDB = ranker.ppdb.predict(tokenized[index], complex_word_tag)
print(tokenized[index])
<DeepExtract>
(ss, sis_scores, count_scores) = preprocess_SR(tokenized[index], cgBERT, ranker.fasttext_dico, ranker.fasttext_emb, ranker.word_count)
if len(ss) == 0:
pre_word = tokenized[index]
if len(sis_scores) > 0:
seq = sorted(sis_scores, reverse=True)
sis_rank = [seq.index(v) + 1 for v in sis_scores]
rank_count = sorted(count_scores, reverse=True)
count_rank = [rank_count.index(v) + 1 for v in count_scores]
(lm_score, source_lm) = LM_score(tokenized[index], mask_context, ss, tokenizer, model)
rank_lm = sorted(lm_score)
lm_rank = [rank_lm.index(v) + 1 for v in lm_score]
bert_rank = []
ppdb_rank = []
for i in range(len(ss)):
bert_rank.append(i + 1)
if ss[i] in cgPPDB:
ppdb_rank.append(1)
else:
ppdb_rank.append(len(ss) / 3)
if len(sis_scores) > 0:
all_ranks = [bert + sis + count + LM + ppdb for (bert, sis, count, LM, ppdb) in zip(bert_rank, sis_rank, count_rank, lm_rank, ppdb_rank)]
else:
all_ranks = [bert + count + LM + ppdb for (bert, count, LM, ppdb) in zip(bert_rank, count_rank, lm_rank, ppdb_rank)]
min_rank_index_list = map(all_ranks.index, heapq.nsmallest(len(all_ranks), all_ranks))
rank_words = []
for rank_index in list(min_rank_index_list):
rank_words.append(ss[rank_index])
pre_index = all_ranks.index(min(all_ranks))
pre_count = count_scores[pre_index]
if tokenized[index] in ranker.word_count:
source_count = ranker.word_count[tokenized[index]]
else:
source_count = 0
pre_lm = lm_score[pre_index]
if source_lm > pre_lm or pre_count > source_count:
pre_word = ss[pre_index]
else:
pre_word = tokenized[index]
pre_word = pre_word
</DeepExtract>
print('substitute word-----------', pre_word)
synonym = [pre_word]
if synonym != []:
sentence_object.make_simplification(synonym, word_object.index)
return recursive_simplification(model, tokenizer, ranker, sentence, tokens, positions, max_seq_length, sentence_object.tokenized, threshold, num_selections, sentence_object.ignore_index)
else:
return sentence_object.tokenized
|
def recursive_simplification(model, tokenizer, ranker, sentence, tokens, positions, max_seq_length, tokenized, threshold=0.5, num_selections=10, ignore_list=[]):
sentence_object = Sentence(tokenized, threshold, ignore_list)
if len(sentence_object.complex_words) > 0:
((index, complexity), *tail) = sentence_object.complex_words
word_object = Word(sentence_object, index)
        print('original word---------', sentence_object.tokenized[index])
mask_position = positions[index]
if isinstance(mask_position, list):
feature = convert_whole_word_to_feature(tokens, mask_position, max_seq_length, tokenizer)
else:
feature = convert_token_to_feature(tokens, mask_position, max_seq_length, tokenizer)
tokens_tensor = torch.tensor([feature.input_ids])
token_type_ids = torch.tensor([feature.input_type_ids])
attention_mask = torch.tensor([feature.input_mask])
tokens_tensor = tokens_tensor.to('cuda')
token_type_ids = token_type_ids.to('cuda')
attention_mask = attention_mask.to('cuda')
with torch.no_grad():
(all_attentions, prediction_scores) = model(tokens_tensor, token_type_ids, attention_mask)
if isinstance(mask_position, list):
predicted_top = prediction_scores[0, mask_position[0]].topk(80)
else:
predicted_top = prediction_scores[0, mask_position].topk(80)
pre_tokens = tokenizer.convert_ids_to_tokens(predicted_top[1].cpu().numpy())
cgBERT = BERT_candidate_generation(tokenized[index], pre_tokens, predicted_top[0].cpu().numpy(), ranker.ps, num_selections)
cgBERT = cgBERT
print(cgBERT[:10])
length = len(tokenized)
half = int(11 / 2)
assert index >= 0 and index < length
context = ''
if length <= 11:
context = tokenized
elif index < length - half and index >= half:
context = tokenized[index - half:index + half + 1]
elif index < half:
context = tokenized[0:11]
elif index >= length - half:
context = tokenized[length - 11:length]
else:
print('Wrong!')
mask_context = context
words_tag = nltk.pos_tag(tokenized)
complex_word_tag = words_tag[index][1]
if complex_word_tag[0] == 'V' or complex_word_tag[0] == 'N':
complex_word_tag = complex_word_tag
if complex_word_tag[0] == 'R':
complex_word_tag = 'r'
if complex_word_tag[0] == 'J' or complex_word_tag[0] == 'I':
complex_word_tag = 'a'
else:
complex_word_tag = 's'
cgPPDB = ranker.ppdb.predict(tokenized[index], complex_word_tag)
print(tokenized[index])
(ss, sis_scores, count_scores) = preprocess_SR(tokenized[index], cgBERT, ranker.fasttext_dico, ranker.fasttext_emb, ranker.word_count)
if len(ss) == 0:
pre_word = tokenized[index]
if len(sis_scores) > 0:
seq = sorted(sis_scores, reverse=True)
sis_rank = [seq.index(v) + 1 for v in sis_scores]
rank_count = sorted(count_scores, reverse=True)
count_rank = [rank_count.index(v) + 1 for v in count_scores]
(lm_score, source_lm) = LM_score(tokenized[index], mask_context, ss, tokenizer, model)
rank_lm = sorted(lm_score)
lm_rank = [rank_lm.index(v) + 1 for v in lm_score]
bert_rank = []
ppdb_rank = []
for i in range(len(ss)):
bert_rank.append(i + 1)
if ss[i] in cgPPDB:
ppdb_rank.append(1)
else:
ppdb_rank.append(len(ss) / 3)
if len(sis_scores) > 0:
all_ranks = [bert + sis + count + LM + ppdb for (bert, sis, count, LM, ppdb) in zip(bert_rank, sis_rank, count_rank, lm_rank, ppdb_rank)]
else:
all_ranks = [bert + count + LM + ppdb for (bert, count, LM, ppdb) in zip(bert_rank, count_rank, lm_rank, ppdb_rank)]
min_rank_index_list = map(all_ranks.index, heapq.nsmallest(len(all_ranks), all_ranks))
rank_words = []
for rank_index in list(min_rank_index_list):
rank_words.append(ss[rank_index])
pre_index = all_ranks.index(min(all_ranks))
pre_count = count_scores[pre_index]
if tokenized[index] in ranker.word_count:
source_count = ranker.word_count[tokenized[index]]
else:
source_count = 0
pre_lm = lm_score[pre_index]
if source_lm > pre_lm or pre_count > source_count:
pre_word = ss[pre_index]
else:
pre_word = tokenized[index]
pre_word = pre_word
print('substitute word-----------', pre_word)
synonym = [pre_word]
if synonym != []:
sentence_object.make_simplification(synonym, word_object.index)
return recursive_simplification(model, tokenizer, ranker, sentence, tokens, positions, max_seq_length, sentence_object.tokenized, threshold, num_selections, sentence_object.ignore_index)
else:
return sentence_object.tokenized
|
BERT-LS
|
positive
|
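The substitution ranking in the row above sums per-signal ranks (BERT order, similarity, frequency, language-model score, PPDB membership) and keeps the candidate with the smallest total. A generic sketch of that aggregation step, with the scoring signals treated as given inputs rather than BERT-LS internals, and with every signal oriented so that a higher score is better:

def aggregate_ranks(candidates, signal_scores):
    # candidates: list of strings; signal_scores: one score list per signal,
    # aligned with candidates, higher score = better.
    totals = [0] * len(candidates)
    for scores in signal_scores:
        order = sorted(scores, reverse=True)
        for i, s in enumerate(scores):
            totals[i] += order.index(s) + 1  # 1-based rank within this signal
    best = min(range(len(candidates)), key=lambda i: totals[i])
    return candidates[best], totals

# aggregate_ranks(['easy', 'simple', 'plain'],
#                 [[0.2, 0.9, 0.4], [10, 7, 3]]) -> ('simple', [4, 3, 5])
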
def __processElementText(self, node, subnode, isText=True):
"""
Process placeholders in Element.text or Element.tail
of Elements popped from self.stashed_nodes.
    Keyword arguments:
* node: parent node
* subnode: processing node
* isText: bool variable, True - it's text, False - it's tail
Returns: None
"""
if isText:
text = subnode.text
subnode.text = None
else:
text = subnode.tail
subnode.tail = None
<DeepExtract>
def linkText(text):
if text:
if result:
if result[-1].tail:
result[-1].tail += text
else:
result[-1].tail = text
elif subnode.text:
subnode.text += text
else:
subnode.text = text
result = []
strartIndex = 0
while text:
index = text.find(self.__placeholder_prefix, strartIndex)
if index != -1:
(id, phEndIndex) = self.__findPlaceholder(text, index)
if id in self.stashed_nodes:
node = self.stashed_nodes.get(id)
if index > 0:
text = text[strartIndex:index]
linkText(text)
if not isString(node):
for child in [node] + node.getchildren():
if child.tail:
if child.tail.strip():
self.__processElementText(node, child, False)
if child.text:
if child.text.strip():
self.__processElementText(child, child)
else:
linkText(node)
strartIndex = phEndIndex
continue
strartIndex = phEndIndex
result.append(node)
else:
end = index + len(prefix)
linkText(text[strartIndex:end])
strartIndex = end
else:
text = text[strartIndex:]
linkText(text)
text = ''
childResult = result
</DeepExtract>
if not isText and node is not subnode:
pos = node.getchildren().index(subnode)
node.remove(subnode)
else:
pos = 0
childResult.reverse()
for newChild in childResult:
node.insert(pos, newChild)
|
def __processElementText(self, node, subnode, isText=True):
"""
Process placeholders in Element.text or Element.tail
of Elements popped from self.stashed_nodes.
    Keyword arguments:
* node: parent node
* subnode: processing node
* isText: bool variable, True - it's text, False - it's tail
Returns: None
"""
if isText:
text = subnode.text
subnode.text = None
else:
text = subnode.tail
subnode.tail = None
def linkText(text):
if text:
if result:
if result[-1].tail:
result[-1].tail += text
else:
result[-1].tail = text
elif subnode.text:
subnode.text += text
else:
subnode.text = text
result = []
strartIndex = 0
while text:
index = text.find(self.__placeholder_prefix, strartIndex)
if index != -1:
(id, phEndIndex) = self.__findPlaceholder(text, index)
if id in self.stashed_nodes:
node = self.stashed_nodes.get(id)
if index > 0:
text = text[strartIndex:index]
linkText(text)
if not isString(node):
for child in [node] + node.getchildren():
if child.tail:
if child.tail.strip():
self.__processElementText(node, child, False)
if child.text:
if child.text.strip():
self.__processElementText(child, child)
else:
linkText(node)
strartIndex = phEndIndex
continue
strartIndex = phEndIndex
result.append(node)
else:
end = index + len(prefix)
linkText(text[strartIndex:end])
strartIndex = end
else:
text = text[strartIndex:]
linkText(text)
text = ''
childResult = result
if not isText and node is not subnode:
pos = node.getchildren().index(subnode)
node.remove(subnode)
else:
pos = 0
childResult.reverse()
for newChild in childResult:
node.insert(pos, newChild)
|
arecibo
|
positive
|
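The placeholder walk in the row above repeatedly searches the text for a marker prefix, emits the plain-text segments around it, and swaps known ids for their stashed nodes. A stripped-down illustration of that scan, using an invented '\x02id\x03' marker format rather than Markdown's real placeholder prefix and id encoding:

PREFIX = '\x02'
SUFFIX = '\x03'

def split_placeholders(text, stashed):
    # Returns ('text', segment) and ('node', value) parts in document order.
    parts, start = [], 0
    while True:
        index = text.find(PREFIX, start)
        if index == -1:
            parts.append(('text', text[start:]))
            return parts
        end = text.find(SUFFIX, index)
        if end == -1:  # unterminated marker: keep the rest as literal text
            parts.append(('text', text[start:]))
            return parts
        if index > start:
            parts.append(('text', text[start:index]))
        key = text[index + 1:end]
        if key in stashed:
            parts.append(('node', stashed[key]))
        else:
            parts.append(('text', text[index:end + 1]))
        start = end + 1

# split_placeholders('a\x021\x03b', {'1': 'NODE'})
# -> [('text', 'a'), ('node', 'NODE'), ('text', 'b')]
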
def test_two_inputs_first_violate(self):
file_paths = ['file1.java']
violations1 = self.MANY_VIOLATIONS
violations2 = self.FEW_VIOLATIONS
measured1 = self.FEW_MEASURED
measured2 = self.MANY_MEASURED
<DeepExtract>
root = etree.Element('coverage')
if source_paths:
sources = etree.SubElement(root, 'sources')
for path in source_paths:
source = etree.SubElement(sources, 'source')
source.text = path
packages = etree.SubElement(root, 'packages')
classes = etree.SubElement(packages, 'classes')
violation_lines = {violation.line for violation in violations1}
for path in file_paths:
src_node = etree.SubElement(classes, 'class')
src_node.set('filename', path)
etree.SubElement(src_node, 'methods')
lines_node = etree.SubElement(src_node, 'lines')
for line_num in measured1:
is_covered = line_num not in violation_lines
line = etree.SubElement(lines_node, 'line')
hits = 1 if is_covered else 0
line.set('hits', str(hits))
line.set('number', str(line_num))
xml = root
</DeepExtract>
<DeepExtract>
root = etree.Element('coverage')
if source_paths:
sources = etree.SubElement(root, 'sources')
for path in source_paths:
source = etree.SubElement(sources, 'source')
source.text = path
packages = etree.SubElement(root, 'packages')
classes = etree.SubElement(packages, 'classes')
violation_lines = {violation.line for violation in violations2}
for path in file_paths:
src_node = etree.SubElement(classes, 'class')
src_node.set('filename', path)
etree.SubElement(src_node, 'methods')
lines_node = etree.SubElement(src_node, 'lines')
for line_num in measured2:
is_covered = line_num not in violation_lines
line = etree.SubElement(lines_node, 'line')
hits = 1 if is_covered else 0
line.set('hits', str(hits))
line.set('number', str(line_num))
xml2 = root
</DeepExtract>
coverage = XmlCoverageReporter([xml, xml2])
assert violations1 & violations2 == coverage.violations('file1.java')
assert measured1 | measured2 == coverage.measured_lines('file1.java')
|
def test_two_inputs_first_violate(self):
file_paths = ['file1.java']
violations1 = self.MANY_VIOLATIONS
violations2 = self.FEW_VIOLATIONS
measured1 = self.FEW_MEASURED
measured2 = self.MANY_MEASURED
root = etree.Element('coverage')
if source_paths:
sources = etree.SubElement(root, 'sources')
for path in source_paths:
source = etree.SubElement(sources, 'source')
source.text = path
packages = etree.SubElement(root, 'packages')
classes = etree.SubElement(packages, 'classes')
violation_lines = {violation.line for violation in violations1}
for path in file_paths:
src_node = etree.SubElement(classes, 'class')
src_node.set('filename', path)
etree.SubElement(src_node, 'methods')
lines_node = etree.SubElement(src_node, 'lines')
for line_num in measured1:
is_covered = line_num not in violation_lines
line = etree.SubElement(lines_node, 'line')
hits = 1 if is_covered else 0
line.set('hits', str(hits))
line.set('number', str(line_num))
xml = root
root = etree.Element('coverage')
if source_paths:
sources = etree.SubElement(root, 'sources')
for path in source_paths:
source = etree.SubElement(sources, 'source')
source.text = path
packages = etree.SubElement(root, 'packages')
classes = etree.SubElement(packages, 'classes')
violation_lines = {violation.line for violation in violations2}
for path in file_paths:
src_node = etree.SubElement(classes, 'class')
src_node.set('filename', path)
etree.SubElement(src_node, 'methods')
lines_node = etree.SubElement(src_node, 'lines')
for line_num in measured2:
is_covered = line_num not in violation_lines
line = etree.SubElement(lines_node, 'line')
hits = 1 if is_covered else 0
line.set('hits', str(hits))
line.set('number', str(line_num))
xml2 = root
coverage = XmlCoverageReporter([xml, xml2])
assert violations1 & violations2 == coverage.violations('file1.java')
assert measured1 | measured2 == coverage.measured_lines('file1.java')
|
diff_cover
|
positive
|
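The fixture in the row above writes a Cobertura-style tree in which every measured line gets hits="1" unless it appears in the violation set. A self-contained version of that builder using only the standard-library ElementTree; the element layout follows the test, the file name is a placeholder:

import xml.etree.ElementTree as ET

def make_coverage_xml(filename, measured_lines, violation_lines):
    root = ET.Element('coverage')
    classes = ET.SubElement(ET.SubElement(root, 'packages'), 'classes')
    src = ET.SubElement(classes, 'class', filename=filename)
    ET.SubElement(src, 'methods')
    lines = ET.SubElement(src, 'lines')
    for n in measured_lines:
        hits = 0 if n in violation_lines else 1
        ET.SubElement(lines, 'line', hits=str(hits), number=str(n))
    return root

root = make_coverage_xml('file1.java', [1, 2, 3], {2})
uncovered = [l.get('number') for l in root.iter('line') if l.get('hits') == '0']
assert uncovered == ['2']
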
def main():
"""Perform all steps for the entire conversion process."""
utils.require_root()
process_phase = ConversionPhase.INIT
<DeepExtract>
try:
logger_module.archive_old_logger_files('convert2rhel.log', logger_module.LOG_DIR)
except (IOError, OSError) as e:
print('Warning: Unable to archive previous log: %s' % e)
logger_module.setup_logger_handler('convert2rhel.log', logger_module.LOG_DIR)
</DeepExtract>
toolopts.CLI()
try:
process_phase = ConversionPhase.POST_CLI
loggerinst.task('Prepare: Show Red Hat software EULA')
<DeepExtract>
eula_filepath = os.path.join(utils.DATA_DIR, 'GLOBAL_EULA_RHEL')
eula_text = utils.get_file_content(eula_filepath)
if eula_text:
loggerinst.info(eula_text)
else:
loggerinst.critical('EULA file not found.')
return
</DeepExtract>
loggerinst.task('Prepare: Inform about telemetry')
breadcrumbs.breadcrumbs.print_data_collection()
loggerinst.task('Prepare: Gather system information')
systeminfo.system_info.resolve_system_info()
systeminfo.system_info.print_system_information()
breadcrumbs.breadcrumbs.collect_early_data()
loggerinst.task('Prepare: Clear YUM/DNF version locks')
pkghandler.clear_versionlock()
loggerinst.task('Prepare: Clean yum cache metadata')
pkgmanager.clean_yum_metadata()
checks.perform_system_checks()
loggerinst.task('Prepare: Backup System')
redhatrelease.system_release_file.backup()
redhatrelease.os_release_file.backup()
repo.backup_yum_repos()
repo.backup_varsdir()
process_phase = ConversionPhase.PRE_PONR_CHANGES
<DeepExtract>
pkghandler.has_duplicate_repos_across_disablerepo_enablerepo_options()
loggerinst.task('Convert: List third-party packages')
pkghandler.list_third_party_pkgs()
loggerinst.task('Convert: Remove excluded packages')
pkghandler.remove_excluded_pkgs()
loggerinst.task('Convert: Resolve possible edge cases')
special_cases.check_and_resolve()
loggerinst.task('Convert: Import Red Hat GPG keys')
pkghandler.install_gpg_keys()
rhel_repoids = []
if not toolopts.tool_opts.no_rhsm:
loggerinst.task('Convert: Subscription Manager - Download packages')
subscription.download_rhsm_pkgs()
loggerinst.task('Convert: Subscription Manager - Replace')
subscription.replace_subscription_manager()
loggerinst.task('Convert: Subscription Manager - Verify installation')
subscription.verify_rhsm_installed()
loggerinst.task('Convert: Install RHEL certificates for RHSM')
system_cert = cert.SystemCert()
system_cert.install()
loggerinst.task('Convert: Remove packages containing .repo files')
pkghandler.remove_repofile_pkgs()
if not toolopts.tool_opts.no_rhsm:
loggerinst.task('Convert: Subscription Manager - Subscribe system')
subscription.subscribe_system()
loggerinst.task('Convert: Get RHEL repository IDs')
rhel_repoids = repo.get_rhel_repoids()
loggerinst.task('Convert: Subscription Manager - Check required repositories')
subscription.check_needed_repos_availability(rhel_repoids)
loggerinst.task('Convert: Subscription Manager - Disable all repositories')
subscription.disable_repos()
if not toolopts.tool_opts.no_rhsm:
loggerinst.task('Convert: Subscription Manager - Enable RHEL repositories')
subscription.enable_repos(rhel_repoids)
loggerinst.task('Convert: Final system checks before main conversion')
checks.perform_pre_ponr_checks()
</DeepExtract>
loggerinst.warning('********************************************************')
loggerinst.warning('The tool allows rollback of any action until this point.')
loggerinst.warning('By continuing all further changes on the system will need to be reverted manually by the user, if necessary.')
loggerinst.warning('********************************************************')
utils.ask_to_continue()
process_phase = ConversionPhase.POST_PONR_CHANGES
<DeepExtract>
transaction_handler = pkgmanager.create_transaction_handler()
loggerinst.task('Convert: Replace system packages')
transaction_handler.run_transaction()
loggerinst.task('Convert: Prepare kernel')
pkghandler.preserve_only_rhel_kernel()
loggerinst.task('Convert: List remaining non-Red Hat packages')
pkghandler.list_non_red_hat_pkgs_left()
loggerinst.task('Convert: Configure the bootloader')
grub.post_ponr_set_efi_configuration()
loggerinst.task('Convert: Patch yum configuration file')
redhatrelease.YumConf().patch()
loggerinst.task('Convert: Lock releasever in RHEL repositories')
subscription.lock_releasever_in_rhel_repositories()
return
</DeepExtract>
loggerinst.task('Final: Show RPM files modified by the conversion')
systeminfo.system_info.modified_rpm_files_diff()
loggerinst.task('Final: Update GRUB2 configuration')
grub.update_grub_after_conversion()
loggerinst.task('Final: Remove temporary folder %s' % utils.TMP_DIR)
utils.remove_tmp_dir()
loggerinst.task('Final: Check kernel boot files')
checks.check_kernel_boot_files()
breadcrumbs.breadcrumbs.finish_collection(success=True)
loggerinst.task('Final: Update RHSM custom facts')
subscription.update_rhsm_custom_facts()
loggerinst.info('\nConversion successful!\n')
utils.restart_system()
except (Exception, SystemExit, KeyboardInterrupt) as err:
utils.log_traceback(toolopts.tool_opts.debug)
no_changes_msg = 'No changes were made to the system.'
breadcrumbs.breadcrumbs.finish_collection(success=False)
if is_help_msg_exit(process_phase, err):
return 0
elif process_phase == ConversionPhase.INIT:
print(no_changes_msg)
elif process_phase == ConversionPhase.POST_CLI:
loggerinst.info(no_changes_msg)
elif process_phase == ConversionPhase.PRE_PONR_CHANGES:
<DeepExtract>
loggerinst.warning('Abnormal exit! Performing rollback ...')
subscription.rollback()
backup.changed_pkgs_control.restore_pkgs()
repo.restore_varsdir()
repo.restore_yum_repos()
redhatrelease.system_release_file.restore()
redhatrelease.os_release_file.restore()
pkghandler.versionlock_file.restore()
system_cert = cert.SystemCert()
system_cert.remove()
try:
backup.backup_control.pop_all()
except IndexError as e:
if e.args[0] == 'No backups to restore':
loggerinst.info('During rollback there were no backups to restore')
else:
raise
return
</DeepExtract>
elif process_phase == ConversionPhase.POST_PONR_CHANGES:
loggerinst.warning('The conversion process failed.\n\nThe system is left in an undetermined state that Convert2RHEL cannot fix. The system might not be fully converted, and might incorrectly be reporting as a Red Hat Enterprise Linux machine.\n\nIt is strongly recommended to store the Convert2RHEL logs for later investigation, and restore the system from a backup.')
subscription.update_rhsm_custom_facts()
return 1
return 0
|
def main():
"""Perform all steps for the entire conversion process."""
utils.require_root()
process_phase = ConversionPhase.INIT
try:
logger_module.archive_old_logger_files('convert2rhel.log', logger_module.LOG_DIR)
except (IOError, OSError) as e:
print('Warning: Unable to archive previous log: %s' % e)
logger_module.setup_logger_handler('convert2rhel.log', logger_module.LOG_DIR)
toolopts.CLI()
try:
process_phase = ConversionPhase.POST_CLI
loggerinst.task('Prepare: Show Red Hat software EULA')
eula_filepath = os.path.join(utils.DATA_DIR, 'GLOBAL_EULA_RHEL')
eula_text = utils.get_file_content(eula_filepath)
if eula_text:
loggerinst.info(eula_text)
else:
loggerinst.critical('EULA file not found.')
return
loggerinst.task('Prepare: Inform about telemetry')
breadcrumbs.breadcrumbs.print_data_collection()
loggerinst.task('Prepare: Gather system information')
systeminfo.system_info.resolve_system_info()
systeminfo.system_info.print_system_information()
breadcrumbs.breadcrumbs.collect_early_data()
loggerinst.task('Prepare: Clear YUM/DNF version locks')
pkghandler.clear_versionlock()
loggerinst.task('Prepare: Clean yum cache metadata')
pkgmanager.clean_yum_metadata()
checks.perform_system_checks()
loggerinst.task('Prepare: Backup System')
redhatrelease.system_release_file.backup()
redhatrelease.os_release_file.backup()
repo.backup_yum_repos()
repo.backup_varsdir()
process_phase = ConversionPhase.PRE_PONR_CHANGES
pkghandler.has_duplicate_repos_across_disablerepo_enablerepo_options()
loggerinst.task('Convert: List third-party packages')
pkghandler.list_third_party_pkgs()
loggerinst.task('Convert: Remove excluded packages')
pkghandler.remove_excluded_pkgs()
loggerinst.task('Convert: Resolve possible edge cases')
special_cases.check_and_resolve()
loggerinst.task('Convert: Import Red Hat GPG keys')
pkghandler.install_gpg_keys()
rhel_repoids = []
if not toolopts.tool_opts.no_rhsm:
loggerinst.task('Convert: Subscription Manager - Download packages')
subscription.download_rhsm_pkgs()
loggerinst.task('Convert: Subscription Manager - Replace')
subscription.replace_subscription_manager()
loggerinst.task('Convert: Subscription Manager - Verify installation')
subscription.verify_rhsm_installed()
loggerinst.task('Convert: Install RHEL certificates for RHSM')
system_cert = cert.SystemCert()
system_cert.install()
loggerinst.task('Convert: Remove packages containing .repo files')
pkghandler.remove_repofile_pkgs()
if not toolopts.tool_opts.no_rhsm:
loggerinst.task('Convert: Subscription Manager - Subscribe system')
subscription.subscribe_system()
loggerinst.task('Convert: Get RHEL repository IDs')
rhel_repoids = repo.get_rhel_repoids()
loggerinst.task('Convert: Subscription Manager - Check required repositories')
subscription.check_needed_repos_availability(rhel_repoids)
loggerinst.task('Convert: Subscription Manager - Disable all repositories')
subscription.disable_repos()
if not toolopts.tool_opts.no_rhsm:
loggerinst.task('Convert: Subscription Manager - Enable RHEL repositories')
subscription.enable_repos(rhel_repoids)
loggerinst.task('Convert: Final system checks before main conversion')
checks.perform_pre_ponr_checks()
loggerinst.warning('********************************************************')
loggerinst.warning('The tool allows rollback of any action until this point.')
loggerinst.warning('By continuing all further changes on the system will need to be reverted manually by the user, if necessary.')
loggerinst.warning('********************************************************')
utils.ask_to_continue()
process_phase = ConversionPhase.POST_PONR_CHANGES
transaction_handler = pkgmanager.create_transaction_handler()
loggerinst.task('Convert: Replace system packages')
transaction_handler.run_transaction()
loggerinst.task('Convert: Prepare kernel')
pkghandler.preserve_only_rhel_kernel()
loggerinst.task('Convert: List remaining non-Red Hat packages')
pkghandler.list_non_red_hat_pkgs_left()
loggerinst.task('Convert: Configure the bootloader')
grub.post_ponr_set_efi_configuration()
loggerinst.task('Convert: Patch yum configuration file')
redhatrelease.YumConf().patch()
loggerinst.task('Convert: Lock releasever in RHEL repositories')
subscription.lock_releasever_in_rhel_repositories()
return
loggerinst.task('Final: Show RPM files modified by the conversion')
systeminfo.system_info.modified_rpm_files_diff()
loggerinst.task('Final: Update GRUB2 configuration')
grub.update_grub_after_conversion()
loggerinst.task('Final: Remove temporary folder %s' % utils.TMP_DIR)
utils.remove_tmp_dir()
loggerinst.task('Final: Check kernel boot files')
checks.check_kernel_boot_files()
breadcrumbs.breadcrumbs.finish_collection(success=True)
loggerinst.task('Final: Update RHSM custom facts')
subscription.update_rhsm_custom_facts()
loggerinst.info('\nConversion successful!\n')
utils.restart_system()
except (Exception, SystemExit, KeyboardInterrupt) as err:
utils.log_traceback(toolopts.tool_opts.debug)
no_changes_msg = 'No changes were made to the system.'
breadcrumbs.breadcrumbs.finish_collection(success=False)
if is_help_msg_exit(process_phase, err):
return 0
elif process_phase == ConversionPhase.INIT:
print(no_changes_msg)
elif process_phase == ConversionPhase.POST_CLI:
loggerinst.info(no_changes_msg)
elif process_phase == ConversionPhase.PRE_PONR_CHANGES:
loggerinst.warning('Abnormal exit! Performing rollback ...')
subscription.rollback()
backup.changed_pkgs_control.restore_pkgs()
repo.restore_varsdir()
repo.restore_yum_repos()
redhatrelease.system_release_file.restore()
redhatrelease.os_release_file.restore()
pkghandler.versionlock_file.restore()
system_cert = cert.SystemCert()
system_cert.remove()
try:
backup.backup_control.pop_all()
except IndexError as e:
if e.args[0] == 'No backups to restore':
loggerinst.info('During rollback there were no backups to restore')
else:
raise
return
elif process_phase == ConversionPhase.POST_PONR_CHANGES:
loggerinst.warning('The conversion process failed.\n\nThe system is left in an undetermined state that Convert2RHEL cannot fix. The system might not be fully converted, and might incorrectly be reporting as a Red Hat Enterprise Linux machine.\n\nIt is strongly recommended to store the Convert2RHEL logs for later investigation, and restore the system from a backup.')
subscription.update_rhsm_custom_facts()
return 1
return 0
|
convert2rhel
|
positive
|
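The conversion flow above records how far it got in a ConversionPhase marker and chooses its cleanup from that marker when an exception arrives. A generic sketch of that control-flow pattern, with invented phase names and step callables rather than convert2rhel's actual tasks:

import enum

class Phase(enum.Enum):
    INIT = 0
    PRE_CHANGES = 1
    POST_CHANGES = 2

def run(steps, rollback):
    phase = Phase.INIT
    try:
        phase = Phase.PRE_CHANGES
        steps['prepare']()
        phase = Phase.POST_CHANGES
        steps['convert']()
        return 0
    except Exception:
        if phase is Phase.PRE_CHANGES:
            rollback()   # everything is still reversible at this point
        elif phase is Phase.POST_CHANGES:
            pass         # past the point of no return: report, do not roll back
        return 1
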
def log_loss_under_uniform(self, model, data, priority_function_kwargs):
import torch.nn.functional as F
log_probs_prior = []
log_probs_biased = []
log_probs_importance = []
kles = []
mses = []
for i in range(0, data.shape[0], self.batch_size):
img = normalize_image(data[i:min(data.shape[0], i + self.batch_size), :])
torch_img = ptu.from_numpy(img)
(reconstructions, obs_distribution_params, latent_distribution_params) = self.model(torch_img)
priority_function_kwargs['sampling_method'] = 'true_prior_sampling'
<DeepExtract>
assert img.dtype == np.float64, 'images should be normalized'
imgs = ptu.from_numpy(img)
latent_distribution_params = model.encode(imgs)
batch_size = img.shape[0]
representation_size = model.representation_size
(log_p, log_q, log_d) = (ptu.zeros((batch_size, num_latents_to_sample)), ptu.zeros((batch_size, num_latents_to_sample)), ptu.zeros((batch_size, num_latents_to_sample)))
true_prior = Normal(ptu.zeros((batch_size, representation_size)), ptu.ones((batch_size, representation_size)))
(mus, logvars) = latent_distribution_params
for i in range(num_latents_to_sample):
if sampling_method == 'importance_sampling':
latents = model.rsample(latent_distribution_params)
elif sampling_method == 'biased_sampling':
latents = model.rsample(latent_distribution_params)
elif sampling_method == 'true_prior_sampling':
latents = true_prior.rsample()
else:
raise EnvironmentError('Invalid Sampling Method Provided')
stds = logvars.exp().pow(0.5)
vae_dist = Normal(mus, stds)
log_p_z = true_prior.log_prob(latents).sum(dim=1)
log_q_z_given_x = vae_dist.log_prob(latents).sum(dim=1)
if decoder_distribution == 'bernoulli':
decoded = model.decode(latents)[0]
log_d_x_given_z = torch.log(imgs * decoded + (1 - imgs) * (1 - decoded) + 1e-08).sum(dim=1)
elif decoder_distribution == 'gaussian_identity_variance':
(_, obs_distribution_params) = model.decode(latents)
(dec_mu, dec_logvar) = obs_distribution_params
dec_var = dec_logvar.exp()
decoder_dist = Normal(dec_mu, dec_var.pow(0.5))
log_d_x_given_z = decoder_dist.log_prob(imgs).sum(dim=1)
else:
raise EnvironmentError('Invalid Decoder Distribution Provided')
log_p[:, i] = log_p_z
log_q[:, i] = log_q_z_given_x
log_d[:, i] = log_d_x_given_z
(log_p, log_q, log_d) = (log_p, log_q, log_d)
</DeepExtract>
log_prob_prior = log_d.mean()
priority_function_kwargs['sampling_method'] = 'biased_sampling'
<DeepExtract>
assert img.dtype == np.float64, 'images should be normalized'
imgs = ptu.from_numpy(img)
latent_distribution_params = model.encode(imgs)
batch_size = img.shape[0]
representation_size = model.representation_size
(log_p, log_q, log_d) = (ptu.zeros((batch_size, num_latents_to_sample)), ptu.zeros((batch_size, num_latents_to_sample)), ptu.zeros((batch_size, num_latents_to_sample)))
true_prior = Normal(ptu.zeros((batch_size, representation_size)), ptu.ones((batch_size, representation_size)))
(mus, logvars) = latent_distribution_params
for i in range(num_latents_to_sample):
if sampling_method == 'importance_sampling':
latents = model.rsample(latent_distribution_params)
elif sampling_method == 'biased_sampling':
latents = model.rsample(latent_distribution_params)
elif sampling_method == 'true_prior_sampling':
latents = true_prior.rsample()
else:
raise EnvironmentError('Invalid Sampling Method Provided')
stds = logvars.exp().pow(0.5)
vae_dist = Normal(mus, stds)
log_p_z = true_prior.log_prob(latents).sum(dim=1)
log_q_z_given_x = vae_dist.log_prob(latents).sum(dim=1)
if decoder_distribution == 'bernoulli':
decoded = model.decode(latents)[0]
log_d_x_given_z = torch.log(imgs * decoded + (1 - imgs) * (1 - decoded) + 1e-08).sum(dim=1)
elif decoder_distribution == 'gaussian_identity_variance':
(_, obs_distribution_params) = model.decode(latents)
(dec_mu, dec_logvar) = obs_distribution_params
dec_var = dec_logvar.exp()
decoder_dist = Normal(dec_mu, dec_var.pow(0.5))
log_d_x_given_z = decoder_dist.log_prob(imgs).sum(dim=1)
else:
raise EnvironmentError('Invalid Decoder Distribution Provided')
log_p[:, i] = log_p_z
log_q[:, i] = log_q_z_given_x
log_d[:, i] = log_d_x_given_z
(log_p, log_q, log_d) = (log_p, log_q, log_d)
</DeepExtract>
log_prob_biased = log_d.mean()
priority_function_kwargs['sampling_method'] = 'importance_sampling'
<DeepExtract>
assert img.dtype == np.float64, 'images should be normalized'
imgs = ptu.from_numpy(img)
latent_distribution_params = model.encode(imgs)
batch_size = img.shape[0]
representation_size = model.representation_size
(log_p, log_q, log_d) = (ptu.zeros((batch_size, num_latents_to_sample)), ptu.zeros((batch_size, num_latents_to_sample)), ptu.zeros((batch_size, num_latents_to_sample)))
true_prior = Normal(ptu.zeros((batch_size, representation_size)), ptu.ones((batch_size, representation_size)))
(mus, logvars) = latent_distribution_params
for i in range(num_latents_to_sample):
if sampling_method == 'importance_sampling':
latents = model.rsample(latent_distribution_params)
elif sampling_method == 'biased_sampling':
latents = model.rsample(latent_distribution_params)
elif sampling_method == 'true_prior_sampling':
latents = true_prior.rsample()
else:
raise EnvironmentError('Invalid Sampling Method Provided')
stds = logvars.exp().pow(0.5)
vae_dist = Normal(mus, stds)
log_p_z = true_prior.log_prob(latents).sum(dim=1)
log_q_z_given_x = vae_dist.log_prob(latents).sum(dim=1)
if decoder_distribution == 'bernoulli':
decoded = model.decode(latents)[0]
log_d_x_given_z = torch.log(imgs * decoded + (1 - imgs) * (1 - decoded) + 1e-08).sum(dim=1)
elif decoder_distribution == 'gaussian_identity_variance':
(_, obs_distribution_params) = model.decode(latents)
(dec_mu, dec_logvar) = obs_distribution_params
dec_var = dec_logvar.exp()
decoder_dist = Normal(dec_mu, dec_var.pow(0.5))
log_d_x_given_z = decoder_dist.log_prob(imgs).sum(dim=1)
else:
raise EnvironmentError('Invalid Decoder Distribution Provided')
log_p[:, i] = log_p_z
log_q[:, i] = log_q_z_given_x
log_d[:, i] = log_d_x_given_z
(log_p, log_q, log_d) = (log_p, log_q, log_d)
</DeepExtract>
log_prob_importance = (log_p - log_q + log_d).mean()
kle = model.kl_divergence(latent_distribution_params)
mse = F.mse_loss(torch_img, reconstructions, reduction='elementwise_mean')
mses.append(mse.item())
kles.append(kle.item())
log_probs_prior.append(log_prob_prior.item())
log_probs_biased.append(log_prob_biased.item())
log_probs_importance.append(log_prob_importance.item())
logger.record_tabular('Uniform Data Log Prob (True Prior)', np.mean(log_probs_prior))
logger.record_tabular('Uniform Data Log Prob (Biased)', np.mean(log_probs_biased))
logger.record_tabular('Uniform Data Log Prob (Importance)', np.mean(log_probs_importance))
logger.record_tabular('Uniform Data KL', np.mean(kles))
logger.record_tabular('Uniform Data MSE', np.mean(mses))
|
def log_loss_under_uniform(self, model, data, priority_function_kwargs):
import torch.nn.functional as F
log_probs_prior = []
log_probs_biased = []
log_probs_importance = []
kles = []
mses = []
for i in range(0, data.shape[0], self.batch_size):
img = normalize_image(data[i:min(data.shape[0], i + self.batch_size), :])
torch_img = ptu.from_numpy(img)
(reconstructions, obs_distribution_params, latent_distribution_params) = self.model(torch_img)
priority_function_kwargs['sampling_method'] = 'true_prior_sampling'
assert img.dtype == np.float64, 'images should be normalized'
imgs = ptu.from_numpy(img)
latent_distribution_params = model.encode(imgs)
batch_size = img.shape[0]
representation_size = model.representation_size
(log_p, log_q, log_d) = (ptu.zeros((batch_size, num_latents_to_sample)), ptu.zeros((batch_size, num_latents_to_sample)), ptu.zeros((batch_size, num_latents_to_sample)))
true_prior = Normal(ptu.zeros((batch_size, representation_size)), ptu.ones((batch_size, representation_size)))
(mus, logvars) = latent_distribution_params
for i in range(num_latents_to_sample):
if sampling_method == 'importance_sampling':
latents = model.rsample(latent_distribution_params)
elif sampling_method == 'biased_sampling':
latents = model.rsample(latent_distribution_params)
elif sampling_method == 'true_prior_sampling':
latents = true_prior.rsample()
else:
raise EnvironmentError('Invalid Sampling Method Provided')
stds = logvars.exp().pow(0.5)
vae_dist = Normal(mus, stds)
log_p_z = true_prior.log_prob(latents).sum(dim=1)
log_q_z_given_x = vae_dist.log_prob(latents).sum(dim=1)
if decoder_distribution == 'bernoulli':
decoded = model.decode(latents)[0]
log_d_x_given_z = torch.log(imgs * decoded + (1 - imgs) * (1 - decoded) + 1e-08).sum(dim=1)
elif decoder_distribution == 'gaussian_identity_variance':
(_, obs_distribution_params) = model.decode(latents)
(dec_mu, dec_logvar) = obs_distribution_params
dec_var = dec_logvar.exp()
decoder_dist = Normal(dec_mu, dec_var.pow(0.5))
log_d_x_given_z = decoder_dist.log_prob(imgs).sum(dim=1)
else:
raise EnvironmentError('Invalid Decoder Distribution Provided')
log_p[:, i] = log_p_z
log_q[:, i] = log_q_z_given_x
log_d[:, i] = log_d_x_given_z
(log_p, log_q, log_d) = (log_p, log_q, log_d)
log_prob_prior = log_d.mean()
priority_function_kwargs['sampling_method'] = 'biased_sampling'
assert img.dtype == np.float64, 'images should be normalized'
imgs = ptu.from_numpy(img)
latent_distribution_params = model.encode(imgs)
batch_size = img.shape[0]
representation_size = model.representation_size
(log_p, log_q, log_d) = (ptu.zeros((batch_size, num_latents_to_sample)), ptu.zeros((batch_size, num_latents_to_sample)), ptu.zeros((batch_size, num_latents_to_sample)))
true_prior = Normal(ptu.zeros((batch_size, representation_size)), ptu.ones((batch_size, representation_size)))
(mus, logvars) = latent_distribution_params
for i in range(num_latents_to_sample):
if sampling_method == 'importance_sampling':
latents = model.rsample(latent_distribution_params)
elif sampling_method == 'biased_sampling':
latents = model.rsample(latent_distribution_params)
elif sampling_method == 'true_prior_sampling':
latents = true_prior.rsample()
else:
raise EnvironmentError('Invalid Sampling Method Provided')
stds = logvars.exp().pow(0.5)
vae_dist = Normal(mus, stds)
log_p_z = true_prior.log_prob(latents).sum(dim=1)
log_q_z_given_x = vae_dist.log_prob(latents).sum(dim=1)
if decoder_distribution == 'bernoulli':
decoded = model.decode(latents)[0]
log_d_x_given_z = torch.log(imgs * decoded + (1 - imgs) * (1 - decoded) + 1e-08).sum(dim=1)
elif decoder_distribution == 'gaussian_identity_variance':
(_, obs_distribution_params) = model.decode(latents)
(dec_mu, dec_logvar) = obs_distribution_params
dec_var = dec_logvar.exp()
decoder_dist = Normal(dec_mu, dec_var.pow(0.5))
log_d_x_given_z = decoder_dist.log_prob(imgs).sum(dim=1)
else:
raise EnvironmentError('Invalid Decoder Distribution Provided')
log_p[:, i] = log_p_z
log_q[:, i] = log_q_z_given_x
log_d[:, i] = log_d_x_given_z
(log_p, log_q, log_d) = (log_p, log_q, log_d)
log_prob_biased = log_d.mean()
priority_function_kwargs['sampling_method'] = 'importance_sampling'
assert img.dtype == np.float64, 'images should be normalized'
imgs = ptu.from_numpy(img)
latent_distribution_params = model.encode(imgs)
batch_size = img.shape[0]
representation_size = model.representation_size
(log_p, log_q, log_d) = (ptu.zeros((batch_size, num_latents_to_sample)), ptu.zeros((batch_size, num_latents_to_sample)), ptu.zeros((batch_size, num_latents_to_sample)))
true_prior = Normal(ptu.zeros((batch_size, representation_size)), ptu.ones((batch_size, representation_size)))
(mus, logvars) = latent_distribution_params
for i in range(num_latents_to_sample):
if sampling_method == 'importance_sampling':
latents = model.rsample(latent_distribution_params)
elif sampling_method == 'biased_sampling':
latents = model.rsample(latent_distribution_params)
elif sampling_method == 'true_prior_sampling':
latents = true_prior.rsample()
else:
raise EnvironmentError('Invalid Sampling Method Provided')
stds = logvars.exp().pow(0.5)
vae_dist = Normal(mus, stds)
log_p_z = true_prior.log_prob(latents).sum(dim=1)
log_q_z_given_x = vae_dist.log_prob(latents).sum(dim=1)
if decoder_distribution == 'bernoulli':
decoded = model.decode(latents)[0]
log_d_x_given_z = torch.log(imgs * decoded + (1 - imgs) * (1 - decoded) + 1e-08).sum(dim=1)
elif decoder_distribution == 'gaussian_identity_variance':
(_, obs_distribution_params) = model.decode(latents)
(dec_mu, dec_logvar) = obs_distribution_params
dec_var = dec_logvar.exp()
decoder_dist = Normal(dec_mu, dec_var.pow(0.5))
log_d_x_given_z = decoder_dist.log_prob(imgs).sum(dim=1)
else:
raise EnvironmentError('Invalid Decoder Distribution Provided')
log_p[:, i] = log_p_z
log_q[:, i] = log_q_z_given_x
log_d[:, i] = log_d_x_given_z
(log_p, log_q, log_d) = (log_p, log_q, log_d)
log_prob_importance = (log_p - log_q + log_d).mean()
kle = model.kl_divergence(latent_distribution_params)
mse = F.mse_loss(torch_img, reconstructions, reduction='elementwise_mean')
mses.append(mse.item())
kles.append(kle.item())
log_probs_prior.append(log_prob_prior.item())
log_probs_biased.append(log_prob_biased.item())
log_probs_importance.append(log_prob_importance.item())
logger.record_tabular('Uniform Data Log Prob (True Prior)', np.mean(log_probs_prior))
logger.record_tabular('Uniform Data Log Prob (Biased)', np.mean(log_probs_biased))
logger.record_tabular('Uniform Data Log Prob (Importance)', np.mean(log_probs_importance))
logger.record_tabular('Uniform Data KL', np.mean(kles))
logger.record_tabular('Uniform Data MSE', np.mean(mses))
|
CQL
|
positive
|
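The evaluation loop above gathers three per-sample quantities for each sampling mode: log p(z) under the standard-normal prior, log q(z|x) under the encoder's Gaussian, and the decoder's log p(x|z). A compact sketch of those terms for a Bernoulli decoder, with encode/decode passed in as callables; this is a simplified stand-in, not the trainer's actual helper:

import torch
from torch.distributions import Normal

def log_terms(x, encode, decode, num_samples=8):
    mu, logvar = encode(x)                       # each: (batch, latent_dim)
    q = Normal(mu, (0.5 * logvar).exp())
    p = Normal(torch.zeros_like(mu), torch.ones_like(mu))
    log_p, log_q, log_d = [], [], []
    for _ in range(num_samples):
        z = q.rsample()
        recon = decode(z).clamp(1e-8, 1 - 1e-8)  # Bernoulli means in (0, 1)
        log_p.append(p.log_prob(z).sum(dim=1))
        log_q.append(q.log_prob(z).sum(dim=1))
        log_d.append((x * recon.log() + (1 - x) * (1 - recon).log()).sum(dim=1))
    return (torch.stack(log_p, dim=1),           # each: (batch, num_samples)
            torch.stack(log_q, dim=1),
            torch.stack(log_d, dim=1))
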
@classmethod
def sync_call(cls, connection, service_name, object_path, interface_name, method_name, parameters, reply_type, flags=DBUS_FLAG_NONE, timeout=GLibClient.DBUS_TIMEOUT_NONE):
"""Synchronously call a DBus method.
:return: a result of the DBus call
"""
<DeepExtract>
if parameters is None:
(parameters, fd_list) = (None, None)
fd_list = []
def _get_idx(fd):
fd_list.append(fd)
(parameters, fd_list) = len(fd_list) - 1
variant_without_fds = UnixFDSwap.apply(parameters, _get_idx)
if not fd_list:
(parameters, fd_list) = (parameters, None)
(parameters, fd_list) = (variant_without_fds, Gio.UnixFDList.new_from_array(fd_list))
</DeepExtract>
result = connection.call_with_unix_fd_list_sync(service_name, object_path, interface_name, method_name, parameters, reply_type, flags, timeout, fd_list, None)
return restore_fds(*result)
|
@classmethod
def sync_call(cls, connection, service_name, object_path, interface_name, method_name, parameters, reply_type, flags=DBUS_FLAG_NONE, timeout=GLibClient.DBUS_TIMEOUT_NONE):
"""Synchronously call a DBus method.
:return: a result of the DBus call
"""
if parameters is None:
(parameters, fd_list) = (None, None)
fd_list = []
def _get_idx(fd):
fd_list.append(fd)
(parameters, fd_list) = len(fd_list) - 1
variant_without_fds = UnixFDSwap.apply(parameters, _get_idx)
if not fd_list:
(parameters, fd_list) = (parameters, None)
(parameters, fd_list) = (variant_without_fds, Gio.UnixFDList.new_from_array(fd_list))
result = connection.call_with_unix_fd_list_sync(service_name, object_path, interface_name, method_name, parameters, reply_type, flags, timeout, fd_list, None)
return restore_fds(*result)
|
dasbus
|
positive
|
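The helper inlined above walks the call parameters, moves every unix file descriptor into a Gio.UnixFDList, and leaves its index behind in the variant. A plain-Python illustration of that swap, with an invented FD wrapper type standing in for GVariant handles:

class FD:
    def __init__(self, fileno):
        self.fileno = fileno

def swap_fds(payload):
    # Replace FD values with their index in a side list; return both.
    fd_list = []

    def to_index(value):
        if isinstance(value, FD):
            fd_list.append(value.fileno)
            return len(fd_list) - 1
        if isinstance(value, (list, tuple)):
            return type(value)(to_index(v) for v in value)
        return value

    return to_index(payload), fd_list

# swap_fds(['name', FD(7), [FD(9), 3]]) -> (['name', 0, [1, 3]], [7, 9])
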
def test_decode_longitude():
<DeepExtract>
if 6.956388888888889 is None:
assert Reader().decode_longitude('00657.383E') is None
else:
assert abs(Reader().decode_longitude('00657.383E') - 6.956388888888889) < 0.0001
</DeepExtract>
<DeepExtract>
if -99.71176666666662 is None:
assert Reader().decode_longitude('09942.706W') is None
else:
assert abs(Reader().decode_longitude('09942.706W') - -99.71176666666662) < 0.0001
</DeepExtract>
with pytest.raises(ParserError):
Reader().decode_longitude('-09942.706W')
with pytest.raises(ParserError):
Reader().decode_longitude('09942a706W')
with pytest.raises(ParserError):
Reader().decode_longitude('18142.706W')
|
def test_decode_longitude():
if 6.956388888888889 is None:
assert Reader().decode_longitude('00657.383E') is None
else:
assert abs(Reader().decode_longitude('00657.383E') - 6.956388888888889) < 0.0001
if -99.71176666666662 is None:
assert Reader().decode_longitude('09942.706W') is None
else:
assert abs(Reader().decode_longitude('09942.706W') - -99.71176666666662) < 0.0001
with pytest.raises(ParserError):
Reader().decode_longitude('-09942.706W')
with pytest.raises(ParserError):
Reader().decode_longitude('09942a706W')
with pytest.raises(ParserError):
Reader().decode_longitude('18142.706W')
|
aerofiles
|
positive
|
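The assertions above exercise a 'DDDMM.mmmE/W' longitude format: three degree digits, decimal minutes, and a hemisphere letter, with westward values negative. A minimal decoder for that format; error handling is reduced to a bare ValueError, whereas the reader under test raises its own ParserError:

def decode_longitude(value):
    if len(value) < 6 or value[-1] not in 'EW':
        raise ValueError(value)
    degrees = int(value[0:3])       # '006' -> 6
    minutes = float(value[3:-1])    # '57.383' -> 57.383
    if degrees > 180 or minutes >= 60:
        raise ValueError(value)
    result = degrees + minutes / 60.0
    return -result if value[-1] == 'W' else result

# decode_longitude('00657.383E') -> 6.95638...
# decode_longitude('09942.706W') -> -99.71176...
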
def __init__(self, apk_path, binary_stream, entry_point=None, entry_point_params=(), android_sdk=None, supported_jni_archs=None, jni_libs=None, jni_libs_ld_path=None, **options):
"""
:param apk_path: Path to APK.
:param android_sdk: Path to Android SDK folder (e.g. "/home/angr/android/platforms")
The following parameters are optional
:param entry_point: Fully qualified name of method that should be used as the entry point.
:param supported_jni_archs: List of supported JNI architectures (ABIs) in descending order of preference.
:param jni_libs: Name(s) of JNI libs to load (if any). If not specified, we try to extract
JNI libs from the APK.
:param jni_libs_ld_path: Path(s) where to find libs defined by param jni_libs.
Note: Directory of the APK is added by default.
"""
log.info('Loading APK from %s ...', apk_path)
if not android_sdk:
raise ValueError('\nPath to Android SDK must be specified explicitly, e.g.\n loading_opts = { "android_sdk" : "/home/angr/android/platforms" }\n proj = angr.Project("/path/to/apk/target.apk", main_opts=loading_opts)')
if not supported_jni_archs:
supported_jni_archs = default_jni_archs
if not jni_libs:
log.info('No JNI libs provided. Trying to parse them from the APK.')
<DeepExtract>
with ZipFile(apk_path) as apk:
filelist = apk.namelist()
lib_filelist = [list(filter(None, f.split('/'))) for f in filelist if f.startswith('lib')]
jni_libs = {lib_path[2] for lib_path in lib_filelist if len(lib_path) > 2}
available_jni_archs = {lib_path[1] for lib_path in lib_filelist if len(lib_path) > 2}
if not jni_libs:
log.info('No JNI libs found.')
(jni_libs, jni_libs_ld_path) = (None, None)
log.info('Found JNI lib(s): %s', ', '.join(jni_libs))
jni_archs = [arch for arch in supported_jni_archs if arch in available_jni_archs]
if not jni_archs:
raise ValueError("Couldn't find a supported JNI arch. Available %s. Supported %s." % (available_jni_archs, supported_jni_archs))
jni_arch = jni_archs[0]
log.info('Libs are available with arch(s): %s. Picking %s.', ', '.join(available_jni_archs), jni_arch)
tmp_dir = tempfile.mkdtemp()
for lib in jni_libs:
apk_file = f'lib/{jni_arch}/{lib}'
apk.extract(apk_file, path=tmp_dir)
jni_libs_ld_path = os.path.join(tmp_dir, 'lib', jni_arch)
log.info('Extracted lib(s) to %s', jni_libs_ld_path)
(jni_libs, jni_libs_ld_path) = (jni_libs, jni_libs_ld_path)
</DeepExtract>
else:
log.info('Using user defined JNI lib(s) %s (load path(s) %s)', jni_libs, jni_libs_ld_path)
apk_parser = APKParser(apk_path) if PYAXMLPARSER_INSTALLED else None
if not entry_point:
if apk_parser:
main_activity = apk_parser.get_main_activity()
entry_point = main_activity + '.' + 'onCreate'
entry_point_params = ('android.os.Bundle',)
else:
log.error('Install pyaxmlparser to identify APK entry point.')
raise ImportError
super().__init__(apk_path, binary_stream, input_format='apk', android_sdk=android_sdk, entry_point=entry_point, entry_point_params=entry_point_params, jni_libs=jni_libs, jni_libs_ld_path=jni_libs_ld_path, **options)
if apk_parser:
self.components = {'activity': [], 'service': [], 'receiver': [], 'provider': []}
self.callbacks = {'activity': [], 'service': [], 'receiver': [], 'provider': []}
<DeepExtract>
component_getter = {'activity': apk_parser.get_activities, 'service': apk_parser.get_services, 'receiver': apk_parser.get_receivers, 'provider': apk_parser.get_providers}
for (key, getter) in component_getter.items():
class_names = getter()
(self.components[key], self.callbacks[key]) = self._extract_lifecycle(class_names, key)
</DeepExtract>
else:
self.components = None
self.callbacks = None
log.warning('Install pyaxmlparser, if you want to identify components with callbacks.')
|
def __init__(self, apk_path, binary_stream, entry_point=None, entry_point_params=(), android_sdk=None, supported_jni_archs=None, jni_libs=None, jni_libs_ld_path=None, **options):
"""
:param apk_path: Path to APK.
:param android_sdk: Path to Android SDK folder (e.g. "/home/angr/android/platforms")
The following parameters are optional
:param entry_point: Fully qualified name of method that should be used as the entry point.
:param supported_jni_archs: List of supported JNI architectures (ABIs) in descending order of preference.
:param jni_libs: Name(s) of JNI libs to load (if any). If not specified, we try to extract
JNI libs from the APK.
:param jni_libs_ld_path: Path(s) where to find libs defined by param jni_libs.
Note: Directory of the APK is added by default.
"""
log.info('Loading APK from %s ...', apk_path)
if not android_sdk:
raise ValueError('\nPath to Android SDK must be specified explicitly, e.g.\n loading_opts = { "android_sdk" : "/home/angr/android/platforms" }\n proj = angr.Project("/path/to/apk/target.apk", main_opts=loading_opts)')
if not supported_jni_archs:
supported_jni_archs = default_jni_archs
if not jni_libs:
log.info('No JNI libs provided. Trying to parse them from the APK.')
with ZipFile(apk_path) as apk:
filelist = apk.namelist()
lib_filelist = [list(filter(None, f.split('/'))) for f in filelist if f.startswith('lib')]
jni_libs = {lib_path[2] for lib_path in lib_filelist if len(lib_path) > 2}
available_jni_archs = {lib_path[1] for lib_path in lib_filelist if len(lib_path) > 2}
if not jni_libs:
log.info('No JNI libs found.')
(jni_libs, jni_libs_ld_path) = (None, None)
log.info('Found JNI lib(s): %s', ', '.join(jni_libs))
jni_archs = [arch for arch in supported_jni_archs if arch in available_jni_archs]
if not jni_archs:
raise ValueError("Couldn't find a supported JNI arch. Available %s. Supported %s." % (available_jni_archs, supported_jni_archs))
jni_arch = jni_archs[0]
log.info('Libs are available with arch(s): %s. Picking %s.', ', '.join(available_jni_archs), jni_arch)
tmp_dir = tempfile.mkdtemp()
for lib in jni_libs:
apk_file = f'lib/{jni_arch}/{lib}'
apk.extract(apk_file, path=tmp_dir)
jni_libs_ld_path = os.path.join(tmp_dir, 'lib', jni_arch)
log.info('Extracted lib(s) to %s', jni_libs_ld_path)
(jni_libs, jni_libs_ld_path) = (jni_libs, jni_libs_ld_path)
else:
log.info('Using user defined JNI lib(s) %s (load path(s) %s)', jni_libs, jni_libs_ld_path)
apk_parser = APKParser(apk_path) if PYAXMLPARSER_INSTALLED else None
if not entry_point:
if apk_parser:
main_activity = apk_parser.get_main_activity()
entry_point = main_activity + '.' + 'onCreate'
entry_point_params = ('android.os.Bundle',)
else:
log.error('Install pyaxmlparser to identify APK entry point.')
raise ImportError
super().__init__(apk_path, binary_stream, input_format='apk', android_sdk=android_sdk, entry_point=entry_point, entry_point_params=entry_point_params, jni_libs=jni_libs, jni_libs_ld_path=jni_libs_ld_path, **options)
if apk_parser:
self.components = {'activity': [], 'service': [], 'receiver': [], 'provider': []}
self.callbacks = {'activity': [], 'service': [], 'receiver': [], 'provider': []}
component_getter = {'activity': apk_parser.get_activities, 'service': apk_parser.get_services, 'receiver': apk_parser.get_receivers, 'provider': apk_parser.get_providers}
for (key, getter) in component_getter.items():
class_names = getter()
(self.components[key], self.callbacks[key]) = self._extract_lifecycle(class_names, key)
else:
self.components = None
self.callbacks = None
log.warning('Install pyaxmlparser, if you want to identify components with callbacks.')
|
cle
|
positive
|
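The loader above lists lib/<abi>/<name>.so entries in the APK, picks the first ABI matching its preference order, and unpacks those libraries into a temporary directory. A stand-alone version of that extraction built only on zipfile and tempfile; the ABI preference list here is an assumption, not cle's default:

import os
import tempfile
from zipfile import ZipFile

def extract_jni_libs(apk_path, preferred_abis=('arm64-v8a', 'armeabi-v7a', 'x86')):
    with ZipFile(apk_path) as apk:
        entries = [f.split('/') for f in apk.namelist()
                   if f.startswith('lib/') and f.endswith('.so')]
        available = {parts[1] for parts in entries if len(parts) == 3}
        abi = next((a for a in preferred_abis if a in available), None)
        if abi is None:
            return None, []
        out_dir = tempfile.mkdtemp()
        libs = [parts[2] for parts in entries if parts[1] == abi]
        for lib in libs:
            apk.extract('lib/%s/%s' % (abi, lib), path=out_dir)
        return os.path.join(out_dir, 'lib', abi), libs
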
def update_query(query: results_api.QueryType, score_data: task_api.ScoreData, task: json_task.JsonTask) -> results_api.QueryType:
"""Updates query dict with additional data.
Args:
query: query to update
score_data: score data corresponding to this query
task: task corresponding to this query
Returns:
updated query
"""
shots = score_data.number_of_shots
if isinstance(query, results_api.GenerativeQuery):
<DeepExtract>
samples = query.samples
examples = task.shot_examples[shots]
updated_samples = []
for (sample, example) in zip(samples, examples):
target_dict = collections.defaultdict(dict)
targets = example['target']
output = sample.output
context = sample.input
if not context.endswith(example['input']):
raise ValueError(f'inconsistent context/input for {task.name} {shots}-shot')
for t in targets:
for metric in task.generative_metrics:
if metric == 'exact_str_match':
metric_fn = task_metrics.GENERATIVE_FN[metric]
target_dict[t].update(metric_fn([[t]], [output]))
else:
target_dict[t].update({metric: None})
updated_samples.append(results_api.GenerativeSample(input=sample.input, output=sample.output, targets=dict(target_dict), raw_output=sample.raw_output))
new_query = results_api.GenerativeQuery(function=query.function, max_length=query.max_length, stop_string=query.stop_string, output_regex=query.output_regex, samples=updated_samples, shots=shots, task=task.name)
</DeepExtract>
else:
<DeepExtract>
samples = query.samples
examples = task.shot_examples[shots]
updated_samples = []
for (sample, example) in zip(samples, examples):
context = sample.input
scores = sample.scores
choice = example.get('choice')
target_scores = example.get('target_scores')
if not context.endswith(example['input']):
raise ValueError(f'inconsistent context/input for m/c task {task.name} {shots}-shot')
if target_scores is None:
new_query = results_api.ScoringQuery(function=query.function, absolute_normalization=query.absolute_normalization, samples=query.samples, shots=shots, task=task.name)
metric_fn = task_metrics.MULTIPLE_CHOICE_FN['multiple_choice_grade']
tmp_sample = {'choice': choice, 'log_prob': scores, 'target_scores': target_scores}
correct = metric_fn([tmp_sample])
updated_samples.append(results_api.MultipleChoiceSample(input=sample.input, targets=sample.targets, scores=sample.scores, target_values=target_scores, correct=correct, absolute_scores=sample.absolute_scores, normalized_scores=sample.normalized_scores))
new_query = results_api.MultipleChoiceQuery(function=query.function, absolute_normalization=query.absolute_normalization, samples=updated_samples, shots=shots, task=task.name)
</DeepExtract>
return new_query
|
def update_query(query: results_api.QueryType, score_data: task_api.ScoreData, task: json_task.JsonTask) -> results_api.QueryType:
"""Updates query dict with additional data.
Args:
query: query to update
score_data: score data corresponding to this query
task: task corresponding to this query
Returns:
updated query
"""
shots = score_data.number_of_shots
if isinstance(query, results_api.GenerativeQuery):
samples = query.samples
examples = task.shot_examples[shots]
updated_samples = []
for (sample, example) in zip(samples, examples):
target_dict = collections.defaultdict(dict)
targets = example['target']
output = sample.output
context = sample.input
if not context.endswith(example['input']):
raise ValueError(f'inconsistent context/input for {task.name} {shots}-shot')
for t in targets:
for metric in task.generative_metrics:
if metric == 'exact_str_match':
metric_fn = task_metrics.GENERATIVE_FN[metric]
target_dict[t].update(metric_fn([[t]], [output]))
else:
target_dict[t].update({metric: None})
updated_samples.append(results_api.GenerativeSample(input=sample.input, output=sample.output, targets=dict(target_dict), raw_output=sample.raw_output))
new_query = results_api.GenerativeQuery(function=query.function, max_length=query.max_length, stop_string=query.stop_string, output_regex=query.output_regex, samples=updated_samples, shots=shots, task=task.name)
else:
samples = query.samples
examples = task.shot_examples[shots]
updated_samples = []
for (sample, example) in zip(samples, examples):
context = sample.input
scores = sample.scores
choice = example.get('choice')
target_scores = example.get('target_scores')
if not context.endswith(example['input']):
raise ValueError(f'inconsistent context/input for m/c task {task.name} {shots}-shot')
if target_scores is None:
new_query = results_api.ScoringQuery(function=query.function, absolute_normalization=query.absolute_normalization, samples=query.samples, shots=shots, task=task.name)
metric_fn = task_metrics.MULTIPLE_CHOICE_FN['multiple_choice_grade']
tmp_sample = {'choice': choice, 'log_prob': scores, 'target_scores': target_scores}
correct = metric_fn([tmp_sample])
updated_samples.append(results_api.MultipleChoiceSample(input=sample.input, targets=sample.targets, scores=sample.scores, target_values=target_scores, correct=correct, absolute_scores=sample.absolute_scores, normalized_scores=sample.normalized_scores))
new_query = results_api.MultipleChoiceQuery(function=query.function, absolute_normalization=query.absolute_normalization, samples=updated_samples, shots=shots, task=task.name)
return new_query
|
BIG-bench
|
positive
|
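The generative branch above records, per target string, whether the model output matches it exactly, leaving other metrics as None. A tiny stand-in for that bookkeeping with whitespace-insensitive comparison; nothing here is BIG-bench's real metric code:

def exact_str_match(targets, output):
    # One sub-dict per target, mirroring the target_dict layout above.
    return {t: {'exact_str_match': float(t.strip() == output.strip())}
            for t in targets}

# exact_str_match(['yes', 'no'], ' yes ') -> {'yes': {'exact_str_match': 1.0},
#                                             'no': {'exact_str_match': 0.0}}
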
def test_create_icons(self):
<DeepExtract>
file_obj = DjangoFile(open(self.filename, 'rb'), name=self.image_name)
image = Image.objects.create(owner=self.superuser, original_filename=self.image_name, file=file_obj)
image = image
</DeepExtract>
image.save()
icons = image.icons
file_basename = os.path.basename(image.file.path)
self.assertEqual(len(icons), len(filer_settings.FILER_ADMIN_ICON_SIZES))
for size in filer_settings.FILER_ADMIN_ICON_SIZES:
self.assertEqual(os.path.basename(icons[size]), file_basename + '__%sx%s_q85_crop_subsampling-2_upscale.jpg' % (size, size))
|
def test_create_icons(self):
file_obj = DjangoFile(open(self.filename, 'rb'), name=self.image_name)
image = Image.objects.create(owner=self.superuser, original_filename=self.image_name, file=file_obj)
image = image
image.save()
icons = image.icons
file_basename = os.path.basename(image.file.path)
self.assertEqual(len(icons), len(filer_settings.FILER_ADMIN_ICON_SIZES))
for size in filer_settings.FILER_ADMIN_ICON_SIZES:
self.assertEqual(os.path.basename(icons[size]), file_basename + '__%sx%s_q85_crop_subsampling-2_upscale.jpg' % (size, size))
|
django-filer
|
positive
|
def run_eval(self, results, save_dir):
<DeepExtract>
json.dump(self.convert_eval_format(results), open('{}/results.json'.format(save_dir), 'w'))
</DeepExtract>
coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
coco_eval = COCOeval(self.coco, coco_dets, 'keypoints')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(self.coco, coco_dets, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
|
def run_eval(self, results, save_dir):
json.dump(self.convert_eval_format(results), open('{}/results.json'.format(save_dir), 'w'))
coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
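    # Run COCO evaluation on the dumped detections: keypoint metrics first, then bounding-box metrics.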
coco_eval = COCOeval(self.coco, coco_dets, 'keypoints')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(self.coco, coco_dets, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
|
centerNet-deep-sort
|
positive
|
def get_scheme(self):
"""
    Get the scheme of the input parameters and return it as a string.
"""
doc = Document()
element_scheme = doc.createElement('scheme')
doc.appendChild(element_scheme)
element_title = doc.createElement('title')
element_scheme.appendChild(element_title)
element_title_text = doc.createTextNode(self.title)
element_title.appendChild(element_title_text)
element_desc = doc.createElement('description')
element_scheme.appendChild(element_desc)
element_desc_text = doc.createTextNode(self.description)
element_desc.appendChild(element_desc_text)
element_external_validation = doc.createElement('use_external_validation')
element_scheme.appendChild(element_external_validation)
element_external_validation_text = doc.createTextNode(self.use_external_validation)
element_external_validation.appendChild(element_external_validation_text)
element_streaming_mode = doc.createElement('streaming_mode')
element_scheme.appendChild(element_streaming_mode)
element_streaming_mode_text = doc.createTextNode(self.streaming_mode)
element_streaming_mode.appendChild(element_streaming_mode_text)
element_use_single_instance = doc.createElement('use_single_instance')
element_scheme.appendChild(element_use_single_instance)
element_use_single_instance_text = doc.createTextNode(self.use_single_instance)
element_use_single_instance.appendChild(element_use_single_instance_text)
element_endpoint = doc.createElement('endpoint')
element_scheme.appendChild(element_endpoint)
element_args = doc.createElement('args')
element_endpoint.appendChild(element_args)
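    # Describe each input argument as an <arg> element (title, description, data type, required flags).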
<DeepExtract>
for arg in self.args:
element_arg = doc.createElement('arg')
element_arg.setAttribute('name', arg.name)
element_args.appendChild(element_arg)
element_title = doc.createElement('title')
element_arg.appendChild(element_title)
element_title_text = doc.createTextNode(arg.title)
element_title.appendChild(element_title_text)
element_desc = doc.createElement('description')
element_arg.appendChild(element_desc)
element_desc_text = doc.createTextNode(arg.description)
element_desc.appendChild(element_desc_text)
element_data_type = doc.createElement('data_type')
element_arg.appendChild(element_data_type)
element_data_type_text = doc.createTextNode(arg.get_data_type())
element_data_type.appendChild(element_data_type_text)
element_required_on_create = doc.createElement('required_on_create')
element_arg.appendChild(element_required_on_create)
element_required_on_create_text = doc.createTextNode('true' if arg.required_on_create else 'false')
element_required_on_create.appendChild(element_required_on_create_text)
element_required_on_edit = doc.createElement('required_on_edit')
element_arg.appendChild(element_required_on_edit)
element_required_on_edit_text = doc.createTextNode('true' if arg.required_on_edit else 'false')
element_required_on_edit.appendChild(element_required_on_edit_text)
</DeepExtract>
return doc.toxml()
|
def get_scheme(self):
"""
    Get the scheme of the input parameters and return it as a string.
"""
doc = Document()
element_scheme = doc.createElement('scheme')
doc.appendChild(element_scheme)
element_title = doc.createElement('title')
element_scheme.appendChild(element_title)
element_title_text = doc.createTextNode(self.title)
element_title.appendChild(element_title_text)
element_desc = doc.createElement('description')
element_scheme.appendChild(element_desc)
element_desc_text = doc.createTextNode(self.description)
element_desc.appendChild(element_desc_text)
element_external_validation = doc.createElement('use_external_validation')
element_scheme.appendChild(element_external_validation)
element_external_validation_text = doc.createTextNode(self.use_external_validation)
element_external_validation.appendChild(element_external_validation_text)
element_streaming_mode = doc.createElement('streaming_mode')
element_scheme.appendChild(element_streaming_mode)
element_streaming_mode_text = doc.createTextNode(self.streaming_mode)
element_streaming_mode.appendChild(element_streaming_mode_text)
element_use_single_instance = doc.createElement('use_single_instance')
element_scheme.appendChild(element_use_single_instance)
element_use_single_instance_text = doc.createTextNode(self.use_single_instance)
element_use_single_instance.appendChild(element_use_single_instance_text)
element_endpoint = doc.createElement('endpoint')
element_scheme.appendChild(element_endpoint)
element_args = doc.createElement('args')
element_endpoint.appendChild(element_args)
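    # Describe each input argument as an <arg> element (title, description, data type, required flags).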
for arg in self.args:
element_arg = doc.createElement('arg')
element_arg.setAttribute('name', arg.name)
element_args.appendChild(element_arg)
element_title = doc.createElement('title')
element_arg.appendChild(element_title)
element_title_text = doc.createTextNode(arg.title)
element_title.appendChild(element_title_text)
element_desc = doc.createElement('description')
element_arg.appendChild(element_desc)
element_desc_text = doc.createTextNode(arg.description)
element_desc.appendChild(element_desc_text)
element_data_type = doc.createElement('data_type')
element_arg.appendChild(element_data_type)
element_data_type_text = doc.createTextNode(arg.get_data_type())
element_data_type.appendChild(element_data_type_text)
element_required_on_create = doc.createElement('required_on_create')
element_arg.appendChild(element_required_on_create)
element_required_on_create_text = doc.createTextNode('true' if arg.required_on_create else 'false')
element_required_on_create.appendChild(element_required_on_create_text)
element_required_on_edit = doc.createElement('required_on_edit')
element_arg.appendChild(element_required_on_edit)
element_required_on_edit_text = doc.createTextNode('true' if arg.required_on_edit else 'false')
element_required_on_edit.appendChild(element_required_on_edit_text)
return doc.toxml()
|
eventgen
|
positive
|
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print('Loaded dataset `{:s}` for training'.format(imdb.name))
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
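    # Optionally append horizontally flipped copies and precompute per-image roidb metadata before training.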
<DeepExtract>
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing training data...')
prepare_roidb(imdb)
print('done')
roidb = imdb.roidb
</DeepExtract>
return roidb
|
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print('Loaded dataset `{:s}` for training'.format(imdb.name))
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
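    # Optionally append horizontally flipped copies and precompute per-image roidb metadata before training.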
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing training data...')
prepare_roidb(imdb)
print('done')
roidb = imdb.roidb
return roidb
|
DA_Detection
|
positive
|
def rename_handle_duplicates(self, from_columns: List[str], to_columns: List[str]) -> ColumnContainer:
"""
Same as `rename` but additionally handles presence of
duplicates in `from_columns`
"""
<DeepExtract>
cc = ColumnContainer(self._frontend_columns.copy(), self._frontend_backend_mapping.copy())
</DeepExtract>
cc._frontend_backend_mapping.update({str(column_to): self._frontend_backend_mapping[str(column_from)] for (column_from, column_to) in zip(from_columns, to_columns)})
columns = dict(zip(from_columns, to_columns))
cc._frontend_columns = [str(columns.get(col, col)) for col in self._frontend_columns]
return cc
|
def rename_handle_duplicates(self, from_columns: List[str], to_columns: List[str]) -> ColumnContainer:
"""
Same as `rename` but additionally handles presence of
duplicates in `from_columns`
"""
cc = ColumnContainer(self._frontend_columns.copy(), self._frontend_backend_mapping.copy())
cc._frontend_backend_mapping.update({str(column_to): self._frontend_backend_mapping[str(column_from)] for (column_from, column_to) in zip(from_columns, to_columns)})
columns = dict(zip(from_columns, to_columns))
cc._frontend_columns = [str(columns.get(col, col)) for col in self._frontend_columns]
return cc
|
dask-sql
|
positive
|
def __call__(self, parser, namespace, values, option_string):
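    # Resolve the avatar code file under _AVATAR_CODES_DIRECTORY and store its contents as the argument value.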
<DeepExtract>
if not values.endswith('.py'):
values = values + '.py'
filepath = os.path.join(_AVATAR_CODES_DIRECTORY, values)
with open(filepath) as f:
values = f.read()
</DeepExtract>
setattr(namespace, self.dest, values)
|
def __call__(self, parser, namespace, values, option_string):
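    # Resolve the avatar code file under _AVATAR_CODES_DIRECTORY and store its contents as the argument value.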
if not values.endswith('.py'):
values = values + '.py'
filepath = os.path.join(_AVATAR_CODES_DIRECTORY, values)
with open(filepath) as f:
values = f.read()
setattr(namespace, self.dest, values)
|
aimmo
|
positive
|
def step(self, action):
for _ in range(self.simrate):
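        # PD-control the ten leg motors toward the reference pose offset by the policy action for one simulator substep.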
<DeepExtract>
(ref_pos, ref_vel) = self.get_ref_state(self.phase + 1)
target = action + ref_pos[self.pos_idx]
real_action = target
self.prev_action = real_action
self.u = pd_in_t()
for i in range(5):
self.u.leftLeg.motorPd.pGain[i] = self.P[i]
self.u.rightLeg.motorPd.pGain[i] = self.P[i]
self.u.leftLeg.motorPd.dGain[i] = self.D[i]
self.u.rightLeg.motorPd.dGain[i] = self.D[i]
self.u.leftLeg.motorPd.torque[i] = 0
self.u.rightLeg.motorPd.torque[i] = 0
self.u.leftLeg.motorPd.pTarget[i] = target[i]
self.u.rightLeg.motorPd.pTarget[i] = target[i + 5]
self.u.leftLeg.motorPd.dTarget[i] = 0
self.u.rightLeg.motorPd.dTarget[i] = 0
self.cassie_state = self.sim.step_pd(self.u)
</DeepExtract>
height = self.sim.qpos()[2]
self.time += 1
self.phase += 1
if self.phase > self.phaselen:
self.phase = 0
self.counter += 1
done = not (height > 0.4 and height < 3.0)
<DeepExtract>
qpos = np.copy(self.sim.qpos())
qvel = np.copy(self.sim.qvel())
(ref_pos, ref_vel) = self.get_ref_state(self.phase)
weight = [0.15, 0.15, 0.1, 0.05, 0.05, 0.15, 0.15, 0.1, 0.05, 0.05]
joint_error = 0
footpos_error = 0
com_vel_error = 0
(ref_rfoot, ref_lfoot) = self.get_ref_footdist(self.phase + 1)
lfoot = self.cassie_state.leftFoot.position[:]
rfoot = self.cassie_state.rightFoot.position[:]
for j in [0, 1, 2]:
footpos_error += np.linalg.norm(lfoot[j] - ref_lfoot[j]) + np.linalg.norm(rfoot[j] - ref_rfoot[j])
if self.debug:
print('ref_rfoot: {} rfoot: {}'.format(ref_rfoot, rfoot))
print('ref_lfoot: {} lfoot: {}'.format(ref_lfoot, lfoot))
print(footpos_error)
ref_cvel = self.get_ref_com_vel(self.phase + 1)
cvel = self.cassie_state.pelvis.translationalVelocity
for j in [0, 1, 2]:
com_vel_error += np.linalg.norm(cvel[j] - ref_cvel[j])
for (i, j) in enumerate(self.pos_idx):
target = ref_pos[j]
actual = qpos[j]
if j == 20 or j == 34:
joint_error += 0
else:
joint_error += 30 * weight[i] * (target - actual) ** 2
reward = 0.5 * np.exp(-joint_error) + 0.25 * np.exp(-footpos_error) + 0.25 * np.exp(-com_vel_error)
if self.debug:
print('reward: {6}\njoint:\t{0:.2f}, % = {1:.2f}\nfoot:\t{2:.2f}, % = {3:.2f}\ncom_vel:\t{4:.2f}, % = {5:.2f}\n\n'.format(0.5 * np.exp(-joint_error), 0.5 * np.exp(-joint_error) / reward * 100, 0.25 * np.exp(-footpos_error), 0.25 * np.exp(-footpos_error) / reward * 100, 0.25 * np.exp(-com_vel_error), 0.25 * np.exp(-com_vel_error) / reward * 100, reward))
print('actual speed: {}\tdesired_speed: {}'.format(qvel[0], self.speed))
reward = reward
</DeepExtract>
if reward < 0.3:
done = True
return (self.get_full_state(), reward, done, {})
|
def step(self, action):
for _ in range(self.simrate):
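        # PD-control the ten leg motors toward the reference pose offset by the policy action for one simulator substep.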
(ref_pos, ref_vel) = self.get_ref_state(self.phase + 1)
target = action + ref_pos[self.pos_idx]
real_action = target
self.prev_action = real_action
self.u = pd_in_t()
for i in range(5):
self.u.leftLeg.motorPd.pGain[i] = self.P[i]
self.u.rightLeg.motorPd.pGain[i] = self.P[i]
self.u.leftLeg.motorPd.dGain[i] = self.D[i]
self.u.rightLeg.motorPd.dGain[i] = self.D[i]
self.u.leftLeg.motorPd.torque[i] = 0
self.u.rightLeg.motorPd.torque[i] = 0
self.u.leftLeg.motorPd.pTarget[i] = target[i]
self.u.rightLeg.motorPd.pTarget[i] = target[i + 5]
self.u.leftLeg.motorPd.dTarget[i] = 0
self.u.rightLeg.motorPd.dTarget[i] = 0
self.cassie_state = self.sim.step_pd(self.u)
height = self.sim.qpos()[2]
self.time += 1
self.phase += 1
if self.phase > self.phaselen:
self.phase = 0
self.counter += 1
done = not (height > 0.4 and height < 3.0)
qpos = np.copy(self.sim.qpos())
qvel = np.copy(self.sim.qvel())
(ref_pos, ref_vel) = self.get_ref_state(self.phase)
weight = [0.15, 0.15, 0.1, 0.05, 0.05, 0.15, 0.15, 0.1, 0.05, 0.05]
joint_error = 0
footpos_error = 0
com_vel_error = 0
(ref_rfoot, ref_lfoot) = self.get_ref_footdist(self.phase + 1)
lfoot = self.cassie_state.leftFoot.position[:]
rfoot = self.cassie_state.rightFoot.position[:]
for j in [0, 1, 2]:
footpos_error += np.linalg.norm(lfoot[j] - ref_lfoot[j]) + np.linalg.norm(rfoot[j] - ref_rfoot[j])
if self.debug:
print('ref_rfoot: {} rfoot: {}'.format(ref_rfoot, rfoot))
print('ref_lfoot: {} lfoot: {}'.format(ref_lfoot, lfoot))
print(footpos_error)
ref_cvel = self.get_ref_com_vel(self.phase + 1)
cvel = self.cassie_state.pelvis.translationalVelocity
for j in [0, 1, 2]:
com_vel_error += np.linalg.norm(cvel[j] - ref_cvel[j])
for (i, j) in enumerate(self.pos_idx):
target = ref_pos[j]
actual = qpos[j]
if j == 20 or j == 34:
joint_error += 0
else:
joint_error += 30 * weight[i] * (target - actual) ** 2
reward = 0.5 * np.exp(-joint_error) + 0.25 * np.exp(-footpos_error) + 0.25 * np.exp(-com_vel_error)
if self.debug:
print('reward: {6}\njoint:\t{0:.2f}, % = {1:.2f}\nfoot:\t{2:.2f}, % = {3:.2f}\ncom_vel:\t{4:.2f}, % = {5:.2f}\n\n'.format(0.5 * np.exp(-joint_error), 0.5 * np.exp(-joint_error) / reward * 100, 0.25 * np.exp(-footpos_error), 0.25 * np.exp(-footpos_error) / reward * 100, 0.25 * np.exp(-com_vel_error), 0.25 * np.exp(-com_vel_error) / reward * 100, reward))
print('actual speed: {}\tdesired_speed: {}'.format(qvel[0], self.speed))
reward = reward
if reward < 0.3:
done = True
return (self.get_full_state(), reward, done, {})
|
apex
|
positive
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
parser.add_argument('--model_type', default=None, type=str, required=True, help='Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys()))
parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Path to pre-trained model or shortcut name selected in the list: ' + ', '.join(ALL_MODELS))
parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train selected in the list: ' + ', '.join(processors.keys()))
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
parser.add_argument('--do_predict', action='store_true', help='Whether to run the model in inference mode on the test set.')
parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
parser.add_argument('--do_kd', action='store_true', help='Whether to do knowledge distillation (KD).')
parser.add_argument('--kd_coeff', type=float, default=1.0, help='KD loss coefficient.')
    parser.add_argument('--decay', default=0.995, type=float, help='Exponential moving-average decay rate for the KD teacher update.')
parser.add_argument('--per_gpu_train_batch_size', default=8, type=int, help='Batch size per GPU/CPU for training.')
parser.add_argument('--per_gpu_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--weight_decay', default=0.01, type=float, help='Weight decay to apply (if any).')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
parser.add_argument('--max_steps', default=-1, type=int, help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10% of training.')
parser.add_argument('--logging_steps', type=int, default=10, help='Log every X updates steps.')
parser.add_argument('--save_steps', type=int, default=1000, help='Save checkpoint every X updates steps.')
    parser.add_argument('--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number')
    parser.add_argument('--predict_checkpoints', type=int, default=0, help='Predict with checkpoints starting with the same prefix as model_name and ending with a step number')
parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available')
parser.add_argument('--overwrite_output_dir', action='store_true', help='Overwrite the content of the output directory')
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--local_rank', type=int, default=-1, help='For distributed training: local_rank')
parser.add_argument('--server_ip', type=str, default='', help='For distant debugging.')
parser.add_argument('--server_port', type=str, default='', help='For distant debugging.')
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
args.output_dir = args.output_dir + '{}'.format(list(filter(None, args.model_name_or_path.split('/'))).pop())
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
init_logger(log_file=args.output_dir + '/{}-{}-{}.log'.format(args.model_type, args.task_name, time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime())))
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and (not args.overwrite_output_dir):
raise ValueError('Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.'.format(args.output_dir))
if args.server_ip and args.server_port:
import ptvsd
print('Waiting for debugger attach')
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
if args.local_rank == -1 or args.no_cuda:
device = torch.device('cuda' if torch.cuda.is_available() and (not args.no_cuda) else 'cpu')
args.n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
seed_everything(args.seed)
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError('Task not found: %s' % args.task_name)
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
args.model_type = args.model_type.lower()
(config_class, model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
if args.local_rank == 0:
torch.distributed.barrier()
model.to(args.device)
logger.info('Training/evaluation parameters %s', args)
if args.do_train:
<DeepExtract>
if args.local_rank not in [-1, 0] and (not evaluate):
torch.distributed.barrier()
processor = processors[args.task_name]()
output_mode = output_modes[args.task_name]
cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format('train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(args.task_name)))
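        # Reuse cached features when present; otherwise build them from the raw examples and cache them to disk.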
if os.path.exists(cached_features_file):
logger.info('Loading features from cached file %s', cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info('Creating features from dataset file at %s', args.data_dir)
label_list = processor.get_labels()
if 'train' == 'train':
examples = processor.get_train_examples(args.data_dir)
elif 'train' == 'dev':
examples = processor.get_dev_examples(args.data_dir)
else:
examples = processor.get_test_examples(args.data_dir)
features = convert_examples_to_features(examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode, pad_on_left=bool(args.model_type in ['xlnet']), pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0)
if args.local_rank in [-1, 0]:
logger.info('Saving features into cached file %s', cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and (not evaluate):
torch.distributed.barrier()
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
all_lens = torch.tensor([f.input_len for f in features], dtype=torch.long)
if output_mode == 'classification':
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == 'regression':
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_lens, all_labels)
train_dataset = dataset
</DeepExtract>
<DeepExtract>
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=xlnet_collate_fn if args.model_type in ['xlnet'] else collate_fn)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
args.warmup_steps = int(t_total * args.warmup_proportion)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if not any((nd in n for nd in no_decay))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any((nd in n for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
(model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
logger.info('***** Running training *****')
logger.info(' Num examples = %d', len(train_dataset))
logger.info(' Num Epochs = %d', args.num_train_epochs)
logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
logger.info(' Total optimization steps = %d', t_total)
global_step = 0
(tr_loss, logging_loss) = (0.0, 0.0)
model.zero_grad()
if args.do_kd:
kd_loss_fct = MSELoss()
kd_model = copy.deepcopy(model)
kd_model.eval()
seed_everything(args.seed)
for _ in range(int(args.num_train_epochs)):
pbar = ProgressBar(n_total=len(train_dataloader), desc='Training')
for (step, batch) in enumerate(train_dataloader):
model.train()
batch = tuple((t.to(args.device) for t in batch))
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'albert', 'roberta'] else None
outputs = model(**inputs)
loss = outputs[0]
if args.do_kd:
inputs['labels'] = None
with torch.no_grad():
kd_logits = kd_model(**inputs)[0]
kd_loss = kd_loss_fct(outputs[1], kd_logits)
loss += args.kd_coeff * kd_loss
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
pbar(step, {'loss': loss.item()})
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
if args.do_kd:
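                        # Update the KD teacher as an exponential moving average of the current student weights.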
decay = min(args.decay, (1 + global_step) / (10 + global_step))
one_minus_decay = 1.0 - decay
with torch.no_grad():
parameters = [p for p in model.parameters() if p.requires_grad]
for (s_param, param) in zip(kd_model.parameters(), parameters):
s_param.sub_(one_minus_decay * (s_param - param))
if args.local_rank in [-1, 0] and args.logging_steps > 0 and (global_step % args.logging_steps == 0):
print(' ')
if args.local_rank == -1:
evaluate(args, model, tokenizer)
if args.local_rank in [-1, 0] and args.save_steps > 0 and (global_step % args.save_steps == 0):
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info('Saving model checkpoint to %s', output_dir)
tokenizer.save_vocabulary(vocab_path=output_dir)
print(' ')
if 'cuda' in str(args.device):
torch.cuda.empty_cache()
(global_step, tr_loss) = (global_step, tr_loss / global_step)
</DeepExtract>
logger.info(' global_step = %s, average loss = %s', global_step, tr_loss)
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info('Saving model checkpoint to %s', args.output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
model.to(args.device)
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list((os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))))
logging.getLogger('transformers.modeling_utils').setLevel(logging.WARN)
logger.info('Evaluate the following checkpoints: %s', checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ''
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ''
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
<DeepExtract>
eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
results = {}
for (eval_task, eval_output_dir) in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, data_type='dev')
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=xlnet_collate_fn if args.model_type in ['xlnet'] else collate_fn)
logger.info('********* Running evaluation {} ********'.format(prefix))
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
pbar = ProgressBar(n_total=len(eval_dataloader), desc='Evaluating')
for (step, batch) in enumerate(eval_dataloader):
model.eval()
batch = tuple((t.to(args.device) for t in batch))
with torch.no_grad():
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'albert', 'roberta'] else None
outputs = model(**inputs)
(tmp_eval_loss, logits) = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
pbar(step)
print(' ')
if 'cuda' in str(args.device):
torch.cuda.empty_cache()
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == 'classification':
preds = np.argmax(preds, axis=1)
elif args.output_mode == 'regression':
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
logger.info(' Num examples = %d', len(eval_dataset))
logger.info(' Batch size = %d', args.eval_batch_size)
logger.info('******** Eval results {} ********'.format(prefix))
for key in sorted(result.keys()):
logger.info(' dev: %s = %s', key, str(result[key]))
result = results
</DeepExtract>
result = dict(((k + '_{}'.format(global_step), v) for (k, v) in result.items()))
results.update(result)
output_eval_file = os.path.join(args.output_dir, 'checkpoint_eval_results.txt')
with open(output_eval_file, 'w') as writer:
for key in sorted(results.keys()):
writer.write('%s = %s\n' % (key, str(results[key])))
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
parser.add_argument('--model_type', default=None, type=str, required=True, help='Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys()))
parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Path to pre-trained model or shortcut name selected in the list: ' + ', '.join(ALL_MODELS))
parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train selected in the list: ' + ', '.join(processors.keys()))
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
parser.add_argument('--do_predict', action='store_true', help='Whether to run the model in inference mode on the test set.')
parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
parser.add_argument('--do_kd', action='store_true', help='Whether to do knowledge distillation (KD).')
parser.add_argument('--kd_coeff', type=float, default=1.0, help='KD loss coefficient.')
    parser.add_argument('--decay', default=0.995, type=float, help='Exponential moving-average decay rate for the KD teacher update.')
parser.add_argument('--per_gpu_train_batch_size', default=8, type=int, help='Batch size per GPU/CPU for training.')
parser.add_argument('--per_gpu_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--weight_decay', default=0.01, type=float, help='Weight decay to apply (if any).')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
parser.add_argument('--max_steps', default=-1, type=int, help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10% of training.')
parser.add_argument('--logging_steps', type=int, default=10, help='Log every X updates steps.')
parser.add_argument('--save_steps', type=int, default=1000, help='Save checkpoint every X updates steps.')
    parser.add_argument('--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number')
    parser.add_argument('--predict_checkpoints', type=int, default=0, help='Predict with checkpoints starting with the same prefix as model_name and ending with a step number')
parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available')
parser.add_argument('--overwrite_output_dir', action='store_true', help='Overwrite the content of the output directory')
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--local_rank', type=int, default=-1, help='For distributed training: local_rank')
parser.add_argument('--server_ip', type=str, default='', help='For distant debugging.')
parser.add_argument('--server_port', type=str, default='', help='For distant debugging.')
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
args.output_dir = args.output_dir + '{}'.format(list(filter(None, args.model_name_or_path.split('/'))).pop())
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
init_logger(log_file=args.output_dir + '/{}-{}-{}.log'.format(args.model_type, args.task_name, time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime())))
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and (not args.overwrite_output_dir):
raise ValueError('Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.'.format(args.output_dir))
if args.server_ip and args.server_port:
import ptvsd
print('Waiting for debugger attach')
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
if args.local_rank == -1 or args.no_cuda:
device = torch.device('cuda' if torch.cuda.is_available() and (not args.no_cuda) else 'cpu')
args.n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
seed_everything(args.seed)
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError('Task not found: %s' % args.task_name)
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
args.model_type = args.model_type.lower()
(config_class, model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
if args.local_rank == 0:
torch.distributed.barrier()
model.to(args.device)
logger.info('Training/evaluation parameters %s', args)
if args.do_train:
if args.local_rank not in [-1, 0] and (not evaluate):
torch.distributed.barrier()
processor = processors[args.task_name]()
output_mode = output_modes[args.task_name]
cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format('train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(args.task_name)))
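        # Reuse cached features when present; otherwise build them from the raw examples and cache them to disk.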
if os.path.exists(cached_features_file):
logger.info('Loading features from cached file %s', cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info('Creating features from dataset file at %s', args.data_dir)
label_list = processor.get_labels()
if 'train' == 'train':
examples = processor.get_train_examples(args.data_dir)
elif 'train' == 'dev':
examples = processor.get_dev_examples(args.data_dir)
else:
examples = processor.get_test_examples(args.data_dir)
features = convert_examples_to_features(examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode, pad_on_left=bool(args.model_type in ['xlnet']), pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0)
if args.local_rank in [-1, 0]:
logger.info('Saving features into cached file %s', cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and (not evaluate):
torch.distributed.barrier()
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
all_lens = torch.tensor([f.input_len for f in features], dtype=torch.long)
if output_mode == 'classification':
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == 'regression':
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_lens, all_labels)
train_dataset = dataset
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=xlnet_collate_fn if args.model_type in ['xlnet'] else collate_fn)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
args.warmup_steps = int(t_total * args.warmup_proportion)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if not any((nd in n for nd in no_decay))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any((nd in n for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
(model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
logger.info('***** Running training *****')
logger.info(' Num examples = %d', len(train_dataset))
logger.info(' Num Epochs = %d', args.num_train_epochs)
logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
logger.info(' Total optimization steps = %d', t_total)
global_step = 0
(tr_loss, logging_loss) = (0.0, 0.0)
model.zero_grad()
if args.do_kd:
kd_loss_fct = MSELoss()
kd_model = copy.deepcopy(model)
kd_model.eval()
seed_everything(args.seed)
for _ in range(int(args.num_train_epochs)):
pbar = ProgressBar(n_total=len(train_dataloader), desc='Training')
for (step, batch) in enumerate(train_dataloader):
model.train()
batch = tuple((t.to(args.device) for t in batch))
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'albert', 'roberta'] else None
outputs = model(**inputs)
loss = outputs[0]
if args.do_kd:
inputs['labels'] = None
with torch.no_grad():
kd_logits = kd_model(**inputs)[0]
kd_loss = kd_loss_fct(outputs[1], kd_logits)
loss += args.kd_coeff * kd_loss
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
pbar(step, {'loss': loss.item()})
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
if args.do_kd:
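                        # Update the KD teacher as an exponential moving average of the current student weights.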
decay = min(args.decay, (1 + global_step) / (10 + global_step))
one_minus_decay = 1.0 - decay
with torch.no_grad():
parameters = [p for p in model.parameters() if p.requires_grad]
for (s_param, param) in zip(kd_model.parameters(), parameters):
s_param.sub_(one_minus_decay * (s_param - param))
if args.local_rank in [-1, 0] and args.logging_steps > 0 and (global_step % args.logging_steps == 0):
print(' ')
if args.local_rank == -1:
evaluate(args, model, tokenizer)
if args.local_rank in [-1, 0] and args.save_steps > 0 and (global_step % args.save_steps == 0):
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info('Saving model checkpoint to %s', output_dir)
tokenizer.save_vocabulary(vocab_path=output_dir)
print(' ')
if 'cuda' in str(args.device):
torch.cuda.empty_cache()
(global_step, tr_loss) = (global_step, tr_loss / global_step)
logger.info(' global_step = %s, average loss = %s', global_step, tr_loss)
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info('Saving model checkpoint to %s', args.output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
model.to(args.device)
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list((os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))))
logging.getLogger('transformers.modeling_utils').setLevel(logging.WARN)
logger.info('Evaluate the following checkpoints: %s', checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ''
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ''
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
results = {}
for (eval_task, eval_output_dir) in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, data_type='dev')
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=xlnet_collate_fn if args.model_type in ['xlnet'] else collate_fn)
logger.info('********* Running evaluation {} ********'.format(prefix))
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
pbar = ProgressBar(n_total=len(eval_dataloader), desc='Evaluating')
for (step, batch) in enumerate(eval_dataloader):
model.eval()
batch = tuple((t.to(args.device) for t in batch))
with torch.no_grad():
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'albert', 'roberta'] else None
outputs = model(**inputs)
(tmp_eval_loss, logits) = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
pbar(step)
print(' ')
if 'cuda' in str(args.device):
torch.cuda.empty_cache()
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == 'classification':
preds = np.argmax(preds, axis=1)
elif args.output_mode == 'regression':
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
logger.info(' Num examples = %d', len(eval_dataset))
logger.info(' Batch size = %d', args.eval_batch_size)
logger.info('******** Eval results {} ********'.format(prefix))
for key in sorted(result.keys()):
logger.info(' dev: %s = %s', key, str(result[key]))
result = results
result = dict(((k + '_{}'.format(global_step), v) for (k, v) in result.items()))
results.update(result)
output_eval_file = os.path.join(args.output_dir, 'checkpoint_eval_results.txt')
with open(output_eval_file, 'w') as writer:
for key in sorted(results.keys()):
writer.write('%s = %s\n' % (key, str(results[key])))
|
BERT-SDA
|
positive
|
def _parse_known_args(arg_strings, namespace):
if self.fromfile_prefix_chars is not None:
<DeepExtract>
new_arg_strings = []
for arg_string in arg_strings:
if arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
else:
try:
args_file = open(arg_string[1:])
try:
arg_strings = []
for arg_line in args_file.read().splitlines():
for arg in self.convert_arg_line_to_args(arg_line):
arg_strings.append(arg)
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
finally:
args_file.close()
except IOError:
err = _sys.exc_info()[1]
self.error(str(err))
arg_strings = new_arg_strings
</DeepExtract>
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for (i, mutex_action) in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for (i, arg_string) in enumerate(arg_strings_iter):
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
else:
<DeepExtract>
if not arg_string:
option_tuple = None
if not arg_string[0] in self.prefix_chars:
option_tuple = None
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
option_tuple = (action, arg_string, None)
if len(arg_string) == 1:
option_tuple = None
if '=' in arg_string:
(option_string, explicit_arg) = arg_string.split('=', 1)
if option_string in self._option_string_actions:
action = self._option_string_actions[option_string]
option_tuple = (action, option_string, explicit_arg)
option_tuples = self._get_option_tuples(arg_string)
if len(option_tuples) > 1:
options = ', '.join([option_string for (action, option_string, explicit_arg) in option_tuples])
tup = (arg_string, options)
self.error(_('ambiguous option: %s could match %s') % tup)
elif len(option_tuples) == 1:
(option_tuple,) = option_tuples
option_tuple = option_tuple
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
option_tuple = None
if ' ' in arg_string:
option_tuple = None
option_tuple = (None, arg_string, None)
</DeepExtract>
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
arg_strings_pattern = ''.join(arg_string_pattern_parts)
seen_actions = set()
seen_non_default_actions = set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
<DeepExtract>
if action.nargs not in [PARSER, REMAINDER]:
argument_strings = [s for s in argument_strings if s != '--']
if not argument_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, basestring):
value = self._get_value(action, value)
self._check_value(action, value)
elif not argument_strings and action.nargs == ZERO_OR_MORE and (not action.option_strings):
if action.default is not None:
value = action.default
else:
value = argument_strings
self._check_value(action, value)
elif len(argument_strings) == 1 and action.nargs in [None, OPTIONAL]:
(arg_string,) = argument_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
elif action.nargs == REMAINDER:
value = [self._get_value(action, v) for v in argument_strings]
elif action.nargs == PARSER:
value = [self._get_value(action, v) for v in argument_strings]
self._check_value(action, value[0])
else:
value = [self._get_value(action, v) for v in argument_strings]
for v in value:
self._check_value(action, v)
argument_values = value
</DeepExtract>
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
<DeepExtract>
if conflict_action is None:
action_name = None
elif conflict_action.option_strings:
action_name = '/'.join(conflict_action.option_strings)
elif conflict_action.metavar not in (None, SUPPRESS):
action_name = conflict_action.metavar
elif conflict_action.dest not in (None, SUPPRESS):
action_name = conflict_action.dest
else:
action_name = None
</DeepExtract>
raise ArgumentError(action, msg % action_name)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
def consume_optional(start_index):
option_tuple = option_string_indices[start_index]
(action, option_string, explicit_arg) = option_tuple
match_argument = self._match_argument
action_tuples = []
while True:
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
assert action_tuples
for (action, args, option_string) in action_tuples:
<DeepExtract>
seen_actions.add(action)
argument_values = self._get_values(action, args)
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
</DeepExtract>
return stop
<DeepExtract>
positionals = [action for action in self._actions if not action.option_strings]
</DeepExtract>
def consume_positionals(start_index):
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
for (action, arg_count) in zip(positionals, arg_counts):
args = arg_strings[start_index:start_index + arg_count]
start_index += arg_count
<DeepExtract>
seen_actions.add(action)
argument_values = self._get_values(action, args)
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
</DeepExtract>
positionals[:] = positionals[len(arg_counts):]
return start_index
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
next_option_string_index = min([index for index in option_string_indices if index >= start_index])
if start_index != next_option_string_index:
<DeepExtract>
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
for (action, arg_count) in zip(positionals, arg_counts):
args = arg_strings[start_index:start_index + arg_count]
start_index += arg_count
take_action(action, args)
positionals[:] = positionals[len(arg_counts):]
positionals_end_index = start_index
</DeepExtract>
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
<DeepExtract>
option_tuple = option_string_indices[start_index]
(action, option_string, explicit_arg) = option_tuple
match_argument = self._match_argument
action_tuples = []
while True:
if action is None:
extras.append(arg_strings[start_index])
start_index = start_index + 1
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
assert action_tuples
for (action, args, option_string) in action_tuples:
take_action(action, args, option_string)
start_index = stop
</DeepExtract>
<DeepExtract>
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
for (action, arg_count) in zip(positionals, arg_counts):
args = arg_strings[start_index:start_index + arg_count]
start_index += arg_count
take_action(action, args)
positionals[:] = positionals[len(arg_counts):]
stop_index = start_index
</DeepExtract>
extras.extend(arg_strings[stop_index:])
if positionals:
<DeepExtract>
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, _('too few arguments')))
</DeepExtract>
for action in self._actions:
if action.required:
if action not in seen_actions:
<DeepExtract>
if action is None:
name = None
elif action.option_strings:
name = '/'.join(action.option_strings)
elif action.metavar not in (None, SUPPRESS):
name = action.metavar
elif action.dest not in (None, SUPPRESS):
name = action.dest
else:
name = None
</DeepExtract>
<DeepExtract>
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, _('argument %s is required') % name))
</DeepExtract>
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
else:
names = [_get_action_name(action) for action in group._group_actions if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
<DeepExtract>
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, msg % ' '.join(names)))
</DeepExtract>
return (namespace, extras)
|
def _parse_known_args(arg_strings, namespace):
if self.fromfile_prefix_chars is not None:
new_arg_strings = []
for arg_string in arg_strings:
if arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
else:
try:
args_file = open(arg_string[1:])
try:
arg_strings = []
for arg_line in args_file.read().splitlines():
for arg in self.convert_arg_line_to_args(arg_line):
arg_strings.append(arg)
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
finally:
args_file.close()
except IOError:
err = _sys.exc_info()[1]
self.error(str(err))
arg_strings = new_arg_strings
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for (i, mutex_action) in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for (i, arg_string) in enumerate(arg_strings_iter):
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
else:
if not arg_string:
option_tuple = None
if not arg_string[0] in self.prefix_chars:
option_tuple = None
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
option_tuple = (action, arg_string, None)
if len(arg_string) == 1:
option_tuple = None
if '=' in arg_string:
(option_string, explicit_arg) = arg_string.split('=', 1)
if option_string in self._option_string_actions:
action = self._option_string_actions[option_string]
option_tuple = (action, option_string, explicit_arg)
option_tuples = self._get_option_tuples(arg_string)
if len(option_tuples) > 1:
options = ', '.join([option_string for (action, option_string, explicit_arg) in option_tuples])
tup = (arg_string, options)
self.error(_('ambiguous option: %s could match %s') % tup)
elif len(option_tuples) == 1:
(option_tuple,) = option_tuples
option_tuple = option_tuple
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
option_tuple = None
if ' ' in arg_string:
option_tuple = None
option_tuple = (None, arg_string, None)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
arg_strings_pattern = ''.join(arg_string_pattern_parts)
seen_actions = set()
seen_non_default_actions = set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
if action.nargs not in [PARSER, REMAINDER]:
argument_strings = [s for s in argument_strings if s != '--']
if not argument_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, basestring):
value = self._get_value(action, value)
self._check_value(action, value)
elif not argument_strings and action.nargs == ZERO_OR_MORE and (not action.option_strings):
if action.default is not None:
value = action.default
else:
value = argument_strings
self._check_value(action, value)
elif len(argument_strings) == 1 and action.nargs in [None, OPTIONAL]:
(arg_string,) = argument_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
elif action.nargs == REMAINDER:
value = [self._get_value(action, v) for v in argument_strings]
elif action.nargs == PARSER:
value = [self._get_value(action, v) for v in argument_strings]
self._check_value(action, value[0])
else:
value = [self._get_value(action, v) for v in argument_strings]
for v in value:
self._check_value(action, v)
argument_values = value
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
if conflict_action is None:
action_name = None
elif conflict_action.option_strings:
action_name = '/'.join(conflict_action.option_strings)
elif conflict_action.metavar not in (None, SUPPRESS):
action_name = conflict_action.metavar
elif conflict_action.dest not in (None, SUPPRESS):
action_name = conflict_action.dest
else:
action_name = None
raise ArgumentError(action, msg % action_name)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
def consume_optional(start_index):
option_tuple = option_string_indices[start_index]
(action, option_string, explicit_arg) = option_tuple
match_argument = self._match_argument
action_tuples = []
while True:
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
assert action_tuples
for (action, args, option_string) in action_tuples:
seen_actions.add(action)
argument_values = self._get_values(action, args)
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
return stop
positionals = [action for action in self._actions if not action.option_strings]
def consume_positionals(start_index):
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
for (action, arg_count) in zip(positionals, arg_counts):
args = arg_strings[start_index:start_index + arg_count]
start_index += arg_count
seen_actions.add(action)
argument_values = self._get_values(action, args)
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
positionals[:] = positionals[len(arg_counts):]
return start_index
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
next_option_string_index = min([index for index in option_string_indices if index >= start_index])
if start_index != next_option_string_index:
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
for (action, arg_count) in zip(positionals, arg_counts):
args = arg_strings[start_index:start_index + arg_count]
start_index += arg_count
take_action(action, args)
positionals[:] = positionals[len(arg_counts):]
positionals_end_index = start_index
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
option_tuple = option_string_indices[start_index]
(action, option_string, explicit_arg) = option_tuple
match_argument = self._match_argument
action_tuples = []
while True:
if action is None:
extras.append(arg_strings[start_index])
start_index = start_index + 1
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
assert action_tuples
for (action, args, option_string) in action_tuples:
take_action(action, args, option_string)
start_index = stop
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
for (action, arg_count) in zip(positionals, arg_counts):
args = arg_strings[start_index:start_index + arg_count]
start_index += arg_count
take_action(action, args)
positionals[:] = positionals[len(arg_counts):]
stop_index = start_index
extras.extend(arg_strings[stop_index:])
if positionals:
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, _('too few arguments')))
for action in self._actions:
if action.required:
if action not in seen_actions:
if action is None:
name = None
elif action.option_strings:
name = '/'.join(action.option_strings)
elif action.metavar not in (None, SUPPRESS):
name = action.metavar
elif action.dest not in (None, SUPPRESS):
name = action.dest
else:
name = None
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, _('argument %s is required') % name))
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
else:
names = [_get_action_name(action) for action in group._group_actions if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, msg % ' '.join(names)))
return (namespace, extras)
|
alfred-pocket
|
positive
|
ansible-navigator
|
positive
|
||
def _get_amount_total(self, parsed_inv, raw_text, partner_config, test_info):
thousand_sep = partner_config['thousand_sep']
if not thousand_sep:
thousand_sep_pattern = ''
elif thousand_sep == chr(32):
thousand_sep_pattern = test_info['space_pattern']
else:
thousand_sep_pattern = regex.escape(thousand_sep)
decimal_sep = partner_config['decimal_sep']
decimal_sep_pattern = regex.escape(decimal_sep)
decimal_places = partner_config['currency'].decimal_places
if self.regexp:
pattern = self.regexp
elif decimal_places:
pattern = '(?:\\d{1,3}%s)*\\d{1,3}%s\\d{%d}' % (thousand_sep_pattern, decimal_sep_pattern, decimal_places)
else:
pattern = '(?:\\d{1,3}%s)*\\d{1,3}' % thousand_sep_pattern
test_info[self.name] = {'pattern': pattern}
<DeepExtract>
self.ensure_one()
restrict_text = raw_text
start = self.start and self.start.strip() or False
end = self.end and self.end.strip() or False
if start:
position = restrict_text.find(start)
if position >= 0:
restrict_text = restrict_text[position + len(start):]
test_info[self.name]['start'] = _("Successful cut on '%s'") % start
else:
error_msg = _("String '%s' not found") % start
test_info[self.name]['start'] = '<b%s>%s</b>' % (ERROR_STYLE, error_msg)
if end:
if not restrict_text or (restrict_text and (not restrict_text.strip())):
error_msg = _('No text to cut, maybe because start string was the very end of the document')
test_info[self.name]['end'] = '<b%s>%s</b>' % (ERROR_STYLE, error_msg)
else:
position = restrict_text.find(end)
if position >= 0:
restrict_text = restrict_text[:position]
test_info[self.name]['end'] = _("Successful cut on '%s'") % end
else:
error_msg = _("String '%s' not found") % end
test_info[self.name]['end'] = '<b%s>%s</b>' % (ERROR_STYLE, error_msg)
restrict_text = restrict_text
</DeepExtract>
restrict_text_filtered = regex.sub('\\d{1,2}%s\\d{1,2}\\s?%%' % regex.escape(decimal_sep), '', restrict_text)
restrict_text_filtered = regex.sub('\\d{1,3}\\s?%', '', restrict_text_filtered)
restrict_text_filtered = regex.sub('[Cc]apital.{1,30}(?:\\d{1,3}%s)*\\d{1,3}' % regex.escape(thousand_sep), '', restrict_text_filtered)
res_regex = regex.findall(pattern, restrict_text_filtered)
valid_amounts = []
for amount_raw in res_regex:
if thousand_sep_pattern:
amount_raw = regex.sub(thousand_sep_pattern, '', amount_raw)
if decimal_places:
amount_raw_list = list(amount_raw)
amount_raw_list[-decimal_places - 1] = '.'
amount_raw = ''.join(amount_raw_list)
try:
valid_amounts.append(float(amount_raw))
except ValueError:
logger.debug('%s is an invalid float', amount_raw)
test_info[self.name].update({'res_regex': res_regex, 'valid_list': valid_amounts})
raise_if_none = not test_info['test_mode'] and True or False
<DeepExtract>
assert isinstance(valid_amounts, list)
if not valid_amounts:
if raise_if_none:
raise UserError(_("No valid data extracted for field '%s'.") % self.name)
else:
amount = None
if self.extract_rule in ('min', 'max', 'position_min', 'position_max'):
data_list_sorted = list(valid_amounts)
data_list_sorted.sort()
if self.name.startswith('date'):
test_info[self.name]['sorted_list'] = [format_date(self.env, date_dt) for date_dt in data_list_sorted]
else:
test_info[self.name]['sorted_list'] = data_list_sorted
if self.extract_rule == 'max':
amount = data_list_sorted[-1]
elif self.extract_rule == 'min':
amount = data_list_sorted[0]
elif self.extract_rule in ('position_min', 'position_max'):
if len(valid_amounts) < self.position:
error_msg = _("Partner '%s' is configured with an extract rule '%s' with position %d for field '%s' but the list of extracted valid data only has %d entries.") % (self.partner_id.display_name, test_info['extract_rule_sel'][self.extract_rule], self.position, test_info['field_name_sel'][self.name], len(valid_amounts))
if raise_if_none:
raise UserError(error_msg)
else:
test_info[self.name]['error_msg'] = error_msg
amount = None
sign = self.extract_rule == 'position_min' and 1 or -1
position = self.position
if self.extract_rule == 'position_min':
position -= 1
amount = data_list_sorted[position * sign]
elif self.extract_rule == 'first':
amount = valid_amounts[0]
elif self.extract_rule == 'last':
amount = valid_amounts[-1]
elif self.extract_rule in ('position_start', 'position_end'):
if len(valid_amounts) < self.position:
error_msg = _("Partner '%s' is configured with an extract rule '%s' with position %d for field '%s' but the list of extracted valid data only has %d entries.") % (self.partner_id.display_name, test_info['extract_rule_sel'][self.extract_rule], self.position, test_info['field_name_sel'][self.name], len(valid_amounts))
if raise_if_none:
raise UserError(error_msg)
else:
test_info[self.name]['error_msg'] = error_msg
amount = None
sign = self.extract_rule == 'position_start' and 1 or -1
position = self.position
if self.extract_rule == 'position_start':
position -= 1
amount = valid_amounts[position * sign]
else:
raise UserError(_('Bad configuration'))
</DeepExtract>
parsed_inv[self.name] = amount
|
def _get_amount_total(self, parsed_inv, raw_text, partner_config, test_info):
thousand_sep = partner_config['thousand_sep']
if not thousand_sep:
thousand_sep_pattern = ''
elif thousand_sep == chr(32):
thousand_sep_pattern = test_info['space_pattern']
else:
thousand_sep_pattern = regex.escape(thousand_sep)
decimal_sep = partner_config['decimal_sep']
decimal_sep_pattern = regex.escape(decimal_sep)
decimal_places = partner_config['currency'].decimal_places
if self.regexp:
pattern = self.regexp
elif decimal_places:
pattern = '(?:\\d{1,3}%s)*\\d{1,3}%s\\d{%d}' % (thousand_sep_pattern, decimal_sep_pattern, decimal_places)
else:
pattern = '(?:\\d{1,3}%s)*\\d{1,3}' % thousand_sep_pattern
test_info[self.name] = {'pattern': pattern}
self.ensure_one()
restrict_text = raw_text
start = self.start and self.start.strip() or False
end = self.end and self.end.strip() or False
if start:
position = restrict_text.find(start)
if position >= 0:
restrict_text = restrict_text[position + len(start):]
test_info[self.name]['start'] = _("Successful cut on '%s'") % start
else:
error_msg = _("String '%s' not found") % start
test_info[self.name]['start'] = '<b%s>%s</b>' % (ERROR_STYLE, error_msg)
if end:
if not restrict_text or (restrict_text and (not restrict_text.strip())):
error_msg = _('No text to cut, maybe because start string was the very end of the document')
test_info[self.name]['end'] = '<b%s>%s</b>' % (ERROR_STYLE, error_msg)
else:
position = restrict_text.find(end)
if position >= 0:
restrict_text = restrict_text[:position]
test_info[self.name]['end'] = _("Successful cut on '%s'") % end
else:
error_msg = _("String '%s' not found") % end
test_info[self.name]['end'] = '<b%s>%s</b>' % (ERROR_STYLE, error_msg)
restrict_text = restrict_text
restrict_text_filtered = regex.sub('\\d{1,2}%s\\d{1,2}\\s?%%' % regex.escape(decimal_sep), '', restrict_text)
restrict_text_filtered = regex.sub('\\d{1,3}\\s?%', '', restrict_text_filtered)
restrict_text_filtered = regex.sub('[Cc]apital.{1,30}(?:\\d{1,3}%s)*\\d{1,3}' % regex.escape(thousand_sep), '', restrict_text_filtered)
res_regex = regex.findall(pattern, restrict_text_filtered)
valid_amounts = []
for amount_raw in res_regex:
if thousand_sep_pattern:
amount_raw = regex.sub(thousand_sep_pattern, '', amount_raw)
if decimal_places:
amount_raw_list = list(amount_raw)
amount_raw_list[-decimal_places - 1] = '.'
amount_raw = ''.join(amount_raw_list)
try:
valid_amounts.append(float(amount_raw))
except ValueError:
logger.debug('%s is an invalid float', amount_raw)
test_info[self.name].update({'res_regex': res_regex, 'valid_list': valid_amounts})
raise_if_none = not test_info['test_mode'] and True or False
assert isinstance(valid_amounts, list)
if not valid_amounts:
if raise_if_none:
raise UserError(_("No valid data extracted for field '%s'.") % self.name)
else:
amount = None
if self.extract_rule in ('min', 'max', 'position_min', 'position_max'):
data_list_sorted = list(valid_amounts)
data_list_sorted.sort()
if self.name.startswith('date'):
test_info[self.name]['sorted_list'] = [format_date(self.env, date_dt) for date_dt in data_list_sorted]
else:
test_info[self.name]['sorted_list'] = data_list_sorted
if self.extract_rule == 'max':
amount = data_list_sorted[-1]
elif self.extract_rule == 'min':
amount = data_list_sorted[0]
elif self.extract_rule in ('position_min', 'position_max'):
if len(valid_amounts) < self.position:
error_msg = _("Partner '%s' is configured with an extract rule '%s' with position %d for field '%s' but the list of extracted valid data only has %d entries.") % (self.partner_id.display_name, test_info['extract_rule_sel'][self.extract_rule], self.position, test_info['field_name_sel'][self.name], len(valid_amounts))
if raise_if_none:
raise UserError(error_msg)
else:
test_info[self.name]['error_msg'] = error_msg
amount = None
sign = self.extract_rule == 'position_min' and 1 or -1
position = self.position
if self.extract_rule == 'position_min':
position -= 1
amount = data_list_sorted[position * sign]
elif self.extract_rule == 'first':
amount = valid_amounts[0]
elif self.extract_rule == 'last':
amount = valid_amounts[-1]
elif self.extract_rule in ('position_start', 'position_end'):
if len(valid_amounts) < self.position:
error_msg = _("Partner '%s' is configured with an extract rule '%s' with position %d for field '%s' but the list of extracted valid data only has %d entries.") % (self.partner_id.display_name, test_info['extract_rule_sel'][self.extract_rule], self.position, test_info['field_name_sel'][self.name], len(valid_amounts))
if raise_if_none:
raise UserError(error_msg)
else:
test_info[self.name]['error_msg'] = error_msg
amount = None
sign = self.extract_rule == 'position_start' and 1 or -1
position = self.position
if self.extract_rule == 'position_start':
position -= 1
amount = valid_amounts[position * sign]
else:
raise UserError(_('Bad configuration'))
parsed_inv[self.name] = amount
|
edi
|
positive
|
def evaluate(self, train_index, test_index):
inner_pool = ThreadPool(self._algorithm_params['n_threads'])
async_result = {}
for i in range(self._algorithm_params['grid_search_folds']):
async_result[i] = {}
x_train = self._x[train_index]
y_train = self._y[train_index]
skf = StratifiedKFold(n_splits=self._algorithm_params['grid_search_folds'], shuffle=True)
inner_cv = list(skf.split(np.zeros(len(y_train)), y_train))
parameters_combinations = list(itertools.product(self._algorithm_params['n_estimators_range'], self._algorithm_params['max_depth_range'], self._algorithm_params['min_samples_split_range'], self._algorithm_params['max_features_range']))
for i in range(len(inner_cv)):
(inner_train_index, inner_test_index) = inner_cv[i]
x_train_inner = x_train[inner_train_index]
x_test_inner = x_train[inner_test_index]
y_train_inner = y_train[inner_train_index]
y_test_inner = y_train[inner_test_index]
for parameters in parameters_combinations:
async_result[i][parameters] = inner_pool.apply_async(self._grid_search, (x_train_inner, x_test_inner, y_train_inner, y_test_inner, parameters[0], parameters[1], parameters[2], parameters[3]))
inner_pool.close()
inner_pool.join()
<DeepExtract>
c_values = []
accuracies = []
for fold in async_result.keys():
best_c = -1
best_acc = -1
for (c, async_acc) in async_result[fold].items():
acc = async_acc.get()
if acc > best_acc:
best_c = c
best_acc = acc
c_values.append(best_c)
accuracies.append(best_acc)
best_acc = np.mean(accuracies)
best_c = np.power(10, np.mean(np.log10(c_values)))
best_parameter = {'c': best_c, 'balanced_accuracy': best_acc}
</DeepExtract>
x_test = self._x[test_index]
y_test = self._y[test_index]
<DeepExtract>
if self._algorithm_params['balanced']:
classifier = RandomForestClassifier(n_estimators=best_parameter['n_estimators'], max_depth=best_parameter['max_depth'], min_samples_split=best_parameter['min_samples_split'], max_features=best_parameter['max_features'], class_weight='balanced', n_jobs=self._algorithm_params['n_threads'])
else:
classifier = RandomForestClassifier(n_estimators=best_parameter['n_estimators'], max_depth=best_parameter['max_depth'], min_samples_split=best_parameter['min_samples_split'], max_features=best_parameter['max_features'], n_jobs=self._algorithm_params['n_threads'])
classifier.fit(x_train, y_train)
y_hat_train = classifier.predict(x_train)
y_hat = classifier.predict(x_test)
proba_test = classifier.predict_proba(x_test)[:, 1]
auc = roc_auc_score(y_test, proba_test)
(_, y_hat, auc, y_hat_train) = (classifier, y_hat, auc, y_hat_train)
</DeepExtract>
result = dict()
result['best_parameter'] = best_parameter
result['evaluation'] = utils.evaluate_prediction(y_test, y_hat)
result['evaluation_train'] = utils.evaluate_prediction(y_train, y_hat_train)
result['y_hat'] = y_hat
result['y_hat_train'] = y_hat_train
result['y'] = y_test
result['y_train'] = y_train
result['y_index'] = test_index
result['x_index'] = train_index
result['auc'] = auc
return result
|
def evaluate(self, train_index, test_index):
inner_pool = ThreadPool(self._algorithm_params['n_threads'])
async_result = {}
for i in range(self._algorithm_params['grid_search_folds']):
async_result[i] = {}
x_train = self._x[train_index]
y_train = self._y[train_index]
skf = StratifiedKFold(n_splits=self._algorithm_params['grid_search_folds'], shuffle=True)
inner_cv = list(skf.split(np.zeros(len(y_train)), y_train))
parameters_combinations = list(itertools.product(self._algorithm_params['n_estimators_range'], self._algorithm_params['max_depth_range'], self._algorithm_params['min_samples_split_range'], self._algorithm_params['max_features_range']))
for i in range(len(inner_cv)):
(inner_train_index, inner_test_index) = inner_cv[i]
x_train_inner = x_train[inner_train_index]
x_test_inner = x_train[inner_test_index]
y_train_inner = y_train[inner_train_index]
y_test_inner = y_train[inner_test_index]
for parameters in parameters_combinations:
async_result[i][parameters] = inner_pool.apply_async(self._grid_search, (x_train_inner, x_test_inner, y_train_inner, y_test_inner, parameters[0], parameters[1], parameters[2], parameters[3]))
inner_pool.close()
inner_pool.join()
c_values = []
accuracies = []
for fold in async_result.keys():
best_c = -1
best_acc = -1
for (c, async_acc) in async_result[fold].items():
acc = async_acc.get()
if acc > best_acc:
best_c = c
best_acc = acc
c_values.append(best_c)
accuracies.append(best_acc)
best_acc = np.mean(accuracies)
best_c = np.power(10, np.mean(np.log10(c_values)))
best_parameter = {'c': best_c, 'balanced_accuracy': best_acc}
x_test = self._x[test_index]
y_test = self._y[test_index]
if self._algorithm_params['balanced']:
classifier = RandomForestClassifier(n_estimators=best_parameter['n_estimators'], max_depth=best_parameter['max_depth'], min_samples_split=best_parameter['min_samples_split'], max_features=best_parameter['max_features'], class_weight='balanced', n_jobs=self._algorithm_params['n_threads'])
else:
classifier = RandomForestClassifier(n_estimators=best_parameter['n_estimators'], max_depth=best_parameter['max_depth'], min_samples_split=best_parameter['min_samples_split'], max_features=best_parameter['max_features'], n_jobs=self._algorithm_params['n_threads'])
classifier.fit(x_train, y_train)
y_hat_train = classifier.predict(x_train)
y_hat = classifier.predict(x_test)
proba_test = classifier.predict_proba(x_test)[:, 1]
auc = roc_auc_score(y_test, proba_test)
(_, y_hat, auc, y_hat_train) = (classifier, y_hat, auc, y_hat_train)
result = dict()
result['best_parameter'] = best_parameter
result['evaluation'] = utils.evaluate_prediction(y_test, y_hat)
result['evaluation_train'] = utils.evaluate_prediction(y_train, y_hat_train)
result['y_hat'] = y_hat
result['y_hat_train'] = y_hat_train
result['y'] = y_test
result['y_train'] = y_train
result['y_index'] = test_index
result['x_index'] = train_index
result['auc'] = auc
return result
|
clinica
|
positive
|
def create_df_backtest_performance_tuple(self, fitcasesnd, fitcasesd, testcasesnd, testcasesd, n_days_fitting, n_days_test):
<DeepExtract>
(fitcasesnd, self.x_sol_final[15, :len(fitcasesnd)]) = (np.array(fitcasesnd), np.array(self.x_sol_final[15, :len(fitcasesnd)]))
mae = np.mean(np.abs(fitcasesnd - self.x_sol_final[15, :len(fitcasesnd)]))
mape = np.mean(np.abs((fitcasesnd - self.x_sol_final[15, :len(fitcasesnd)])[fitcasesnd > 0] / fitcasesnd[fitcasesnd > 0])) * 100
(mae_train_nondeath, mape_train_nondeath) = (mae, mape)
</DeepExtract>
<DeepExtract>
(fitcasesnd, self.x_sol_final[15, :len(fitcasesnd)]) = (np.array(fitcasesnd), np.array(self.x_sol_final[15, :len(fitcasesnd)]))
mpe = np.mean((fitcasesnd - self.x_sol_final[15, :len(fitcasesnd)])[fitcasesnd > 0] / fitcasesnd[fitcasesnd > 0]) * 100
sign = np.sign(mpe)
sign_mape_train_nondeath = sign
</DeepExtract>
rmse_train_nondeath = mean_squared_error(y_true=fitcasesnd, y_pred=self.x_sol_final[15, :len(fitcasesnd)], squared=False)
<DeepExtract>
(fitcasesd, self.x_sol_final[14, :len(fitcasesd)]) = (np.array(fitcasesd), np.array(self.x_sol_final[14, :len(fitcasesd)]))
mae = np.mean(np.abs(fitcasesd - self.x_sol_final[14, :len(fitcasesd)]))
mape = np.mean(np.abs((fitcasesd - self.x_sol_final[14, :len(fitcasesd)])[fitcasesd > 0] / fitcasesd[fitcasesd > 0])) * 100
(mae_train_death, mape_train_death) = (mae, mape)
</DeepExtract>
<DeepExtract>
(fitcasesd, self.x_sol_final[14, :len(fitcasesd)]) = (np.array(fitcasesd), np.array(self.x_sol_final[14, :len(fitcasesd)]))
mpe = np.mean((fitcasesd - self.x_sol_final[14, :len(fitcasesd)])[fitcasesd > 0] / fitcasesd[fitcasesd > 0]) * 100
sign = np.sign(mpe)
sign_mape_train_death = sign
</DeepExtract>
rmse_train_death = mean_squared_error(y_true=fitcasesd, y_pred=self.x_sol_final[14, :len(fitcasesd)], squared=False)
<DeepExtract>
(testcasesnd, self.x_sol_final[15, -len(testcasesnd):]) = (np.array(testcasesnd), np.array(self.x_sol_final[15, -len(testcasesnd):]))
mae = np.mean(np.abs(testcasesnd - self.x_sol_final[15, -len(testcasesnd):]))
mape = np.mean(np.abs((testcasesnd - self.x_sol_final[15, -len(testcasesnd):])[testcasesnd > 0] / testcasesnd[testcasesnd > 0])) * 100
(mae_test_nondeath, mape_test_nondeath) = (mae, mape)
</DeepExtract>
<DeepExtract>
(testcasesnd, self.x_sol_final[15, -len(testcasesnd):]) = (np.array(testcasesnd), np.array(self.x_sol_final[15, -len(testcasesnd):]))
mpe = np.mean((testcasesnd - self.x_sol_final[15, -len(testcasesnd):])[testcasesnd > 0] / testcasesnd[testcasesnd > 0]) * 100
sign = np.sign(mpe)
sign_mape_test_nondeath = sign
</DeepExtract>
rmse_test_nondeath = mean_squared_error(y_true=fitcasesnd, y_pred=self.x_sol_final[15, -len(fitcasesnd):], squared=False)
<DeepExtract>
(testcasesd, self.x_sol_final[14, -len(testcasesd):]) = (np.array(testcasesd), np.array(self.x_sol_final[14, -len(testcasesd):]))
mae = np.mean(np.abs(testcasesd - self.x_sol_final[14, -len(testcasesd):]))
mape = np.mean(np.abs((testcasesd - self.x_sol_final[14, -len(testcasesd):])[testcasesd > 0] / testcasesd[testcasesd > 0])) * 100
(mae_test_death, mape_test_death) = (mae, mape)
</DeepExtract>
<DeepExtract>
(testcasesd, self.x_sol_final[14, -len(testcasesd):]) = (np.array(testcasesd), np.array(self.x_sol_final[14, -len(testcasesd):]))
mpe = np.mean((testcasesd - self.x_sol_final[14, -len(testcasesd):])[testcasesd > 0] / testcasesd[testcasesd > 0]) * 100
sign = np.sign(mpe)
sign_mape_test_death = sign
</DeepExtract>
rmse_test_death = mean_squared_error(y_true=fitcasesd, y_pred=self.x_sol_final[14, -len(fitcasesd):], squared=False)
true_last_train_cases = fitcasesnd[-1]
pred_last_train_cases = self.x_sol_final[15, len(fitcasesnd) - 1]
<DeepExtract>
delta_true = np.array([y_true_i - true_last_train_cases for y_true_i in testcasesnd])
delta_pred = np.array([y_pred_i - pred_last_train_cases for y_pred_i in self.x_sol_final[15, -len(testcasesnd):]])
mape_daily_delta = np.mean(np.abs(delta_true - delta_pred)[delta_true > 0] / delta_true[delta_true > 0]) * 100
mape_daily_delta_cases = mape_daily_delta
</DeepExtract>
true_last_train_deaths = fitcasesd[-1]
pred_last_train_deaths = self.x_sol_final[14, len(fitcasesd) - 1]
<DeepExtract>
delta_true = np.array([y_true_i - true_last_train_deaths for y_true_i in testcasesd])
delta_pred = np.array([y_pred_i - pred_last_train_deaths for y_pred_i in self.x_sol_final[14, -len(testcasesd):]])
mape_daily_delta = np.mean(np.abs(delta_true - delta_pred)[delta_true > 0] / delta_true[delta_true > 0]) * 100
mape_daily_delta_deaths = mape_daily_delta
</DeepExtract>
df_backtest_performance_tuple = pd.DataFrame({'continent': [self.continent], 'country': [self.country], 'province': [self.province], 'train_start_date': [self.date_day_since100], 'train_end_date': [self.date_day_since100 + timedelta(days=n_days_fitting - 1)], 'train_mape_cases': [mape_train_nondeath], 'train_mape_deaths': [mape_train_death], 'train_sign_mpe_cases': [sign_mape_train_nondeath], 'train_sign_mpe_deaths': [sign_mape_train_death], 'train_mae_cases': [mae_train_nondeath], 'train_mae_deaths': [mae_train_death], 'train_rmse_cases': [rmse_train_nondeath], 'train_rmse_deaths': [rmse_train_death], 'test_start_date': [self.date_day_since100 + timedelta(days=n_days_fitting)], 'test_end_date': [self.date_day_since100 + timedelta(days=n_days_fitting + n_days_test - 1)], 'test_mape_cases': [mape_test_nondeath], 'test_mape_deaths': [mape_test_death], 'test_sign_mpe_cases': [sign_mape_test_nondeath], 'test_sign_mpe_deaths': [sign_mape_test_death], 'test_mae_cases': [mae_test_nondeath], 'test_mae_deaths': [mae_test_death], 'test_rmse_cases': [rmse_test_nondeath], 'test_rmse_deaths': [rmse_test_death], 'mape_daily_delta_cases': [mape_daily_delta_cases], 'mape_daily_delta_deaths': [mape_daily_delta_deaths]})
for col in ['train_start_date', 'train_end_date', 'test_start_date', 'test_end_date']:
df_backtest_performance_tuple[col] = df_backtest_performance_tuple[col].apply(lambda x: str(x.date()))
return df_backtest_performance_tuple
|
def create_df_backtest_performance_tuple(self, fitcasesnd, fitcasesd, testcasesnd, testcasesd, n_days_fitting, n_days_test):
(fitcasesnd, self.x_sol_final[15, :len(fitcasesnd)]) = (np.array(fitcasesnd), np.array(self.x_sol_final[15, :len(fitcasesnd)]))
mae = np.mean(np.abs(fitcasesnd - self.x_sol_final[15, :len(fitcasesnd)]))
mape = np.mean(np.abs((fitcasesnd - self.x_sol_final[15, :len(fitcasesnd)])[fitcasesnd > 0] / fitcasesnd[fitcasesnd > 0])) * 100
(mae_train_nondeath, mape_train_nondeath) = (mae, mape)
(fitcasesnd, self.x_sol_final[15, :len(fitcasesnd)]) = (np.array(fitcasesnd), np.array(self.x_sol_final[15, :len(fitcasesnd)]))
mpe = np.mean((fitcasesnd - self.x_sol_final[15, :len(fitcasesnd)])[fitcasesnd > 0] / fitcasesnd[fitcasesnd > 0]) * 100
sign = np.sign(mpe)
sign_mape_train_nondeath = sign
rmse_train_nondeath = mean_squared_error(y_true=fitcasesnd, y_pred=self.x_sol_final[15, :len(fitcasesnd)], squared=False)
(fitcasesd, self.x_sol_final[14, :len(fitcasesd)]) = (np.array(fitcasesd), np.array(self.x_sol_final[14, :len(fitcasesd)]))
mae = np.mean(np.abs(fitcasesd - self.x_sol_final[14, :len(fitcasesd)]))
mape = np.mean(np.abs((fitcasesd - self.x_sol_final[14, :len(fitcasesd)])[fitcasesd > 0] / fitcasesd[fitcasesd > 0])) * 100
(mae_train_death, mape_train_death) = (mae, mape)
(fitcasesd, self.x_sol_final[14, :len(fitcasesd)]) = (np.array(fitcasesd), np.array(self.x_sol_final[14, :len(fitcasesd)]))
mpe = np.mean((fitcasesd - self.x_sol_final[14, :len(fitcasesd)])[fitcasesd > 0] / fitcasesd[fitcasesd > 0]) * 100
sign = np.sign(mpe)
sign_mape_train_death = sign
rmse_train_death = mean_squared_error(y_true=fitcasesd, y_pred=self.x_sol_final[14, :len(fitcasesd)], squared=False)
(testcasesnd, self.x_sol_final[15, -len(testcasesnd):]) = (np.array(testcasesnd), np.array(self.x_sol_final[15, -len(testcasesnd):]))
mae = np.mean(np.abs(testcasesnd - self.x_sol_final[15, -len(testcasesnd):]))
mape = np.mean(np.abs((testcasesnd - self.x_sol_final[15, -len(testcasesnd):])[testcasesnd > 0] / testcasesnd[testcasesnd > 0])) * 100
(mae_test_nondeath, mape_test_nondeath) = (mae, mape)
(testcasesnd, self.x_sol_final[15, -len(testcasesnd):]) = (np.array(testcasesnd), np.array(self.x_sol_final[15, -len(testcasesnd):]))
mpe = np.mean((testcasesnd - self.x_sol_final[15, -len(testcasesnd):])[testcasesnd > 0] / testcasesnd[testcasesnd > 0]) * 100
sign = np.sign(mpe)
sign_mape_test_nondeath = sign
rmse_test_nondeath = mean_squared_error(y_true=fitcasesnd, y_pred=self.x_sol_final[15, -len(fitcasesnd):], squared=False)
(testcasesd, self.x_sol_final[14, -len(testcasesd):]) = (np.array(testcasesd), np.array(self.x_sol_final[14, -len(testcasesd):]))
mae = np.mean(np.abs(testcasesd - self.x_sol_final[14, -len(testcasesd):]))
mape = np.mean(np.abs((testcasesd - self.x_sol_final[14, -len(testcasesd):])[testcasesd > 0] / testcasesd[testcasesd > 0])) * 100
(mae_test_death, mape_test_death) = (mae, mape)
(testcasesd, self.x_sol_final[14, -len(testcasesd):]) = (np.array(testcasesd), np.array(self.x_sol_final[14, -len(testcasesd):]))
mpe = np.mean((testcasesd - self.x_sol_final[14, -len(testcasesd):])[testcasesd > 0] / testcasesd[testcasesd > 0]) * 100
sign = np.sign(mpe)
sign_mape_test_death = sign
rmse_test_death = mean_squared_error(y_true=fitcasesd, y_pred=self.x_sol_final[14, -len(fitcasesd):], squared=False)
true_last_train_cases = fitcasesnd[-1]
pred_last_train_cases = self.x_sol_final[15, len(fitcasesnd) - 1]
delta_true = np.array([y_true_i - true_last_train_cases for y_true_i in testcasesnd])
delta_pred = np.array([y_pred_i - pred_last_train_cases for y_pred_i in self.x_sol_final[15, -len(testcasesnd):]])
mape_daily_delta = np.mean(np.abs(delta_true - delta_pred)[delta_true > 0] / delta_true[delta_true > 0]) * 100
mape_daily_delta_cases = mape_daily_delta
true_last_train_deaths = fitcasesd[-1]
pred_last_train_deaths = self.x_sol_final[14, len(fitcasesd) - 1]
delta_true = np.array([y_true_i - true_last_train_deaths for y_true_i in testcasesd])
delta_pred = np.array([y_pred_i - pred_last_train_deaths for y_pred_i in self.x_sol_final[14, -len(testcasesd):]])
mape_daily_delta = np.mean(np.abs(delta_true - delta_pred)[delta_true > 0] / delta_true[delta_true > 0]) * 100
mape_daily_delta_deaths = mape_daily_delta
df_backtest_performance_tuple = pd.DataFrame({'continent': [self.continent], 'country': [self.country], 'province': [self.province], 'train_start_date': [self.date_day_since100], 'train_end_date': [self.date_day_since100 + timedelta(days=n_days_fitting - 1)], 'train_mape_cases': [mape_train_nondeath], 'train_mape_deaths': [mape_train_death], 'train_sign_mpe_cases': [sign_mape_train_nondeath], 'train_sign_mpe_deaths': [sign_mape_train_death], 'train_mae_cases': [mae_train_nondeath], 'train_mae_deaths': [mae_train_death], 'train_rmse_cases': [rmse_train_nondeath], 'train_rmse_deaths': [rmse_train_death], 'test_start_date': [self.date_day_since100 + timedelta(days=n_days_fitting)], 'test_end_date': [self.date_day_since100 + timedelta(days=n_days_fitting + n_days_test - 1)], 'test_mape_cases': [mape_test_nondeath], 'test_mape_deaths': [mape_test_death], 'test_sign_mpe_cases': [sign_mape_test_nondeath], 'test_sign_mpe_deaths': [sign_mape_test_death], 'test_mae_cases': [mae_test_nondeath], 'test_mae_deaths': [mae_test_death], 'test_rmse_cases': [rmse_test_nondeath], 'test_rmse_deaths': [rmse_test_death], 'mape_daily_delta_cases': [mape_daily_delta_cases], 'mape_daily_delta_deaths': [mape_daily_delta_deaths]})
for col in ['train_start_date', 'train_end_date', 'test_start_date', 'test_end_date']:
df_backtest_performance_tuple[col] = df_backtest_performance_tuple[col].apply(lambda x: str(x.date()))
return df_backtest_performance_tuple
|
DELPHI
|
positive
|
def walk_jump_destinations(self):
"""
When the entire dialplan has loaded we can walk over all Goto
destinations and check for their existence.
"""
valid_destinations = []
for (context, extension, priority, where) in self.jump_destinations:
if isinstance(context, Var):
if isinstance(priority, str):
if not self.has_label(priority):
E_DP_GOTO_NOLABEL(where, context=context, exten=extension, label=priority)
else:
try:
<DeepExtract>
found_context = self.contexts_by_name[context]
</DeepExtract>
except KeyError:
E_DP_GOTO_NOCONTEXT(where, context=context, exten=extension, prio=priority)
else:
if isinstance(extension, Var):
if isinstance(priority, str):
if not found_context.has_label(priority):
E_DP_GOTO_CONTEXT_NOLABEL(where, context=context, exten=extension, label=priority)
else:
found_extension = found_context.match_pattern(extension, priority)
if found_extension:
valid_destinations.append(found_extension)
else:
W_DP_GOTO_CONTEXT_NOEXTEN(where, context=context, exten=extension, prio=priority)
|
def walk_jump_destinations(self):
"""
When the entire dialplan has loaded we can walk over all Goto
destinations and check for their existence.
"""
valid_destinations = []
for (context, extension, priority, where) in self.jump_destinations:
if isinstance(context, Var):
if isinstance(priority, str):
if not self.has_label(priority):
E_DP_GOTO_NOLABEL(where, context=context, exten=extension, label=priority)
else:
try:
found_context = self.contexts_by_name[context]
except KeyError:
E_DP_GOTO_NOCONTEXT(where, context=context, exten=extension, prio=priority)
else:
if isinstance(extension, Var):
if isinstance(priority, str):
if not found_context.has_label(priority):
E_DP_GOTO_CONTEXT_NOLABEL(where, context=context, exten=extension, label=priority)
else:
found_extension = found_context.match_pattern(extension, priority)
if found_extension:
valid_destinations.append(found_extension)
else:
W_DP_GOTO_CONTEXT_NOEXTEN(where, context=context, exten=extension, prio=priority)
|
asterisklint
|
positive
|
def run(self):
<DeepExtract>
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
my_path = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(my_path)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir and 'VERSIONEER_PEP518' not in globals():
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(my_path), versioneer_py))
except NameError:
pass
root = root
</DeepExtract>
<DeepExtract>
root = Path(root)
pyproject_toml = root / 'pyproject.toml'
setup_cfg = root / 'setup.cfg'
section = None
if pyproject_toml.exists() and have_tomli:
try:
with open(pyproject_toml, 'rb') as fobj:
pp = tomli.load(fobj)
section = pp['tool']['versioneer']
except (tomli.TOMLDecodeError, KeyError):
pass
if not section:
parser = configparser.ConfigParser()
with open(setup_cfg) as cfg_file:
parser.read_file(cfg_file)
parser.get('versioneer', 'VCS')
section = parser['versioneer']
cfg = VersioneerConfig()
cfg.VCS = section['VCS']
cfg.style = section.get('style', '')
cfg.versionfile_source = section.get('versionfile_source')
cfg.versionfile_build = section.get('versionfile_build')
cfg.tag_prefix = section.get('tag_prefix')
if cfg.tag_prefix in ("''", '""', None):
cfg.tag_prefix = ''
cfg.parentdir_prefix = section.get('parentdir_prefix')
cfg.verbose = section.get('verbose')
cfg = cfg
</DeepExtract>
<DeepExtract>
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
versions = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print('got version from VCS %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print('got version from parentdir %s' % ver)
versions = ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
versions = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version', 'date': None}
</DeepExtract>
target_versionfile = cfg.versionfile_source
print('UPDATING %s' % target_versionfile)
<DeepExtract>
os.unlink(target_versionfile)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(',', ': '))
with open(target_versionfile, 'w') as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (target_versionfile, versions['version']))
</DeepExtract>
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, 'w') as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source})
|
def run(self):
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
my_path = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(my_path)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir and 'VERSIONEER_PEP518' not in globals():
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(my_path), versioneer_py))
except NameError:
pass
root = root
root = Path(root)
pyproject_toml = root / 'pyproject.toml'
setup_cfg = root / 'setup.cfg'
section = None
if pyproject_toml.exists() and have_tomli:
try:
with open(pyproject_toml, 'rb') as fobj:
pp = tomli.load(fobj)
section = pp['tool']['versioneer']
except (tomli.TOMLDecodeError, KeyError):
pass
if not section:
parser = configparser.ConfigParser()
with open(setup_cfg) as cfg_file:
parser.read_file(cfg_file)
parser.get('versioneer', 'VCS')
section = parser['versioneer']
cfg = VersioneerConfig()
cfg.VCS = section['VCS']
cfg.style = section.get('style', '')
cfg.versionfile_source = section.get('versionfile_source')
cfg.versionfile_build = section.get('versionfile_build')
cfg.tag_prefix = section.get('tag_prefix')
if cfg.tag_prefix in ("''", '""', None):
cfg.tag_prefix = ''
cfg.parentdir_prefix = section.get('parentdir_prefix')
cfg.verbose = section.get('verbose')
cfg = cfg
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
versions = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print('got version from VCS %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print('got version from parentdir %s' % ver)
versions = ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
versions = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version', 'date': None}
target_versionfile = cfg.versionfile_source
print('UPDATING %s' % target_versionfile)
os.unlink(target_versionfile)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(',', ': '))
with open(target_versionfile, 'w') as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (target_versionfile, versions['version']))
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, 'w') as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source})
|
dask-sql
|
positive
|
def prepare_data(self, collaborative=False):
self.project = prepare_project(SEQUENCE_LABELING, collaborative_annotation=collaborative)
self.example1 = mommy.make('ExportedExample', project=self.project.item, text='confirmed')
self.span1 = mommy.make('ExportedSpan', example=self.example1, user=self.project.admin, start_offset=0, end_offset=1)
self.span2 = mommy.make('ExportedSpan', example=self.example1, user=self.project.annotator, start_offset=1, end_offset=2)
mommy.make('ExampleState', example=self.example1, confirmed_by=self.project.admin)
self.example2 = mommy.make('ExportedExample', project=self.project.item, text='unconfirmed')
self.comment1 = mommy.make('ExportedComment', example=self.example1, user=self.project.admin)
self.comment2 = mommy.make('ExportedComment', example=self.example1, user=self.project.annotator)
<DeepExtract>
d = self.example1.to_dict()
d['text'] = d.pop(DATA)
self.data1 = d
</DeepExtract>
<DeepExtract>
d = self.example2.to_dict()
d['text'] = d.pop(DATA)
self.data2 = d
</DeepExtract>
|
def prepare_data(self, collaborative=False):
self.project = prepare_project(SEQUENCE_LABELING, collaborative_annotation=collaborative)
self.example1 = mommy.make('ExportedExample', project=self.project.item, text='confirmed')
self.span1 = mommy.make('ExportedSpan', example=self.example1, user=self.project.admin, start_offset=0, end_offset=1)
self.span2 = mommy.make('ExportedSpan', example=self.example1, user=self.project.annotator, start_offset=1, end_offset=2)
mommy.make('ExampleState', example=self.example1, confirmed_by=self.project.admin)
self.example2 = mommy.make('ExportedExample', project=self.project.item, text='unconfirmed')
self.comment1 = mommy.make('ExportedComment', example=self.example1, user=self.project.admin)
self.comment2 = mommy.make('ExportedComment', example=self.example1, user=self.project.annotator)
d = self.example1.to_dict()
d['text'] = d.pop(DATA)
self.data1 = d
d = self.example2.to_dict()
d['text'] = d.pop(DATA)
self.data2 = d
|
doccano
|
positive
|
def show_most_common_types(limit=10, objects=None):
"""Print the table of types of most common instances.
The caveats documented in :func:`typestats` apply.
Example:
>>> show_most_common_types(limit=5)
tuple 8959
function 2442
wrapper_descriptor 1048
dict 953
builtin_function_or_method 800
.. versionadded:: 1.1
.. versionchanged:: 1.7
New parameter: ``objects``.
"""
<DeepExtract>
stats = sorted(typestats(objects).items(), key=operator.itemgetter(1), reverse=True)
if limit:
stats = stats[:limit]
stats = stats
</DeepExtract>
width = max((len(name) for (name, count) in stats))
for (name, count) in stats:
print('%-*s %i' % (width, name, count))
|
def show_most_common_types(limit=10, objects=None):
"""Print the table of types of most common instances.
The caveats documented in :func:`typestats` apply.
Example:
>>> show_most_common_types(limit=5)
tuple 8959
function 2442
wrapper_descriptor 1048
dict 953
builtin_function_or_method 800
.. versionadded:: 1.1
.. versionchanged:: 1.7
New parameter: ``objects``.
"""
stats = sorted(typestats(objects).items(), key=operator.itemgetter(1), reverse=True)
if limit:
stats = stats[:limit]
stats = stats
width = max((len(name) for (name, count) in stats))
for (name, count) in stats:
print('%-*s %i' % (width, name, count))
|
exaproxy
|
positive
|