| before (stringlengths 0–955k) | after (stringlengths 0–877k) | repo (stringlengths 1–74) | type (stringclasses: 1 value) |
|---|---|---|---|
def ucb(f):
"""Read unsigned char byte from binary file
"""
if isinstance(f, list):
if len(f) < 1:
raise EOFError()
<DeepExtract>
a = []
if len(f) < 1:
(b, f) = (a, f)
for i in range(1):
a.append(f.pop(0))
(b, f) = (a, f)
</DeepExtract>
return struct.unpack('B', ''.join(b))[0]
else:
_f = f.read(1)
if len(_f) < 1:
raise EOFError()
return struct.unpack('B', _f)[0]
|
def ucb(f):
"""Read unsigned char byte from binary file
"""
if isinstance(f, list):
if len(f) < 1:
raise EOFError()
a = []
if len(f) < 1:
(b, f) = (a, f)
for i in range(1):
a.append(f.pop(0))
(b, f) = (a, f)
return struct.unpack('B', ''.join(b))[0]
else:
_f = f.read(1)
if len(_f) < 1:
raise EOFError()
return struct.unpack('B', _f)[0]
|
arib
|
positive
|
def base_test_spin_flip(self, case, method, block, max_guesses=10):
ground_state = adcc.LazyMp(cache.refstate[case])
matrix = adcc.AdcMatrix(method, ground_state)
for n_guesses in range(1, max_guesses + 1):
guesses = adcc.guess.guesses_from_diagonal(matrix, n_guesses, block=block, spin_change=-1, spin_block_symmetrisation='none')
assert len(guesses) == n_guesses
for gs in guesses:
<DeepExtract>
assert not matrix.is_core_valence_separated
mospaces = matrix.mospaces
nCa = noa = mospaces.n_orbs_alpha('o1')
nCb = nob = mospaces.n_orbs_beta('o1')
nva = mospaces.n_orbs_alpha('v1')
nvb = mospaces.n_orbs_beta('v1')
gts = gs.ph.to_ndarray()
assert gts.shape == (nCa + nCb, nva + nvb)
assert np.max(np.abs(gts[:nCa, :nva])) == 0
assert np.max(np.abs(gts[nCa:, :nva])) == 0
assert np.max(np.abs(gts[nCa:, nva:])) == 0
if 'pphh' not in matrix.axis_blocks:
return
gtd = gs.pphh.to_ndarray()
assert gtd.shape == (noa + nob, nCa + nCb, nva + nvb, nva + nvb)
assert np.max(np.abs(gtd[:noa, :nCa, :nva, :nva])) == 0
assert np.max(np.abs(gtd[:noa, :nCa, nva:, nva:])) == 0
assert np.max(np.abs(gtd[:noa, nCa:, :nva, :nva])) == 0
assert np.max(np.abs(gtd[:noa, nCa:, :nva, nva:])) == 0
assert np.max(np.abs(gtd[:noa, nCa:, nva:, :nva])) == 0
assert np.max(np.abs(gtd[noa:, :nCa, :nva, :nva])) == 0
assert np.max(np.abs(gtd[noa:, :nCa, :nva, nva:])) == 0
assert np.max(np.abs(gtd[noa:, :nCa, nva:, :nva])) == 0
assert np.max(np.abs(gtd[noa:, nCa:, :nva, :nva])) == 0
assert np.max(np.abs(gtd[noa:, nCa:, :nva, nva:])) == 0
assert np.max(np.abs(gtd[noa:, nCa:, nva:, :nva])) == 0
assert np.max(np.abs(gtd[noa:, nCa:, nva:, nva:])) == 0
assert_array_equal(gtd.transpose((0, 1, 3, 2)), -gtd)
if not matrix.is_core_valence_separated:
assert_array_equal(gtd.transpose((1, 0, 2, 3)), -gtd)
if block == 'ph':
assert np.max(np.abs(gtd[:noa, :nCa, :nva, nva:])) == 0
assert np.max(np.abs(gtd[:noa, :nCa, nva:, :nva])) == 0
assert np.max(np.abs(gtd[:noa, nCa:, nva:, nva:])) == 0
assert np.max(np.abs(gtd[noa:, :nCa, nva:, nva:])) == 0
assert np.max(np.abs(gts[:nCa, nva:])) > 0
elif block == 'pphh':
assert np.max(np.abs(gts[:nCa, nva:])) == 0
has_aaab = np.max(np.abs(gtd[:noa, :nCa, :nva, nva:])) > 0
has_aaba = np.max(np.abs(gtd[:noa, :nCa, nva:, :nva])) > 0
has_abbb = np.max(np.abs(gtd[:noa, nCa:, nva:, nva:])) > 0
has_babb = np.max(np.abs(gtd[noa:, :nCa, nva:, nva:])) > 0
assert has_aaab or has_aaba or has_abbb or has_babb
</DeepExtract>
<DeepExtract>
for (i, gi) in enumerate(guesses):
for (j, gj) in enumerate(guesses):
ref = 1 if i == j else 0
assert adcc.dot(gi, gj) == approx(ref)
</DeepExtract>
<DeepExtract>
mospaces = matrix.mospaces
nCa = noa = mospaces.n_orbs_alpha('o1')
nva = mospaces.n_orbs_alpha('v1')
if mospaces.has_core_occupied_space:
nCa = mospaces.n_orbs_alpha('o2')
sidcs = None
if block == 'ph':
diagonal = matrix.diagonal().ph.to_ndarray()
sidcs = np.dstack(np.unravel_index(np.argsort(diagonal.ravel()), diagonal.shape))
assert sidcs.shape[0] == 1
if True:
sidcs = [idx for idx in sidcs[0] if idx[0] < nCa and idx[1] >= nva]
else:
sidcs = [idx for idx in sidcs[0] if any((idx[0] >= nCa and idx[1] >= nva, idx[0] < nCa and idx[1] < nva))]
elif block == 'pphh':
diagonal = matrix.diagonal().pphh.to_ndarray()
sidcs = np.dstack(np.unravel_index(np.argsort(diagonal.ravel()), diagonal.shape))
assert sidcs.shape[0] == 1
if True:
sidcs = [idx for idx in sidcs[0] if any((idx[0] < noa and idx[1] < nCa and (idx[2] < nva) and (idx[3] >= nva), idx[0] < noa and idx[1] < nCa and (idx[2] >= nva) and (idx[3] < nva), idx[0] < noa and idx[1] >= nCa and (idx[2] >= nva) and (idx[3] >= nva), idx[0] >= noa and idx[1] < nCa and (idx[2] >= nva) and (idx[3] >= nva)))]
else:
sidcs = [idx for idx in sidcs[0] if any((idx[0] < noa and idx[1] < nCa and (idx[2] < nva) and (idx[3] < nva), idx[0] >= noa and idx[1] >= nCa and (idx[2] >= nva) and (idx[3] >= nva), idx[0] < noa and idx[1] >= nCa and (idx[2] < nva) and (idx[3] >= nva), idx[0] >= noa and idx[1] < nCa and (idx[2] >= nva) and (idx[3] < nva), idx[0] < noa and idx[1] >= nCa and (idx[2] >= nva) and (idx[3] < nva), idx[0] >= noa and idx[1] < nCa and (idx[2] < nva) and (idx[3] >= nva)))]
sidcs = [idx for idx in sidcs if idx[2] != idx[3]]
if not matrix.is_core_valence_separated:
sidcs = [idx for idx in sidcs if idx[0] != idx[1]]
def grouping(x):
return np.round(diagonal[tuple(x)], decimals=12)
gidcs = [[tuple(gitem) for gitem in group] for (key, group) in itertools.groupby(sidcs, grouping)]
igroup = 0
for (i, guess) in enumerate(guesses):
nonzeros = np.dstack(np.where(guess[block].to_ndarray() != 0))
assert nonzeros.shape[0] == 1
nonzeros = [tuple(nzitem) for nzitem in nonzeros[0]]
if i > 0 and igroup + 1 < len(gidcs):
if nonzeros[0] in gidcs[igroup + 1]:
igroup += 1
for nz in nonzeros:
assert nz in gidcs[igroup]
</DeepExtract>
|
def base_test_spin_flip(self, case, method, block, max_guesses=10):
ground_state = adcc.LazyMp(cache.refstate[case])
matrix = adcc.AdcMatrix(method, ground_state)
for n_guesses in range(1, max_guesses + 1):
guesses = adcc.guess.guesses_from_diagonal(matrix, n_guesses, block=block, spin_change=-1, spin_block_symmetrisation='none')
assert len(guesses) == n_guesses
for gs in guesses:
assert not matrix.is_core_valence_separated
mospaces = matrix.mospaces
nCa = noa = mospaces.n_orbs_alpha('o1')
nCb = nob = mospaces.n_orbs_beta('o1')
nva = mospaces.n_orbs_alpha('v1')
nvb = mospaces.n_orbs_beta('v1')
gts = gs.ph.to_ndarray()
assert gts.shape == (nCa + nCb, nva + nvb)
assert np.max(np.abs(gts[:nCa, :nva])) == 0
assert np.max(np.abs(gts[nCa:, :nva])) == 0
assert np.max(np.abs(gts[nCa:, nva:])) == 0
if 'pphh' not in matrix.axis_blocks:
return
gtd = gs.pphh.to_ndarray()
assert gtd.shape == (noa + nob, nCa + nCb, nva + nvb, nva + nvb)
assert np.max(np.abs(gtd[:noa, :nCa, :nva, :nva])) == 0
assert np.max(np.abs(gtd[:noa, :nCa, nva:, nva:])) == 0
assert np.max(np.abs(gtd[:noa, nCa:, :nva, :nva])) == 0
assert np.max(np.abs(gtd[:noa, nCa:, :nva, nva:])) == 0
assert np.max(np.abs(gtd[:noa, nCa:, nva:, :nva])) == 0
assert np.max(np.abs(gtd[noa:, :nCa, :nva, :nva])) == 0
assert np.max(np.abs(gtd[noa:, :nCa, :nva, nva:])) == 0
assert np.max(np.abs(gtd[noa:, :nCa, nva:, :nva])) == 0
assert np.max(np.abs(gtd[noa:, nCa:, :nva, :nva])) == 0
assert np.max(np.abs(gtd[noa:, nCa:, :nva, nva:])) == 0
assert np.max(np.abs(gtd[noa:, nCa:, nva:, :nva])) == 0
assert np.max(np.abs(gtd[noa:, nCa:, nva:, nva:])) == 0
assert_array_equal(gtd.transpose((0, 1, 3, 2)), -gtd)
if not matrix.is_core_valence_separated:
assert_array_equal(gtd.transpose((1, 0, 2, 3)), -gtd)
if block == 'ph':
assert np.max(np.abs(gtd[:noa, :nCa, :nva, nva:])) == 0
assert np.max(np.abs(gtd[:noa, :nCa, nva:, :nva])) == 0
assert np.max(np.abs(gtd[:noa, nCa:, nva:, nva:])) == 0
assert np.max(np.abs(gtd[noa:, :nCa, nva:, nva:])) == 0
assert np.max(np.abs(gts[:nCa, nva:])) > 0
elif block == 'pphh':
assert np.max(np.abs(gts[:nCa, nva:])) == 0
has_aaab = np.max(np.abs(gtd[:noa, :nCa, :nva, nva:])) > 0
has_aaba = np.max(np.abs(gtd[:noa, :nCa, nva:, :nva])) > 0
has_abbb = np.max(np.abs(gtd[:noa, nCa:, nva:, nva:])) > 0
has_babb = np.max(np.abs(gtd[noa:, :nCa, nva:, nva:])) > 0
assert has_aaab or has_aaba or has_abbb or has_babb
for (i, gi) in enumerate(guesses):
for (j, gj) in enumerate(guesses):
ref = 1 if i == j else 0
assert adcc.dot(gi, gj) == approx(ref)
mospaces = matrix.mospaces
nCa = noa = mospaces.n_orbs_alpha('o1')
nva = mospaces.n_orbs_alpha('v1')
if mospaces.has_core_occupied_space:
nCa = mospaces.n_orbs_alpha('o2')
sidcs = None
if block == 'ph':
diagonal = matrix.diagonal().ph.to_ndarray()
sidcs = np.dstack(np.unravel_index(np.argsort(diagonal.ravel()), diagonal.shape))
assert sidcs.shape[0] == 1
if True:
sidcs = [idx for idx in sidcs[0] if idx[0] < nCa and idx[1] >= nva]
else:
sidcs = [idx for idx in sidcs[0] if any((idx[0] >= nCa and idx[1] >= nva, idx[0] < nCa and idx[1] < nva))]
elif block == 'pphh':
diagonal = matrix.diagonal().pphh.to_ndarray()
sidcs = np.dstack(np.unravel_index(np.argsort(diagonal.ravel()), diagonal.shape))
assert sidcs.shape[0] == 1
if True:
sidcs = [idx for idx in sidcs[0] if any((idx[0] < noa and idx[1] < nCa and (idx[2] < nva) and (idx[3] >= nva), idx[0] < noa and idx[1] < nCa and (idx[2] >= nva) and (idx[3] < nva), idx[0] < noa and idx[1] >= nCa and (idx[2] >= nva) and (idx[3] >= nva), idx[0] >= noa and idx[1] < nCa and (idx[2] >= nva) and (idx[3] >= nva)))]
else:
sidcs = [idx for idx in sidcs[0] if any((idx[0] < noa and idx[1] < nCa and (idx[2] < nva) and (idx[3] < nva), idx[0] >= noa and idx[1] >= nCa and (idx[2] >= nva) and (idx[3] >= nva), idx[0] < noa and idx[1] >= nCa and (idx[2] < nva) and (idx[3] >= nva), idx[0] >= noa and idx[1] < nCa and (idx[2] >= nva) and (idx[3] < nva), idx[0] < noa and idx[1] >= nCa and (idx[2] >= nva) and (idx[3] < nva), idx[0] >= noa and idx[1] < nCa and (idx[2] < nva) and (idx[3] >= nva)))]
sidcs = [idx for idx in sidcs if idx[2] != idx[3]]
if not matrix.is_core_valence_separated:
sidcs = [idx for idx in sidcs if idx[0] != idx[1]]
def grouping(x):
return np.round(diagonal[tuple(x)], decimals=12)
gidcs = [[tuple(gitem) for gitem in group] for (key, group) in itertools.groupby(sidcs, grouping)]
igroup = 0
for (i, guess) in enumerate(guesses):
nonzeros = np.dstack(np.where(guess[block].to_ndarray() != 0))
assert nonzeros.shape[0] == 1
nonzeros = [tuple(nzitem) for nzitem in nonzeros[0]]
if i > 0 and igroup + 1 < len(gidcs):
if nonzeros[0] in gidcs[igroup + 1]:
igroup += 1
for nz in nonzeros:
assert nz in gidcs[igroup]
|
adcc
|
positive
|
def _prettifyETree(self, elem):
""" Recursively add linebreaks to ElementTree children. """
i = '\n'
if markdown.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
if (not elem.text or not elem.text.strip()) and len(elem) and markdown.isBlockLevel(elem[0].tag):
elem.text = i
for e in elem:
if markdown.isBlockLevel(e.tag):
<DeepExtract>
i = '\n'
if markdown.isBlockLevel(e.tag) and e.tag not in ['code', 'pre']:
if (not e.text or not e.text.strip()) and len(e) and markdown.isBlockLevel(e[0].tag):
e.text = i
for e in e:
if markdown.isBlockLevel(e.tag):
self._prettifyETree(e)
if not e.tail or not e.tail.strip():
e.tail = i
if not e.tail or not e.tail.strip():
e.tail = i
</DeepExtract>
if not elem.tail or not elem.tail.strip():
elem.tail = i
if not elem.tail or not elem.tail.strip():
elem.tail = i
|
def _prettifyETree(self, elem):
""" Recursively add linebreaks to ElementTree children. """
i = '\n'
if markdown.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
if (not elem.text or not elem.text.strip()) and len(elem) and markdown.isBlockLevel(elem[0].tag):
elem.text = i
for e in elem:
if markdown.isBlockLevel(e.tag):
i = '\n'
if markdown.isBlockLevel(e.tag) and e.tag not in ['code', 'pre']:
if (not e.text or not e.text.strip()) and len(e) and markdown.isBlockLevel(e[0].tag):
e.text = i
for e in e:
if markdown.isBlockLevel(e.tag):
self._prettifyETree(e)
if not e.tail or not e.tail.strip():
e.tail = i
if not e.tail or not e.tail.strip():
e.tail = i
if not elem.tail or not elem.tail.strip():
elem.tail = i
if not elem.tail or not elem.tail.strip():
elem.tail = i
|
arecibo
|
positive
|
def attention_layer(from_tensor, to_tensor, attention_mask=None, num_attention_heads=1, size_per_head=512, query_act=None, key_act=None, value_act=None, attention_probs_dropout_prob=0.0, initializer_range=0.02, do_return_2d_tensor=False, batch_size=None, from_seq_length=None, to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
    corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
In practice, the multi-headed attention are done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads, seq_length, width):
output_tensor = tf.reshape(input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
<DeepExtract>
if name is None:
name = from_tensor.name
if [2, 3] is not None:
assert_rank(from_tensor, [2, 3], name)
shape = from_tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
from_shape = shape
dyn_shape = tf.shape(from_tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
from_shape = shape
</DeepExtract>
<DeepExtract>
if name is None:
name = to_tensor.name
if [2, 3] is not None:
assert_rank(to_tensor, [2, 3], name)
shape = to_tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
to_shape = shape
dyn_shape = tf.shape(to_tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
to_shape = shape
</DeepExtract>
if len(from_shape) != len(to_shape):
raise ValueError('The rank of `from_tensor` must match the rank of `to_tensor`.')
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if batch_size is None or from_seq_length is None or to_seq_length is None:
raise ValueError('When passing in rank 2 tensors to attention_layer, the values for `batch_size`, `from_seq_length`, and `to_seq_length` must all be specified.')
<DeepExtract>
ndims = from_tensor.shape.ndims
if ndims < 2:
raise ValueError('Input tensor must have at least rank 2. Shape = %s' % from_tensor.shape)
if ndims == 2:
from_tensor_2d = from_tensor
width = from_tensor.shape[-1]
output_tensor = tf.reshape(from_tensor, [-1, width])
from_tensor_2d = output_tensor
</DeepExtract>
<DeepExtract>
ndims = to_tensor.shape.ndims
if ndims < 2:
raise ValueError('Input tensor must have at least rank 2. Shape = %s' % to_tensor.shape)
if ndims == 2:
to_tensor_2d = to_tensor
width = to_tensor.shape[-1]
output_tensor = tf.reshape(to_tensor, [-1, width])
to_tensor_2d = output_tensor
</DeepExtract>
query_layer = tf.layers.dense(from_tensor_2d, num_attention_heads * size_per_head, activation=query_act, name='query', kernel_initializer=create_initializer(initializer_range))
key_layer = tf.layers.dense(to_tensor_2d, num_attention_heads * size_per_head, activation=key_act, name='key', kernel_initializer=create_initializer(initializer_range))
value_layer = tf.layers.dense(to_tensor_2d, num_attention_heads * size_per_head, activation=value_act, name='value', kernel_initializer=create_initializer(initializer_range))
<DeepExtract>
output_tensor = tf.reshape(query_layer, [batch_size, from_seq_length, num_attention_heads, size_per_head])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
query_layer = output_tensor
</DeepExtract>
<DeepExtract>
output_tensor = tf.reshape(key_layer, [batch_size, to_seq_length, num_attention_heads, size_per_head])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
key_layer = output_tensor
</DeepExtract>
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
attention_mask = tf.expand_dims(attention_mask, axis=[1])
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
attention_scores += adder
attention_probs = tf.nn.softmax(attention_scores)
<DeepExtract>
if attention_probs_dropout_prob is None or attention_probs_dropout_prob == 0.0:
attention_probs = attention_probs
output = tf.nn.dropout(attention_probs, 1.0 - attention_probs_dropout_prob)
attention_probs = output
</DeepExtract>
value_layer = tf.reshape(value_layer, [batch_size, to_seq_length, num_attention_heads, size_per_head])
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
context_layer = tf.reshape(context_layer, [batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
context_layer = tf.reshape(context_layer, [batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
|
def attention_layer(from_tensor, to_tensor, attention_mask=None, num_attention_heads=1, size_per_head=512, query_act=None, key_act=None, value_act=None, attention_probs_dropout_prob=0.0, initializer_range=0.02, do_return_2d_tensor=False, batch_size=None, from_seq_length=None, to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
    corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
In practice, the multi-headed attention are done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads, seq_length, width):
output_tensor = tf.reshape(input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
if name is None:
name = from_tensor.name
if [2, 3] is not None:
assert_rank(from_tensor, [2, 3], name)
shape = from_tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
from_shape = shape
dyn_shape = tf.shape(from_tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
from_shape = shape
if name is None:
name = to_tensor.name
if [2, 3] is not None:
assert_rank(to_tensor, [2, 3], name)
shape = to_tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
to_shape = shape
dyn_shape = tf.shape(to_tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
to_shape = shape
if len(from_shape) != len(to_shape):
raise ValueError('The rank of `from_tensor` must match the rank of `to_tensor`.')
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if batch_size is None or from_seq_length is None or to_seq_length is None:
raise ValueError('When passing in rank 2 tensors to attention_layer, the values for `batch_size`, `from_seq_length`, and `to_seq_length` must all be specified.')
ndims = from_tensor.shape.ndims
if ndims < 2:
raise ValueError('Input tensor must have at least rank 2. Shape = %s' % from_tensor.shape)
if ndims == 2:
from_tensor_2d = from_tensor
width = from_tensor.shape[-1]
output_tensor = tf.reshape(from_tensor, [-1, width])
from_tensor_2d = output_tensor
ndims = to_tensor.shape.ndims
if ndims < 2:
raise ValueError('Input tensor must have at least rank 2. Shape = %s' % to_tensor.shape)
if ndims == 2:
to_tensor_2d = to_tensor
width = to_tensor.shape[-1]
output_tensor = tf.reshape(to_tensor, [-1, width])
to_tensor_2d = output_tensor
query_layer = tf.layers.dense(from_tensor_2d, num_attention_heads * size_per_head, activation=query_act, name='query', kernel_initializer=create_initializer(initializer_range))
key_layer = tf.layers.dense(to_tensor_2d, num_attention_heads * size_per_head, activation=key_act, name='key', kernel_initializer=create_initializer(initializer_range))
value_layer = tf.layers.dense(to_tensor_2d, num_attention_heads * size_per_head, activation=value_act, name='value', kernel_initializer=create_initializer(initializer_range))
output_tensor = tf.reshape(query_layer, [batch_size, from_seq_length, num_attention_heads, size_per_head])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
query_layer = output_tensor
output_tensor = tf.reshape(key_layer, [batch_size, to_seq_length, num_attention_heads, size_per_head])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
key_layer = output_tensor
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
attention_mask = tf.expand_dims(attention_mask, axis=[1])
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
attention_scores += adder
attention_probs = tf.nn.softmax(attention_scores)
if attention_probs_dropout_prob is None or attention_probs_dropout_prob == 0.0:
attention_probs = attention_probs
output = tf.nn.dropout(attention_probs, 1.0 - attention_probs_dropout_prob)
attention_probs = output
value_layer = tf.reshape(value_layer, [batch_size, to_seq_length, num_attention_heads, size_per_head])
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
context_layer = tf.reshape(context_layer, [batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
context_layer = tf.reshape(context_layer, [batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
|
arabert
|
positive
|
def _test_pipeclose_help(self, deferred_help):
<DeepExtract>
cmd_mgr = commandmanager.CommandManager('cliff.tests')
command = mock.MagicMock(spec=c_cmd.Command)
command_inst = mock.MagicMock(spec=c_cmd.Command)
command_inst.run.return_value = 0
command.return_value = command_inst
cmd_mgr.add_command('mock', command)
err_command = mock.Mock(name='err_command', spec=c_cmd.Command)
err_command_inst = mock.Mock(spec=c_cmd.Command)
err_command_inst.run = mock.Mock(side_effect=RuntimeError('test exception'))
err_command.return_value = err_command_inst
cmd_mgr.add_command('error', err_command)
interrupt_command = mock.Mock(name='interrupt_command', spec=c_cmd.Command)
interrupt_command_inst = mock.Mock(spec=c_cmd.Command)
interrupt_command_inst.run = mock.Mock(side_effect=KeyboardInterrupt)
interrupt_command.return_value = interrupt_command_inst
cmd_mgr.add_command('interrupt', interrupt_command)
pipeclose_command = mock.Mock(name='pipeclose_command', spec=c_cmd.Command)
pipeclose_command_inst = mock.Mock(spec=c_cmd.Command)
pipeclose_command_inst.run = mock.Mock(side_effect=BrokenPipeError)
pipeclose_command.return_value = pipeclose_command_inst
cmd_mgr.add_command('pipe-close', pipeclose_command)
app = application.App('testing interactive mode', '1', cmd_mgr, stderr=mock.Mock(), **kwargs)
(app, _) = (app, command)
</DeepExtract>
with mock.patch('cliff.help.HelpAction.__call__', side_effect=BrokenPipeError):
app.run(['--help'])
|
def _test_pipeclose_help(self, deferred_help):
cmd_mgr = commandmanager.CommandManager('cliff.tests')
command = mock.MagicMock(spec=c_cmd.Command)
command_inst = mock.MagicMock(spec=c_cmd.Command)
command_inst.run.return_value = 0
command.return_value = command_inst
cmd_mgr.add_command('mock', command)
err_command = mock.Mock(name='err_command', spec=c_cmd.Command)
err_command_inst = mock.Mock(spec=c_cmd.Command)
err_command_inst.run = mock.Mock(side_effect=RuntimeError('test exception'))
err_command.return_value = err_command_inst
cmd_mgr.add_command('error', err_command)
interrupt_command = mock.Mock(name='interrupt_command', spec=c_cmd.Command)
interrupt_command_inst = mock.Mock(spec=c_cmd.Command)
interrupt_command_inst.run = mock.Mock(side_effect=KeyboardInterrupt)
interrupt_command.return_value = interrupt_command_inst
cmd_mgr.add_command('interrupt', interrupt_command)
pipeclose_command = mock.Mock(name='pipeclose_command', spec=c_cmd.Command)
pipeclose_command_inst = mock.Mock(spec=c_cmd.Command)
pipeclose_command_inst.run = mock.Mock(side_effect=BrokenPipeError)
pipeclose_command.return_value = pipeclose_command_inst
cmd_mgr.add_command('pipe-close', pipeclose_command)
app = application.App('testing interactive mode', '1', cmd_mgr, stderr=mock.Mock(), **kwargs)
(app, _) = (app, command)
with mock.patch('cliff.help.HelpAction.__call__', side_effect=BrokenPipeError):
app.run(['--help'])
|
cliff
|
positive
|
def specs_cleaner(self) -> List[SpecRecord]:
""" Returns a transformed version of the specs
Returns:
final_specs: List[SpecRecord]
for specs that are empty (ex: '') returns: []
for specs that are default (ex: ':') returns: [SpecRecord]
"""
if len(self.specs_strings) == 1:
if self.specs_strings[0].strip() == '':
self.specs_strings[0] = ':'
final_specs = []
for item in self.specs_strings:
try:
<DeepExtract>
parts = item.split(':')
if len(parts) > 3:
comm.abort(f'Error: spec item has too many parts: {item}')
is_range = True if len(parts) > 1 else False
start = parts[0]
stop = parts[1] if len(parts) > 1 else None
step = parts[2] if len(parts) > 2 else '1'
(start, stop, step, is_range) = (start, stop, step, is_range)
</DeepExtract>
<DeepExtract>
start = Specifications.transform_empty_string(start)
start = self.transform_name(start)
start = self.transform_negative_start_number(start, is_range)
if start is not None and (not comm.isnumeric(start)):
raise UnidentifiableNonNumericSpec(f'Do not know how to interpret: {start}')
int_start = int(start) if start is not None else None
stop = self.transform_empty_string(stop)
stop = self.transform_name(stop)
stop = self.transform_negative_stop_number(stop, is_range)
stop = self.validate_positive_number(stop, is_range)
if stop is not None and (not comm.isnumeric(stop)):
raise UnidentifiableNonNumericSpec(f'Do not know how to interpret: {stop}')
int_stop = int(stop) if stop is not None else None
step = self.transform_empty_string(step)
if step is not None and (not comm.isnumeric(step)):
raise UnidentifiableNonNumericSpec(f'Do not know how to interpret: {step}')
float_step = float(step) if step is not None else 1.0
(int_start, int_stop, float_step) = (int_start, int_stop, float_step)
</DeepExtract>
<DeepExtract>
int_start = self.transform_none_start(int_start, float_step)
(int_stop, col_default_range, rec_default_range) = self.transform_none_stop(int_start, int_stop, float_step, is_range)
(int_start, int_stop, float_step, col_default_range, rec_default_range) = (int_start, int_stop, float_step, col_default_range, rec_default_range)
</DeepExtract>
try:
final_rec = SpecRecord(start=int_start, stop=int_stop, step=float_step, spec_type=self.spec_type, col_default_range=col_default_range, rec_default_range=rec_default_range)
except ValidationError as err:
comm.abort('Error: invalid specification', f'{self.spec_type}: {start}:{stop}:{step}')
final_specs.append(final_rec)
except OutOfRangeError:
continue
return final_specs
|
def specs_cleaner(self) -> List[SpecRecord]:
""" Returns a transformed version of the specs
Returns:
final_specs: List[SpecRecord]
for specs that are empty (ex: '') returns: []
for specs that are default (ex: ':') returns: [SpecRecord]
"""
if len(self.specs_strings) == 1:
if self.specs_strings[0].strip() == '':
self.specs_strings[0] = ':'
final_specs = []
for item in self.specs_strings:
try:
parts = item.split(':')
if len(parts) > 3:
comm.abort(f'Error: spec item has too many parts: {item}')
is_range = True if len(parts) > 1 else False
start = parts[0]
stop = parts[1] if len(parts) > 1 else None
step = parts[2] if len(parts) > 2 else '1'
(start, stop, step, is_range) = (start, stop, step, is_range)
start = Specifications.transform_empty_string(start)
start = self.transform_name(start)
start = self.transform_negative_start_number(start, is_range)
if start is not None and (not comm.isnumeric(start)):
raise UnidentifiableNonNumericSpec(f'Do not know how to interpret: {start}')
int_start = int(start) if start is not None else None
stop = self.transform_empty_string(stop)
stop = self.transform_name(stop)
stop = self.transform_negative_stop_number(stop, is_range)
stop = self.validate_positive_number(stop, is_range)
if stop is not None and (not comm.isnumeric(stop)):
raise UnidentifiableNonNumericSpec(f'Do not know how to interpret: {stop}')
int_stop = int(stop) if stop is not None else None
step = self.transform_empty_string(step)
if step is not None and (not comm.isnumeric(step)):
raise UnidentifiableNonNumericSpec(f'Do not know how to interpret: {step}')
float_step = float(step) if step is not None else 1.0
(int_start, int_stop, float_step) = (int_start, int_stop, float_step)
int_start = self.transform_none_start(int_start, float_step)
(int_stop, col_default_range, rec_default_range) = self.transform_none_stop(int_start, int_stop, float_step, is_range)
(int_start, int_stop, float_step, col_default_range, rec_default_range) = (int_start, int_stop, float_step, col_default_range, rec_default_range)
try:
final_rec = SpecRecord(start=int_start, stop=int_stop, step=float_step, spec_type=self.spec_type, col_default_range=col_default_range, rec_default_range=rec_default_range)
except ValidationError as err:
comm.abort('Error: invalid specification', f'{self.spec_type}: {start}:{stop}:{step}')
final_specs.append(final_rec)
except OutOfRangeError:
continue
return final_specs
|
DataGristle
|
positive
|
def test_ellipsis(self):
"""Triple dot (...) aka Ellipsis can be used anywhere in Python 3."""
<DeepExtract>
(before, after) = (up_to_version((3, 0)), from_version((3, 0)))
</DeepExtract>
code = '...'
<DeepExtract>
[] = sorted(listify([], [], str))
(error_type, error_msg) = INVALIDSYNTAX
details = 'Running following code :\n---\n{0}\n---'.format(code)
if PythonEnvRange(before, interpreters).contains_current_env():
exc = get_exception(code)
self.assertFalse(exc is None, 'No exc thrown.' + details)
(type_caught, value, traceback) = exc
suggestions = sorted(get_suggestions_for_exception(value, traceback))
self.log_exception(code, exc, suggestions)
self.assertTrue(isinstance(value, type_caught))
self.assertTrue(issubclass(type_caught, error_type), '{0} ({1}) not a subclass of {2}'.format(type_caught, value, error_type) + details)
msg = next((a for a in value.args if isinstance(a, str)), '')
if error_msg:
self.assertRegexp(msg, error_msg, details)
self.assertEqual(suggestions, [], details)
</DeepExtract>
<DeepExtract>
details = 'Running following code :\n---\n{0}\n---'.format(code)
if PythonEnvRange(after, interpreters).contains_current_env():
exc = get_exception(code)
self.assertTrue(exc is None, 'Exc thrown : ' + str(exc) + details)
</DeepExtract>
|
def test_ellipsis(self):
"""Triple dot (...) aka Ellipsis can be used anywhere in Python 3."""
(before, after) = (up_to_version((3, 0)), from_version((3, 0)))
code = '...'
[] = sorted(listify([], [], str))
(error_type, error_msg) = INVALIDSYNTAX
details = 'Running following code :\n---\n{0}\n---'.format(code)
if PythonEnvRange(before, interpreters).contains_current_env():
exc = get_exception(code)
self.assertFalse(exc is None, 'No exc thrown.' + details)
(type_caught, value, traceback) = exc
suggestions = sorted(get_suggestions_for_exception(value, traceback))
self.log_exception(code, exc, suggestions)
self.assertTrue(isinstance(value, type_caught))
self.assertTrue(issubclass(type_caught, error_type), '{0} ({1}) not a subclass of {2}'.format(type_caught, value, error_type) + details)
msg = next((a for a in value.args if isinstance(a, str)), '')
if error_msg:
self.assertRegexp(msg, error_msg, details)
self.assertEqual(suggestions, [], details)
details = 'Running following code :\n---\n{0}\n---'.format(code)
if PythonEnvRange(after, interpreters).contains_current_env():
exc = get_exception(code)
self.assertTrue(exc is None, 'Exc thrown : ' + str(exc) + details)
|
DidYouMean-Python
|
positive
|
def display(self):
"""Write results to an output stream"""
total = 0
count = 0
for (i, result) in enumerate(self._results):
if total == 0:
<DeepExtract>
</DeepExtract>
<DeepExtract>
raise NotImplementedError
</DeepExtract>
count += 1
total += 1
if count >= self.pagesize and self.pagesize > 0 and (i < len(self._results) - 1):
<DeepExtract>
text = input("Press return for next %d result%s (or type 'all'):" % (self.pagesize, plural(self.pagesize)))
if text:
if text.lower() in ['a', 'all']:
self._pagesize = 0
elif text.isdigit():
self._pagesize = int(text)
</DeepExtract>
count = 0
if total == 0:
self._ostream.write('No results\n')
else:
<DeepExtract>
</DeepExtract>
|
def display(self):
"""Write results to an output stream"""
total = 0
count = 0
for (i, result) in enumerate(self._results):
if total == 0:
raise NotImplementedError
count += 1
total += 1
if count >= self.pagesize and self.pagesize > 0 and (i < len(self._results) - 1):
text = input("Press return for next %d result%s (or type 'all'):" % (self.pagesize, plural(self.pagesize)))
if text:
if text.lower() in ['a', 'all']:
self._pagesize = 0
elif text.isdigit():
self._pagesize = int(text)
count = 0
if total == 0:
self._ostream.write('No results\n')
else:
|
dql
|
positive
|
def __init__(self, left: Ty, right: Ty):
assert_isatomic(left, Ty)
assert_isatomic(right, Ty)
<DeepExtract>
if self.r != right and self != right.r:
raise AxiomError(messages.NOT_ADJOINT.format(self, right))
if self.r != right:
raise AxiomError(messages.NOT_RIGID_ADJOINT.format(self, right))
</DeepExtract>
name = 'Cap({}, {})'.format(left, right)
(dom, cod) = (self.ty_factory(), left @ right)
BinaryBoxConstructor.__init__(self, left, right)
Box.__init__(self, name, dom, cod, draw_as_wires=True)
|
def __init__(self, left: Ty, right: Ty):
assert_isatomic(left, Ty)
assert_isatomic(right, Ty)
if self.r != right and self != right.r:
raise AxiomError(messages.NOT_ADJOINT.format(self, right))
if self.r != right:
raise AxiomError(messages.NOT_RIGID_ADJOINT.format(self, right))
name = 'Cap({}, {})'.format(left, right)
(dom, cod) = (self.ty_factory(), left @ right)
BinaryBoxConstructor.__init__(self, left, right)
Box.__init__(self, name, dom, cod, draw_as_wires=True)
|
discopy
|
positive
|
@patch('appvalidator.testcases.webappbase.requests.get')
@patch('appvalidator.constants.MAX_RESOURCE_SIZE', 100)
def test_decode_gzip(self, r_g):
def compressed_gzip_body():
stream = cStringIO.StringIO()
compressor = gzip.GzipFile(fileobj=stream, mode='w')
compressor.write(u'é'.encode('utf-8') * 100)
compressor.close()
stream.seek(0)
return stream
normal_response_object = Mock()
<DeepExtract>
stream = cStringIO.StringIO()
compressor = gzip.GzipFile(fileobj=stream, mode='w')
compressor.write(u'é'.encode('utf-8') * 100)
compressor.close()
stream.seek(0)
body = stream
</DeepExtract>
normal_response_object.raw = HTTPResponse(status=200, preload_content=False, headers={'content-encoding': 'gzip', 'content-type': 'application/blah; charset=utf-8'}, body=body, decode_content=False)
normal_response_object.encoding = 'UTF-8'
normal_response_object.status_code = 200
r_g.return_value = normal_response_object
eq_(appbase.try_get_resource(self.err, None, 'http://foo.bar/', ''), u'é' * 100)
self.assert_silent()
|
@patch('appvalidator.testcases.webappbase.requests.get')
@patch('appvalidator.constants.MAX_RESOURCE_SIZE', 100)
def test_decode_gzip(self, r_g):
def compressed_gzip_body():
stream = cStringIO.StringIO()
compressor = gzip.GzipFile(fileobj=stream, mode='w')
compressor.write(u'é'.encode('utf-8') * 100)
compressor.close()
stream.seek(0)
return stream
normal_response_object = Mock()
stream = cStringIO.StringIO()
compressor = gzip.GzipFile(fileobj=stream, mode='w')
compressor.write(u'é'.encode('utf-8') * 100)
compressor.close()
stream.seek(0)
body = stream
normal_response_object.raw = HTTPResponse(status=200, preload_content=False, headers={'content-encoding': 'gzip', 'content-type': 'application/blah; charset=utf-8'}, body=body, decode_content=False)
normal_response_object.encoding = 'UTF-8'
normal_response_object.status_code = 200
r_g.return_value = normal_response_object
eq_(appbase.try_get_resource(self.err, None, 'http://foo.bar/', ''), u'é' * 100)
self.assert_silent()
|
app-validator
|
positive
|
def cut(self, key):
<DeepExtract>
if not isinstance(key, six.text_type):
key = key.decode('utf-8')
parts = key.split(u'/')
if len(parts) != 3 or parts[0] != 'keys':
raise CSStoreDenied("Invalid cert request key '{}'".format(key))
(service, hostname) = parts[1:3]
if service not in self.allowed_services:
raise CSStoreDenied("Invalid service '{}'".format(key))
principal = krb5_format_service_principal_name(service, hostname, self.ipa.env.realm)
key = u'cert/{}/{}'.format(service, hostname)
(key, hostname, principal) = (key, hostname, principal)
</DeepExtract>
<DeepExtract>
with self.ipa as ipa:
response = ipa.Command.cert_find(service=principal, validnotafter_from=datetime.datetime.utcnow())
certs = list((cert for cert in response['result'] if not cert[u'revoked']))
for cert in certs:
self.logger.info('Revoking cert %i (subject: %s, issuer: %s)', cert[u'serial_number'], cert[u'subject'], cert[u'issuer'])
ipa.Command.cert_revoke(cert[u'serial_number'], revocation_reason=self.revocation_reason)
certs = certs
</DeepExtract>
return self.store.cut(key) or certs
|
def cut(self, key):
if not isinstance(key, six.text_type):
key = key.decode('utf-8')
parts = key.split(u'/')
if len(parts) != 3 or parts[0] != 'keys':
raise CSStoreDenied("Invalid cert request key '{}'".format(key))
(service, hostname) = parts[1:3]
if service not in self.allowed_services:
raise CSStoreDenied("Invalid service '{}'".format(key))
principal = krb5_format_service_principal_name(service, hostname, self.ipa.env.realm)
key = u'cert/{}/{}'.format(service, hostname)
(key, hostname, principal) = (key, hostname, principal)
with self.ipa as ipa:
response = ipa.Command.cert_find(service=principal, validnotafter_from=datetime.datetime.utcnow())
certs = list((cert for cert in response['result'] if not cert[u'revoked']))
for cert in certs:
self.logger.info('Revoking cert %i (subject: %s, issuer: %s)', cert[u'serial_number'], cert[u'subject'], cert[u'issuer'])
ipa.Command.cert_revoke(cert[u'serial_number'], revocation_reason=self.revocation_reason)
certs = certs
return self.store.cut(key) or certs
|
custodia
|
positive
|
def construct(self):
<DeepExtract>
GraphScene.setup_axes(self)
init_label_x = 2
end_label_x = 7
step_x = 1
init_label_y = 20
end_label_y = 50
step_y = 5
self.x_axis.label_direction = DOWN
self.y_axis.label_direction = LEFT
self.x_axis.add_numbers(*range(init_label_x, end_label_x + step_x, step_x))
self.y_axis.add_numbers(*range(init_label_y, end_label_y + step_y, step_y))
self.play(ShowCreation(self.x_axis), ShowCreation(self.y_axis))
</DeepExtract>
graph = self.get_graph(lambda x: x ** 2, color=GREEN)
self.play(ShowCreation(graph), run_time=2)
self.wait()
|
def construct(self):
GraphScene.setup_axes(self)
init_label_x = 2
end_label_x = 7
step_x = 1
init_label_y = 20
end_label_y = 50
step_y = 5
self.x_axis.label_direction = DOWN
self.y_axis.label_direction = LEFT
self.x_axis.add_numbers(*range(init_label_x, end_label_x + step_x, step_x))
self.y_axis.add_numbers(*range(init_label_y, end_label_y + step_y, step_y))
self.play(ShowCreation(self.x_axis), ShowCreation(self.y_axis))
graph = self.get_graph(lambda x: x ** 2, color=GREEN)
self.play(ShowCreation(graph), run_time=2)
self.wait()
|
AnimationsWithManim
|
positive
|
def send_raw(self, command, _recurse=0, stime=None):
if _recurse > _MAX_RECURSE:
raise Exception('Cannot reconnect: %s' % (str(self.userv.address),))
if self.s == None:
<DeepExtract>
self.s = socket.socket(self.userv.family, socket.SOCK_STREAM)
if self.userv.family == socket.AF_INET6:
address = (self.userv.address[0][1:-1], self.userv.address[1])
else:
address = self.userv.address
self.s.connect(address)
</DeepExtract>
if stime == None:
stime = MonoTime()
while True:
try:
self.s.send(command.encode())
break
except socket.error as why:
if why.errno == EINTR:
continue
elif why.errno in (EPIPE, ENOTCONN, ECONNRESET):
self.s = None
return self.send_raw(command, _recurse + 1, stime)
raise why
while True:
try:
rval = self.s.recv(1024)
if len(rval) == 0:
self.s = None
return self.send_raw(command, _MAX_RECURSE, stime)
rval = rval.decode().strip()
break
except socket.error as why:
if why.errno == EINTR:
continue
elif why.errno in (EPIPE, ENOTCONN, ECONNRESET):
self.s = None
return self.send_raw(command, _recurse + 1, stime)
raise why
rtpc_delay = stime.offsetFromNow()
return (rval, rtpc_delay)
|
def send_raw(self, command, _recurse=0, stime=None):
if _recurse > _MAX_RECURSE:
raise Exception('Cannot reconnect: %s' % (str(self.userv.address),))
if self.s == None:
self.s = socket.socket(self.userv.family, socket.SOCK_STREAM)
if self.userv.family == socket.AF_INET6:
address = (self.userv.address[0][1:-1], self.userv.address[1])
else:
address = self.userv.address
self.s.connect(address)
if stime == None:
stime = MonoTime()
while True:
try:
self.s.send(command.encode())
break
except socket.error as why:
if why.errno == EINTR:
continue
elif why.errno in (EPIPE, ENOTCONN, ECONNRESET):
self.s = None
return self.send_raw(command, _recurse + 1, stime)
raise why
while True:
try:
rval = self.s.recv(1024)
if len(rval) == 0:
self.s = None
return self.send_raw(command, _MAX_RECURSE, stime)
rval = rval.decode().strip()
break
except socket.error as why:
if why.errno == EINTR:
continue
elif why.errno in (EPIPE, ENOTCONN, ECONNRESET):
self.s = None
return self.send_raw(command, _recurse + 1, stime)
raise why
rtpc_delay = stime.offsetFromNow()
return (rval, rtpc_delay)
|
b2bua
|
positive
|
def testCutThroughAll(self):
"""
Tests a model that uses more than one workplane
"""
s = Workplane(Plane.XY())
r = s.rect(2.0, 2.0).rect(1.3, 1.3, forConstruction=True).vertices().circle(0.125).extrude(0.5)
t = r.faces('>Y').workplane().circle(0.125).cutThruAll()
<DeepExtract>
with suppress_stdout_stderr():
t.exportSvg(os.path.join(OUTDIR, self._testMethodName + '.svg'))
t.val().exportStep(os.path.join(OUTDIR, self._testMethodName + '.step'))
t.val().exportStl(os.path.join(OUTDIR, self._testMethodName + '.stl'))
</DeepExtract>
self.assertEqual(11, t.faces().size())
|
def testCutThroughAll(self):
"""
Tests a model that uses more than one workplane
"""
s = Workplane(Plane.XY())
r = s.rect(2.0, 2.0).rect(1.3, 1.3, forConstruction=True).vertices().circle(0.125).extrude(0.5)
t = r.faces('>Y').workplane().circle(0.125).cutThruAll()
with suppress_stdout_stderr():
t.exportSvg(os.path.join(OUTDIR, self._testMethodName + '.svg'))
t.val().exportStep(os.path.join(OUTDIR, self._testMethodName + '.step'))
t.val().exportStl(os.path.join(OUTDIR, self._testMethodName + '.stl'))
self.assertEqual(11, t.faces().size())
|
cadquery
|
positive
|
def get_project(self, project, org=False):
"""
Retrieve the GH org or org/repo project.
For a repo project the project variable looks like:
my-gh-org/my-gh-repo/projects/1
For an org project the project variable looks like:
my-gh-org/projects/1
"""
pieces = []
if org:
(owner, _, number) = project.split('/')
pieces = ['orgs', owner]
else:
(owner, repo, _, number) = project.split('/')
pieces = ['repos', owner, repo]
pieces.append('projects')
<DeepExtract>
r = self.session.request('get', '/'.join(pieces), **kwargs)
r.raise_for_status()
if parse:
r = r.json()
r = r
</DeepExtract>
return [x['id'] for x in r if x['number'] == int(number)][0]
|
def get_project(self, project, org=False):
"""
Retrieve the GH org or org/repo project.
For a repo project the project variable looks like:
my-gh-org/my-gh-repo/projects/1
For an org project the project variable looks like:
my-gh-org/projects/1
"""
pieces = []
if org:
(owner, _, number) = project.split('/')
pieces = ['orgs', owner]
else:
(owner, repo, _, number) = project.split('/')
pieces = ['repos', owner, repo]
pieces.append('projects')
r = self.session.request('get', '/'.join(pieces), **kwargs)
r.raise_for_status()
if parse:
r = r.json()
r = r
return [x['id'] for x in r if x['number'] == int(number)][0]
|
auditree-framework
|
positive
|
def get_samples(fixed_features_values, feature_range, sampling_random_seed, sampling_size):
precisions = self.data_interface.get_decimal_precisions(output_type='dict')
if sampling_random_seed is not None:
random.seed(sampling_random_seed)
samples = []
for feature in self.data_interface.feature_names:
if feature in fixed_features_values:
sample = [fixed_features_values[feature]] * sampling_size
elif feature in self.data_interface.continuous_feature_names:
low = feature_range[feature][0]
high = feature_range[feature][1]
<DeepExtract>
if sampling_random_seed is not None:
np.random.seed(sampling_random_seed)
if precisions[feature] == 0:
result = np.random.randint(low, high + 1, sampling_size).tolist()
result = [float(r) for r in result]
else:
result = np.random.uniform(low, high + 10 ** (-precisions[feature]), sampling_size)
result = [round(r, precisions[feature]) for r in result]
sample = result
</DeepExtract>
else:
if sampling_random_seed is not None:
random.seed(sampling_random_seed)
sample = random.choices(feature_range[feature], k=sampling_size)
samples.append(sample)
samples = pd.DataFrame(dict(zip(self.data_interface.feature_names, samples)))
return samples
|
def get_samples(fixed_features_values, feature_range, sampling_random_seed, sampling_size):
precisions = self.data_interface.get_decimal_precisions(output_type='dict')
if sampling_random_seed is not None:
random.seed(sampling_random_seed)
samples = []
for feature in self.data_interface.feature_names:
if feature in fixed_features_values:
sample = [fixed_features_values[feature]] * sampling_size
elif feature in self.data_interface.continuous_feature_names:
low = feature_range[feature][0]
high = feature_range[feature][1]
if sampling_random_seed is not None:
np.random.seed(sampling_random_seed)
if precisions[feature] == 0:
result = np.random.randint(low, high + 1, sampling_size).tolist()
result = [float(r) for r in result]
else:
result = np.random.uniform(low, high + 10 ** (-precisions[feature]), sampling_size)
result = [round(r, precisions[feature]) for r in result]
sample = result
else:
if sampling_random_seed is not None:
random.seed(sampling_random_seed)
sample = random.choices(feature_range[feature], k=sampling_size)
samples.append(sample)
samples = pd.DataFrame(dict(zip(self.data_interface.feature_names, samples)))
return samples
|
DiCE
|
positive
|
def compute(df: Union[pd.DataFrame, dd.DataFrame], col1: Optional[Union[str, LatLong]]=None, col2: Optional[Union[str, LatLong]]=None, col3: Optional[str]=None, *, cfg: Union[Config, Dict[str, Any], None]=None, display: Optional[List[str]]=None, dtype: Optional[DTypeDef]=None) -> Intermediate:
"""
All in one compute function.
Parameters
----------
df
DataFrame from which visualizations are generated
cfg: Union[Config, Dict[str, Any], None], default None
        When a user calls plot(), the created Config object will be passed to compute().
        When a user calls compute() directly, if they want to customize the output,
cfg is a dictionary for configuring. If not, cfg is None and
default values will be used for parameters.
display: Optional[List[str]], default None
        A list containing the names of the visualizations to display. Only exists when
        a user calls compute() directly and wants to customize the output
col1: Optional[str], default None
A valid column name from the dataframe
col2: Optional[str], default None
A valid column name from the dataframe
col3: Optional[str], default None
A valid column name from the dataframe
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()
"""
<DeepExtract>
warnings.filterwarnings('ignore', 'The default value of regex will change from True to False in a future version', category=FutureWarning)
warnings.filterwarnings('ignore', 'invalid value encountered in true_divide', category=RuntimeWarning)
</DeepExtract>
if isinstance(cfg, dict):
cfg = Config.from_dict(display, cfg)
elif not cfg:
cfg = Config()
(x, y, z) = (col1, col2, col3)
if not any([x, y, z]):
return compute_overview(df, cfg, dtype)
if sum((v is None for v in (x, y, z))) == 2:
x = x or y or z
if x is None:
raise ValueError
return compute_univariate(df, x, cfg, dtype)
if sum((v is None for v in [x, y, z])) == 1:
(x, y) = (v for v in [x, y, z] if v is not None)
if x is None or y is None:
raise ValueError
return compute_bivariate(df, x, y, cfg, dtype)
if x is not None and y is not None and (z is not None):
if not (isinstance(x, str) and isinstance(y, str) and isinstance(z, str)):
raise TypeError('Column names should be string. Current column names: {x}, {y}, {z}')
return compute_trivariate(df, x, y, z, cfg, dtype)
raise ValueError('The input is not correct.')
|
def compute(df: Union[pd.DataFrame, dd.DataFrame], col1: Optional[Union[str, LatLong]]=None, col2: Optional[Union[str, LatLong]]=None, col3: Optional[str]=None, *, cfg: Union[Config, Dict[str, Any], None]=None, display: Optional[List[str]]=None, dtype: Optional[DTypeDef]=None) -> Intermediate:
"""
All in one compute function.
Parameters
----------
df
DataFrame from which visualizations are generated
cfg: Union[Config, Dict[str, Any], None], default None
        When a user calls plot(), the created Config object will be passed to compute().
        When a user calls compute() directly, if they want to customize the output,
cfg is a dictionary for configuring. If not, cfg is None and
default values will be used for parameters.
display: Optional[List[str]], default None
        A list containing the names of the visualizations to display. Only exists when
        a user calls compute() directly and wants to customize the output
col1: Optional[str], default None
A valid column name from the dataframe
col2: Optional[str], default None
A valid column name from the dataframe
col3: Optional[str], default None
A valid column name from the dataframe
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()
"""
warnings.filterwarnings('ignore', 'The default value of regex will change from True to False in a future version', category=FutureWarning)
warnings.filterwarnings('ignore', 'invalid value encountered in true_divide', category=RuntimeWarning)
if isinstance(cfg, dict):
cfg = Config.from_dict(display, cfg)
elif not cfg:
cfg = Config()
(x, y, z) = (col1, col2, col3)
if not any([x, y, z]):
return compute_overview(df, cfg, dtype)
if sum((v is None for v in (x, y, z))) == 2:
x = x or y or z
if x is None:
raise ValueError
return compute_univariate(df, x, cfg, dtype)
if sum((v is None for v in [x, y, z])) == 1:
(x, y) = (v for v in [x, y, z] if v is not None)
if x is None or y is None:
raise ValueError
return compute_bivariate(df, x, y, cfg, dtype)
if x is not None and y is not None and (z is not None):
if not (isinstance(x, str) and isinstance(y, str) and isinstance(z, str)):
raise TypeError('Column names should be string. Current column names: {x}, {y}, {z}')
return compute_trivariate(df, x, y, z, cfg, dtype)
raise ValueError('The input is not correct.')
|
dataprep
|
positive
|
def write_config(mf_path, config):
<DeepExtract>
config_path = os.path.join(safe_str(mf_path), safe_str(constants.CONFIG_FILE))
</DeepExtract>
with open(config_path, 'w') as f:
json.dump(config, f)
|
def write_config(mf_path, config):
config_path = os.path.join(safe_str(mf_path), safe_str(constants.CONFIG_FILE))
with open(config_path, 'w') as f:
json.dump(config, f)
|
dataiku-contrib
|
positive
|
@side_effect_free
def harvest_source_list(context, data_dict):
"""
TODO: Use package search
"""
organization_id = data_dict.get('organization_id')
limit = config.get('ckan.harvest.harvest_source_limit', 100)
<DeepExtract>
session = context['session']
user = context.get('user', '')
only_active = data_dict.get('only_active', False)
only_to_run = data_dict.get('only_to_run', False)
query = session.query(HarvestSource).order_by(HarvestSource.created.desc())
if organization_id:
query = query.join(Package, HarvestSource.id == Package.id).filter(Package.owner_org == organization_id)
if only_active:
query = query.filter(HarvestSource.active == True)
if only_to_run:
query = query.filter(HarvestSource.frequency != 'MANUAL')
query = query.filter(or_(HarvestSource.next_run <= datetime.datetime.utcnow(), HarvestSource.next_run == None))
user_obj = User.get(user)
if user_obj and (not user_obj.sysadmin):
publisher_filters = []
publishers_for_the_user = user_obj.get_groups(u'publisher')
for publisher_id in [g.id for g in publishers_for_the_user]:
publisher_filters.append(HarvestSource.publisher_id == publisher_id)
if len(publisher_filters):
query = query.filter(or_(*publisher_filters))
else:
sources = []
log.debug('User %s with publishers %r has Harvest Sources: %r', user, publishers_for_the_user, [(hs.id, hs.url) for hs in query])
sources = query.limit(limit).all() if limit else query.all()
sources = sources
</DeepExtract>
last_job_status = p.toolkit.asbool(data_dict.get('return_last_job_status', False))
return [harvest_source_dictize(source, context, last_job_status) for source in sources]
|
@side_effect_free
def harvest_source_list(context, data_dict):
"""
TODO: Use package search
"""
organization_id = data_dict.get('organization_id')
limit = config.get('ckan.harvest.harvest_source_limit', 100)
session = context['session']
user = context.get('user', '')
only_active = data_dict.get('only_active', False)
only_to_run = data_dict.get('only_to_run', False)
query = session.query(HarvestSource).order_by(HarvestSource.created.desc())
if organization_id:
query = query.join(Package, HarvestSource.id == Package.id).filter(Package.owner_org == organization_id)
if only_active:
query = query.filter(HarvestSource.active == True)
if only_to_run:
query = query.filter(HarvestSource.frequency != 'MANUAL')
query = query.filter(or_(HarvestSource.next_run <= datetime.datetime.utcnow(), HarvestSource.next_run == None))
user_obj = User.get(user)
if user_obj and (not user_obj.sysadmin):
publisher_filters = []
publishers_for_the_user = user_obj.get_groups(u'publisher')
for publisher_id in [g.id for g in publishers_for_the_user]:
publisher_filters.append(HarvestSource.publisher_id == publisher_id)
if len(publisher_filters):
query = query.filter(or_(*publisher_filters))
else:
sources = []
log.debug('User %s with publishers %r has Harvest Sources: %r', user, publishers_for_the_user, [(hs.id, hs.url) for hs in query])
sources = query.limit(limit).all() if limit else query.all()
sources = sources
last_job_status = p.toolkit.asbool(data_dict.get('return_last_job_status', False))
return [harvest_source_dictize(source, context, last_job_status) for source in sources]
|
ckanext-harvest
|
positive
|
def _iter(self, data_path, ext='csv'):
"""
:param data_path: a string corresponding to a location of data
(file or folder). If list is provided, will assume
multiple data paths.
:return: generator of data-chunks.
"""
try:
validate_data_paths(data_path)
except Exception as e:
raise e
data_paths = listify(data_path)
file_openers = create_openers_of_valid_files(data_paths, ext=ext, encoding=self.parser_kwargs['encoding'])
if not file_openers:
raise ValueError("No valid files to open, please check the provided 'data_path' (%s). Note that files without the '%s' extension are ignored." % (data_paths, '.csv'))
if self.worker_threads_num > 1:
<DeepExtract>
chunk_queue = Queue(maxsize=self.buffer_size)
parser_kwargs = self.adjust_kwargs_to_engine(self.parser_kwargs)
iter_creator = fun_partial(self.get_data_chunk_iter, chunksize=self.chunk_size, **parser_kwargs)
queue_populator = fun_partial(populate_queue_with_chunks, itr_creator=iter_creator, queue=chunk_queue)
pool = Pool(self.worker_threads_num)
pool.map_async(queue_populator, file_openers)
pool.close()
received_termin_tokens_count = 0
while True:
chunk = chunk_queue.get(timeout=self.timeout)
if isinstance(chunk, Exception):
raise chunk
if chunk == TERMINATION_TOKEN:
received_termin_tokens_count += 1
if received_termin_tokens_count == len(file_openers):
pool.join()
break
else:
yield chunk
</DeepExtract>
else:
<DeepExtract>
parser_kwargs = self.adjust_kwargs_to_engine(self.parser_kwargs)
for file_opener in file_openers:
dc_iter = self.get_data_chunk_iter(file_opener, chunksize=self.chunk_size, **parser_kwargs)
for chunk in dc_iter:
yield chunk
</DeepExtract>
return chunk_iter
|
def _iter(self, data_path, ext='csv'):
"""
:param data_path: a string corresponding to a location of data
(file or folder). If list is provided, will assume
multiple data paths.
:return: generator of data-chunks.
"""
try:
validate_data_paths(data_path)
except Exception as e:
raise e
data_paths = listify(data_path)
file_openers = create_openers_of_valid_files(data_paths, ext=ext, encoding=self.parser_kwargs['encoding'])
if not file_openers:
raise ValueError("No valid files to open, please check the provided 'data_path' (%s). Note that files without the '%s' extension are ignored." % (data_paths, '.csv'))
if self.worker_threads_num > 1:
chunk_queue = Queue(maxsize=self.buffer_size)
parser_kwargs = self.adjust_kwargs_to_engine(self.parser_kwargs)
iter_creator = fun_partial(self.get_data_chunk_iter, chunksize=self.chunk_size, **parser_kwargs)
queue_populator = fun_partial(populate_queue_with_chunks, itr_creator=iter_creator, queue=chunk_queue)
pool = Pool(self.worker_threads_num)
pool.map_async(queue_populator, file_openers)
pool.close()
received_termin_tokens_count = 0
while True:
chunk = chunk_queue.get(timeout=self.timeout)
if isinstance(chunk, Exception):
raise chunk
if chunk == TERMINATION_TOKEN:
received_termin_tokens_count += 1
if received_termin_tokens_count == len(file_openers):
pool.join()
break
else:
yield chunk
else:
parser_kwargs = self.adjust_kwargs_to_engine(self.parser_kwargs)
for file_opener in file_openers:
dc_iter = self.get_data_chunk_iter(file_opener, chunksize=self.chunk_size, **parser_kwargs)
for chunk in dc_iter:
yield chunk
return chunk_iter
|
Copycat-abstractive-opinion-summarizer
|
positive
|
def storage_root(state: State, address: Address) -> Root:
"""
See `ethereum.dao_fork.state`.
"""
if state.tx_restore_points:
raise Exception('In a non-db transaction')
<DeepExtract>
if state.tx_restore_points:
raise Exception('In a non-db transaction')
for address in state.destroyed_accounts:
state.db.destroy_storage(address)
for (address, account) in state.dirty_accounts.items():
state.db.set_account(address, account)
for (address, storage) in state.dirty_storage.items():
for (key, value) in storage.items():
state.db.set_storage(address, key, value)
state.destroyed_accounts = set()
state.dirty_accounts.clear()
state.dirty_storage.clear()
</DeepExtract>
return state.db.storage_root(address)
|
def storage_root(state: State, address: Address) -> Root:
"""
See `ethereum.dao_fork.state`.
"""
if state.tx_restore_points:
raise Exception('In a non-db transaction')
if state.tx_restore_points:
raise Exception('In a non-db transaction')
for address in state.destroyed_accounts:
state.db.destroy_storage(address)
for (address, account) in state.dirty_accounts.items():
state.db.set_account(address, account)
for (address, storage) in state.dirty_storage.items():
for (key, value) in storage.items():
state.db.set_storage(address, key, value)
state.destroyed_accounts = set()
state.dirty_accounts.clear()
state.dirty_storage.clear()
return state.db.storage_root(address)
|
eth1.0-specs
|
positive
|
def _document_with_doctype(doctype_fragment):
"""Generate and parse a document with the given doctype."""
doctype = '<!DOCTYPE %s>' % doctype_fragment
markup = doctype + '\n<p>foo</p>'
<DeepExtract>
builder = kwargs.pop('builder', self.default_builder)
soup = BeautifulSoup(markup, builder=builder, **kwargs)
</DeepExtract>
return (doctype, soup)
|
def _document_with_doctype(doctype_fragment):
"""Generate and parse a document with the given doctype."""
doctype = '<!DOCTYPE %s>' % doctype_fragment
markup = doctype + '\n<p>foo</p>'
builder = kwargs.pop('builder', self.default_builder)
soup = BeautifulSoup(markup, builder=builder, **kwargs)
return (doctype, soup)
|
BeautifulSoup4
|
positive
|
def test_crispr_dsb_creates_operations(self):
s1 = 'agaaggtctggtagcgatgtagtcgatct'
s2 = 'gactaggtacgtagtcgtcaggtcagtca'
pam = 'cgg'
<DeepExtract>
g = Genome(name='Foo')
g.save()
for seq in sequences:
f = Fragment.create_with_sequence('Bar', seq, circular=False)
Genome_Fragment(genome=g, fragment=f, inherited=False).save()
try:
os.unlink(fragment_fasta_fn(f))
except OSError:
pass
build_all_genome_dbs(refresh=True)
g = Genome.objects.get(pk=g.id)
</DeepExtract>
guide = s1[-20:]
c = crispr_dsb(g, guide, 'ngg')
self.assertEquals(g.operation_set.count(), 0)
self.assertEquals(c.operation_set.count(), 1)
self.assertEquals(c.operation_set.all()[0].type, Operation.CRISPR_DSB[0])
|
def test_crispr_dsb_creates_operations(self):
s1 = 'agaaggtctggtagcgatgtagtcgatct'
s2 = 'gactaggtacgtagtcgtcaggtcagtca'
pam = 'cgg'
g = Genome(name='Foo')
g.save()
for seq in sequences:
f = Fragment.create_with_sequence('Bar', seq, circular=False)
Genome_Fragment(genome=g, fragment=f, inherited=False).save()
try:
os.unlink(fragment_fasta_fn(f))
except OSError:
pass
build_all_genome_dbs(refresh=True)
g = Genome.objects.get(pk=g.id)
guide = s1[-20:]
c = crispr_dsb(g, guide, 'ngg')
self.assertEquals(g.operation_set.count(), 0)
self.assertEquals(c.operation_set.count(), 1)
self.assertEquals(c.operation_set.all()[0].type, Operation.CRISPR_DSB[0])
|
edge
|
positive
|
def setup_classifiers():
rng = np.random.RandomState(123456)
<DeepExtract>
weights = [0.1, 0.2, 0.7]
(X, y) = make_classification(n_classes=3, n_samples=2000, n_informative=3, random_state=rng, weights=weights)
(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.33, random_state=rng)
scalar = StandardScaler()
X_train = scalar.fit_transform(X_train)
X_test = scalar.transform(X_test)
(X_train, X_dsel, y_train, y_dsel) = train_test_split(X_train, y_train, test_size=0.5, random_state=rng)
(X_dsel, X_test, X_train, y_dsel, y_test, y_train) = (X_dsel, X_test, X_train, y_dsel, y_test, y_train)
</DeepExtract>
pool_classifiers = AdaBoostClassifier(random_state=rng)
pool_classifiers.fit(X_train, y_train)
return (pool_classifiers, X_dsel, y_dsel, X_test, y_test)
|
def setup_classifiers():
rng = np.random.RandomState(123456)
weights = [0.1, 0.2, 0.7]
(X, y) = make_classification(n_classes=3, n_samples=2000, n_informative=3, random_state=rng, weights=weights)
(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.33, random_state=rng)
scalar = StandardScaler()
X_train = scalar.fit_transform(X_train)
X_test = scalar.transform(X_test)
(X_train, X_dsel, y_train, y_dsel) = train_test_split(X_train, y_train, test_size=0.5, random_state=rng)
(X_dsel, X_test, X_train, y_dsel, y_test, y_train) = (X_dsel, X_test, X_train, y_dsel, y_test, y_train)
pool_classifiers = AdaBoostClassifier(random_state=rng)
pool_classifiers.fit(X_train, y_train)
return (pool_classifiers, X_dsel, y_dsel, X_test, y_test)
|
DESlib
|
positive
|
@verbose(True, verbose_output=False, timeout=None, _str=None)
def analyze(self, data, uuid, _path, folder) -> bool:
"""
    first logic to execute; this will check if the malware folder exists or not,
    get details of the target file, and move a temp version of it to a temp
    folder that has the md5
"""
<DeepExtract>
self.malwarefarm = folder
if not self.malwarefarm.endswith(path.sep):
self.malwarefarm = self.malwarefarm + path.sep
if not path.isdir(self.malwarefarm):
mkdir(self.malwarefarm)
</DeepExtract>
<DeepExtract>
data['Details'] = deepcopy(self.datastruct)
temp_f = open(_path, 'rb').read()
open(_path, 'rb').read(4)
data['Details']['Properties'] = {'Name': path.basename(_path).lower(), 'md5': md5(temp_f).hexdigest(), 'sha1': sha1(temp_f).hexdigest(), 'sha256': sha256(temp_f).hexdigest(), 'ssdeep': hash_from_file(_path), 'size': convert_size(path.getsize(_path)), 'bytes': path.getsize(_path), 'mime': from_file(_path, mime=True), 'extension': guess_type(_path)[0], 'Entropy': get_entropy(temp_f)}
</DeepExtract>
<DeepExtract>
safename = ''.join([c for c in path.basename(_path) if match('[\\w\\.]', c)])
if len(safename) == 0:
safename = 'temp'
temp_md5 = data['Details']['Properties']['md5']
folder_path = self.malwarefarm + uuid + '_' + temp_md5
if path.exists(folder_path):
rmtree(folder_path)
mkdir(folder_path)
copyfile(_path, folder_path + path.sep + 'temp')
data['Location'] = {'Original': _path, 'File': folder_path + path.sep + 'temp', 'html': folder_path + path.sep + safename + '.html', 'json': folder_path + path.sep + safename + '.json', 'Folder': folder_path + path.sep + 'temp_unpacked'}
data['FilesDumps'] = {folder_path + path.sep + 'temp': open(_path, 'rb').read()}
</DeepExtract>
|
@verbose(True, verbose_output=False, timeout=None, _str=None)
def analyze(self, data, uuid, _path, folder) -> bool:
"""
    first logic to execute; this will check if the malware folder exists or not,
    get details of the target file, and move a temp version of it to a temp
    folder that has the md5
"""
self.malwarefarm = folder
if not self.malwarefarm.endswith(path.sep):
self.malwarefarm = self.malwarefarm + path.sep
if not path.isdir(self.malwarefarm):
mkdir(self.malwarefarm)
data['Details'] = deepcopy(self.datastruct)
temp_f = open(_path, 'rb').read()
open(_path, 'rb').read(4)
data['Details']['Properties'] = {'Name': path.basename(_path).lower(), 'md5': md5(temp_f).hexdigest(), 'sha1': sha1(temp_f).hexdigest(), 'sha256': sha256(temp_f).hexdigest(), 'ssdeep': hash_from_file(_path), 'size': convert_size(path.getsize(_path)), 'bytes': path.getsize(_path), 'mime': from_file(_path, mime=True), 'extension': guess_type(_path)[0], 'Entropy': get_entropy(temp_f)}
safename = ''.join([c for c in path.basename(_path) if match('[\\w\\.]', c)])
if len(safename) == 0:
safename = 'temp'
temp_md5 = data['Details']['Properties']['md5']
folder_path = self.malwarefarm + uuid + '_' + temp_md5
if path.exists(folder_path):
rmtree(folder_path)
mkdir(folder_path)
copyfile(_path, folder_path + path.sep + 'temp')
data['Location'] = {'Original': _path, 'File': folder_path + path.sep + 'temp', 'html': folder_path + path.sep + safename + '.html', 'json': folder_path + path.sep + safename + '.json', 'Folder': folder_path + path.sep + 'temp_unpacked'}
data['FilesDumps'] = {folder_path + path.sep + 'temp': open(_path, 'rb').read()}
|
analyzer
|
positive
|
def load(self, conf, args=None):
"""
Discover and load all the rules as defined in the conf and args.
:param dict conf: Configuration dict
:param dict args: Arguments dict
:return: List of rules
:rtype: list
"""
names = []
use_rule = None if args is None else args.rule
rules = []
<DeepExtract>
raise NotImplementedError()
</DeepExtract>
for rule_file in rule_files:
try:
<DeepExtract>
rule = self.load_yaml(rule_file)
self.load_options(rule, conf, rule_file, args)
self.load_modules(rule, args)
rule = rule
</DeepExtract>
if not rule:
logging.error('Invalid rule file skipped: %s' % rule_file)
continue
if 'is_enabled' in rule and (not rule['is_enabled']):
continue
if rule['name'] in names:
raise EAException('Duplicate rule named %s' % rule['name'])
except EAException as e:
raise EAException('Error loading file %s: %s' % (rule_file, e))
rules.append(rule)
names.append(rule['name'])
return rules
|
def load(self, conf, args=None):
"""
Discover and load all the rules as defined in the conf and args.
:param dict conf: Configuration dict
:param dict args: Arguments dict
:return: List of rules
:rtype: list
"""
names = []
use_rule = None if args is None else args.rule
rules = []
raise NotImplementedError()
for rule_file in rule_files:
try:
rule = self.load_yaml(rule_file)
self.load_options(rule, conf, rule_file, args)
self.load_modules(rule, args)
rule = rule
if not rule:
logging.error('Invalid rule file skipped: %s' % rule_file)
continue
if 'is_enabled' in rule and (not rule['is_enabled']):
continue
if rule['name'] in names:
raise EAException('Duplicate rule named %s' % rule['name'])
except EAException as e:
raise EAException('Error loading file %s: %s' % (rule_file, e))
rules.append(rule)
names.append(rule['name'])
return rules
|
elastalert
|
positive
|
def transform(self) -> Ensemble:
if not self.is_objective_supported():
raise ValueError(f"Unsupported objective '{self._objective}'")
<DeepExtract>
tree_table: pd.DataFrame = self._model.trees_to_dataframe()
transformed_trees = []
curr_tree: Optional[Any] = None
tree_nodes: List[TreeNode] = []
for (_, row) in tree_table.iterrows():
if row['Tree'] != curr_tree:
if len(tree_nodes) > 0:
transformed_trees.append(self.build_tree(tree_nodes))
curr_tree = row['Tree']
tree_nodes = []
tree_nodes.append(self.build_tree_node(row, curr_tree))
if len(tree_nodes) > 0:
transformed_trees.append(self.build_tree(tree_nodes))
if self._objective.partition(':')[0] == 'reg':
transformed_trees.append(self.build_base_score_stump())
forest = transformed_trees
</DeepExtract>
return Ensemble(feature_names=self._feature_names, trained_models=forest, output_aggregator=self.build_aggregator_output(), classification_labels=self._classification_labels, classification_weights=self._classification_weights, target_type=self.determine_target_type())
|
def transform(self) -> Ensemble:
if not self.is_objective_supported():
raise ValueError(f"Unsupported objective '{self._objective}'")
tree_table: pd.DataFrame = self._model.trees_to_dataframe()
transformed_trees = []
curr_tree: Optional[Any] = None
tree_nodes: List[TreeNode] = []
for (_, row) in tree_table.iterrows():
if row['Tree'] != curr_tree:
if len(tree_nodes) > 0:
transformed_trees.append(self.build_tree(tree_nodes))
curr_tree = row['Tree']
tree_nodes = []
tree_nodes.append(self.build_tree_node(row, curr_tree))
if len(tree_nodes) > 0:
transformed_trees.append(self.build_tree(tree_nodes))
if self._objective.partition(':')[0] == 'reg':
transformed_trees.append(self.build_base_score_stump())
forest = transformed_trees
return Ensemble(feature_names=self._feature_names, trained_models=forest, output_aggregator=self.build_aggregator_output(), classification_labels=self._classification_labels, classification_weights=self._classification_weights, target_type=self.determine_target_type())
|
eland
|
positive
|
def xorUtil(self, key, maxbits):
<DeepExtract>
curr = self.root
</DeepExtract>
for i in range(maxbits - 1, -1, -1):
bit = key >> i & 1
if curr.child[bit]:
curr = curr.child[bit]
elif curr.child[1 - bit]:
curr = curr.child[1 - bit]
return curr.value ^ key
|
def xorUtil(self, key, maxbits):
curr = self.root
for i in range(maxbits - 1, -1, -1):
bit = key >> i & 1
if curr.child[bit]:
curr = curr.child[bit]
elif curr.child[1 - bit]:
curr = curr.child[1 - bit]
return curr.value ^ key
|
Competitive_Programming
|
positive
|
def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
lookahead = None
lookaheadstack = []
actions = self.action
goto = self.goto
prod = self.productions
defaulted_states = self.defaulted_states
pslice = YaccProduction(None)
errorcount = 0
if not lexer:
from ply import lex
lexer = lex.lexer
pslice.lexer = lexer
pslice.parser = self
if input is not None:
lexer.input(input)
if tokenfunc is None:
get_token = lexer.token
else:
get_token = tokenfunc
self.token = get_token
statestack = []
self.statestack = statestack
symstack = []
self.symstack = symstack
pslice.stack = symstack
errtoken = None
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token()
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
if errorcount:
errorcount -= 1
continue
if t < 0:
p = prod[-t]
pname = p.name
plen = p.len
sym = YaccSymbol()
sym.type = pname
sym.value = None
if plen:
targ = symstack[-plen - 1:]
targ[0] = sym
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
pslice.slice = targ
try:
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
lookaheadstack.append(lookahead)
symstack.extend(targ[1:-1])
statestack.pop()
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
else:
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
targ = [sym]
pslice.slice = targ
try:
self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
lookaheadstack.append(lookahead)
statestack.pop()
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None
if self.errorfunc:
if errtoken and (not hasattr(errtoken, 'lexer')):
errtoken.lexer = lexer
self.state = state
<DeepExtract>
global _errok, _token, _restart
_errok = self.errok
_token = self.token
_restart = self.restart
r = self.errorfunc(errtoken)
try:
del _errok, _token, _restart
except NameError:
pass
tok = r
</DeepExtract>
if self.errorok:
lookahead = tok
errtoken = None
continue
elif errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
del lookaheadstack[:]
continue
if lookahead.type == '$end':
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
statestack.pop()
state = statestack[-1]
continue
raise RuntimeError('yacc: internal parser error!!!\n')
|
def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
lookahead = None
lookaheadstack = []
actions = self.action
goto = self.goto
prod = self.productions
defaulted_states = self.defaulted_states
pslice = YaccProduction(None)
errorcount = 0
if not lexer:
from ply import lex
lexer = lex.lexer
pslice.lexer = lexer
pslice.parser = self
if input is not None:
lexer.input(input)
if tokenfunc is None:
get_token = lexer.token
else:
get_token = tokenfunc
self.token = get_token
statestack = []
self.statestack = statestack
symstack = []
self.symstack = symstack
pslice.stack = symstack
errtoken = None
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token()
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
if errorcount:
errorcount -= 1
continue
if t < 0:
p = prod[-t]
pname = p.name
plen = p.len
sym = YaccSymbol()
sym.type = pname
sym.value = None
if plen:
targ = symstack[-plen - 1:]
targ[0] = sym
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
pslice.slice = targ
try:
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
lookaheadstack.append(lookahead)
symstack.extend(targ[1:-1])
statestack.pop()
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
else:
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
targ = [sym]
pslice.slice = targ
try:
self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
lookaheadstack.append(lookahead)
statestack.pop()
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None
if self.errorfunc:
if errtoken and (not hasattr(errtoken, 'lexer')):
errtoken.lexer = lexer
self.state = state
global _errok, _token, _restart
_errok = self.errok
_token = self.token
_restart = self.restart
r = self.errorfunc(errtoken)
try:
del _errok, _token, _restart
except NameError:
pass
tok = r
if self.errorok:
lookahead = tok
errtoken = None
continue
elif errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
del lookaheadstack[:]
continue
if lookahead.type == '$end':
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
statestack.pop()
state = statestack[-1]
continue
raise RuntimeError('yacc: internal parser error!!!\n')
|
demo2program
|
positive
|
def test_weave_readout():
if torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
<DeepExtract>
g = dgl.graph(([0, 0, 1], [1, 2, 2]))
(g, node_feats) = (g, torch.arange(g.num_nodes()).float().reshape(-1, 1))
</DeepExtract>
(g, node_feats) = (g.to(device), node_feats.to(device))
<DeepExtract>
g1 = dgl.graph(([0, 0, 1], [1, 2, 2]))
g2 = dgl.graph(([0, 1, 1, 1], [1, 2, 3, 4]))
bg = dgl.batch([g1, g2])
(bg, batch_node_feats) = (bg, torch.arange(bg.num_nodes()).float().reshape(-1, 1))
</DeepExtract>
(bg, batch_node_feats) = (bg.to(device), batch_node_feats.to(device))
model = WeaveGather(node_in_feats=1).to(device)
assert model(g, node_feats).shape == torch.Size([1, 1])
assert model(bg, batch_node_feats).shape == torch.Size([2, 1])
model = WeaveGather(node_in_feats=1, gaussian_expand=False).to(device)
assert model(g, node_feats).shape == torch.Size([1, 1])
assert model(bg, batch_node_feats).shape == torch.Size([2, 1])
|
def test_weave_readout():
if torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
g = dgl.graph(([0, 0, 1], [1, 2, 2]))
(g, node_feats) = (g, torch.arange(g.num_nodes()).float().reshape(-1, 1))
(g, node_feats) = (g.to(device), node_feats.to(device))
g1 = dgl.graph(([0, 0, 1], [1, 2, 2]))
g2 = dgl.graph(([0, 1, 1, 1], [1, 2, 3, 4]))
bg = dgl.batch([g1, g2])
(bg, batch_node_feats) = (bg, torch.arange(bg.num_nodes()).float().reshape(-1, 1))
(bg, batch_node_feats) = (bg.to(device), batch_node_feats.to(device))
model = WeaveGather(node_in_feats=1).to(device)
assert model(g, node_feats).shape == torch.Size([1, 1])
assert model(bg, batch_node_feats).shape == torch.Size([2, 1])
model = WeaveGather(node_in_feats=1, gaussian_expand=False).to(device)
assert model(g, node_feats).shape == torch.Size([1, 1])
assert model(bg, batch_node_feats).shape == torch.Size([2, 1])
|
dgl-lifesci
|
positive
|
def save(self, *args, update_volume_number_slug=True, **kwargs):
if update_volume_number_slug:
<DeepExtract>
self.volume_number_slug = slugify(self.volume_number)
</DeepExtract>
super().save(*args, **kwargs)
|
def save(self, *args, update_volume_number_slug=True, **kwargs):
if update_volume_number_slug:
self.volume_number_slug = slugify(self.volume_number)
super().save(*args, **kwargs)
|
capstone
|
positive
|
def adjust_poolsize(self, minthreads=None, maxthreads=None):
"""
adjust pool size
"""
if minthreads is None:
minthreads = self._min
if maxthreads is None:
maxthreads = self._max
assert minthreads >= 0, 'minimum is negative'
assert minthreads <= maxthreads, 'minimum is greater than maximum'
self._min = minthreads
self._max = maxthreads
if not self._started:
return
while self._workers > self._max:
<DeepExtract>
self._jobqueue.put(self._WORKER_STOP_SIGN)
self._workers -= 1
</DeepExtract>
while self._workers < self._min:
<DeepExtract>
self._workers += 1
name = 'PoolThread-%s-%s' % (self._name or id(self), self._workers)
new_thd = self._THREAD_FACTORY(target=self._worker, name=name)
if self._daemon_thread:
new_thd.daemon = True
self._threads.append(new_thd)
new_thd.start()
</DeepExtract>
<DeepExtract>
need_size = self._jobqueue.qsize() + len(self._working)
while self._workers < min(self._max, need_size):
self.start1worker()
</DeepExtract>
|
def adjust_poolsize(self, minthreads=None, maxthreads=None):
"""
adjust pool size
"""
if minthreads is None:
minthreads = self._min
if maxthreads is None:
maxthreads = self._max
assert minthreads >= 0, 'minimum is negative'
assert minthreads <= maxthreads, 'minimum is greater than maximum'
self._min = minthreads
self._max = maxthreads
if not self._started:
return
while self._workers > self._max:
self._jobqueue.put(self._WORKER_STOP_SIGN)
self._workers -= 1
while self._workers < self._min:
self._workers += 1
name = 'PoolThread-%s-%s' % (self._name or id(self), self._workers)
new_thd = self._THREAD_FACTORY(target=self._worker, name=name)
if self._daemon_thread:
new_thd.daemon = True
self._threads.append(new_thd)
new_thd.start()
need_size = self._jobqueue.qsize() + len(self._working)
while self._workers < min(self._max, need_size):
self.start1worker()
|
CUP
|
positive
|
def parse_file(self, strfile, config, flags=None):
invar_props = []
ltl_props = []
if flags is None:
Logger.error('Module name not provided')
absstrfile = os.path.abspath(strfile)
print_level = 3
if not Logger.level(print_level):
saved_stdout = suppress_output()
<DeepExtract>
codeparser = VerilogCodeParser([absstrfile], preprocess_include=preprocess_include, preprocess_define=preprocess_define)
ast = codeparser.parse()
ast = ast
</DeepExtract>
if not Logger.level(print_level):
restore_output(saved_stdout)
if Logger.level(2):
timer = Logger.start_timer('encoding')
self.walker.config = config
hts = self.walker.walk(ast, flags[0])
self.abstract_clock_list = self.walker.abstract_clock_list
self.clock_list = self.walker.clock_list
if Logger.level(2):
Logger.get_timer(timer)
timer = Logger.start_timer('flattening')
hts.flatten()
if Logger.level(2):
Logger.get_timer(timer)
if config.zero_init:
ts = TS('zero-init')
assigns = []
for var in hts.state_vars:
if var.symbol_type().is_bv_type():
assigns.append(EqualsOrIff(var, BVZero(var.bv_width())))
if var.symbol_type().is_bool_type():
assigns.append(Not(var))
<DeepExtract>
assert len(args) == 2
ts.init = BVAnd(self.to_bv(args[0]), self.to_bv(args[1]))
</DeepExtract>
hts.add_ts(ts)
<DeepExtract>
assertvars = [v.symbol_name() for v in hts.vars if ASSERT_ST in v.symbol_name() or ASSERTI_ST in v.symbol_name()]
invar_props = []
ltl_props = []
print_line = False
def extract_lineno(line, linenum):
orig_lineno = re.search('/\\w.*\\(\\d+\\)', line)
if orig_lineno is None:
(invar_props, ltl_props) = (line, linenum)
(strfile, linenum) = (re.search('/\\w.*\\(', line), re.search('\\(\\d+\\)', line))
if strfile is None or linenum is None:
(invar_props, ltl_props) = (line, linenum)
strfile = strfile.group(0)[:-1]
linenum = int(linenum.group(0)[1:-1])
if print_line:
with open(strfile, 'r') as f:
line = f.readlines()[linenum - 1]
(invar_props, ltl_props) = (line, linenum)
if len(assertvars) > 0:
with open(strfile, 'r') as f:
lines = f.readlines()
for assertion in assertvars:
asserti = ASSERTI_ST in assertion
prefix = ASSERTI_ST if asserti else ASSERT_ST
linenum = int(assertion[assertion.find(prefix) + len(prefix):assertion.find(ASSERT_EN)])
(line, linenum) = extract_lineno(lines[linenum - 1], linenum)
line = re.sub(' +', ' ', line.strip())
lineprint = ', "%s"' % line if print_line else ''
if asserti:
ltl_props.append(('ImmediateAssertion_line_%d' % linenum, 'Immediate assertion at line %d%s' % (linenum, lineprint), 'F(%s)' % assertion))
else:
invar_props.append(('Assertion_line_%d' % linenum, 'Assertion at line %d%s' % (linenum, lineprint), assertion))
(invar_props, ltl_props) = (invar_props, ltl_props)
</DeepExtract>
return (hts, invar_props, ltl_props)
|
def parse_file(self, strfile, config, flags=None):
invar_props = []
ltl_props = []
if flags is None:
Logger.error('Module name not provided')
absstrfile = os.path.abspath(strfile)
print_level = 3
if not Logger.level(print_level):
saved_stdout = suppress_output()
codeparser = VerilogCodeParser([absstrfile], preprocess_include=preprocess_include, preprocess_define=preprocess_define)
ast = codeparser.parse()
ast = ast
if not Logger.level(print_level):
restore_output(saved_stdout)
if Logger.level(2):
timer = Logger.start_timer('encoding')
self.walker.config = config
hts = self.walker.walk(ast, flags[0])
self.abstract_clock_list = self.walker.abstract_clock_list
self.clock_list = self.walker.clock_list
if Logger.level(2):
Logger.get_timer(timer)
timer = Logger.start_timer('flattening')
hts.flatten()
if Logger.level(2):
Logger.get_timer(timer)
if config.zero_init:
ts = TS('zero-init')
assigns = []
for var in hts.state_vars:
if var.symbol_type().is_bv_type():
assigns.append(EqualsOrIff(var, BVZero(var.bv_width())))
if var.symbol_type().is_bool_type():
assigns.append(Not(var))
assert len(args) == 2
ts.init = BVAnd(self.to_bv(args[0]), self.to_bv(args[1]))
hts.add_ts(ts)
assertvars = [v.symbol_name() for v in hts.vars if ASSERT_ST in v.symbol_name() or ASSERTI_ST in v.symbol_name()]
invar_props = []
ltl_props = []
print_line = False
def extract_lineno(line, linenum):
orig_lineno = re.search('/\\w.*\\(\\d+\\)', line)
if orig_lineno is None:
(invar_props, ltl_props) = (line, linenum)
(strfile, linenum) = (re.search('/\\w.*\\(', line), re.search('\\(\\d+\\)', line))
if strfile is None or linenum is None:
(invar_props, ltl_props) = (line, linenum)
strfile = strfile.group(0)[:-1]
linenum = int(linenum.group(0)[1:-1])
if print_line:
with open(strfile, 'r') as f:
line = f.readlines()[linenum - 1]
(invar_props, ltl_props) = (line, linenum)
if len(assertvars) > 0:
with open(strfile, 'r') as f:
lines = f.readlines()
for assertion in assertvars:
asserti = ASSERTI_ST in assertion
prefix = ASSERTI_ST if asserti else ASSERT_ST
linenum = int(assertion[assertion.find(prefix) + len(prefix):assertion.find(ASSERT_EN)])
(line, linenum) = extract_lineno(lines[linenum - 1], linenum)
line = re.sub(' +', ' ', line.strip())
lineprint = ', "%s"' % line if print_line else ''
if asserti:
ltl_props.append(('ImmediateAssertion_line_%d' % linenum, 'Immediate assertion at line %d%s' % (linenum, lineprint), 'F(%s)' % assertion))
else:
invar_props.append(('Assertion_line_%d' % linenum, 'Assertion at line %d%s' % (linenum, lineprint), assertion))
(invar_props, ltl_props) = (invar_props, ltl_props)
return (hts, invar_props, ltl_props)
|
CoSA
|
positive
|
def compute_loss(likelihood):
gtl = torch.tensor(self.gt_likelihood).float().to(self.device)
if self.args.pm_loss == 'KL':
self.loss_ll = (gtl * torch.log(gtl / likelihood)).sum()
elif self.args.pm_loss == 'L1':
self.loss_ll = torch.abs(likelihood - gtl).sum()
if self.args.update_pm_by == 'GTL' or self.args.update_pm_by == 'BOTH':
if len(self.loss_likelihood) < self.args.pm_batch_size:
self.loss_likelihood.append(self.loss_ll)
if self.args.verbose > 2:
print('loss_likelihood', len(self.loss_likelihood))
if len(self.loss_likelihood) >= self.args.pm_batch_size:
<DeepExtract>
if self.args.update_pm_by == 'GTL' or self.args.update_pm_by == 'BOTH':
self.optimizer_pm.zero_grad()
(sum(self.loss_likelihood) / float(len(self.loss_likelihood))).backward(retain_graph=True)
self.optimizer_pm.step()
mean_test_loss = sum(self.loss_likelihood).item()
if self.args.schedule_pm:
self.scheduler_pm.step()
self.pm_backprop_cnt += 1
if self.args.save and self.pm_backprop_cnt % self.args.mdl_save_freq == 0:
torch.save(self.perceptual_model.state_dict(), self.pm_filepath)
print('perceptual model saved at %s.' % self.pm_filepath)
else:
return
if self.args.verbose > 0:
print('back_prop_pm done')
</DeepExtract>
self.loss_likelihood = []
del gtl
|
def compute_loss(likelihood):
gtl = torch.tensor(self.gt_likelihood).float().to(self.device)
if self.args.pm_loss == 'KL':
self.loss_ll = (gtl * torch.log(gtl / likelihood)).sum()
elif self.args.pm_loss == 'L1':
self.loss_ll = torch.abs(likelihood - gtl).sum()
if self.args.update_pm_by == 'GTL' or self.args.update_pm_by == 'BOTH':
if len(self.loss_likelihood) < self.args.pm_batch_size:
self.loss_likelihood.append(self.loss_ll)
if self.args.verbose > 2:
print('loss_likelihood', len(self.loss_likelihood))
if len(self.loss_likelihood) >= self.args.pm_batch_size:
if self.args.update_pm_by == 'GTL' or self.args.update_pm_by == 'BOTH':
self.optimizer_pm.zero_grad()
(sum(self.loss_likelihood) / float(len(self.loss_likelihood))).backward(retain_graph=True)
self.optimizer_pm.step()
mean_test_loss = sum(self.loss_likelihood).item()
if self.args.schedule_pm:
self.scheduler_pm.step()
self.pm_backprop_cnt += 1
if self.args.save and self.pm_backprop_cnt % self.args.mdl_save_freq == 0:
torch.save(self.perceptual_model.state_dict(), self.pm_filepath)
print('perceptual model saved at %s.' % self.pm_filepath)
else:
return
if self.args.verbose > 0:
print('back_prop_pm done')
self.loss_likelihood = []
del gtl
|
dal
|
positive
|
def run_epoch(self, phase, epoch, data_loader):
model_with_loss = self.model_with_loss
if phase == 'train':
model_with_loss.train()
else:
if len(self.opt.gpus) > 1:
model_with_loss = self.model_with_loss.module
model_with_loss.eval()
torch.cuda.empty_cache()
opt = self.opt
results = {}
(data_time, batch_time) = (AverageMeter(), AverageMeter())
avg_loss_stats = {l: AverageMeter() for l in self.loss_stats if l == 'tot' or opt.weights[l] > 0}
num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
bar = Bar('{}/{}'.format(opt.task, opt.exp_id), max=num_iters)
end = time.time()
for (iter_id, batch) in enumerate(data_loader):
if iter_id >= num_iters:
break
data_time.update(time.time() - end)
for k in batch:
if k != 'meta':
batch[k] = batch[k].to(device=opt.device, non_blocking=True)
(output, loss, loss_stats) = model_with_loss(batch, phase)
loss = loss.mean()
if phase == 'train':
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
Bar.suffix = '{phase}: [{0}][{1}/{2}]|Tot: {total:} |ETA: {eta:} '.format(epoch, iter_id, num_iters, phase=phase, total=bar.elapsed_td, eta=bar.eta_td)
for l in avg_loss_stats:
avg_loss_stats[l].update(loss_stats[l].mean().item(), batch['image'].size(0))
Bar.suffix = Bar.suffix + '|{} {:.4f} '.format(l, avg_loss_stats[l].avg)
Bar.suffix = Bar.suffix + '|Data {dt.val:.3f}s({dt.avg:.3f}s) |Net {bt.avg:.3f}s'.format(dt=data_time, bt=batch_time)
if opt.print_iter > 0:
if iter_id % opt.print_iter == 0:
print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
else:
bar.next()
if opt.debug > 0:
<DeepExtract>
opt = self.opt
if 'pre_hm' in batch:
output.update({'pre_hm': batch['pre_hm']})
dets = fusion_decode(output, K=opt.K, opt=opt)
for k in dets:
dets[k] = dets[k].detach().cpu().numpy()
dets_gt = batch['meta']['gt_det']
for i in range(1):
debugger = Debugger(opt=opt, dataset=data_loader.dataset)
img = batch['image'][i].detach().cpu().numpy().transpose(1, 2, 0)
img = np.clip((img * data_loader.dataset.std + data_loader.dataset.mean) * 255.0, 0, 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm', trans=self.opt.hm_transparency)
debugger.add_blend_img(img, gt, 'gt_hm', trans=self.opt.hm_transparency)
debugger.add_img(img, img_id='img')
if opt.pointcloud:
pc_2d = batch['pc_2d'][i].detach().cpu().numpy()
pc_3d = None
pc_N = batch['pc_N'][i].detach().cpu().numpy()
debugger.add_img(img, img_id='pc')
debugger.add_pointcloud(pc_2d, pc_N, img_id='pc')
if 'pc_hm' in opt.pc_feat_lvl:
channel = opt.pc_feat_channels['pc_hm']
pc_hm = debugger.gen_colormap(batch['pc_hm'][i][channel].unsqueeze(0).detach().cpu().numpy())
debugger.add_blend_img(img, pc_hm, 'pc_hm', trans=self.opt.hm_transparency)
if 'pc_dep' in opt.pc_feat_lvl:
channel = opt.pc_feat_channels['pc_dep']
pc_hm = batch['pc_hm'][i][channel].unsqueeze(0).detach().cpu().numpy()
pc_dep = debugger.add_overlay_img(img, pc_hm, 'pc_dep')
if 'pre_img' in batch:
pre_img = batch['pre_img'][i].detach().cpu().numpy().transpose(1, 2, 0)
pre_img = np.clip((pre_img * data_loader.dataset.std + data_loader.dataset.mean) * 255, 0, 255).astype(np.uint8)
debugger.add_img(pre_img, 'pre_img_pred')
debugger.add_img(pre_img, 'pre_img_gt')
if 'pre_hm' in batch:
pre_hm = debugger.gen_colormap(batch['pre_hm'][i].detach().cpu().numpy())
debugger.add_blend_img(pre_img, pre_hm, 'pre_hm', trans=self.opt.hm_transparency)
debugger.add_img(img, img_id='out_pred')
if 'ltrb_amodal' in opt.heads:
debugger.add_img(img, img_id='out_pred_amodal')
debugger.add_img(img, img_id='out_gt_amodal')
for k in range(len(dets['scores'][i])):
if dets['scores'][i, k] > opt.vis_thresh:
debugger.add_coco_bbox(dets['bboxes'][i, k] * opt.down_ratio, dets['clses'][i, k], dets['scores'][i, k], img_id='out_pred')
if 'ltrb_amodal' in opt.heads:
debugger.add_coco_bbox(dets['bboxes_amodal'][i, k] * opt.down_ratio, dets['clses'][i, k], dets['scores'][i, k], img_id='out_pred_amodal')
if 'hps' in opt.heads and int(dets['clses'][i, k]) == 0:
debugger.add_coco_hp(dets['hps'][i, k] * opt.down_ratio, img_id='out_pred')
if 'tracking' in opt.heads:
debugger.add_arrow(dets['cts'][i][k] * opt.down_ratio, dets['tracking'][i][k] * opt.down_ratio, img_id='out_pred')
debugger.add_arrow(dets['cts'][i][k] * opt.down_ratio, dets['tracking'][i][k] * opt.down_ratio, img_id='pre_img_pred')
debugger.add_img(img, img_id='out_gt')
for k in range(len(dets_gt['scores'][i])):
if dets_gt['scores'][i][k] > opt.vis_thresh:
if 'dep' in dets_gt.keys():
dist = dets_gt['dep'][i][k]
if len(dist) > 1:
dist = dist[0]
else:
dist = -1
debugger.add_coco_bbox(dets_gt['bboxes'][i][k] * opt.down_ratio, dets_gt['clses'][i][k], dets_gt['scores'][i][k], img_id='out_gt', dist=dist)
if 'ltrb_amodal' in opt.heads:
debugger.add_coco_bbox(dets_gt['bboxes_amodal'][i, k] * opt.down_ratio, dets_gt['clses'][i, k], dets_gt['scores'][i, k], img_id='out_gt_amodal')
if 'hps' in opt.heads and int(dets['clses'][i, k]) == 0:
debugger.add_coco_hp(dets_gt['hps'][i][k] * opt.down_ratio, img_id='out_gt')
if 'tracking' in opt.heads:
debugger.add_arrow(dets_gt['cts'][i][k] * opt.down_ratio, dets_gt['tracking'][i][k] * opt.down_ratio, img_id='out_gt')
debugger.add_arrow(dets_gt['cts'][i][k] * opt.down_ratio, dets_gt['tracking'][i][k] * opt.down_ratio, img_id='pre_img_gt')
if 'hm_hp' in opt.heads:
pred = debugger.gen_colormap_hp(output['hm_hp'][i].detach().cpu().numpy())
gt = debugger.gen_colormap_hp(batch['hm_hp'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hmhp', trans=self.opt.hm_transparency)
debugger.add_blend_img(img, gt, 'gt_hmhp', trans=self.opt.hm_transparency)
if 'rot' in opt.heads and 'dim' in opt.heads and ('dep' in opt.heads):
dets_gt = {k: dets_gt[k].cpu().numpy() for k in dets_gt}
calib = batch['meta']['calib'].detach().numpy() if 'calib' in batch['meta'] else None
det_pred = generic_post_process(opt, dets, batch['meta']['c'].cpu().numpy(), batch['meta']['s'].cpu().numpy(), output['hm'].shape[2], output['hm'].shape[3], self.opt.num_classes, calib)
det_gt = generic_post_process(opt, dets_gt, batch['meta']['c'].cpu().numpy(), batch['meta']['s'].cpu().numpy(), output['hm'].shape[2], output['hm'].shape[3], self.opt.num_classes, calib, is_gt=True)
debugger.add_3d_detection(batch['meta']['img_path'][i], batch['meta']['flipped'][i], det_pred[i], calib[i], vis_thresh=opt.vis_thresh, img_id='add_pred')
debugger.add_3d_detection(batch['meta']['img_path'][i], batch['meta']['flipped'][i], det_gt[i], calib[i], vis_thresh=opt.vis_thresh, img_id='add_gt')
pc_3d = None
if opt.pointcloud:
pc_3d = batch['pc_3d'].cpu().numpy()
debugger.add_bird_views(det_pred[i], det_gt[i], vis_thresh=opt.vis_thresh, img_id='bird_pred_gt', pc_3d=pc_3d, show_velocity=opt.show_velocity)
debugger.add_bird_views([], det_gt[i], vis_thresh=opt.vis_thresh, img_id='bird_gt', pc_3d=pc_3d, show_velocity=opt.show_velocity)
if opt.debug == 4:
debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
else:
debugger.show_all_imgs(pause=True)
</DeepExtract>
if phase == 'val' and (opt.run_dataset_eval or opt.eval):
meta = batch['meta']
dets = fusion_decode(output, K=opt.K, opt=opt)
for k in dets:
dets[k] = dets[k].detach().cpu().numpy()
calib = meta['calib'].detach().numpy() if 'calib' in meta else None
dets = generic_post_process(opt, dets, meta['c'].cpu().numpy(), meta['s'].cpu().numpy(), output['hm'].shape[2], output['hm'].shape[3], self.opt.num_classes, calib)
result = []
for i in range(len(dets[0])):
if dets[0][i]['score'] > self.opt.out_thresh and all(dets[0][i]['dim'] > 0):
result.append(dets[0][i])
img_id = batch['meta']['img_id'].numpy().astype(np.int32)[0]
results[img_id] = result
del output, loss, loss_stats
bar.finish()
ret = {k: v.avg for (k, v) in avg_loss_stats.items()}
ret['time'] = bar.elapsed_td.total_seconds() / 60.0
return (ret, results)
|
def run_epoch(self, phase, epoch, data_loader):
model_with_loss = self.model_with_loss
if phase == 'train':
model_with_loss.train()
else:
if len(self.opt.gpus) > 1:
model_with_loss = self.model_with_loss.module
model_with_loss.eval()
torch.cuda.empty_cache()
opt = self.opt
results = {}
(data_time, batch_time) = (AverageMeter(), AverageMeter())
avg_loss_stats = {l: AverageMeter() for l in self.loss_stats if l == 'tot' or opt.weights[l] > 0}
num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
bar = Bar('{}/{}'.format(opt.task, opt.exp_id), max=num_iters)
end = time.time()
for (iter_id, batch) in enumerate(data_loader):
if iter_id >= num_iters:
break
data_time.update(time.time() - end)
for k in batch:
if k != 'meta':
batch[k] = batch[k].to(device=opt.device, non_blocking=True)
(output, loss, loss_stats) = model_with_loss(batch, phase)
loss = loss.mean()
if phase == 'train':
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
Bar.suffix = '{phase}: [{0}][{1}/{2}]|Tot: {total:} |ETA: {eta:} '.format(epoch, iter_id, num_iters, phase=phase, total=bar.elapsed_td, eta=bar.eta_td)
for l in avg_loss_stats:
avg_loss_stats[l].update(loss_stats[l].mean().item(), batch['image'].size(0))
Bar.suffix = Bar.suffix + '|{} {:.4f} '.format(l, avg_loss_stats[l].avg)
Bar.suffix = Bar.suffix + '|Data {dt.val:.3f}s({dt.avg:.3f}s) |Net {bt.avg:.3f}s'.format(dt=data_time, bt=batch_time)
if opt.print_iter > 0:
if iter_id % opt.print_iter == 0:
print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
else:
bar.next()
if opt.debug > 0:
opt = self.opt
if 'pre_hm' in batch:
output.update({'pre_hm': batch['pre_hm']})
dets = fusion_decode(output, K=opt.K, opt=opt)
for k in dets:
dets[k] = dets[k].detach().cpu().numpy()
dets_gt = batch['meta']['gt_det']
for i in range(1):
debugger = Debugger(opt=opt, dataset=data_loader.dataset)
img = batch['image'][i].detach().cpu().numpy().transpose(1, 2, 0)
img = np.clip((img * data_loader.dataset.std + data_loader.dataset.mean) * 255.0, 0, 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm', trans=self.opt.hm_transparency)
debugger.add_blend_img(img, gt, 'gt_hm', trans=self.opt.hm_transparency)
debugger.add_img(img, img_id='img')
if opt.pointcloud:
pc_2d = batch['pc_2d'][i].detach().cpu().numpy()
pc_3d = None
pc_N = batch['pc_N'][i].detach().cpu().numpy()
debugger.add_img(img, img_id='pc')
debugger.add_pointcloud(pc_2d, pc_N, img_id='pc')
if 'pc_hm' in opt.pc_feat_lvl:
channel = opt.pc_feat_channels['pc_hm']
pc_hm = debugger.gen_colormap(batch['pc_hm'][i][channel].unsqueeze(0).detach().cpu().numpy())
debugger.add_blend_img(img, pc_hm, 'pc_hm', trans=self.opt.hm_transparency)
if 'pc_dep' in opt.pc_feat_lvl:
channel = opt.pc_feat_channels['pc_dep']
pc_hm = batch['pc_hm'][i][channel].unsqueeze(0).detach().cpu().numpy()
pc_dep = debugger.add_overlay_img(img, pc_hm, 'pc_dep')
if 'pre_img' in batch:
pre_img = batch['pre_img'][i].detach().cpu().numpy().transpose(1, 2, 0)
pre_img = np.clip((pre_img * data_loader.dataset.std + data_loader.dataset.mean) * 255, 0, 255).astype(np.uint8)
debugger.add_img(pre_img, 'pre_img_pred')
debugger.add_img(pre_img, 'pre_img_gt')
if 'pre_hm' in batch:
pre_hm = debugger.gen_colormap(batch['pre_hm'][i].detach().cpu().numpy())
debugger.add_blend_img(pre_img, pre_hm, 'pre_hm', trans=self.opt.hm_transparency)
debugger.add_img(img, img_id='out_pred')
if 'ltrb_amodal' in opt.heads:
debugger.add_img(img, img_id='out_pred_amodal')
debugger.add_img(img, img_id='out_gt_amodal')
for k in range(len(dets['scores'][i])):
if dets['scores'][i, k] > opt.vis_thresh:
debugger.add_coco_bbox(dets['bboxes'][i, k] * opt.down_ratio, dets['clses'][i, k], dets['scores'][i, k], img_id='out_pred')
if 'ltrb_amodal' in opt.heads:
debugger.add_coco_bbox(dets['bboxes_amodal'][i, k] * opt.down_ratio, dets['clses'][i, k], dets['scores'][i, k], img_id='out_pred_amodal')
if 'hps' in opt.heads and int(dets['clses'][i, k]) == 0:
debugger.add_coco_hp(dets['hps'][i, k] * opt.down_ratio, img_id='out_pred')
if 'tracking' in opt.heads:
debugger.add_arrow(dets['cts'][i][k] * opt.down_ratio, dets['tracking'][i][k] * opt.down_ratio, img_id='out_pred')
debugger.add_arrow(dets['cts'][i][k] * opt.down_ratio, dets['tracking'][i][k] * opt.down_ratio, img_id='pre_img_pred')
debugger.add_img(img, img_id='out_gt')
for k in range(len(dets_gt['scores'][i])):
if dets_gt['scores'][i][k] > opt.vis_thresh:
if 'dep' in dets_gt.keys():
dist = dets_gt['dep'][i][k]
if len(dist) > 1:
dist = dist[0]
else:
dist = -1
debugger.add_coco_bbox(dets_gt['bboxes'][i][k] * opt.down_ratio, dets_gt['clses'][i][k], dets_gt['scores'][i][k], img_id='out_gt', dist=dist)
if 'ltrb_amodal' in opt.heads:
debugger.add_coco_bbox(dets_gt['bboxes_amodal'][i, k] * opt.down_ratio, dets_gt['clses'][i, k], dets_gt['scores'][i, k], img_id='out_gt_amodal')
if 'hps' in opt.heads and int(dets['clses'][i, k]) == 0:
debugger.add_coco_hp(dets_gt['hps'][i][k] * opt.down_ratio, img_id='out_gt')
if 'tracking' in opt.heads:
debugger.add_arrow(dets_gt['cts'][i][k] * opt.down_ratio, dets_gt['tracking'][i][k] * opt.down_ratio, img_id='out_gt')
debugger.add_arrow(dets_gt['cts'][i][k] * opt.down_ratio, dets_gt['tracking'][i][k] * opt.down_ratio, img_id='pre_img_gt')
if 'hm_hp' in opt.heads:
pred = debugger.gen_colormap_hp(output['hm_hp'][i].detach().cpu().numpy())
gt = debugger.gen_colormap_hp(batch['hm_hp'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hmhp', trans=self.opt.hm_transparency)
debugger.add_blend_img(img, gt, 'gt_hmhp', trans=self.opt.hm_transparency)
if 'rot' in opt.heads and 'dim' in opt.heads and ('dep' in opt.heads):
dets_gt = {k: dets_gt[k].cpu().numpy() for k in dets_gt}
calib = batch['meta']['calib'].detach().numpy() if 'calib' in batch['meta'] else None
det_pred = generic_post_process(opt, dets, batch['meta']['c'].cpu().numpy(), batch['meta']['s'].cpu().numpy(), output['hm'].shape[2], output['hm'].shape[3], self.opt.num_classes, calib)
det_gt = generic_post_process(opt, dets_gt, batch['meta']['c'].cpu().numpy(), batch['meta']['s'].cpu().numpy(), output['hm'].shape[2], output['hm'].shape[3], self.opt.num_classes, calib, is_gt=True)
debugger.add_3d_detection(batch['meta']['img_path'][i], batch['meta']['flipped'][i], det_pred[i], calib[i], vis_thresh=opt.vis_thresh, img_id='add_pred')
debugger.add_3d_detection(batch['meta']['img_path'][i], batch['meta']['flipped'][i], det_gt[i], calib[i], vis_thresh=opt.vis_thresh, img_id='add_gt')
pc_3d = None
if opt.pointcloud:
pc_3d = batch['pc_3d'].cpu().numpy()
debugger.add_bird_views(det_pred[i], det_gt[i], vis_thresh=opt.vis_thresh, img_id='bird_pred_gt', pc_3d=pc_3d, show_velocity=opt.show_velocity)
debugger.add_bird_views([], det_gt[i], vis_thresh=opt.vis_thresh, img_id='bird_gt', pc_3d=pc_3d, show_velocity=opt.show_velocity)
if opt.debug == 4:
debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
else:
debugger.show_all_imgs(pause=True)
if phase == 'val' and (opt.run_dataset_eval or opt.eval):
meta = batch['meta']
dets = fusion_decode(output, K=opt.K, opt=opt)
for k in dets:
dets[k] = dets[k].detach().cpu().numpy()
calib = meta['calib'].detach().numpy() if 'calib' in meta else None
dets = generic_post_process(opt, dets, meta['c'].cpu().numpy(), meta['s'].cpu().numpy(), output['hm'].shape[2], output['hm'].shape[3], self.opt.num_classes, calib)
result = []
for i in range(len(dets[0])):
if dets[0][i]['score'] > self.opt.out_thresh and all(dets[0][i]['dim'] > 0):
result.append(dets[0][i])
img_id = batch['meta']['img_id'].numpy().astype(np.int32)[0]
results[img_id] = result
del output, loss, loss_stats
bar.finish()
ret = {k: v.avg for (k, v) in avg_loss_stats.items()}
ret['time'] = bar.elapsed_td.total_seconds() / 60.0
return (ret, results)
|
CenterFusion
|
positive
|
def test_query(self):
m1 = MinHash()
m1.update('a'.encode('utf8'))
m1.update('b'.encode('utf8'))
m1.update('c'.encode('utf8'))
<DeepExtract>
d = 'abcdefghijklmnopqrstuvwxyz'
forest = MinHashLSHForest()
for i in range(len(d) - 2):
key = d[i]
m = MinHash()
j = i + 3
for s in d[i:j]:
m.update(s.encode('utf8'))
forest.add(key, m)
forest.index()
forest = forest
</DeepExtract>
result = forest.query(m1, 3)
self.assertTrue('a' in result)
self.assertTrue('b' in result)
self.assertTrue('c' in result)
m3 = MinHash(18)
self.assertRaises(ValueError, forest.query, m3, 1)
|
def test_query(self):
m1 = MinHash()
m1.update('a'.encode('utf8'))
m1.update('b'.encode('utf8'))
m1.update('c'.encode('utf8'))
d = 'abcdefghijklmnopqrstuvwxyz'
forest = MinHashLSHForest()
for i in range(len(d) - 2):
key = d[i]
m = MinHash()
j = i + 3
for s in d[i:j]:
m.update(s.encode('utf8'))
forest.add(key, m)
forest.index()
forest = forest
result = forest.query(m1, 3)
self.assertTrue('a' in result)
self.assertTrue('b' in result)
self.assertTrue('c' in result)
m3 = MinHash(18)
self.assertRaises(ValueError, forest.query, m3, 1)
|
datasketch
|
positive
|
def test_invalid_method(self):
"""Test invalid methods."""
class InvalidMethodClass(object):
def Test1(self, a):
pass
def Test2(self, a=None):
pass
def Test3(self, a, b):
pass
def Test4(self, a: Int, b: Str, c):
pass
def Test5(self, a: Int, b: Str, c) -> Int:
pass
def Test6(self, *arg):
pass
def Test7(self, **kwargs):
pass
def Test8(self, a: Int, b: Double, *, c, d=None):
pass
<DeepExtract>
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test1, InvalidMethodClass.Test1.__name__)
self.assertEqual(str(cm.exception), "Undefined type of parameter 'a'.")
</DeepExtract>
<DeepExtract>
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test2, InvalidMethodClass.Test2.__name__)
self.assertEqual(str(cm.exception), "Undefined type of parameter 'a'.")
</DeepExtract>
<DeepExtract>
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test3, InvalidMethodClass.Test3.__name__)
self.assertEqual(str(cm.exception), "Undefined type of parameter 'a'.")
</DeepExtract>
<DeepExtract>
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test4, InvalidMethodClass.Test4.__name__)
self.assertEqual(str(cm.exception), "Undefined type of parameter 'c'.")
</DeepExtract>
<DeepExtract>
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test5, InvalidMethodClass.Test5.__name__)
self.assertEqual(str(cm.exception), "Undefined type of parameter 'c'.")
</DeepExtract>
<DeepExtract>
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test6, InvalidMethodClass.Test6.__name__)
self.assertEqual(str(cm.exception), 'Only positional or keyword arguments are allowed.')
</DeepExtract>
<DeepExtract>
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test7, InvalidMethodClass.Test7.__name__)
self.assertEqual(str(cm.exception), 'Only positional or keyword arguments are allowed.')
</DeepExtract>
<DeepExtract>
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test8, InvalidMethodClass.Test8.__name__)
self.assertEqual(str(cm.exception), 'Only positional or keyword arguments are allowed.')
</DeepExtract>
|
def test_invalid_method(self):
"""Test invalid methods."""
class InvalidMethodClass(object):
def Test1(self, a):
pass
def Test2(self, a=None):
pass
def Test3(self, a, b):
pass
def Test4(self, a: Int, b: Str, c):
pass
def Test5(self, a: Int, b: Str, c) -> Int:
pass
def Test6(self, *arg):
pass
def Test7(self, **kwargs):
pass
def Test8(self, a: Int, b: Double, *, c, d=None):
pass
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test1, InvalidMethodClass.Test1.__name__)
self.assertEqual(str(cm.exception), "Undefined type of parameter 'a'.")
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test2, InvalidMethodClass.Test2.__name__)
self.assertEqual(str(cm.exception), "Undefined type of parameter 'a'.")
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test3, InvalidMethodClass.Test3.__name__)
self.assertEqual(str(cm.exception), "Undefined type of parameter 'a'.")
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test4, InvalidMethodClass.Test4.__name__)
self.assertEqual(str(cm.exception), "Undefined type of parameter 'c'.")
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test5, InvalidMethodClass.Test5.__name__)
self.assertEqual(str(cm.exception), "Undefined type of parameter 'c'.")
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test6, InvalidMethodClass.Test6.__name__)
self.assertEqual(str(cm.exception), 'Only positional or keyword arguments are allowed.')
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test7, InvalidMethodClass.Test7.__name__)
self.assertEqual(str(cm.exception), 'Only positional or keyword arguments are allowed.')
with self.assertRaises(DBusSpecificationError) as cm:
self.generator._generate_method(InvalidMethodClass.Test8, InvalidMethodClass.Test8.__name__)
self.assertEqual(str(cm.exception), 'Only positional or keyword arguments are allowed.')
|
dasbus
|
positive
|
def test_iterator_hierarchy(self):
def _inner_inner():
return [n for n in range(0, 2)]
def _inner():
for i in range(0, 2):
yield _inner_inner()
<DeepExtract>
for i in range(0, 2):
yield _inner_inner()
</DeepExtract>
self.assertTrue(isinstance(g, types.GeneratorType))
c = itertools.chain(*g)
self.assertTrue(isinstance(c, itertools.chain))
rr = list(c)
self.assertTrue(isinstance(rr, list))
self.assertTrue(isinstance(rr[0], int))
|
def test_iterator_hierarchy(self):
def _inner_inner():
return [n for n in range(0, 2)]
def _inner():
for i in range(0, 2):
yield _inner_inner()
for i in range(0, 2):
yield _inner_inner()
self.assertTrue(isinstance(g, types.GeneratorType))
c = itertools.chain(*g)
self.assertTrue(isinstance(c, itertools.chain))
rr = list(c)
self.assertTrue(isinstance(rr, list))
self.assertTrue(isinstance(rr[0], int))
|
cassandra-medusa
|
positive
|
def activate(self, context):
if context.area.type == 'PROPERTIES':
self.handler = bpy.types.SpaceView3D.draw_handler_add(DrawNorth_callback, (self, context), 'WINDOW', 'POST_PIXEL')
self.isActive = True
<DeepExtract>
bpy.context.scene.cursor_location.x += 0.0
</DeepExtract>
return True
return False
|
def activate(self, context):
if context.area.type == 'PROPERTIES':
self.handler = bpy.types.SpaceView3D.draw_handler_add(DrawNorth_callback, (self, context), 'WINDOW', 'POST_PIXEL')
self.isActive = True
bpy.context.scene.cursor_location.x += 0.0
return True
return False
|
blender-architecture-scripts
|
positive
|
def pixelization(video_path: str, output_path: Optional[str]=None, ratio: float=1.0, metadata: Optional[List[Dict[str, Any]]]=None) -> str:
"""
Pixelizes the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param ratio: smaller values result in a more pixelated video, 1.0 indicates
no change, and any value above one doesn't have a noticeable effect
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
assert ratio > 0, "Expected 'ratio' to be a positive number"
video_info = helpers.get_video_info(video_path)
(width, height) = (video_info['width'], video_info['height'])
output_path = output_path or video_path
<DeepExtract>
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
resize_aug = af.VideoAugmenterByResize(height * ratio, width * ratio)
resize_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name='resize', **func_kwargs)
return output_path or video_path
</DeepExtract>
<DeepExtract>
func_kwargs = helpers.get_func_kwargs(metadata, locals(), output_path)
resize_aug = af.VideoAugmenterByResize(height, width)
resize_aug.add_augmenter(output_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name='resize', **func_kwargs)
return output_path or output_path
</DeepExtract>
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name='pixelization', **func_kwargs)
return output_path or video_path
|
def pixelization(video_path: str, output_path: Optional[str]=None, ratio: float=1.0, metadata: Optional[List[Dict[str, Any]]]=None) -> str:
"""
Pixelizes the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param ratio: smaller values result in a more pixelated video, 1.0 indicates
no change, and any value above one doesn't have a noticeable effect
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
assert ratio > 0, "Expected 'ratio' to be a positive number"
video_info = helpers.get_video_info(video_path)
(width, height) = (video_info['width'], video_info['height'])
output_path = output_path or video_path
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
resize_aug = af.VideoAugmenterByResize(height * ratio, width * ratio)
resize_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name='resize', **func_kwargs)
return output_path or video_path
func_kwargs = helpers.get_func_kwargs(metadata, locals(), output_path)
resize_aug = af.VideoAugmenterByResize(height, width)
resize_aug.add_augmenter(output_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name='resize', **func_kwargs)
return output_path or output_path
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name='pixelization', **func_kwargs)
return output_path or video_path
|
AugLy
|
positive
|
def _selectObjects(self, objType, selector=None):
"""
Filters objects of the selected type with the specified selector,and returns results
:param objType: the type of object we are searching for
:type objType: string: (Vertex|Edge|Wire|Solid|Shell|Compound|CompSolid)
:return: a CQ object with the selected objects on the stack.
**Implementation Note**: This is the base implementation of the vertices,edges,faces,
solids,shells, and other similar selector methods. It is a useful extension point for
plugin developers to make other selector methods.
"""
<DeepExtract>
all = {}
for o in self.objects:
if objType == 'Solids' and isinstance(o, Solid) and (o.ShapeType() == 'Compound'):
for i in getattr(o, 'Compounds')():
all[i.hashCode()] = i
elif hasattr(o, objType):
for i in getattr(o, objType)():
all[i.hashCode()] = i
toReturn = list(all.values())
</DeepExtract>
if selector is not None:
try:
selectorObj = selectors.StringSyntaxSelector(selector)
except:
selectorObj = selector
toReturn = selectorObj.filter(toReturn)
return self.newObject(toReturn)
|
def _selectObjects(self, objType, selector=None):
"""
Filters objects of the selected type with the specified selector,and returns results
:param objType: the type of object we are searching for
:type objType: string: (Vertex|Edge|Wire|Solid|Shell|Compound|CompSolid)
:return: a CQ object with the selected objects on the stack.
**Implementation Note**: This is the base implementation of the vertices,edges,faces,
solids,shells, and other similar selector methods. It is a useful extension point for
plugin developers to make other selector methods.
"""
all = {}
for o in self.objects:
if objType == 'Solids' and isinstance(o, Solid) and (o.ShapeType() == 'Compound'):
for i in getattr(o, 'Compounds')():
all[i.hashCode()] = i
elif hasattr(o, objType):
for i in getattr(o, objType)():
all[i.hashCode()] = i
toReturn = list(all.values())
if selector is not None:
try:
selectorObj = selectors.StringSyntaxSelector(selector)
except:
selectorObj = selector
toReturn = selectorObj.filter(toReturn)
return self.newObject(toReturn)
|
cadquery
|
positive
|
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
<DeepExtract>
if mean_std is not None:
in_m = torch.from_numpy(mean_std[0])
in_s = torch.from_numpy(mean_std[1])
out_m = torch.from_numpy(mean_std[2])
out_s = torch.from_numpy(mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print('Input dim: {:d}'.format(in_dim))
print('Mean dim: {:d}'.format(in_m.shape[0]))
print('Std dim: {:d}'.format(in_s.shape[0]))
print('Input dimension incompatible')
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print('Output dim: {:d}'.format(out_dim))
print('Mean dim: {:d}'.format(out_m.shape[0]))
print('Std dim: {:d}'.format(out_s.shape[0]))
print('Output dimension incompatible')
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
(in_m, in_s, out_m, out_s) = (in_m, in_s, out_m, out_s)
</DeepExtract>
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
protocol_file = prj_conf.optional_argument[0]
<DeepExtract>
data_buffer = {}
try:
temp_buffer = np.loadtxt(protocol_file, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
except OSError:
print('Skip loading protocol file')
self.protocol_parser = data_buffer
</DeepExtract>
self.m_target_sr = 16000
self.frame_hops = [160]
self.frame_lens = [320]
self.fft_n = [1024]
self.lfcc_dim = [20]
self.lfcc_with_delta = True
self.lfcc_max_freq = 0.5
self.win = torch.hann_window
self.amp_floor = 1e-05
self.v_truncate_lens = [None for x in self.frame_hops]
self.v_submodels = len(self.frame_lens)
self.v_emd_dim = 1
self.m_transform = []
self.m_before_pooling = []
self.m_output_act = []
self.m_frontend = []
for (idx, (trunc_len, fft_n, lfcc_dim)) in enumerate(zip(self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(torch_nn.Sequential(torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7)))
self.m_before_pooling.append(torch_nn.Sequential(nii_nn.BLSTMLayer(lfcc_dim // 16 * 32, lfcc_dim // 16 * 32), nii_nn.BLSTMLayer(lfcc_dim // 16 * 32, lfcc_dim // 16 * 32)))
self.m_output_act.append(torch_nn.Linear(lfcc_dim // 16 * 32, self.v_emd_dim))
self.m_frontend.append(nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True, max_freq=self.lfcc_max_freq))
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
return
|
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
if mean_std is not None:
in_m = torch.from_numpy(mean_std[0])
in_s = torch.from_numpy(mean_std[1])
out_m = torch.from_numpy(mean_std[2])
out_s = torch.from_numpy(mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print('Input dim: {:d}'.format(in_dim))
print('Mean dim: {:d}'.format(in_m.shape[0]))
print('Std dim: {:d}'.format(in_s.shape[0]))
print('Input dimension incompatible')
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print('Output dim: {:d}'.format(out_dim))
print('Mean dim: {:d}'.format(out_m.shape[0]))
print('Std dim: {:d}'.format(out_s.shape[0]))
print('Output dimension incompatible')
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
(in_m, in_s, out_m, out_s) = (in_m, in_s, out_m, out_s)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
protocol_file = prj_conf.optional_argument[0]
data_buffer = {}
try:
temp_buffer = np.loadtxt(protocol_file, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
except OSError:
print('Skip loading protocol file')
self.protocol_parser = data_buffer
self.m_target_sr = 16000
self.frame_hops = [160]
self.frame_lens = [320]
self.fft_n = [1024]
self.lfcc_dim = [20]
self.lfcc_with_delta = True
self.lfcc_max_freq = 0.5
self.win = torch.hann_window
self.amp_floor = 1e-05
self.v_truncate_lens = [None for x in self.frame_hops]
self.v_submodels = len(self.frame_lens)
self.v_emd_dim = 1
self.m_transform = []
self.m_before_pooling = []
self.m_output_act = []
self.m_frontend = []
for (idx, (trunc_len, fft_n, lfcc_dim)) in enumerate(zip(self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(torch_nn.Sequential(torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(48, affine=False), torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch.nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(64, affine=False), torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]), nii_nn.MaxFeatureMap2D(), torch_nn.BatchNorm2d(32, affine=False), torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]), nii_nn.MaxFeatureMap2D(), torch_nn.MaxPool2d([2, 2], [2, 2]), torch_nn.Dropout(0.7)))
self.m_before_pooling.append(torch_nn.Sequential(nii_nn.BLSTMLayer(lfcc_dim // 16 * 32, lfcc_dim // 16 * 32), nii_nn.BLSTMLayer(lfcc_dim // 16 * 32, lfcc_dim // 16 * 32)))
self.m_output_act.append(torch_nn.Linear(lfcc_dim // 16 * 32, self.v_emd_dim))
self.m_frontend.append(nii_front_end.LFCC(self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx], self.m_target_sr, self.lfcc_dim[idx], with_energy=True, max_freq=self.lfcc_max_freq))
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
return
|
2021
|
positive
|
def set_console(self, attrs=None, on_stderr=False):
if attrs is None:
<DeepExtract>
attrs = self._fore + self._back * 16 + (self._style | self._light)
</DeepExtract>
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleTextAttribute(handle, attrs)
|
def set_console(self, attrs=None, on_stderr=False):
if attrs is None:
attrs = self._fore + self._back * 16 + (self._style | self._light)
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleTextAttribute(handle, attrs)
|
commix
|
positive
|
def __init__(self, cfg, in_channels, builder, arch_def):
super(FBNetRPNHead, self).__init__()
assert in_channels == builder.last_depth
rpn_bn_type = cfg.MODEL.FBNET.RPN_BN_TYPE
if len(rpn_bn_type) > 0:
builder.bn_type = rpn_bn_type
use_blocks = cfg.MODEL.FBNET.RPN_HEAD_BLOCKS
<DeepExtract>
rpn_stage = arch_def.get('rpn')
ret = mbuilder.get_blocks(arch_def, stage_indices=rpn_stage)
if use_blocks > 0:
logger.warn('Use last {} blocks in {} as rpn'.format(use_blocks, ret))
block_count = len(ret['stages'])
assert use_blocks <= block_count, 'use block {}, block count {}'.format(use_blocks, block_count)
blocks = range(block_count - use_blocks, block_count)
ret = mbuilder.get_blocks(ret, block_indices=blocks)
stages = ret['stages']
</DeepExtract>
self.head = builder.add_blocks(stages)
self.out_channels = builder.last_depth
|
def __init__(self, cfg, in_channels, builder, arch_def):
super(FBNetRPNHead, self).__init__()
assert in_channels == builder.last_depth
rpn_bn_type = cfg.MODEL.FBNET.RPN_BN_TYPE
if len(rpn_bn_type) > 0:
builder.bn_type = rpn_bn_type
use_blocks = cfg.MODEL.FBNET.RPN_HEAD_BLOCKS
rpn_stage = arch_def.get('rpn')
ret = mbuilder.get_blocks(arch_def, stage_indices=rpn_stage)
if use_blocks > 0:
logger.warn('Use last {} blocks in {} as rpn'.format(use_blocks, ret))
block_count = len(ret['stages'])
assert use_blocks <= block_count, 'use block {}, block count {}'.format(use_blocks, block_count)
blocks = range(block_count - use_blocks, block_count)
ret = mbuilder.get_blocks(ret, block_indices=blocks)
stages = ret['stages']
self.head = builder.add_blocks(stages)
self.out_channels = builder.last_depth
|
CenterMask
|
positive
|
def feed(self, aBuf, aLen):
if self._mDone:
return
i = self._mNeedToSkipCharNum
while i < aLen:
<DeepExtract>
(order, charLen) = (-1, 1)
</DeepExtract>
i += charLen
if i > aLen:
self._mNeedToSkipCharNum = i - aLen
self._mLastCharOrder = -1
else:
if order != -1 and self._mLastCharOrder != -1:
self._mTotalRel += 1
if self._mTotalRel > MAX_REL_THRESHOLD:
self._mDone = True
break
self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
self._mLastCharOrder = order
|
def feed(self, aBuf, aLen):
if self._mDone:
return
i = self._mNeedToSkipCharNum
while i < aLen:
(order, charLen) = (-1, 1)
i += charLen
if i > aLen:
self._mNeedToSkipCharNum = i - aLen
self._mLastCharOrder = -1
else:
if order != -1 and self._mLastCharOrder != -1:
self._mTotalRel += 1
if self._mTotalRel > MAX_REL_THRESHOLD:
self._mDone = True
break
self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
self._mLastCharOrder = order
|
crunchy-xml-decoder
|
positive
|
@main
def main(*args):
import argparse
parser = argparse.ArgumentParser(description='Run Recommendations', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-u', '--user', type=str, choices=USER_FILES, default='test_user', metavar='USER', help='user file, e.g.\n' + '{{{}}}'.format(','.join(sample(USER_FILES, 3))))
parser.add_argument('-k', '--k', type=int, help='for k-means')
parser.add_argument('-q', '--query', choices=CATEGORIES, metavar='QUERY', help='search for restaurants by category e.g.\n{{{}}}'.format(','.join(sample(CATEGORIES, 3))))
parser.add_argument('-p', '--predict', action='store_true', help='predict ratings for all restaurants')
parser.add_argument('-r', '--restaurants', action='store_true', help='outputs a list of restaurant names')
args = parser.parse_args()
if args.restaurants:
print('Restaurant names:')
for restaurant in sorted(ALL_RESTAURANTS, key=restaurant_name):
print(repr(restaurant_name(restaurant)))
exit(0)
if args.query:
<DeepExtract>
ALL_RESTAURANTS = [r for r in ALL_RESTAURANTS if args.query in restaurant_categories(r)]
</DeepExtract>
else:
restaurants = ALL_RESTAURANTS
assert args.user, 'A --user is required to draw a map'
user = load_user_file('{}.dat'.format(args.user))
if args.predict:
<DeepExtract>
predictor = best_predictor(user, ALL_RESTAURANTS, feature_set())
reviewed = user_reviewed_restaurants(user, restaurants)
ratings = {}
for r in restaurants:
name = restaurant_name(r)
if r in reviewed:
ratings[name] = user_rating(user, name)
else:
ratings[name] = predictor(r)
ratings = ratings
</DeepExtract>
else:
restaurants = user_reviewed_restaurants(user, restaurants)
names = [restaurant_name(r) for r in restaurants]
ratings = {name: user_rating(user, name) for name in names}
if args.k:
<DeepExtract>
assert len(restaurants) >= min(args.k, len(restaurants)), 'Not enough restaurants to cluster'
(old_centroids, n) = ([], 0)
centroids = [restaurant_location(r) for r in sample(restaurants, min(args.k, len(restaurants)))]
while old_centroids != centroids and n < max_updates:
old_centroids = centroids
clusters = group_by_centroid(restaurants, centroids)
centroids = [find_centroid(cluster) for cluster in clusters]
n += 1
centroids = centroids
</DeepExtract>
else:
centroids = [restaurant_location(r) for r in restaurants]
draw_map(centroids, restaurants, ratings)
|
@main
def main(*args):
import argparse
parser = argparse.ArgumentParser(description='Run Recommendations', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-u', '--user', type=str, choices=USER_FILES, default='test_user', metavar='USER', help='user file, e.g.\n' + '{{{}}}'.format(','.join(sample(USER_FILES, 3))))
parser.add_argument('-k', '--k', type=int, help='for k-means')
parser.add_argument('-q', '--query', choices=CATEGORIES, metavar='QUERY', help='search for restaurants by category e.g.\n{{{}}}'.format(','.join(sample(CATEGORIES, 3))))
parser.add_argument('-p', '--predict', action='store_true', help='predict ratings for all restaurants')
parser.add_argument('-r', '--restaurants', action='store_true', help='outputs a list of restaurant names')
args = parser.parse_args()
if args.restaurants:
print('Restaurant names:')
for restaurant in sorted(ALL_RESTAURANTS, key=restaurant_name):
print(repr(restaurant_name(restaurant)))
exit(0)
if args.query:
ALL_RESTAURANTS = [r for r in ALL_RESTAURANTS if args.query in restaurant_categories(r)]
else:
restaurants = ALL_RESTAURANTS
assert args.user, 'A --user is required to draw a map'
user = load_user_file('{}.dat'.format(args.user))
if args.predict:
predictor = best_predictor(user, ALL_RESTAURANTS, feature_set())
reviewed = user_reviewed_restaurants(user, restaurants)
ratings = {}
for r in restaurants:
name = restaurant_name(r)
if r in reviewed:
ratings[name] = user_rating(user, name)
else:
ratings[name] = predictor(r)
ratings = ratings
else:
restaurants = user_reviewed_restaurants(user, restaurants)
names = [restaurant_name(r) for r in restaurants]
ratings = {name: user_rating(user, name) for name in names}
if args.k:
assert len(restaurants) >= min(args.k, len(restaurants)), 'Not enough restaurants to cluster'
(old_centroids, n) = ([], 0)
centroids = [restaurant_location(r) for r in sample(restaurants, min(args.k, len(restaurants)))]
while old_centroids != centroids and n < max_updates:
old_centroids = centroids
clusters = group_by_centroid(restaurants, centroids)
centroids = [find_centroid(cluster) for cluster in clusters]
n += 1
centroids = centroids
else:
centroids = [restaurant_location(r) for r in restaurants]
draw_map(centroids, restaurants, ratings)
|
cs61a
|
positive
|
def getOrganizations(p_apiKey):
endpoint = '/organizations'
<DeepExtract>
if p_retry > API_MAX_RETRIES:
if FLAG_REQUEST_VERBOSE:
print('ERROR: Reached max retries')
(success, errors, headers, response) = (False, None, None, None)
bearerString = 'Bearer ' + str(p_apiKey)
headers = {'Authorization': bearerString}
if not p_additionalHeaders is None:
headers.update(p_additionalHeaders)
query = ''
if not p_queryItems is None:
query = '?' + urlencode(p_queryItems, True)
url = API_BASE_URL + endpoint + query
verb = 'GET'.upper()
session = NoRebuildAuthSession()
try:
if FLAG_REQUEST_VERBOSE:
print(verb, url)
if verb == 'GET':
r = session.get(url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
elif verb == 'PUT':
if not p_requestBody is None:
if FLAG_REQUEST_VERBOSE:
print('body', p_requestBody)
r = session.put(url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
elif verb == 'POST':
if not p_requestBody is None:
if FLAG_REQUEST_VERBOSE:
print('body', p_requestBody)
r = session.post(url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
elif verb == 'DELETE':
r = session.delete(url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
(success, errors, headers, response) = (False, None, None, None)
except:
(success, errors, headers, response) = (False, None, None, None)
if FLAG_REQUEST_VERBOSE:
print(r.status_code)
success = r.status_code in range(200, 299)
errors = None
responseHeaders = None
responseBody = None
if r.status_code == API_STATUS_RATE_LIMIT:
if FLAG_REQUEST_VERBOSE:
print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, r.headers['Retry-After']))
time.sleep(int(r.headers['Retry-After']))
(success, errors, responseHeaders, responseBody) = merakiRequest(p_apiKey, 'GET', endpoint, p_additionalHeaders, p_queryItems, p_requestBody, FLAG_REQUEST_VERBOSE, p_retry + 1)
(success, errors, headers, response) = (success, errors, responseHeaders, responseBody)
try:
rjson = r.json()
except:
rjson = None
if not rjson is None:
if 'errors' in rjson:
errors = rjson['errors']
if FLAG_REQUEST_VERBOSE:
print(errors)
else:
responseBody = rjson
if 'Link' in r.headers:
parsedLinks = utils.parse_header_links(r.headers['Link'])
for link in parsedLinks:
if link['rel'] == 'next':
if FLAG_REQUEST_VERBOSE:
print('Next page:', link['url'])
splitLink = link['url'].split('/api/v1')
(success, errors, responseHeaders, nextBody) = merakiRequest(p_apiKey, 'GET', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=FLAG_REQUEST_VERBOSE)
if success:
if not responseBody is None:
responseBody = responseBody + nextBody
else:
responseBody = None
(success, errors, headers, response) = (success, errors, responseHeaders, responseBody)
</DeepExtract>
return (success, errors, headers, response)
|
def getOrganizations(p_apiKey):
endpoint = '/organizations'
if p_retry > API_MAX_RETRIES:
if FLAG_REQUEST_VERBOSE:
print('ERROR: Reached max retries')
(success, errors, headers, response) = (False, None, None, None)
bearerString = 'Bearer ' + str(p_apiKey)
headers = {'Authorization': bearerString}
if not p_additionalHeaders is None:
headers.update(p_additionalHeaders)
query = ''
if not p_queryItems is None:
query = '?' + urlencode(p_queryItems, True)
url = API_BASE_URL + endpoint + query
verb = 'GET'.upper()
session = NoRebuildAuthSession()
try:
if FLAG_REQUEST_VERBOSE:
print(verb, url)
if verb == 'GET':
r = session.get(url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
elif verb == 'PUT':
if not p_requestBody is None:
if FLAG_REQUEST_VERBOSE:
print('body', p_requestBody)
r = session.put(url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
elif verb == 'POST':
if not p_requestBody is None:
if FLAG_REQUEST_VERBOSE:
print('body', p_requestBody)
r = session.post(url, headers=headers, json=p_requestBody, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
elif verb == 'DELETE':
r = session.delete(url, headers=headers, timeout=(API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT))
else:
(success, errors, headers, response) = (False, None, None, None)
except:
(success, errors, headers, response) = (False, None, None, None)
if FLAG_REQUEST_VERBOSE:
print(r.status_code)
success = r.status_code in range(200, 299)
errors = None
responseHeaders = None
responseBody = None
if r.status_code == API_STATUS_RATE_LIMIT:
if FLAG_REQUEST_VERBOSE:
print('INFO: Hit max request rate. Retrying %s after %s seconds' % (p_retry + 1, r.headers['Retry-After']))
time.sleep(int(r.headers['Retry-After']))
(success, errors, responseHeaders, responseBody) = merakiRequest(p_apiKey, 'GET', endpoint, p_additionalHeaders, p_queryItems, p_requestBody, FLAG_REQUEST_VERBOSE, p_retry + 1)
(success, errors, headers, response) = (success, errors, responseHeaders, responseBody)
try:
rjson = r.json()
except:
rjson = None
if not rjson is None:
if 'errors' in rjson:
errors = rjson['errors']
if FLAG_REQUEST_VERBOSE:
print(errors)
else:
responseBody = rjson
if 'Link' in r.headers:
parsedLinks = utils.parse_header_links(r.headers['Link'])
for link in parsedLinks:
if link['rel'] == 'next':
if FLAG_REQUEST_VERBOSE:
print('Next page:', link['url'])
splitLink = link['url'].split('/api/v1')
(success, errors, responseHeaders, nextBody) = merakiRequest(p_apiKey, 'GET', splitLink[1], p_additionalHeaders=p_additionalHeaders, p_requestBody=p_requestBody, p_verbose=FLAG_REQUEST_VERBOSE)
if success:
if not responseBody is None:
responseBody = responseBody + nextBody
else:
responseBody = None
(success, errors, headers, response) = (success, errors, responseHeaders, responseBody)
return (success, errors, headers, response)
|
automation-scripts
|
positive
|
def test_register_new_exams(self):
"""
If an exam does not yet exist for content_id a new exam is created
"""
exam_data = [{'course_id': self.course_id, 'content_id': '123aaaa', 'exam_name': 'midterm1', 'due_date': '2026-01-01T00:00:00Z', 'time_limit_mins': 90, 'is_proctored': True, 'is_practice_exam': False, 'is_active': True, 'hide_after_due': False, 'backend': 'null'}, {'course_id': self.course_id, 'content_id': '123zzzz', 'exam_name': 'midterm2', 'external_id': None, 'due_date': '2026-01-01T00:00:00Z', 'time_limit_mins': 90, 'is_proctored': True, 'is_practice_exam': False, 'is_active': True, 'hide_after_due': False, 'backend': 'null'}, get_exam_by_id(self.exam.id)]
<DeepExtract>
response = self.client.patch(reverse('edx_proctoring:proctored_exam.register_exams_by_course_id', kwargs={'course_id': self.course_id}), exam_data, content_type='application/json')
</DeepExtract>
<DeepExtract>
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 3)
for result in response.data:
self.assertGreater(result.get('exam_id'), 0)
</DeepExtract>
exams = get_all_exams_for_course(course_id=self.course_id, active_only=True)
expected_content_ids = [self.content_id, '123aaaa', '123zzzz']
actual_content_ids = [exam.get('content_id') for exam in exams]
self.assertEqual(expected_content_ids, actual_content_ids)
created_exam = ProctoredExam.get_exam_by_content_id(self.course_id, '123aaaa')
self.assertEqual(created_exam.exam_name, exam_data[0]['exam_name'])
self.assertEqual(created_exam.time_limit_mins, exam_data[0]['time_limit_mins'])
self.assertEqual(created_exam.is_proctored, exam_data[0]['is_proctored'])
self.assertEqual(created_exam.is_practice_exam, exam_data[0]['is_practice_exam'])
self.assertEqual(created_exam.hide_after_due, exam_data[0]['hide_after_due'])
self.assertEqual(created_exam.backend, exam_data[0]['backend'])
self.assertEqual(created_exam.external_id, None)
|
def test_register_new_exams(self):
"""
If an exam does not yet exist for content_id a new exam is created
"""
exam_data = [{'course_id': self.course_id, 'content_id': '123aaaa', 'exam_name': 'midterm1', 'due_date': '2026-01-01T00:00:00Z', 'time_limit_mins': 90, 'is_proctored': True, 'is_practice_exam': False, 'is_active': True, 'hide_after_due': False, 'backend': 'null'}, {'course_id': self.course_id, 'content_id': '123zzzz', 'exam_name': 'midterm2', 'external_id': None, 'due_date': '2026-01-01T00:00:00Z', 'time_limit_mins': 90, 'is_proctored': True, 'is_practice_exam': False, 'is_active': True, 'hide_after_due': False, 'backend': 'null'}, get_exam_by_id(self.exam.id)]
response = self.client.patch(reverse('edx_proctoring:proctored_exam.register_exams_by_course_id', kwargs={'course_id': self.course_id}), exam_data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 3)
for result in response.data:
self.assertGreater(result.get('exam_id'), 0)
exams = get_all_exams_for_course(course_id=self.course_id, active_only=True)
expected_content_ids = [self.content_id, '123aaaa', '123zzzz']
actual_content_ids = [exam.get('content_id') for exam in exams]
self.assertEqual(expected_content_ids, actual_content_ids)
created_exam = ProctoredExam.get_exam_by_content_id(self.course_id, '123aaaa')
self.assertEqual(created_exam.exam_name, exam_data[0]['exam_name'])
self.assertEqual(created_exam.time_limit_mins, exam_data[0]['time_limit_mins'])
self.assertEqual(created_exam.is_proctored, exam_data[0]['is_proctored'])
self.assertEqual(created_exam.is_practice_exam, exam_data[0]['is_practice_exam'])
self.assertEqual(created_exam.hide_after_due, exam_data[0]['hide_after_due'])
self.assertEqual(created_exam.backend, exam_data[0]['backend'])
self.assertEqual(created_exam.external_id, None)
|
edx-proctoring
|
positive
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
parser.add_argument('--model_type', default=None, type=str, required=True, help='Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys()))
parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Path to pre-trained model or shortcut name selected in the list: ' + ', '.join(ALL_MODELS))
parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train selected in the list: ' + ', '.join(processors.keys()))
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
parser.add_argument('--evaluate_during_training', action='store_true', help='Run evaluation during training at each logging step.')
parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
parser.add_argument('--per_gpu_train_batch_size', default=8, type=int, help='Batch size per GPU/CPU for training.')
parser.add_argument('--per_gpu_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
parser.add_argument('--max_steps', default=-1, type=int, help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
parser.add_argument('--logging_steps', type=int, default=50, help='Log every X updates steps.')
parser.add_argument('--save_steps', type=int, default=50, help='Save checkpoint every X updates steps.')
parser.add_argument('--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number')
parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available')
parser.add_argument('--overwrite_output_dir', action='store_true', help='Overwrite the content of the output directory')
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--local_rank', type=int, default=-1, help='For distributed training: local_rank')
parser.add_argument('--server_ip', type=str, default='', help='For distant debugging.')
parser.add_argument('--server_port', type=str, default='', help='For distant debugging.')
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and (not args.overwrite_output_dir):
raise ValueError('Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.'.format(args.output_dir))
if args.server_ip and args.server_port:
import ptvsd
print('Waiting for debugger attach')
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
if args.local_rank == -1 or args.no_cuda:
device = torch.device('cuda' if torch.cuda.is_available() and (not args.no_cuda) else 'cpu')
args.n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
<DeepExtract>
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
</DeepExtract>
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError('Task not found: %s' % args.task_name)
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
args.model_type = args.model_type.lower()
(config_class, model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
if args.local_rank == 0:
torch.distributed.barrier()
model.to(args.device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
elif args.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info('Training/evaluation parameters %s', args)
if args.do_train:
<DeepExtract>
processor = processors[args.task_name]()
output_mode = output_modes[args.task_name]
cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format('dev' if False else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(args.task_name)))
if os.path.exists(cached_features_file):
logger.info('Loading features from cached file %s', cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info('Creating features from dataset file at %s', args.data_dir)
label_list = processor.get_labels()
examples = processor.get_dev_examples(args.data_dir) if False else processor.get_train_examples(args.data_dir)
features = convert_examples_to_features(examples, label_list, args.max_seq_length, tokenizer, output_mode, cls_token_at_end=bool(args.model_type in ['xlnet']), cls_token=tokenizer.cls_token, sep_token=tokenizer.sep_token, cls_token_segment_id=2 if args.model_type in ['xlnet'] else 1, pad_on_left=bool(args.model_type in ['xlnet']), pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0)
if args.local_rank in [-1, 0]:
logger.info('Saving features into cached file %s', cached_features_file)
torch.save(features, cached_features_file)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
if output_mode == 'classification':
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
elif output_mode == 'regression':
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
train_dataset = dataset
</DeepExtract>
<DeepExtract>
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if not any((nd in n for nd in no_decay))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any((nd in n for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
(model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
logger.info('***** Running training *****')
logger.info(' Num examples = %d', len(train_dataset))
logger.info(' Num Epochs = %d', args.num_train_epochs)
logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
logger.info(' Total optimization steps = %d', t_total)
global_step = 0
(tr_loss, logging_loss) = (0.0, 0.0)
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc='Epoch', disable=args.local_rank not in [-1, 0])
set_seed(args)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])
for (step, batch) in enumerate(epoch_iterator):
model.train()
batch = tuple((t.to(args.device) for t in batch))
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, 'labels': batch[3]}
ouputs = model(**inputs)
loss = ouputs[0]
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
scheduler.step()
optimizer.step()
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and (global_step % args.logging_steps == 0):
if args.local_rank == -1 and args.evaluate_during_training:
results = evaluate(args, model, tokenizer)
for (key, value) in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and (global_step % args.save_steps == 0):
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info('Saving model checkpoint to %s', output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
(global_step, tr_loss) = (global_step, tr_loss / global_step)
</DeepExtract>
logger.info(' global_step = %s, average loss = %s', global_step, tr_loss)
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info('Saving model checkpoint to %s', args.output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list((os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))))
logging.getLogger('pytorch_transformers.modeling_utils').setLevel(logging.WARN)
logger.info('Evaluate the following checkpoints: %s', checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ''
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
<DeepExtract>
eval_task_names = ('mnli', 'mnli-mm') if args.task_name == 'mnli' else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == 'mnli' else (args.output_dir,)
results = {}
for (eval_task, eval_output_dir) in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
logger.info('***** Running evaluation {} *****'.format(global_step))
logger.info(' Num examples = %d', len(eval_dataset))
logger.info(' Batch size = %d', args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc='Evaluating'):
model.eval()
batch = tuple((t.to(args.device) for t in batch))
with torch.no_grad():
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, 'labels': batch[3]}
outputs = model(**inputs)
(tmp_eval_loss, logits) = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == 'classification':
preds = np.argmax(preds, axis=1)
elif args.output_mode == 'regression':
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
output_eval_file = os.path.join(eval_output_dir, 'eval_results.txt')
with open(output_eval_file, 'w') as writer:
logger.info('***** Eval results {} *****'.format(global_step))
for key in sorted(result.keys()):
logger.info(' %s = %s', key, str(result[key]))
writer.write('%s = %s\n' % (key, str(result[key])))
result = results
</DeepExtract>
result = dict(((k + '_{}'.format(global_step), v) for (k, v) in result.items()))
results.update(result)
return results
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.')
parser.add_argument('--model_type', default=None, type=str, required=True, help='Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys()))
parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help='Path to pre-trained model or shortcut name selected in the list: ' + ', '.join(ALL_MODELS))
parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train selected in the list: ' + ', '.join(processors.keys()))
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name')
parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
parser.add_argument('--evaluate_during_training', action='store_true', help='Run evaluation during training at each logging step.')
parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
parser.add_argument('--per_gpu_train_batch_size', default=8, type=int, help='Batch size per GPU/CPU for training.')
parser.add_argument('--per_gpu_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
parser.add_argument('--max_steps', default=-1, type=int, help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
parser.add_argument('--logging_steps', type=int, default=50, help='Log every X updates steps.')
parser.add_argument('--save_steps', type=int, default=50, help='Save checkpoint every X updates steps.')
parser.add_argument('--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number')
parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available')
parser.add_argument('--overwrite_output_dir', action='store_true', help='Overwrite the content of the output directory')
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--local_rank', type=int, default=-1, help='For distributed training: local_rank')
parser.add_argument('--server_ip', type=str, default='', help='For distant debugging.')
parser.add_argument('--server_port', type=str, default='', help='For distant debugging.')
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and (not args.overwrite_output_dir):
raise ValueError('Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.'.format(args.output_dir))
if args.server_ip and args.server_port:
import ptvsd
print('Waiting for debugger attach')
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
if args.local_rank == -1 or args.no_cuda:
device = torch.device('cuda' if torch.cuda.is_available() and (not args.no_cuda) else 'cpu')
args.n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError('Task not found: %s' % args.task_name)
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
args.model_type = args.model_type.lower()
(config_class, model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
if args.local_rank == 0:
torch.distributed.barrier()
model.to(args.device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
elif args.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info('Training/evaluation parameters %s', args)
if args.do_train:
processor = processors[args.task_name]()
output_mode = output_modes[args.task_name]
cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format('dev' if False else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length), str(args.task_name)))
if os.path.exists(cached_features_file):
logger.info('Loading features from cached file %s', cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info('Creating features from dataset file at %s', args.data_dir)
label_list = processor.get_labels()
examples = processor.get_dev_examples(args.data_dir) if False else processor.get_train_examples(args.data_dir)
features = convert_examples_to_features(examples, label_list, args.max_seq_length, tokenizer, output_mode, cls_token_at_end=bool(args.model_type in ['xlnet']), cls_token=tokenizer.cls_token, sep_token=tokenizer.sep_token, cls_token_segment_id=2 if args.model_type in ['xlnet'] else 1, pad_on_left=bool(args.model_type in ['xlnet']), pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0)
if args.local_rank in [-1, 0]:
logger.info('Saving features into cached file %s', cached_features_file)
torch.save(features, cached_features_file)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
if output_mode == 'classification':
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
elif output_mode == 'regression':
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
train_dataset = dataset
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if not any((nd in n for nd in no_decay))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any((nd in n for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
(model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
logger.info('***** Running training *****')
logger.info(' Num examples = %d', len(train_dataset))
logger.info(' Num Epochs = %d', args.num_train_epochs)
logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
logger.info(' Total optimization steps = %d', t_total)
global_step = 0
(tr_loss, logging_loss) = (0.0, 0.0)
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc='Epoch', disable=args.local_rank not in [-1, 0])
set_seed(args)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])
for (step, batch) in enumerate(epoch_iterator):
model.train()
batch = tuple((t.to(args.device) for t in batch))
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, 'labels': batch[3]}
                outputs = model(**inputs)
                loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
scheduler.step()
optimizer.step()
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and (global_step % args.logging_steps == 0):
if args.local_rank == -1 and args.evaluate_during_training:
results = evaluate(args, model, tokenizer)
for (key, value) in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and (global_step % args.save_steps == 0):
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info('Saving model checkpoint to %s', output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
(global_step, tr_loss) = (global_step, tr_loss / global_step)
logger.info(' global_step = %s, average loss = %s', global_step, tr_loss)
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info('Saving model checkpoint to %s', args.output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list((os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))))
logging.getLogger('pytorch_transformers.modeling_utils').setLevel(logging.WARN)
logger.info('Evaluate the following checkpoints: %s', checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ''
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
eval_task_names = ('mnli', 'mnli-mm') if args.task_name == 'mnli' else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == 'mnli' else (args.output_dir,)
results = {}
for (eval_task, eval_output_dir) in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
logger.info('***** Running evaluation {} *****'.format(global_step))
logger.info(' Num examples = %d', len(eval_dataset))
logger.info(' Batch size = %d', args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc='Evaluating'):
model.eval()
batch = tuple((t.to(args.device) for t in batch))
with torch.no_grad():
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, 'labels': batch[3]}
outputs = model(**inputs)
(tmp_eval_loss, logits) = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == 'classification':
preds = np.argmax(preds, axis=1)
elif args.output_mode == 'regression':
preds = np.squeeze(preds)
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
output_eval_file = os.path.join(eval_output_dir, 'eval_results.txt')
with open(output_eval_file, 'w') as writer:
logger.info('***** Eval results {} *****'.format(global_step))
for key in sorted(result.keys()):
logger.info(' %s = %s', key, str(result[key]))
writer.write('%s = %s\n' % (key, str(result[key])))
result = results
result = dict(((k + '_{}'.format(global_step), v) for (k, v) in result.items()))
results.update(result)
return results
|
AAAI_2020_CommonsenseQA
|
positive
|
def initialize_falist(path_file):
filename = path_file.split('/')[-1]
AIPS = pd.read_csv(path_file)
<DeepExtract>
jd_st1_st2 = list(zip(AIPS['jd'], AIPS['st1'], AIPS['st2']))
AIPS['baseline'] = list(map(lambda x: dictBase[jd2track2017(x[0])][x[1]] + dictBase[jd2track2017(x[0])][x[2]], jd_st1_st2))
AIPS = AIPS
</DeepExtract>
<DeepExtract>
h_m_s_doy = list(zip(AIPS['hour'].astype('int32'), AIPS['min'].astype('int32'), AIPS['sec'].astype('int32'), AIPS['us'].astype('int32'), AIPS['doy'].astype('int32')))
AIPS['datetime'] = list(map(lambda x: datetime.datetime(2017, 1, 1, x[0], x[1], x[2], x[3]) + datetime.timedelta(days=int(x[4]) - 1), h_m_s_doy))
AIPS = AIPS
</DeepExtract>
<DeepExtract>
AIPS['track'] = list(map(lambda x: jd2track2017(x), AIPS['jd']))
AIPS['expt_no'] = list(map(lambda x: jd2expt2017(x), AIPS['jd']))
AIPS = AIPS
</DeepExtract>
AIPS['vis'] = AIPS['amp'] * np.exp(1j * AIPS['phase'] * np.pi / 180)
AIPS['std'] = AIPS['amp']
AIPS = AIPS.groupby(('expt_no', 'track', 'datetime', 'baseline')).agg({'vis': np.mean, 'std': np.std, 'sigma': lambda x: np.std(x) / len(x), 'u': np.mean, 'v': np.mean})
AIPS['amp'] = np.abs(AIPS['vis'])
AIPS['phase'] = np.angle(AIPS['vis']) * 180 / np.pi
<DeepExtract>
sour = filename.split('.')[-3]
pol = filename.split('.')[-2]
AIPS['source'] = [sour] * AIPS.shape[0]
AIPS['polarization'] = [pol] * AIPS.shape[0]
AIPS = AIPS
</DeepExtract>
AIPS = AIPS.reset_index()
AIPS = AIPS[['datetime', 'baseline', 'source', 'amp', 'phase', 'sigma', 'std', 'polarization', 'track', 'expt_no', 'u', 'v']]
AIPS = AIPS.sort_values('datetime').reset_index(drop=True)
return AIPS
|
def initialize_falist(path_file):
filename = path_file.split('/')[-1]
AIPS = pd.read_csv(path_file)
jd_st1_st2 = list(zip(AIPS['jd'], AIPS['st1'], AIPS['st2']))
AIPS['baseline'] = list(map(lambda x: dictBase[jd2track2017(x[0])][x[1]] + dictBase[jd2track2017(x[0])][x[2]], jd_st1_st2))
AIPS = AIPS
h_m_s_doy = list(zip(AIPS['hour'].astype('int32'), AIPS['min'].astype('int32'), AIPS['sec'].astype('int32'), AIPS['us'].astype('int32'), AIPS['doy'].astype('int32')))
AIPS['datetime'] = list(map(lambda x: datetime.datetime(2017, 1, 1, x[0], x[1], x[2], x[3]) + datetime.timedelta(days=int(x[4]) - 1), h_m_s_doy))
AIPS = AIPS
AIPS['track'] = list(map(lambda x: jd2track2017(x), AIPS['jd']))
AIPS['expt_no'] = list(map(lambda x: jd2expt2017(x), AIPS['jd']))
AIPS = AIPS
AIPS['vis'] = AIPS['amp'] * np.exp(1j * AIPS['phase'] * np.pi / 180)
AIPS['std'] = AIPS['amp']
AIPS = AIPS.groupby(('expt_no', 'track', 'datetime', 'baseline')).agg({'vis': np.mean, 'std': np.std, 'sigma': lambda x: np.std(x) / len(x), 'u': np.mean, 'v': np.mean})
AIPS['amp'] = np.abs(AIPS['vis'])
AIPS['phase'] = np.angle(AIPS['vis']) * 180 / np.pi
sour = filename.split('.')[-3]
pol = filename.split('.')[-2]
AIPS['source'] = [sour] * AIPS.shape[0]
AIPS['polarization'] = [pol] * AIPS.shape[0]
AIPS = AIPS
AIPS = AIPS.reset_index()
AIPS = AIPS[['datetime', 'baseline', 'source', 'amp', 'phase', 'sigma', 'std', 'polarization', 'track', 'expt_no', 'u', 'v']]
AIPS = AIPS.sort_values('datetime').reset_index(drop=True)
return AIPS
|
eat
|
positive
|
def maybe_download_and_extract(data_dir):
train_dir = os.path.join(data_dir, 'train_32x32')
if not os.path.exists(train_dir):
train_url = 'http://image-net.org/small/train_32x32.tar'
filepath = os.path.join(data_dir, 'train_32x32.tar')
<DeepExtract>
filename = train_url.split('/')[-1]
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
print(train_url)
(filepath, headers) = urllib.request.urlretrieve(train_url, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
</DeepExtract>
print('unpacking the tar file', filepath)
tarfile.open(filepath, 'r').extractall(data_dir)
test_dir = os.path.join(data_dir, 'valid_32x32')
if not os.path.exists(test_dir):
test_url = 'http://image-net.org/small/valid_32x32.tar'
filepath = os.path.join(data_dir, 'valid_32x32.tar')
<DeepExtract>
filename = test_url.split('/')[-1]
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
print(test_url)
(filepath, headers) = urllib.request.urlretrieve(test_url, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
</DeepExtract>
print('unpacking the tar file', filepath)
tarfile.open(filepath, 'r').extractall(data_dir)
|
def maybe_download_and_extract(data_dir):
train_dir = os.path.join(data_dir, 'train_32x32')
if not os.path.exists(train_dir):
train_url = 'http://image-net.org/small/train_32x32.tar'
filepath = os.path.join(data_dir, 'train_32x32.tar')
filename = train_url.split('/')[-1]
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
print(train_url)
(filepath, headers) = urllib.request.urlretrieve(train_url, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
print('unpacking the tar file', filepath)
tarfile.open(filepath, 'r').extractall(data_dir)
test_dir = os.path.join(data_dir, 'valid_32x32')
if not os.path.exists(test_dir):
test_url = 'http://image-net.org/small/valid_32x32.tar'
filepath = os.path.join(data_dir, 'valid_32x32.tar')
filename = test_url.split('/')[-1]
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
print(test_url)
(filepath, headers) = urllib.request.urlretrieve(test_url, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
print('unpacking the tar file', filepath)
tarfile.open(filepath, 'r').extractall(data_dir)
|
DualLearning
|
positive
|
def call(self, inputs, training=False):
""" hidden_states: float Tensor in shape [bsz, seq_len, hidden_size], the hidden-states of the last layer.
cls_index: [optional] position of the classification token if summary_type == 'cls_index',
shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states.
if summary_type == 'cls_index' and cls_index is None:
we take the last token of the sequence as classification token
"""
if not isinstance(inputs, (dict, tuple, list)):
hidden_states = inputs
cls_index = None
elif isinstance(inputs, (tuple, list)):
hidden_states = inputs[0]
cls_index = inputs[1] if len(inputs) > 1 else None
assert len(inputs) <= 2, 'Too many inputs.'
else:
hidden_states = inputs.get('hidden_states')
cls_index = inputs.get('cls_index', None)
if self.summary_type == 'last':
output = hidden_states[:, -1]
elif self.summary_type == 'first':
output = hidden_states[:, 0]
elif self.summary_type == 'mean':
output = tf.reduce_mean(hidden_states, axis=1)
elif self.summary_type == 'cls_index':
<DeepExtract>
static = hidden_states.shape.as_list()
dynamic = tf.shape(hidden_states)
hidden_shape = [dynamic[i] if s is None else s for (i, s) in enumerate(static)]
</DeepExtract>
if cls_index is None:
cls_index = tf.fill(hidden_shape[:-2], hidden_shape[-2] - 1)
<DeepExtract>
static = cls_index.shape.as_list()
dynamic = tf.shape(cls_index)
cls_shape = [dynamic[i] if s is None else s for (i, s) in enumerate(static)]
</DeepExtract>
if len(cls_shape) <= len(hidden_shape) - 2:
cls_index = cls_index[..., tf.newaxis]
output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
output = tf.squeeze(output, axis=len(hidden_shape) - 2)
elif self.summary_type == 'attn':
raise NotImplementedError
if self.has_first_dropout:
output = self.first_dropout(output, training=training)
if self.has_summary:
output = self.summary(output)
if self.has_activation:
output = self.activation(output)
if self.has_last_dropout:
output = self.last_dropout(output, training=training)
return output
|
def call(self, inputs, training=False):
""" hidden_states: float Tensor in shape [bsz, seq_len, hidden_size], the hidden-states of the last layer.
cls_index: [optional] position of the classification token if summary_type == 'cls_index',
shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states.
if summary_type == 'cls_index' and cls_index is None:
we take the last token of the sequence as classification token
"""
if not isinstance(inputs, (dict, tuple, list)):
hidden_states = inputs
cls_index = None
elif isinstance(inputs, (tuple, list)):
hidden_states = inputs[0]
cls_index = inputs[1] if len(inputs) > 1 else None
assert len(inputs) <= 2, 'Too many inputs.'
else:
hidden_states = inputs.get('hidden_states')
cls_index = inputs.get('cls_index', None)
if self.summary_type == 'last':
output = hidden_states[:, -1]
elif self.summary_type == 'first':
output = hidden_states[:, 0]
elif self.summary_type == 'mean':
output = tf.reduce_mean(hidden_states, axis=1)
elif self.summary_type == 'cls_index':
static = hidden_states.shape.as_list()
dynamic = tf.shape(hidden_states)
hidden_shape = [dynamic[i] if s is None else s for (i, s) in enumerate(static)]
if cls_index is None:
cls_index = tf.fill(hidden_shape[:-2], hidden_shape[-2] - 1)
static = cls_index.shape.as_list()
dynamic = tf.shape(cls_index)
cls_shape = [dynamic[i] if s is None else s for (i, s) in enumerate(static)]
if len(cls_shape) <= len(hidden_shape) - 2:
cls_index = cls_index[..., tf.newaxis]
output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
output = tf.squeeze(output, axis=len(hidden_shape) - 2)
elif self.summary_type == 'attn':
raise NotImplementedError
if self.has_first_dropout:
output = self.first_dropout(output, training=training)
if self.has_summary:
output = self.summary(output)
if self.has_activation:
output = self.activation(output)
if self.has_last_dropout:
output = self.last_dropout(output, training=training)
return output
|
COCON_ICLR2021
|
positive
|
def test_code_context(self):
for end_line_num in (None, END_LINE_NUM):
for prefix in ('', 'salchicha_'):
context = _code_context(FILE_PATH, LINE_NUM, end_line_num, prefix)
<DeepExtract>
expected_actual_line = ACTUAL_LINES[LINE_NUM - 1:end_line_num or LINE_NUM]
self.assertEqual(context[prefix + 'actual_line'], expected_actual_line)
</DeepExtract>
<DeepExtract>
expected_code = [line.strip('\n') for line in ACTUAL_LINES[0:LINE_NUM + 10]] + ['']
self.assertEqual(context[prefix + 'code'], expected_code)
</DeepExtract>
self.assertEqual(context[prefix + 'file_path'], FILE_PATH)
self.assertEqual(context[prefix + 'line_num'], LINE_NUM)
|
def test_code_context(self):
for end_line_num in (None, END_LINE_NUM):
for prefix in ('', 'salchicha_'):
context = _code_context(FILE_PATH, LINE_NUM, end_line_num, prefix)
expected_actual_line = ACTUAL_LINES[LINE_NUM - 1:end_line_num or LINE_NUM]
self.assertEqual(context[prefix + 'actual_line'], expected_actual_line)
expected_code = [line.strip('\n') for line in ACTUAL_LINES[0:LINE_NUM + 10]] + ['']
self.assertEqual(context[prefix + 'code'], expected_code)
self.assertEqual(context[prefix + 'file_path'], FILE_PATH)
self.assertEqual(context[prefix + 'line_num'], LINE_NUM)
|
django-silk
|
positive
|
def hessian_to_block_tree(hessian, f_tree, params_tree):
"""Convert a Hessian array to block-tree format.
Remark: In comparison to Jax we need this formatting function because we calculate
the second derivative using second-order finite differences. Jax computes the
second derivative by applying their jacobian function twice, which produces the
desired block-tree shape of the Hessian automatically. If we apply our first
derivative function twice we get the same block-tree shape.
Args:
hessian (np.ndarray): The Hessian, 2- or 3-dimensional array representation of
the resulting block-tree.
f_tree (pytree): The function evaluated at params_tree.
params_tree (pytree): The params_tree.
Returns:
hessian_block_tree (pytree): The pytree
"""
<DeepExtract>
extended_registry = get_registry(extended=True)
flat_f = tree_leaves(f_tree, registry=extended_registry)
flat_p = tree_leaves(params_tree, registry=extended_registry)
if len(flat_f) == 1:
relevant_hessian_shape = tuple((k for k in hessian.shape if k != 1))
if len(relevant_hessian_shape) == 0 and len(flat_p) != 1:
raise ValueError('Hessian dimension does not match those of params.')
if len(relevant_hessian_shape) == 2:
if relevant_hessian_shape != (len(flat_p), len(flat_p)):
raise ValueError('Hessian dimension does not match those of params.')
if len(relevant_hessian_shape) > 2:
raise ValueError('Hessian must be 0- or 2-d if f is scalar-valued.')
else:
if hessian.ndim != 3:
raise ValueError('Hessian must be 3d if f is multidimensional.')
if hessian.shape[0] != len(flat_f):
raise ValueError('First Hessian dimension does not match that of f.')
if hessian.shape[1:] != (len(flat_p), len(flat_p)):
raise ValueError('Last two Hessian dimensions do not match those of params.')
</DeepExtract>
if hessian.ndim == 2:
hessian = hessian[np.newaxis]
(flat_f, treedef_f) = tree_flatten(f_tree)
(flat_p, treedef_p) = tree_flatten(params_tree)
flat_f_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_f]
flat_p_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_p]
shapes_f = [np.shape(a) for a in flat_f_np]
shapes_p = [np.shape(a) for a in flat_p_np]
block_bounds_f = np.cumsum([int(np.product(s)) for s in shapes_f[:-1]])
block_bounds_p = np.cumsum([int(np.product(s)) for s in shapes_p[:-1]])
sub_block_trees = []
for (s0, subarr) in zip(shapes_f, np.split(hessian, block_bounds_f, axis=0)):
blocks = []
for (leaf_outer, s1, submat) in zip(flat_p, shapes_p, np.split(subarr, block_bounds_p, axis=1)):
row = []
for (leaf_inner, s2, block_values) in zip(flat_p, shapes_p, np.split(submat, block_bounds_p, axis=2)):
_shape = [k for k in (*s0, *s1, *s2) if k != 1]
raw_block = block_values.reshape(_shape)
<DeepExtract>
if np.ndim(raw_block) not in (1, 2):
block = raw_block
if not _is_pd_object(leaf_outer) and (not _is_pd_object(leaf_inner)):
block = raw_block
index1 = None if not _is_pd_object(leaf_outer) else leaf_outer.index
index2 = None if not _is_pd_object(leaf_inner) else leaf_inner.index
if np.ndim(raw_block) == 1:
out = pd.Series(raw_block, index=_select_non_none(index1, index2))
elif np.ndim(raw_block) == 2:
if np.isscalar(leaf_outer) or np.isscalar(leaf_inner):
if np.isscalar(leaf_outer):
(index, columns) = (leaf_inner.index, leaf_inner.columns)
elif np.isscalar(leaf_inner):
(index, columns) = (leaf_outer.index, leaf_outer.columns)
out = pd.DataFrame(raw_block, index=index, columns=columns)
else:
out = pd.DataFrame(raw_block, index=index1, columns=index2)
block = out
</DeepExtract>
row.append(block)
blocks.append(row)
block_tree = tree_unflatten(treedef_p, [tree_unflatten(treedef_p, row) for row in blocks])
sub_block_trees.append(block_tree)
hessian_block_tree = tree_unflatten(treedef_f, sub_block_trees)
return hessian_block_tree
|
def hessian_to_block_tree(hessian, f_tree, params_tree):
"""Convert a Hessian array to block-tree format.
Remark: In comparison to Jax we need this formatting function because we calculate
the second derivative using second-order finite differences. Jax computes the
second derivative by applying their jacobian function twice, which produces the
desired block-tree shape of the Hessian automatically. If we apply our first
derivative function twice we get the same block-tree shape.
Args:
hessian (np.ndarray): The Hessian, 2- or 3-dimensional array representation of
the resulting block-tree.
f_tree (pytree): The function evaluated at params_tree.
params_tree (pytree): The params_tree.
Returns:
hessian_block_tree (pytree): The pytree
"""
extended_registry = get_registry(extended=True)
flat_f = tree_leaves(f_tree, registry=extended_registry)
flat_p = tree_leaves(params_tree, registry=extended_registry)
if len(flat_f) == 1:
relevant_hessian_shape = tuple((k for k in hessian.shape if k != 1))
if len(relevant_hessian_shape) == 0 and len(flat_p) != 1:
raise ValueError('Hessian dimension does not match those of params.')
if len(relevant_hessian_shape) == 2:
if relevant_hessian_shape != (len(flat_p), len(flat_p)):
raise ValueError('Hessian dimension does not match those of params.')
if len(relevant_hessian_shape) > 2:
raise ValueError('Hessian must be 0- or 2-d if f is scalar-valued.')
else:
if hessian.ndim != 3:
raise ValueError('Hessian must be 3d if f is multidimensional.')
if hessian.shape[0] != len(flat_f):
raise ValueError('First Hessian dimension does not match that of f.')
if hessian.shape[1:] != (len(flat_p), len(flat_p)):
raise ValueError('Last two Hessian dimensions do not match those of params.')
if hessian.ndim == 2:
hessian = hessian[np.newaxis]
(flat_f, treedef_f) = tree_flatten(f_tree)
(flat_p, treedef_p) = tree_flatten(params_tree)
flat_f_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_f]
flat_p_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_p]
shapes_f = [np.shape(a) for a in flat_f_np]
shapes_p = [np.shape(a) for a in flat_p_np]
block_bounds_f = np.cumsum([int(np.product(s)) for s in shapes_f[:-1]])
block_bounds_p = np.cumsum([int(np.product(s)) for s in shapes_p[:-1]])
sub_block_trees = []
for (s0, subarr) in zip(shapes_f, np.split(hessian, block_bounds_f, axis=0)):
blocks = []
for (leaf_outer, s1, submat) in zip(flat_p, shapes_p, np.split(subarr, block_bounds_p, axis=1)):
row = []
for (leaf_inner, s2, block_values) in zip(flat_p, shapes_p, np.split(submat, block_bounds_p, axis=2)):
_shape = [k for k in (*s0, *s1, *s2) if k != 1]
raw_block = block_values.reshape(_shape)
if np.ndim(raw_block) not in (1, 2):
block = raw_block
if not _is_pd_object(leaf_outer) and (not _is_pd_object(leaf_inner)):
block = raw_block
index1 = None if not _is_pd_object(leaf_outer) else leaf_outer.index
index2 = None if not _is_pd_object(leaf_inner) else leaf_inner.index
if np.ndim(raw_block) == 1:
out = pd.Series(raw_block, index=_select_non_none(index1, index2))
elif np.ndim(raw_block) == 2:
if np.isscalar(leaf_outer) or np.isscalar(leaf_inner):
if np.isscalar(leaf_outer):
(index, columns) = (leaf_inner.index, leaf_inner.columns)
elif np.isscalar(leaf_inner):
(index, columns) = (leaf_outer.index, leaf_outer.columns)
out = pd.DataFrame(raw_block, index=index, columns=columns)
else:
out = pd.DataFrame(raw_block, index=index1, columns=index2)
block = out
row.append(block)
blocks.append(row)
block_tree = tree_unflatten(treedef_p, [tree_unflatten(treedef_p, row) for row in blocks])
sub_block_trees.append(block_tree)
hessian_block_tree = tree_unflatten(treedef_f, sub_block_trees)
return hessian_block_tree
|
estimagic
|
positive
|
def localization_setup(y2x_distances, batches):
def localization_now(radius, direction, t, tag=None):
"""Provide localization setup for time t."""
<DeepExtract>
obs_coord = ind2sub(safe_eval(obs_inds, t))
y2x = pairwise_distances(obs_coord, state_coord, shape if periodic else None)
</DeepExtract>
if direction == 'x2y':
def obs_taperer(batch):
x2y = y2x.T
dists = x2y[batch].mean(axis=0)
return inds_and_coeffs(dists, radius, tag=tag)
return (batches, obs_taperer)
elif direction == 'y2x':
def state_taperer(obs_idx):
return inds_and_coeffs(y2x[obs_idx], radius, tag=tag)
return state_taperer
return localization_now
|
def localization_setup(y2x_distances, batches):
def localization_now(radius, direction, t, tag=None):
"""Provide localization setup for time t."""
obs_coord = ind2sub(safe_eval(obs_inds, t))
y2x = pairwise_distances(obs_coord, state_coord, shape if periodic else None)
if direction == 'x2y':
def obs_taperer(batch):
x2y = y2x.T
dists = x2y[batch].mean(axis=0)
return inds_and_coeffs(dists, radius, tag=tag)
return (batches, obs_taperer)
elif direction == 'y2x':
def state_taperer(obs_idx):
return inds_and_coeffs(y2x[obs_idx], radius, tag=tag)
return state_taperer
return localization_now
|
DAPPER
|
positive
|
def _has_context(self, filename, file_set):
<DeepExtract>
fidx = get_idx(filename)
idxs = list(np.arange(-self.backward_context * self.strides, 0, self.strides)) + list(np.arange(0, self.forward_context * self.strides, self.strides) + self.strides)
context_paths = [self._change_idx(fidx + i, filename) for i in idxs]
</DeepExtract>
return all([f in file_set for f in context_paths])
|
def _has_context(self, filename, file_set):
fidx = get_idx(filename)
idxs = list(np.arange(-self.backward_context * self.strides, 0, self.strides)) + list(np.arange(0, self.forward_context * self.strides, self.strides) + self.strides)
context_paths = [self._change_idx(fidx + i, filename) for i in idxs]
return all([f in file_set for f in context_paths])
|
dro-sfm
|
positive
|
def place_trajectory_in_city_frame(traj_label: TrajectoryLabel, log_id: str) -> np.ndarray:
"""Place trajectory in the city frame
Args:
traj_label (TrajectoryLabel): instance of the TrajectoryLabel class.
log_id (str): Log id.
Returns:
- traj_city_fr: trajectory length of NUM_CUBOID_VERTS (x,y,z) coords per cuboid.
"""
seq_len = traj_label.timestamps.shape[0]
if self.bboxes_3d:
NUM_CUBOID_VERTS = 8
else:
NUM_CUBOID_VERTS = 4
traj_city_fr = np.zeros((seq_len, NUM_CUBOID_VERTS, 3))
rand_color = (float(np.random.rand()), float(np.random.rand()), float(np.random.rand()))
logger.info(f'On log {log_id} with {traj_label.track_uuid}')
for t in range(seq_len):
obj_label_rec = ObjectLabelRecord(quaternion=traj_label.quaternions[t], translation=traj_label.translations[t], length=traj_label.max_length, width=traj_label.max_width, height=traj_label.max_height, occlusion=traj_label.occlusion[t])
timestamp = int(traj_label.timestamps[t])
if self.bboxes_3d:
bbox_ego_frame = obj_label_rec.as_3d_bbox()
else:
bbox_ego_frame = obj_label_rec.as_2d_bbox()
<DeepExtract>
city_to_egovehicle_se3 = get_city_SE3_egovehicle_at_sensor_t(timestamp, self.dataset_dir, log_id)
if city_to_egovehicle_se3 is None:
raise RuntimeError(f'Could not get city to egovehicle coordinate transformation at timestamp {timestamp}')
bbox_city_fr = city_to_egovehicle_se3.transform_point_cloud(bbox_ego_frame)
pose_city_to_ego = {'rotation': city_to_egovehicle_se3.rotation, 'translation': city_to_egovehicle_se3.translation}
(bbox_city_fr, pose_city_to_ego) = (bbox_city_fr, pose_city_to_ego)
</DeepExtract>
if bbox_city_fr is None:
logger.warning(f'\t {log_id}: Couldnt find the pose for {traj_label.track_uuid}!')
continue
self.log_egopose_dict[log_id][timestamp] = pose_city_to_ego
frame_rec = FrameRecord(bbox_city_fr=bbox_city_fr, bbox_ego_frame=bbox_ego_frame, occlusion_val=obj_label_rec.occlusion, color=rand_color, track_uuid=traj_label.track_uuid, obj_class_str=traj_label.obj_class_str)
self.log_timestamp_dict[log_id].setdefault(timestamp, []).append(frame_rec)
traj_city_fr[t] = bbox_city_fr
return traj_city_fr
|
def place_trajectory_in_city_frame(traj_label: TrajectoryLabel, log_id: str) -> np.ndarray:
"""Place trajectory in the city frame
Args:
traj_label (TrajectoryLabel): instance of the TrajectoryLabel class.
log_id (str): Log id.
Returns:
- traj_city_fr: trajectory length of NUM_CUBOID_VERTS (x,y,z) coords per cuboid.
"""
seq_len = traj_label.timestamps.shape[0]
if self.bboxes_3d:
NUM_CUBOID_VERTS = 8
else:
NUM_CUBOID_VERTS = 4
traj_city_fr = np.zeros((seq_len, NUM_CUBOID_VERTS, 3))
rand_color = (float(np.random.rand()), float(np.random.rand()), float(np.random.rand()))
logger.info(f'On log {log_id} with {traj_label.track_uuid}')
for t in range(seq_len):
obj_label_rec = ObjectLabelRecord(quaternion=traj_label.quaternions[t], translation=traj_label.translations[t], length=traj_label.max_length, width=traj_label.max_width, height=traj_label.max_height, occlusion=traj_label.occlusion[t])
timestamp = int(traj_label.timestamps[t])
if self.bboxes_3d:
bbox_ego_frame = obj_label_rec.as_3d_bbox()
else:
bbox_ego_frame = obj_label_rec.as_2d_bbox()
city_to_egovehicle_se3 = get_city_SE3_egovehicle_at_sensor_t(timestamp, self.dataset_dir, log_id)
if city_to_egovehicle_se3 is None:
raise RuntimeError(f'Could not get city to egovehicle coordinate transformation at timestamp {timestamp}')
bbox_city_fr = city_to_egovehicle_se3.transform_point_cloud(bbox_ego_frame)
pose_city_to_ego = {'rotation': city_to_egovehicle_se3.rotation, 'translation': city_to_egovehicle_se3.translation}
(bbox_city_fr, pose_city_to_ego) = (bbox_city_fr, pose_city_to_ego)
if bbox_city_fr is None:
logger.warning(f'\t {log_id}: Couldnt find the pose for {traj_label.track_uuid}!')
continue
self.log_egopose_dict[log_id][timestamp] = pose_city_to_ego
frame_rec = FrameRecord(bbox_city_fr=bbox_city_fr, bbox_ego_frame=bbox_ego_frame, occlusion_val=obj_label_rec.occlusion, color=rand_color, track_uuid=traj_label.track_uuid, obj_class_str=traj_label.obj_class_str)
self.log_timestamp_dict[log_id].setdefault(timestamp, []).append(frame_rec)
traj_city_fr[t] = bbox_city_fr
return traj_city_fr
|
argoverse-api
|
positive
|
def get_energy_dependent_integration_weights(spin, energy):
integration_weights = np.zeros(self._ir_weights_shape[spin])
<DeepExtract>
max_energies = self.max_tetrahedra_energies[spin]
min_energies = self.min_tetrahedra_energies[spin]
if band_idx is not None:
mask = np.full_like(max_energies, False, dtype=bool)
mask[band_idx] = True
tetrahedra_mask = (min_energies < energy) & (max_energies > energy) & mask
else:
tetrahedra_mask = (min_energies < energy) & (max_energies > energy)
</DeepExtract>
if not np.any(tetrahedra_mask):
return integration_weights
energies = self.ir_tetrahedra_energies[spin][tetrahedra_mask]
e21 = self.e21[spin][tetrahedra_mask]
e31 = self.e31[spin][tetrahedra_mask]
e41 = self.e41[spin][tetrahedra_mask]
e32 = self.e32[spin][tetrahedra_mask]
e42 = self.e42[spin][tetrahedra_mask]
e43 = self.e43[spin][tetrahedra_mask]
cond_a_mask = (energies[:, 0] < energy) & (energy < energies[:, 1])
cond_b_mask = (energies[:, 1] <= energy) & (energy < energies[:, 2])
cond_c_mask = (energies[:, 2] <= energy) & (energy < energies[:, 3])
ee1 = energy - energies[:, 0]
ee2 = energy - energies[:, 1]
ee3 = energy - energies[:, 2]
e2e = energies[:, 1] - energy
e3e = energies[:, 2] - energy
e4e = energies[:, 3] - energy
kpoints_idx = self.ir_tetrahedra[spin][tetrahedra_mask]
ir_kpoints_idx = self.ir_kpoint_mapping[kpoints_idx]
vert_weights = np.zeros_like(energies)
<DeepExtract>
c = ee1[cond_a_mask] ** 2 / (e21[cond_a_mask] * e31[cond_a_mask] * e41[cond_a_mask])
i1 = c * (e2e[cond_a_mask] / e21[cond_a_mask] + e3e[cond_a_mask] / e31[cond_a_mask] + e4e[cond_a_mask] / e41[cond_a_mask])
i2 = c * (ee1[cond_a_mask] / e21[cond_a_mask])
i3 = c * (ee1[cond_a_mask] / e31[cond_a_mask])
i4 = c * (ee1[cond_a_mask] / e41[cond_a_mask])
vert_weights[cond_a_mask] = np.stack([i1, i2, i3, i4], axis=1)
</DeepExtract>
<DeepExtract>
c = ee1[cond_b_mask] * e4e[cond_b_mask] / (e31[cond_b_mask] * e41[cond_b_mask] * e42[cond_b_mask])
x = e3e[cond_b_mask] / e31[cond_b_mask]
y = e4e[cond_b_mask] / e42[cond_b_mask]
z = ee2[cond_b_mask] / (e32[cond_b_mask] * e42[cond_b_mask])
zx = z * x
k = ee1[cond_b_mask] / e31[cond_b_mask]
n = ee2[cond_b_mask] / e42[cond_b_mask]
i1 = c * (x + e4e[cond_b_mask] / e41[cond_b_mask]) + z * x ** 2
i2 = c * y + zx * (e3e[cond_b_mask] / e32[cond_b_mask] + y)
i3 = c * k + zx * (k + ee2[cond_b_mask] / e32[cond_b_mask])
i4 = c * (ee1[cond_b_mask] / e41[cond_b_mask] + n) + zx * n
vert_weights[cond_b_mask] = np.stack([i1, i2, i3, i4], axis=1)
</DeepExtract>
<DeepExtract>
c = e4e[cond_c_mask] ** 2 / (e41[cond_c_mask] * e42[cond_c_mask] * e43[cond_c_mask])
i1 = c * e4e[cond_c_mask] / e41[cond_c_mask]
i2 = c * e4e[cond_c_mask] / e42[cond_c_mask]
i3 = c * e4e[cond_c_mask] / e43[cond_c_mask]
i4 = c * (ee1[cond_c_mask] / e41[cond_c_mask] + ee2[cond_c_mask] / e42[cond_c_mask] + ee3[cond_c_mask] / e43[cond_c_mask])
vert_weights[cond_c_mask] = np.stack([i1, i2, i3, i4], axis=1)
</DeepExtract>
(band_idx, tetrahedra_idx) = np.where(tetrahedra_mask)
vert_weights *= self.ir_tetrahedra_weights[tetrahedra_idx][:, None]
flat_ir_kpoints = np.ravel(ir_kpoints_idx)
flat_ir_weights = np.ravel(vert_weights)
flat_bands = np.repeat(band_idx, 4)
np.add.at(integration_weights, (flat_bands, flat_ir_kpoints), flat_ir_weights)
integration_weights *= self._tetrahedron_volume / self.ir_kpoint_weights[None, :]
return integration_weights
|
def get_energy_dependent_integration_weights(spin, energy):
integration_weights = np.zeros(self._ir_weights_shape[spin])
max_energies = self.max_tetrahedra_energies[spin]
min_energies = self.min_tetrahedra_energies[spin]
if band_idx is not None:
mask = np.full_like(max_energies, False, dtype=bool)
mask[band_idx] = True
tetrahedra_mask = (min_energies < energy) & (max_energies > energy) & mask
else:
tetrahedra_mask = (min_energies < energy) & (max_energies > energy)
if not np.any(tetrahedra_mask):
return integration_weights
energies = self.ir_tetrahedra_energies[spin][tetrahedra_mask]
e21 = self.e21[spin][tetrahedra_mask]
e31 = self.e31[spin][tetrahedra_mask]
e41 = self.e41[spin][tetrahedra_mask]
e32 = self.e32[spin][tetrahedra_mask]
e42 = self.e42[spin][tetrahedra_mask]
e43 = self.e43[spin][tetrahedra_mask]
cond_a_mask = (energies[:, 0] < energy) & (energy < energies[:, 1])
cond_b_mask = (energies[:, 1] <= energy) & (energy < energies[:, 2])
cond_c_mask = (energies[:, 2] <= energy) & (energy < energies[:, 3])
ee1 = energy - energies[:, 0]
ee2 = energy - energies[:, 1]
ee3 = energy - energies[:, 2]
e2e = energies[:, 1] - energy
e3e = energies[:, 2] - energy
e4e = energies[:, 3] - energy
kpoints_idx = self.ir_tetrahedra[spin][tetrahedra_mask]
ir_kpoints_idx = self.ir_kpoint_mapping[kpoints_idx]
vert_weights = np.zeros_like(energies)
c = ee1[cond_a_mask] ** 2 / (e21[cond_a_mask] * e31[cond_a_mask] * e41[cond_a_mask])
i1 = c * (e2e[cond_a_mask] / e21[cond_a_mask] + e3e[cond_a_mask] / e31[cond_a_mask] + e4e[cond_a_mask] / e41[cond_a_mask])
i2 = c * (ee1[cond_a_mask] / e21[cond_a_mask])
i3 = c * (ee1[cond_a_mask] / e31[cond_a_mask])
i4 = c * (ee1[cond_a_mask] / e41[cond_a_mask])
vert_weights[cond_a_mask] = np.stack([i1, i2, i3, i4], axis=1)
c = ee1[cond_b_mask] * e4e[cond_b_mask] / (e31[cond_b_mask] * e41[cond_b_mask] * e42[cond_b_mask])
x = e3e[cond_b_mask] / e31[cond_b_mask]
y = e4e[cond_b_mask] / e42[cond_b_mask]
z = ee2[cond_b_mask] / (e32[cond_b_mask] * e42[cond_b_mask])
zx = z * x
k = ee1[cond_b_mask] / e31[cond_b_mask]
n = ee2[cond_b_mask] / e42[cond_b_mask]
i1 = c * (x + e4e[cond_b_mask] / e41[cond_b_mask]) + z * x ** 2
i2 = c * y + zx * (e3e[cond_b_mask] / e32[cond_b_mask] + y)
i3 = c * k + zx * (k + ee2[cond_b_mask] / e32[cond_b_mask])
i4 = c * (ee1[cond_b_mask] / e41[cond_b_mask] + n) + zx * n
vert_weights[cond_b_mask] = np.stack([i1, i2, i3, i4], axis=1)
c = e4e[cond_c_mask] ** 2 / (e41[cond_c_mask] * e42[cond_c_mask] * e43[cond_c_mask])
i1 = c * e4e[cond_c_mask] / e41[cond_c_mask]
i2 = c * e4e[cond_c_mask] / e42[cond_c_mask]
i3 = c * e4e[cond_c_mask] / e43[cond_c_mask]
i4 = c * (ee1[cond_c_mask] / e41[cond_c_mask] + ee2[cond_c_mask] / e42[cond_c_mask] + ee3[cond_c_mask] / e43[cond_c_mask])
vert_weights[cond_c_mask] = np.stack([i1, i2, i3, i4], axis=1)
(band_idx, tetrahedra_idx) = np.where(tetrahedra_mask)
vert_weights *= self.ir_tetrahedra_weights[tetrahedra_idx][:, None]
flat_ir_kpoints = np.ravel(ir_kpoints_idx)
flat_ir_weights = np.ravel(vert_weights)
flat_bands = np.repeat(band_idx, 4)
np.add.at(integration_weights, (flat_bands, flat_ir_kpoints), flat_ir_weights)
integration_weights *= self._tetrahedron_volume / self.ir_kpoint_weights[None, :]
return integration_weights
|
amset
|
positive
|
def test_plugins_download_base_prepare_download_dir_permission(self):
"""Download._prepare_download must check output directory permissions"""
if os.name == 'nt':
self.skipTest('windows permissions too complex to set for this test')
<DeepExtract>
plugin = self.plugins_manager.get_download_plugin(self.product)
</DeepExtract>
self.product.location = self.product.remote_location = 'somewhere'
outdir = TemporaryDirectory()
os.chmod(outdir.name, stat.S_IREAD)
with self.assertLogs(level='WARNING') as cm:
(fs_path, _) = plugin._prepare_download(self.product, outputs_prefix=outdir.name)
self.assertIn('Unable to create records directory', str(cm.output))
|
def test_plugins_download_base_prepare_download_dir_permission(self):
"""Download._prepare_download must check output directory permissions"""
if os.name == 'nt':
self.skipTest('windows permissions too complex to set for this test')
plugin = self.plugins_manager.get_download_plugin(self.product)
self.product.location = self.product.remote_location = 'somewhere'
outdir = TemporaryDirectory()
os.chmod(outdir.name, stat.S_IREAD)
with self.assertLogs(level='WARNING') as cm:
(fs_path, _) = plugin._prepare_download(self.product, outputs_prefix=outdir.name)
self.assertIn('Unable to create records directory', str(cm.output))
|
eodag
|
positive
|
def get_base_form(self, lower: str, pos: str):
"""
Get base form by looking at exceptions and enumerating each suffix group.
:param lower:
:param pos:
:return:
"""
<DeepExtract>
base = self.dict_exc.get(lower, None)
</DeepExtract>
if base is None:
<DeepExtract>
for suffix_group in self.suffix_groups:
base = suffix_group.get_base_form(lower, pos)
if base is not None:
base = base
base = None
</DeepExtract>
return base
|
def get_base_form(self, lower: str, pos: str):
"""
Get base form by looking at exceptions and enumerating each suffix group.
:param lower:
:param pos:
:return:
"""
base = self.dict_exc.get(lower, None)
if base is None:
for suffix_group in self.suffix_groups:
base = suffix_group.get_base_form(lower, pos)
if base is not None:
base = base
base = None
return base
|
elit
|
positive
|
def search(self, query, ordered_ids=None, **options):
<DeepExtract>
index = model_document.index()
documents = index.search(query, document_class=document_class, **options)
keys = [x.instance_id for x in documents]
</DeepExtract>
if ordered_ids is not None:
ordered_ids.extend(keys)
return self.filter(pk__in=keys)
|
def search(self, query, ordered_ids=None, **options):
index = model_document.index()
documents = index.search(query, document_class=document_class, **options)
keys = [x.instance_id for x in documents]
if ordered_ids is not None:
ordered_ids.extend(keys)
return self.filter(pk__in=keys)
|
djangae
|
positive
|
def _make_head(pre_stage_channels):
head_block = Bottleneck
head_channels = [32, 64, 128, 256]
incre_modules = []
for (i, channels) in enumerate(pre_stage_channels):
<DeepExtract>
downsample = None
if 1 != 1 or channels != head_channels[i] * head_block.expansion:
downsample = nn.Sequential(nn.Conv2d(channels, head_channels[i] * head_block.expansion, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(head_channels[i] * head_block.expansion, momentum=BN_MOMENTUM))
layers = []
layers.append(head_block(channels, head_channels[i], 1, downsample))
channels = head_channels[i] * head_block.expansion
for i in range(1, 1):
layers.append(head_block(channels, head_channels[i]))
incre_module = nn.Sequential(*layers)
</DeepExtract>
incre_modules.append(incre_module)
incre_modules = nn.ModuleList(incre_modules)
downsamp_modules = []
for i in range(len(pre_stage_channels) - 1):
in_channels = head_channels[i] * head_block.expansion
out_channels = head_channels[i + 1] * head_block.expansion
downsamp_module = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM), nn.ReLU(inplace=True))
downsamp_modules.append(downsamp_module)
downsamp_modules = nn.ModuleList(downsamp_modules)
final_layer = nn.Sequential(nn.Conv2d(in_channels=head_channels[3] * head_block.expansion, out_channels=2048, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(2048, momentum=BN_MOMENTUM), nn.ReLU(inplace=True))
return (incre_modules, downsamp_modules, final_layer)
|
def _make_head(pre_stage_channels):
head_block = Bottleneck
head_channels = [32, 64, 128, 256]
incre_modules = []
for (i, channels) in enumerate(pre_stage_channels):
downsample = None
if 1 != 1 or channels != head_channels[i] * head_block.expansion:
downsample = nn.Sequential(nn.Conv2d(channels, head_channels[i] * head_block.expansion, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(head_channels[i] * head_block.expansion, momentum=BN_MOMENTUM))
layers = []
layers.append(head_block(channels, head_channels[i], 1, downsample))
channels = head_channels[i] * head_block.expansion
for i in range(1, 1):
layers.append(head_block(channels, head_channels[i]))
incre_module = nn.Sequential(*layers)
incre_modules.append(incre_module)
incre_modules = nn.ModuleList(incre_modules)
downsamp_modules = []
for i in range(len(pre_stage_channels) - 1):
in_channels = head_channels[i] * head_block.expansion
out_channels = head_channels[i + 1] * head_block.expansion
downsamp_module = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM), nn.ReLU(inplace=True))
downsamp_modules.append(downsamp_module)
downsamp_modules = nn.ModuleList(downsamp_modules)
final_layer = nn.Sequential(nn.Conv2d(in_channels=head_channels[3] * head_block.expansion, out_channels=2048, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(2048, momentum=BN_MOMENTUM), nn.ReLU(inplace=True))
return (incre_modules, downsamp_modules, final_layer)
|
AIC2020_ReID
|
positive
|
def main():
"""Main function."""
global g_osarch
global g_osname
compression = str(PlatformVar('compression'))
default_assembler_list = ['/usr/local/bin/as', 'as']
default_compiler_list = ['g++9', 'g++-9', 'g++8', 'g++-8', 'g++7', 'g++-7', 'g++', 'c++']
default_linker_list = ['/usr/local/bin/ld', 'ld']
default_preprocessor_list = ['cpp', 'clang-cpp']
default_objcopy_list = ['/usr/local/bin/objcopy', 'objcopy']
default_strip_list = ['/usr/local/bin/strip', 'strip']
definitions = []
extra_assembler_flags = []
extra_compiler_flags = []
extra_libraries = []
extra_linker_flags = []
include_directories = [PATH_VIDEOCORE + '/include', PATH_VIDEOCORE + '/include/interface/vcos/pthreads', PATH_VIDEOCORE + '/include/interface/vmcs_host/linux', '/usr/include/freetype2/', '/usr/include/opus', '/usr/include/SDL', '/usr/local/include', '/usr/local/include/freetype2/', '/usr/local/include/opus', '/usr/local/include/SDL']
library_directories = ['/lib/x86_64-linux-gnu', '/lib', PATH_VIDEOCORE + '/lib', '/usr/lib/arm-linux-gnueabihf', '/usr/lib/x86_64-linux-gnu', '/usr/lib', '/usr/local/lib']
opengl_reason = None
opengl_version = None
output_file = None
program_name = os.path.basename(sys.argv[0])
sdl_version = 2
parser = argparse.ArgumentParser(usage='%s [args] <source file(s)> [-o output]' % program_name, description='Size-optimized executable generator for *nix platforms.\nPreprocesses given source file(s) looking for specifically marked function calls, then generates a dynamic loader header file that can be used within these same source files to decrease executable size.\nOptionally also perform the actual compilation of a size-optimized binary after generating the header.', formatter_class=CustomHelpFormatter, add_help=False)
parser.add_argument('--32', dest='m32', action='store_true', help='Try to target 32-bit version of the architecture if on a 64-bit system.')
parser.add_argument('-a', '--abstraction-layer', choices=('sdl1', 'sdl2'), help='Specify abstraction layer to use instead of autodetecting.')
parser.add_argument('-A', '--assembler', default=None, help='Try to use given assembler executable as opposed to autodetect.')
parser.add_argument('-B', '--objcopy', default=None, help='Try to use given objcopy executable as opposed to autodetect.')
parser.add_argument('-C', '--compiler', default=None, help='Try to use given compiler executable as opposed to autodetect.')
parser.add_argument('-d', '--definition-ld', default='USE_LD', help="Definition to use for checking whether to use 'safe' mechanism instead of dynamic loading.\n(default: %(default)s)")
parser.add_argument('-D', '--define', default=[], action='append', help='Additional preprocessor definition.')
parser.add_argument('-e', '--elfling', action='store_true', help='Use elfling packer if available.')
parser.add_argument('-E', '--preprocess-only', action='store_true', help='Preprocess only, do not generate compiled output.')
parser.add_argument('-F', '--filedrop-mode', default='auto', choices=('header', 'native', 'cross', 'auto'), help='File dropping and interpreter calling mode.\n\theader:\n\t\tAdd explicit PT_INTERP header into the binary.\n\tnative:\n\t\tCall dynamic linker for the native platform.\n\tcross:\n\t\tCall dynamic linker assuming cross-platform emulation.\n\tauto:\n\t\tTry to autodetect and create the smallest binary to be ran on current machine.\n(default: %(default)s)')
parser.add_argument('-h', '--help', action='store_true', help='Print this help string and exit.')
parser.add_argument('-H', '--hash-function', default='auto', choices=('crc32', 'sdbm', 'auto'), help='Hash function to use for hashing function names:\n\tcrc32:\n\t\tCRC32 intrisic hash.\n\tsdbm:\n\t\tSDBM hash.\n\tauto:\n\t\tUse smallest implementation.\n(default: %(default)s)')
parser.add_argument('-I', '--include-directory', default=[], action='append', help='Add an include directory to be searched for header files.')
parser.add_argument('--interp', default=None, type=str, help='Use given interpreter as opposed to platform default.')
parser.add_argument('-k', '--linker', default=None, help='Try to use given linker executable as opposed to autodetect.')
parser.add_argument('-l', '--library', default=[], action='append', help='Add a library to be linked against.')
parser.add_argument('-L', '--library-directory', default=[], action='append', help='Add a library directory to be searched for libraries when linking.')
parser.add_argument('-m', '--method', default='maximum', choices=('vanilla', 'dlfcn', 'hash', 'maximum'), help="Method to use for decreasing output file size:\n\tvanilla:\n\t\tProduce binary normally, use no tricks except unpack header.\n\tdlfcn:\n\t\tUse dlopen/dlsym to decrease size without dependencies to any specific object format.\n\thash:\n\t\tUse knowledge of object file format to perform 'import by hash' loading, but do not break any specifications.\n\tmaximum:\n\t\tUse all available techniques to decrease output file size. Resulting file may violate object file specification.\n(default: %(default)s)")
parser.add_argument('--march', type=str, help='When compiling code, use given architecture as opposed to autodetect.')
parser.add_argument('--nice-exit', action='store_true', help='Do not use debugger trap, exit with proper system call.')
parser.add_argument('--nice-filedump', action='store_true', help='Do not use dirty tricks in compression header, also remove filedumped binary when done.')
parser.add_argument('--no-glesv2', action='store_true', help='Do not probe for OpenGL ES 2.0, always assume regular GL.')
parser.add_argument('--merge-headers', default='auto', choices=('yes', 'no', 'auto'), help='ELF header merging policy:\n\tno:\n\t\tHeaders concatenated sequentially.\n\tyes:\n\t\tTry to interleave headers to decrease file size.\n\tauto:\n\t\tUse interleaving if target platform allows.\n(default: %(default)s)')
parser.add_argument('--glsl-mode', default='full', choices=('none', 'nosquash', 'full'), help='GLSL crunching mode.\n\tnone:\n\t\tJust remove whitespace.\n\tnosquash:\n\t\tRefrain from squashing statements together, otherwise same as full.\n\tfull:\n\t\tTry to minimize file size by any means necessary.\n(default: %(default)s)')
parser.add_argument('--glsl-inlines', default=-1, type=int, help='Maximum number of inline operations to do for GLSL.\n(default: unlimited)')
parser.add_argument('--glsl-renames', default=-1, type=int, help='Maximum number of rename operations to do for GLSL.\n(default: unlimited)')
parser.add_argument('--glsl-simplifys', default=-1, type=int, help='Maximum number of simplify operations to do for GLSL.\n(default: unlimited)')
parser.add_argument('--linux', action='store_true', help="Try to target Linux if not in Linux. Equal to '-O linux'.")
parser.add_argument('-o', '--output-file', default=[], nargs='*', help='Name of output file to generate\nIf the name specified features a path, it will be used verbatim. Otherwise the binary will be created in the same path as source file(s) compiled.\nIf only processing GLSL files, this parameter can be specified multiple times, but must be specified exactly once per input GLSL file.')
parser.add_argument('-O', '--operating-system', help='Try to target given operating system insofar as cross-compilation is possible.')
parser.add_argument('-P', '--call-prefix', default='dnload_', help='Call prefix to identify desired calls.\n(default: %(default)s)')
parser.add_argument('--preprocessor', default=None, help='Try to use given preprocessor executable as opposed to autodetect.')
parser.add_argument('--rand', default='bsd', choices=('bsd', 'gnu', 'auto'), help='rand() implementation to use.\n\tbsd: FreeBSD libc\n\tgnu: GNU glibc\n\tauto: Autodetect based on compiling platform.\n(default: %(default)s)')
parser.add_argument('--rpath', default=[], action='append', help='Extra rpath locations for linking.')
parser.add_argument('--symtab-mode', default='auto', choices=('auto', 'safe', 'unsafe'), help='Method for scouring DT_SYMTAB:\n\tsafe:\n\t\tMake fewer assumptions about header layout.\n\tunsafe:\n\t\tAssume optimal header layout to decrease code size.\n\tauto:\n\t\tTry to autodetect based on target platform.\n(default: %(default)s)')
parser.add_argument('-s', '--search-path', default=[], action='append', help='Directory to search for the header file to generate. May be specified multiple times. If not given, searches paths of source files to compile. If not given and no source files to compile, current path will be used.')
parser.add_argument('-S', '--strip-binary', default=None, help='Try to use given strip executable as opposed to autodetect.')
parser.add_argument('-t', '--target', default='dnload.h', help='Target header file to look for.\n(default: %(default)s)')
parser.add_argument('-T', '--temporary-directory', default=None, help='Directory to store temporary files in.\n(default: autodetect)')
parser.add_argument('-u', '--unpack-header', default=compression, choices=('lzma', 'xz'), help='Unpack header to use.\n(default: %(default)s)')
parser.add_argument('--verbatim', action='store_true', help='Perform select actions in a more verbatim manner:\n\t* Print GLSL to output without C header formatting.')
parser.add_argument('-v', '--verbose', action='store_true', help='Print more info about what is being done.')
parser.add_argument('-V', '--version', action='store_true', help='Print version and exit.')
parser.add_argument('source', default=[], nargs='*', help='Source file(s) to preprocess and/or compile.')
args = parser.parse_args()
if args.help:
print(parser.format_help().strip())
return 0
if args.version:
print('%s %s' % (VERSION_REVISION, VERSION_DATE))
return 0
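# Copy the parsed command line options into local state and apply simple global settings.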
abstraction_layer = listify(args.abstraction_layer)
assembler = args.assembler
compiler = args.compiler
definition_ld = args.definition_ld
definitions += args.define
compilation_mode = args.method
compression = args.unpack_header
elfling = args.elfling
glsl_inlines = args.glsl_inlines
glsl_renames = args.glsl_renames
glsl_simplifys = args.glsl_simplifys
glsl_mode = args.glsl_mode
include_directories += args.include_directory
libraries = args.library
library_directories += args.library_directory
linker = args.linker
nice_filedump = args.nice_filedump
no_glesv2 = args.no_glesv2
objcopy = args.objcopy
output_file_list = args.output_file
preprocessor = args.preprocessor
rpath = args.rpath
strip = args.strip_binary
symbol_prefix = args.call_prefix
target = args.target
target_search_path = args.search_path
if args.verbose:
set_verbose(True)
if args.nice_exit:
definitions += ['DNLOAD_NO_DEBUGGER_TRAP']
if args.interp:
interp = args.interp
if not re.match('^\\".*\\"$', interp):
interp = '"%s"' % args.interp
replace_platform_variable('interp', interp)
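# Require at least one source file, then sort the inputs by extension into C/C++ sources, extra assembler sources and GLSL shaders.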
if not args.source:
raise RuntimeError('no source files to process')
source_files = []
source_files_additional = []
source_files_glsl = []
for ii in args.source:
if re.match('.*\\.(c|cpp)$', ii, re.I):
source_files += [ii]
elif re.match('.*\\.(asm|s)$', ii, re.I):
source_files_additional += [ii]
elif re.match('.*\\.(glsl|vert|geom|frag)$', ii, re.I):
source_files_glsl += [ii]
else:
raise RuntimeError("unknown source file: '%s'" % ii)
if not target_search_path:
for ii in source_files:
(source_path, source_file) = os.path.split(os.path.normpath(ii))
if source_path:
if source_path not in target_search_path:
target_search_path += [source_path]
if source_path not in include_directories:
include_directories += [source_path]
if not target_search_path:
target_search_path = ['.']
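# Resolve the temporary directory: prefer the user-supplied one, then an existing build directory, then a system temp path.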
if not set_temporary_directory(args.temporary_directory):
if args.temporary_directory:
print("WARNING: supplied temporary directory '%s' not usable, autodetecting" % args.temporary_directory)
regex_tmpdir = re.compile('(build|cmakefiles)', re.I)
found_tmpdir = locate(None, regex_tmpdir)
if not found_tmpdir:
<DeepExtract>
if os.path.exists('/tmp'):
found_tmpdir = '/tmp'
if os.path.exists('/var/tmp'):
found_tmpdir = '/var/tmp'
</DeepExtract>
if set_temporary_directory(found_tmpdir) and is_verbose():
print("Using temporary directory '%s/'." % found_tmpdir)
gles_reason = None
if not no_glesv2:
if os.path.exists(PATH_MALI):
extra_libraries += ['EGL']
definitions += ['DNLOAD_MALI']
gles_reason = "'%s' (Mali)" % PATH_MALI
if os.path.exists(PATH_VIDEOCORE):
definitions += ['DNLOAD_VIDEOCORE']
gles_reason = "'%s' (VideoCore)" % PATH_VIDEOCORE
if 'armv7l' == g_osarch:
replace_osarch('armv6l', 'Workaround (Raspberry Pi): ')
if gles_reason:
definitions += ['DNLOAD_GLESV2']
replace_platform_variable('gl_library', 'GLESv2')
if is_verbose():
print('Assuming OpenGL ES 2.0: %s' % gles_reason)
preprocessor_list = default_preprocessor_list
if os.name == 'nt':
preprocessor_list = ['cl.exe'] + preprocessor_list
preprocessor = Preprocessor(executable_find(preprocessor, preprocessor_list, 'preprocessor'))
preprocessor.set_definitions(definitions)
preprocessor.set_include_dirs(include_directories)
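# Stand-alone GLSL mode: only crunch the given shaders, write or print the results and exit.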
if source_files_glsl:
if source_files or source_files_additional:
raise RuntimeError('cannot combine GLSL source files %s with other source files %s' % (str(source_files_glsl), str(source_files + source_files_additional)))
if output_file_list and len(output_file_list) != len(source_files_glsl):
raise RuntimeError("specified output files '%s' must match input glsl files '%s'" % (str(output_file_list), str(source_files_glsl)))
if output_file_list:
source_files_glsl = zip(source_files_glsl, output_file_list)
<DeepExtract>
glsl_db = Glsl()
for ii in source_files_glsl:
if is_listing(ii):
if 3 == len(ii):
glsl_db.read(preprocessor, definition_ld, ii[0], ii[1], ii[2])
elif 2 == len(ii):
glsl_db.read(preprocessor, definition_ld, ii[0], ii[1])
else:
raise RuntimeError("invalid glsl file listing input: '%s'" % str(ii))
else:
glsl_db.read(preprocessor, definition_ld, ii)
glsl_db.parse()
glsl_db.crunch(glsl_mode, glsl_inlines, glsl_renames, glsl_simplifys)
glsl_db = glsl_db
</DeepExtract>
if output_file_list:
glsl_db.write()
else:
print(glsl_db.generatePrintOutput(args.verbatim))
sys.exit(0)
elif output_file_list:
if len(output_file_list) > 1:
raise RuntimeError('more than one output file specified: %s' % str(output_file_list))
output_file = output_file_list[0]
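# Apply the armhf interpreter workaround and resolve the filedrop mode when it was left on 'auto'.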
if g_osarch in ('armv6l', 'armv7l'):
current_interp = str(PlatformVar('interp'))[1:-1]
if os.path.exists('/lib/ld-linux-armhf.so.3') and (not os.path.exists(current_interp)):
replace_osarch(g_osarch + 'hf', 'Workaround (armhf ABI): ')
if args.filedrop_mode == 'auto':
if osarch_is_arm32l():
args.filedrop_mode = 'header'
elif osarch_is_64_bit() and args.m32:
args.filedrop_mode = 'cross'
else:
args.filedrop_mode = 'native'
if is_verbose():
print("Autodetected filedrop mode: '%s'" % args.filedrop_mode)
if args.m32:
if osarch_is_32_bit():
print("WARNING: ignoring 32-bit compile, osarch '%s' already 32-bit" % g_osarch)
elif osarch_is_amd64():
replace_osarch('ia32', 'Cross-compile: ')
extra_assembler_flags = ['--32']
extra_compiler_flags = ['-m32']
if osname_is_freebsd():
extra_linker_flags = ['-melf_i386_fbsd']
else:
extra_linker_flags = ['-melf_i386']
else:
raise RuntimeError("cannot attempt 32-bit compile for osarch '%s'" % g_osarch)
if args.march:
if is_verbose():
print("Using explicit march: '%s'" % args.march)
replace_platform_variable('march', args.march)
if args.linux:
if args.operating_system:
print("WARNING: overriding cross operating system choice with 'linux'")
args.operating_system = 'linux'
if args.operating_system:
new_osname = platform_map(args.operating_system.lower())
replace_osname(new_osname, 'Cross-compile:')
elif osname_is_linux():
replace_platform_variable('ei_osabi', 0)
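# Decide which dynamic linker to invoke, or whether an explicit PT_INTERP header is needed instead.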
filedrop_interp = None
interp_needed = False
if args.filedrop_mode == 'cross':
try:
filedrop_interp = str(PlatformVar('interp-cross'))
except ValueError as ee:
filedrop_interp = str(PlatformVar('interp'))
elif args.filedrop_mode == 'native':
filedrop_interp = str(PlatformVar('interp'))
else:
if args.filedrop_mode != 'header':
raise RuntimeError("unknown filedrop mode: '%s'" % args.filedrop_mode)
interp_needed = True
if filedrop_interp:
filedrop_interp = filedrop_interp.strip('"')
if args.symtab_mode == 'auto':
if osname_is_freebsd():
args.symtab_mode = 'safe'
else:
args.symtab_mode = 'unsafe'
if is_verbose():
print("Autodetected symtab mode: '%s'" % args.symtab_mode)
if args.symtab_mode == 'safe':
definitions += ['DNLOAD_SAFE_SYMTAB_HANDLING']
if args.merge_headers == 'auto':
if osname_is_freebsd() and args.filedrop_mode == 'header':
args.merge_headers = 'no'
else:
args.merge_headers = 'yes'
if is_verbose():
print("Autodetected header merge mode: '%s'" % args.merge_headers)
if args.merge_headers == 'yes':
args.merge_headers = True
else:
args.merge_headers = False
if not args.rand or args.rand == 'auto':
if osname_is_linux():
args.rand = 'gnu'
else:
args.rand = 'bsd'
if compilation_mode not in ('vanilla', 'dlfcn', 'hash', 'maximum'):
raise RuntimeError("unknown method '%s'" % compilation_mode)
elif 'hash' == compilation_mode:
definitions += ['DNLOAD_NO_FIXED_R_DEBUG_ADDRESS']
if compilation_mode in ('hash', 'maximum'):
if args.hash_function == 'auto':
args.hash_function = 'sdbm'
if is_verbose():
print("Autodetected hash function: '%s'" % args.hash_function)
if args.hash_function == 'crc32':
if osarch_is_ia32() or osarch_is_amd64():
extra_compiler_flags += ['-msse4.2']
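# Locate the header file to generate, creating an empty placeholder if it does not exist yet.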
(target_path, target_file) = os.path.split(os.path.normpath(target))
if target_path:
if is_verbose():
print("Using explicit target header file '%s'." % target)
else:
target_file = locate(target_search_path, target)
if target_file:
target = os.path.normpath(target_file)
(target_path, target_file) = os.path.split(target)
if is_verbose():
print("Found header file: '%s'" % target)
else:
raise RuntimeError("no information where to put header file '%s' - not found in path(s) %s" % (target, str(target_search_path)))
<DeepExtract>
if not os.path.exists(target):
if is_verbose():
print("Creating nonexistent file '%s'." % target)
fd = open(target, 'w')
fd.close()
elif not os.path.isfile(target):
raise RuntimeError("'%s' exists but is not a normal file" % target)
</DeepExtract>
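# Construct the compiler and linker, clear the target header, then scan the sources for GLSL includes.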
if not args.preprocess_only or 'dlfcn' == compilation_mode:
compiler_list = default_compiler_list
if os.name == 'nt':
compiler_list = ['cl.exe'] + compiler_list
compiler = Compiler(executable_find(compiler, compiler_list, 'compiler'))
compiler.set_definitions(definitions)
compiler.set_include_dirs(include_directories)
if extra_compiler_flags:
compiler.add_extra_compiler_flags(extra_compiler_flags)
library_directories = compiler.get_extra_library_directories() + library_directories
linker = Linker(executable_find(linker, default_linker_list, 'linker'))
if extra_linker_flags:
linker.addExtraFlags(extra_linker_flags)
linker.set_library_directories(library_directories)
fd = open(target, 'w')
fd.write('\n')
fd.close()
if is_verbose():
print('Analyzing source files: %s' % str(source_files))
for ii in source_files:
<DeepExtract>
(src_path, src_basename) = os.path.split(ii)
if src_path:
src_path += '/'
fd = open(ii, 'r')
lines = fd.readlines()
fd.close()
filenames = []
glslre = re.compile('#\\s*include [\\<\\"](.*\\.glsl)\\.(h|hh|hpp|hxx)[\\>\\"]\\s*((\\/\\*|\\/\\/)\\s*([^\\*\\/\\s]+))?', re.I)
for ii in lines:
match = glslre.match(ii)
if match:
(glsl_path, glsl_base_filename) = os.path.split(match.group(1))
glsl_filename = locate(src_path + glsl_path, glsl_base_filename)
if not glsl_filename:
glsl_filename = locate(glsl_path, glsl_base_filename)
if not glsl_filename:
raise RuntimeError("could not locate GLSL source '%s'" % glsl_base_filename)
glsl_output_name = glsl_filename + '.' + match.group(2)
glsl_varname = match.group(5)
if glsl_varname:
filenames += [[glsl_filename, glsl_output_name, glsl_varname]]
else:
filenames += [[glsl_filename, glsl_output_name]]
if filenames:
glsl_db = generate_glsl(filenames, preprocessor, definition_ld, glsl_mode, glsl_inlines, glsl_renames, glsl_simplifys)
glsl_db.write()
</DeepExtract>
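# Collect the call-prefixed symbols from the preprocessed sources and resolve them against known library definitions.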
symbols = set()
for ii in source_files:
source = preprocessor.preprocess(ii)
<DeepExtract>
symbolre = re.compile('[\\s:;&\\|\\<\\>\\=\\^\\+\\-\\*/\\(\\)\\?]%s([a-zA-Z0-9_]+)(?=[\\s\\(])' % symbol_prefix)
results = symbolre.findall(source)
ret = set()
for ii in results:
symbolset = set()
symbolset.add(ii)
ret = ret.union(symbolset)
source_symbols = ret
</DeepExtract>
symbols = symbols.union(source_symbols)
<DeepExtract>
ret = []
for ii in symbols:
ret += [find_symbol(ii)]
symbols = ret
</DeepExtract>
if 'dlfcn' == compilation_mode:
symbols = sorted(symbols)
elif 'maximum' == compilation_mode:
sortable_symbols = []
for ii in symbols:
sortable_symbols += [(ii.get_hash(args.hash_function), ii)]
symbols = []
for ii in sorted(sortable_symbols):
symbols += [ii[1]]
<DeepExtract>
src_found = symbols_has_library(symbols, 'SDL')
dst_found = symbols_has_library(symbols, 'SDL2')
if not (src_found and dst_found):
symbols = symbols
if is_verbose():
print("Resolving library conflict: '%s' => '%s'" % ('SDL', 'SDL2'))
ret = []
dst = find_library_definition('SDL2')
for ii in symbols:
if ii.get_library().get_name() == 'SDL':
replacement = dst.find_symbol(ii.get_name())
if replacement:
ret += [replacement]
else:
new_symbol = ii.create_replacement(dst)
dst.add_symbol(new_symbol)
ret += [new_symbol]
else:
ret += [ii]
symbols = ret
</DeepExtract>
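# Build the substitution map for the generated header: include blocks per referenced library plus an optional rand() implementation.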
subst = {}
if symbols_has_library(symbols, 'c'):
subst['INCLUDE_C'] = g_template_include_c.format()
if symbols_has_library(symbols, 'fftw3'):
subst['INCLUDE_FFTW'] = g_template_include_fftw.format()
if symbols_has_library(symbols, 'freetype'):
subst['INCLUDE_FREETYPE'] = g_template_include_freetype.format()
if symbols_has_library(symbols, 'm'):
subst['INCLUDE_MATH'] = g_template_include_math.format()
if symbols_has_library(symbols, 'ncurses'):
subst['INCLUDE_NCURSES'] = g_template_include_ncurses.format()
if symbols_has_library(symbols, ('GL', 'GLESv2')):
subst['INCLUDE_OPENGL'] = g_template_include_opengl.format({'DEFINITION_LD': definition_ld})
if symbols_has_library(symbols, 'opus'):
subst['INCLUDE_OPUS'] = g_template_include_opus.format()
if symbols_has_library(symbols, 'opusfile'):
subst['INCLUDE_OPUSFILE'] = g_template_include_opusfile.format()
if symbols_has_library(symbols, 'png'):
subst['INCLUDE_PNG'] = g_template_include_png.format()
if symbols_has_library(symbols, ('SDL', 'SDL2')):
subst['INCLUDE_SDL'] = g_template_include_sdl.format()
if symbols_has_library(symbols, 'sndfile'):
subst['INCLUDE_SNDFILE'] = g_template_include_sndfile.format()
if symbols_has_symbol(symbols, 'rand'):
<DeepExtract>
regex_rand_header = re.compile('%s[-_\\s]+rand\\.h(h|pp|xx)?' % args.rand)
regex_rand_source = re.compile('%s[-_\\s]+rand\\.c(c|pp|xx)?' % args.rand)
header_rand = locate(target_search_path, regex_rand_header)
source_rand = locate(target_search_path, regex_rand_source)
if not header_rand or not source_rand:
raise RuntimeError("could not find rand implementation for '%s'" % args.rand)
(header_rand_path, header_rand) = os.path.split(header_rand)
(source_rand_path, source_rand) = os.path.split(source_rand)
if is_verbose():
print("Using rand() implementation: '%s'" % header_rand)
replace_platform_variable('function_rand', '%s_rand' % args.rand)
replace_platform_variable('function_srand', '%s_srand' % args.rand)
rand_type_bsd = str(int(args.rand == 'bsd'))
rand_type_gnu = str(int(args.rand == 'gnu'))
subst['INCLUDE_RAND'] = g_template_include_rand.format({'DEFINITION_LD': definition_ld, 'RAND_TYPE_BSD': rand_type_bsd, 'RAND_TYPE_GNU': rand_type_gnu, 'HEADER_RAND': header_rand, 'SOURCE_RAND': source_rand})
</DeepExtract>
real_symbols = list(filter(lambda x: not x.is_verbatim(), symbols))
if is_verbose():
symbol_strings = list(map(lambda x: str(x), symbols))
print('%i symbols found: %s' % (len(symbols), str(symbol_strings)))
verbatim_symbols = list(set(symbols) - set(real_symbols))
if verbatim_symbols and output_file:
verbatim_symbol_strings = list(map(lambda x: str(x), verbatim_symbols))
print('Not loading verbatim symbols: %s' % str(verbatim_symbol_strings))
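# Generate the symbol definitions, symbol table and loader, then write the header file.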
symbol_definitions_direct = generate_symbol_definitions_direct(symbols, symbol_prefix)
subst['SYMBOL_DEFINITIONS_DIRECT'] = symbol_definitions_direct
if 'vanilla' == compilation_mode:
subst['SYMBOL_DEFINITIONS_TABLE'] = symbol_definitions_direct
else:
symbol_definitions_table = generate_symbol_definitions_table(symbols, symbol_prefix)
symbol_table = generate_symbol_table(compilation_mode, real_symbols, args.hash_function)
subst['SYMBOL_DEFINITIONS_TABLE'] = symbol_definitions_table
subst['SYMBOL_TABLE'] = symbol_table
if 'vanilla' == compilation_mode:
subst['LOADER'] = generate_loader_vanilla()
elif 'dlfcn' == compilation_mode:
subst['LOADER'] = generate_loader_dlfcn(real_symbols, linker)
else:
subst['LOADER'] = generate_loader_hash(real_symbols, args.hash_function)
if 'maximum' != compilation_mode:
subst['UND_SYMBOLS'] = g_template_und_symbols.format()
subst['DEFINITION_LD'] = definition_ld
subst['FILENAME'] = program_name
file_contents = g_template_header.format(subst)
fd = open(target, 'w')
fd.write(file_contents)
fd.close()
if is_verbose():
print("Wrote header file: '%s'" % target)
if args.preprocess_only:
sys.exit(0)
if 1 < len(source_files):
raise RuntimeError('only one source file supported when generating output file')
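# Prepare for binary generation: optional elfling packer, the assembler and SDL abstraction layer compile flags.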
if elfling:
elfling = executable_search(['elfling-packer', './elfling-packer'], 'elfling-packer')
if elfling:
elfling = Elfling(elfling)
assembler = Assembler(executable_find(assembler, default_assembler_list, 'assembler'))
if extra_assembler_flags:
assembler.addExtraFlags(extra_assembler_flags)
if not abstraction_layer:
if symbols_has_library(symbols, 'SDL'):
abstraction_layer += ['sdl1']
if symbols_has_library(symbols, 'SDL2'):
abstraction_layer += ['sdl2']
if 1 < len(abstraction_layer):
raise RuntimeError('conflicting abstraction layers detected: %s' % str(abstraction_layer))
if 'sdl2' in abstraction_layer:
(sdl_stdout, sdl_stderr) = run_command(['sdl2-config', '--cflags'])
compiler.add_extra_compiler_flags(sdl_stdout.split())
elif 'sdl1' in abstraction_layer:
(sdl_stdout, sdl_stderr) = run_command(['sdl-config', '--cflags'])
compiler.add_extra_compiler_flags(sdl_stdout.split())
if output_file:
output_file = os.path.normpath(output_file)
else:
(output_path, output_basename) = os.path.split(source_files[0])
(output_basename, source_extension) = os.path.splitext(output_basename)
output_file = os.path.normpath(os.path.join(output_path, output_basename))
if is_verbose():
print("Using output file '%s' after source file '%s'." % (output_file, source_file))
source_file = source_files[0]
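# Determine the libraries to link against and finalize compiler and linker flags.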
<DeepExtract>
library_set = set()
for ii in real_symbols:
library_set = library_set.union(set([ii.get_library().get_name()]))
if not libraries:
if 'dlfcn' == compilation_mode:
raise RuntimeError("cannot autodetect libraries for compilation mode '%s'" % compilation_mode)
if extra_libraries:
if is_verbose():
print('Adding extra libraries due to platform: %s' % str(extra_libraries))
library_set = library_set.union(extra_libraries)
libraries = list(library_set)
output_message = 'Autodetected libraries to link against: '
else:
missing_libraries = library_set.difference(set(libraries))
if missing_libraries:
print('WARNING: found symbols suggest libraries: %s' % str(list(missing_libraries)))
output_message = 'Linking against libraries: '
problematic_libraries = ['gcc', 'c', 'm', 'bcm_host']
front = []
for ii in problematic_libraries:
if ii in libraries:
libraries.remove(ii)
front += [ii]
if 'maximum' == compilation_mode:
ret = list(map(lambda x: collect_libraries_rename(x), front + sorted(libraries)))
else:
ret = front + sorted(libraries)
if is_verbose():
print('%s%s' % (output_message, str(ret)))
libraries = ret
</DeepExtract>
compiler.generate_compiler_flags()
compiler.generate_linker_flags()
compiler.set_libraries(libraries)
compiler.set_library_directories(library_directories)
compiler.set_rpath_directories(rpath)
linker.generate_linker_flags()
linker.set_libraries(libraries)
linker.set_rpath_directories(rpath)
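# 'maximum' mode: compile to assembler, hand-craft minimal ELF headers and link with a generated linker script.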
if 'maximum' == compilation_mode:
objcopy = executable_find(objcopy, default_objcopy_list, 'objcopy')
<DeepExtract>
output_file_s = generate_temporary_filename(output_file + '.S')
if source_file:
compiler.compile_asm(source_file, output_file_s, True)
segment_ehdr = AssemblerSegment(g_assembler_ehdr)
segment_dynamic = AssemblerSegment(g_assembler_dynamic)
segment_hash = AssemblerSegment(g_assembler_hash)
segment_interp = AssemblerSegment(g_assembler_interp)
segment_strtab = AssemblerSegment(g_assembler_strtab)
segment_symtab = AssemblerSegment(g_assembler_symtab)
if osarch_is_32_bit():
segment_phdr_dynamic = AssemblerSegment(g_assembler_phdr32_dynamic)
segment_phdr_interp = AssemblerSegment(g_assembler_phdr32_interp)
elif osarch_is_64_bit():
segment_phdr_dynamic = AssemblerSegment(g_assembler_phdr64_dynamic)
segment_phdr_interp = AssemblerSegment(g_assembler_phdr64_interp)
else:
raise_unknown_address_size()
und_symbols = get_platform_und_symbols()
if is_listing(und_symbols):
segment_symtab.add_symbol_empty()
for ii in und_symbols:
segment_symtab.add_symbol_und(ii)
for ii in reversed(und_symbols):
segment_strtab.add_strtab(ii)
segment_dynamic.add_dt_symtab('symtab')
segment_dynamic.add_dt_hash('hash')
segment_hash.add_hash(und_symbols)
else:
segment_dynamic.add_dt_symtab(0)
for ii in reversed(libraries):
for jj in listify(linker.get_library_name(ii)):
segment_dynamic.add_dt_needed(jj)
segment_strtab.add_strtab(jj)
output_file_final_s = generate_temporary_filename(output_file + '.final.S')
output_file_final_o = generate_temporary_filename(output_file + '.final.o')
if elfling:
asm = generate_elfling(output_file, compiler, elfling, definition_ld)
else:
asm = AssemblerFile(output_file_s)
if source_files_additional:
for ii in range(len(source_files_additional)):
fname = source_files_additional[ii]
additional_asm = AssemblerFile(fname)
asm.incorporate(additional_asm)
if asm.write(output_file_final_s, assembler):
assembler.assemble(output_file_final_s, output_file_final_o)
extra_symbols = readelf_list_und_symbols(output_file_final_o)
output_file_extra = generate_temporary_filename(output_file + '.extra')
additional_file = g_symbol_sources.compile_asm(compiler, assembler, extra_symbols, output_file_extra)
if additional_file:
additional_asm = AssemblerFile(additional_file)
asm.incorporate(additional_asm, re.sub('[\\/\\.]', '_', output_file + '_extra'))
asm.sort_sections(assembler)
asm.crunch()
phdr_count = 2
segment_phdr_load_bss = None
bss_section = asm.generate_fake_bss(assembler, und_symbols, elfling)
if 0 < bss_section.get_alignment():
if osarch_is_32_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr32_load_double)
segment_phdr_load_bss = AssemblerSegment(g_assembler_phdr32_load_bss)
elif osarch_is_64_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr64_load_double)
segment_phdr_load_bss = AssemblerSegment(g_assembler_phdr64_load_bss)
else:
raise_unknown_address_size()
phdr_count += 1
elif osarch_is_32_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr32_load_single)
elif osarch_is_64_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr64_load_single)
else:
raise_unknown_address_size()
segments_head = [segment_ehdr, segment_phdr_load]
segments_mid = []
if segment_phdr_load_bss:
segments_mid += segment_phdr_load_bss
if interp_needed:
phdr_count += 1
segments_mid += [segment_phdr_interp]
segments_tail = [segment_phdr_dynamic, segment_dynamic]
if is_listing(und_symbols):
segments_tail += [segment_symtab]
if is_listing(und_symbols):
segments_tail += [segment_hash]
if interp_needed:
segments_tail += [segment_interp]
segments_tail += [segment_strtab]
if args.merge_headers:
replace_platform_variable('phdr_count', phdr_count)
replace_platform_variable('e_shentsize', 1)
if osarch_is_64_bit():
replace_platform_variable('phdr64_dynamic_p_align', 21)
replace_platform_variable('e_shstrndx', 7)
else:
replace_platform_variable('phdr32_dynamic_p_flags', 21)
segments = merge_segments(segments_head) + segments_mid + merge_segments(segments_tail)
else:
segments = segments_head + segments_mid + segments_tail
if asm.hasSectionAlignment():
asm.getSectionAlignment().create_content(assembler)
bss_section.create_content(assembler, 'end')
fd = open(output_file_final_s, 'w')
header_sizes = 0
for ii in segments:
ii.write(fd, assembler)
header_sizes += ii.size()
if is_verbose():
print('Size of headers: %i bytes' % header_sizes)
asm.write(fd, assembler)
fd.close()
if is_verbose():
print("Wrote assembler source: '%s'" % output_file_final_s)
assembler.assemble(output_file_final_s, output_file_final_o)
link_files = [output_file_final_o]
output_file_ld = generate_temporary_filename(output_file + '.ld')
output_file_unprocessed = generate_temporary_filename(output_file + '.unprocessed')
output_file_stripped = generate_temporary_filename(output_file + '.stripped')
linker.generate_linker_script(output_file_ld, True)
linker.set_linker_script(output_file_ld)
if not osarch_is_aarch64() and (not osarch_is_arm32l()):
objcopy = None
linker.link_binary(objcopy, link_files, output_file_unprocessed)
if bss_section.get_alignment():
readelf_zero(output_file_unprocessed, output_file_stripped)
else:
readelf_truncate(output_file_unprocessed, output_file_stripped)
</DeepExtract>
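# With elfling, compress the intermediate result and regenerate the binary with the packed data included.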
if elfling:
output_file_stripped = generate_temporary_filename(output_file + '.stripped')
output_file_extracted = generate_temporary_filename(output_file + '.extracted')
elfling.compress(output_file_stripped, output_file_extracted)
<DeepExtract>
output_file_s = generate_temporary_filename(output_file + '.S')
if None:
compiler.compile_asm(None, output_file_s, True)
segment_ehdr = AssemblerSegment(g_assembler_ehdr)
segment_dynamic = AssemblerSegment(g_assembler_dynamic)
segment_hash = AssemblerSegment(g_assembler_hash)
segment_interp = AssemblerSegment(g_assembler_interp)
segment_strtab = AssemblerSegment(g_assembler_strtab)
segment_symtab = AssemblerSegment(g_assembler_symtab)
if osarch_is_32_bit():
segment_phdr_dynamic = AssemblerSegment(g_assembler_phdr32_dynamic)
segment_phdr_interp = AssemblerSegment(g_assembler_phdr32_interp)
elif osarch_is_64_bit():
segment_phdr_dynamic = AssemblerSegment(g_assembler_phdr64_dynamic)
segment_phdr_interp = AssemblerSegment(g_assembler_phdr64_interp)
else:
raise_unknown_address_size()
und_symbols = get_platform_und_symbols()
if is_listing(und_symbols):
segment_symtab.add_symbol_empty()
for ii in und_symbols:
segment_symtab.add_symbol_und(ii)
for ii in reversed(und_symbols):
segment_strtab.add_strtab(ii)
segment_dynamic.add_dt_symtab('symtab')
segment_dynamic.add_dt_hash('hash')
segment_hash.add_hash(und_symbols)
else:
segment_dynamic.add_dt_symtab(0)
for ii in reversed(libraries):
for jj in listify(linker.get_library_name(ii)):
segment_dynamic.add_dt_needed(jj)
segment_strtab.add_strtab(jj)
output_file_final_s = generate_temporary_filename(output_file + '.final.S')
output_file_final_o = generate_temporary_filename(output_file + '.final.o')
if elfling:
asm = generate_elfling(output_file, compiler, elfling, definition_ld)
else:
asm = AssemblerFile(output_file_s)
if source_files_additional:
for ii in range(len(source_files_additional)):
fname = source_files_additional[ii]
additional_asm = AssemblerFile(fname)
asm.incorporate(additional_asm)
if asm.write(output_file_final_s, assembler):
assembler.assemble(output_file_final_s, output_file_final_o)
extra_symbols = readelf_list_und_symbols(output_file_final_o)
output_file_extra = generate_temporary_filename(output_file + '.extra')
additional_file = g_symbol_sources.compile_asm(compiler, assembler, extra_symbols, output_file_extra)
if additional_file:
additional_asm = AssemblerFile(additional_file)
asm.incorporate(additional_asm, re.sub('[\\/\\.]', '_', output_file + '_extra'))
asm.sort_sections(assembler)
asm.crunch()
phdr_count = 2
segment_phdr_load_bss = None
bss_section = asm.generate_fake_bss(assembler, und_symbols, elfling)
if 0 < bss_section.get_alignment():
if osarch_is_32_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr32_load_double)
segment_phdr_load_bss = AssemblerSegment(g_assembler_phdr32_load_bss)
elif osarch_is_64_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr64_load_double)
segment_phdr_load_bss = AssemblerSegment(g_assembler_phdr64_load_bss)
else:
raise_unknown_address_size()
phdr_count += 1
elif osarch_is_32_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr32_load_single)
elif osarch_is_64_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr64_load_single)
else:
raise_unknown_address_size()
segments_head = [segment_ehdr, segment_phdr_load]
segments_mid = []
if segment_phdr_load_bss:
segments_mid += segment_phdr_load_bss
if interp_needed:
phdr_count += 1
segments_mid += [segment_phdr_interp]
segments_tail = [segment_phdr_dynamic, segment_dynamic]
if is_listing(und_symbols):
segments_tail += [segment_symtab]
if is_listing(und_symbols):
segments_tail += [segment_hash]
if interp_needed:
segments_tail += [segment_interp]
segments_tail += [segment_strtab]
if args.merge_headers:
replace_platform_variable('phdr_count', phdr_count)
replace_platform_variable('e_shentsize', 1)
if osarch_is_64_bit():
replace_platform_variable('phdr64_dynamic_p_align', 21)
replace_platform_variable('e_shstrndx', 7)
else:
replace_platform_variable('phdr32_dynamic_p_flags', 21)
segments = merge_segments(segments_head) + segments_mid + merge_segments(segments_tail)
else:
segments = segments_head + segments_mid + segments_tail
if asm.hasSectionAlignment():
asm.getSectionAlignment().create_content(assembler)
bss_section.create_content(assembler, 'end')
fd = open(output_file_final_s, 'w')
header_sizes = 0
for ii in segments:
ii.write(fd, assembler)
header_sizes += ii.size()
if is_verbose():
print('Size of headers: %i bytes' % header_sizes)
asm.write(fd, assembler)
fd.close()
if is_verbose():
print("Wrote assembler source: '%s'" % output_file_final_s)
assembler.assemble(output_file_final_s, output_file_final_o)
link_files = [output_file_final_o]
output_file_ld = generate_temporary_filename(output_file + '.ld')
output_file_unprocessed = generate_temporary_filename(output_file + '.unprocessed')
output_file_stripped = generate_temporary_filename(output_file + '.stripped')
linker.generate_linker_script(output_file_ld, True)
linker.set_linker_script(output_file_ld)
if not osarch_is_aarch64() and (not osarch_is_arm32l()):
objcopy = None
linker.link_binary(objcopy, link_files, output_file_unprocessed)
if bss_section.get_alignment():
readelf_zero(output_file_unprocessed, output_file_stripped)
else:
readelf_truncate(output_file_unprocessed, output_file_stripped)
</DeepExtract>
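# Simpler modes: 'hash' goes through an intermediate assembler pass, 'vanilla' and 'dlfcn' compile and link directly.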
elif 'hash' == compilation_mode:
output_file_s = generate_temporary_filename(output_file + '.S')
output_file_final_s = generate_temporary_filename(output_file + '.final.S')
output_file_o = generate_temporary_filename(output_file + '.o')
output_file_ld = generate_temporary_filename(output_file + '.ld')
output_file_unprocessed = generate_temporary_filename(output_file + '.unprocessed')
compiler.compile_asm(source_file, output_file_s)
asm = AssemblerFile(output_file_s)
asm.write(output_file_final_s, assembler)
assembler.assemble(output_file_final_s, output_file_o)
linker.generate_linker_script(output_file_ld)
linker.set_linker_script(output_file_ld)
linker.link(output_file_o, output_file_unprocessed)
elif 'dlfcn' == compilation_mode or 'vanilla' == compilation_mode:
output_file_unprocessed = generate_temporary_filename(output_file + '.unprocessed')
compiler.compile_and_link(source_file, output_file_unprocessed)
else:
raise RuntimeError('unknown compilation mode: %s' % str(compilation_mode))
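# Strip the binary for the simpler modes, then compress it behind a self-extracting shell script header.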
output_file_stripped = generate_temporary_filename(output_file + '.stripped')
if compilation_mode in ('vanilla', 'dlfcn', 'hash'):
strip = executable_find(strip, default_strip_list, 'strip')
shutil.copy(output_file_unprocessed, output_file_stripped)
run_command([strip, '-K', '.bss', '-K', '.text', '-K', '.data', '-R', '.comment', '-R', '.eh_frame', '-R', '.eh_frame_hdr', '-R', '.fini', '-R', '.gnu.hash', '-R', '.gnu.version', '-R', '.jcr', '-R', '.note', '-R', '.note.ABI-tag', '-R', '.note.tag', output_file_stripped])
<DeepExtract>
str_header = PlatformVar('shelldrop_header').get()
str_tail = PlatformVar('shelldrop_tail').get()
str_chmod = ''
str_ld = ''
str_cleanup = ';exit'
if filedrop_interp:
str_ld = filedrop_interp + ' '
if not filedrop_interp or osname_is_freebsd():
str_chmod = ';chmod +x $I'
if nice_filedump:
str_tail = 'tail -n+2'
str_cleanup = ';rm ~;exit'
if 'lzma' == compression:
command = ['xz', '--format=lzma', '--lzma1=preset=9,lc=1,lp=0,nice=273,pb=0', '--stdout']
str_cat = 'lzcat'
elif 'raw' == compression:
command = ['xz', '-9', '--extreme', '--format=raw', '--stdout']
str_cat = 'xzcat -F raw'
elif 'xz' == compression:
command = ['xz', '--format=xz', '--lzma2=preset=9,lc=1,nice=273,pb=0', '--stdout']
str_cat = 'xzcat'
else:
raise RuntimeError("unknown compression format '%s'" % compression)
header = '%sI=/tmp/i;%s $0|%s>$I%s;%s$I%s' % (str_header, str_tail, str_cat, str_chmod, str_ld, str_cleanup)
(compressed, se) = run_command(command + [output_file_stripped], False)
wfd = open(output_file, 'wb')
wfd.write((header + '\n').encode())
wfd.write(compressed)
wfd.close()
make_executable(output_file)
print("Wrote '%s': %i bytes" % (output_file, os.path.getsize(output_file)))
</DeepExtract>
return 0
|
def main():
"""Main function."""
global g_osarch
global g_osname
compression = str(PlatformVar('compression'))
default_assembler_list = ['/usr/local/bin/as', 'as']
default_compiler_list = ['g++9', 'g++-9', 'g++8', 'g++-8', 'g++7', 'g++-7', 'g++', 'c++']
default_linker_list = ['/usr/local/bin/ld', 'ld']
default_preprocessor_list = ['cpp', 'clang-cpp']
default_objcopy_list = ['/usr/local/bin/objcopy', 'objcopy']
default_strip_list = ['/usr/local/bin/strip', 'strip']
definitions = []
extra_assembler_flags = []
extra_compiler_flags = []
extra_libraries = []
extra_linker_flags = []
include_directories = [PATH_VIDEOCORE + '/include', PATH_VIDEOCORE + '/include/interface/vcos/pthreads', PATH_VIDEOCORE + '/include/interface/vmcs_host/linux', '/usr/include/freetype2/', '/usr/include/opus', '/usr/include/SDL', '/usr/local/include', '/usr/local/include/freetype2/', '/usr/local/include/opus', '/usr/local/include/SDL']
library_directories = ['/lib/x86_64-linux-gnu', '/lib', PATH_VIDEOCORE + '/lib', '/usr/lib/arm-linux-gnueabihf', '/usr/lib/x86_64-linux-gnu', '/usr/lib', '/usr/local/lib']
opengl_reason = None
opengl_version = None
output_file = None
program_name = os.path.basename(sys.argv[0])
sdl_version = 2
parser = argparse.ArgumentParser(usage='%s [args] <source file(s)> [-o output]' % program_name, description='Size-optimized executable generator for *nix platforms.\nPreprocesses given source file(s) looking for specifically marked function calls, then generates a dynamic loader header file that can be used within these same source files to decrease executable size.\nOptionally also perform the actual compilation of a size-optimized binary after generating the header.', formatter_class=CustomHelpFormatter, add_help=False)
parser.add_argument('--32', dest='m32', action='store_true', help='Try to target 32-bit version of the architecture if on a 64-bit system.')
parser.add_argument('-a', '--abstraction-layer', choices=('sdl1', 'sdl2'), help='Specify abstraction layer to use instead of autodetecting.')
parser.add_argument('-A', '--assembler', default=None, help='Try to use given assembler executable as opposed to autodetect.')
parser.add_argument('-B', '--objcopy', default=None, help='Try to use given objcopy executable as opposed to autodetect.')
parser.add_argument('-C', '--compiler', default=None, help='Try to use given compiler executable as opposed to autodetect.')
parser.add_argument('-d', '--definition-ld', default='USE_LD', help="Definition to use for checking whether to use 'safe' mechanism instead of dynamic loading.\n(default: %(default)s)")
parser.add_argument('-D', '--define', default=[], action='append', help='Additional preprocessor definition.')
parser.add_argument('-e', '--elfling', action='store_true', help='Use elfling packer if available.')
parser.add_argument('-E', '--preprocess-only', action='store_true', help='Preprocess only, do not generate compiled output.')
parser.add_argument('-F', '--filedrop-mode', default='auto', choices=('header', 'native', 'cross', 'auto'), help='File dropping and interpreter calling mode.\n\theader:\n\t\tAdd explicit PT_INTERP header into the binary.\n\tnative:\n\t\tCall dynamic linker for the native platform.\n\tcross:\n\t\tCall dynamic linker assuming cross-platform emulation.\n\tauto:\n\t\tTry to autodetect and create the smallest binary to be run on the current machine.\n(default: %(default)s)')
parser.add_argument('-h', '--help', action='store_true', help='Print this help string and exit.')
parser.add_argument('-H', '--hash-function', default='auto', choices=('crc32', 'sdbm', 'auto'), help='Hash function to use for hashing function names:\n\tcrc32:\n\t\tCRC32 intrinsic hash.\n\tsdbm:\n\t\tSDBM hash.\n\tauto:\n\t\tUse smallest implementation.\n(default: %(default)s)')
parser.add_argument('-I', '--include-directory', default=[], action='append', help='Add an include directory to be searched for header files.')
parser.add_argument('--interp', default=None, type=str, help='Use given interpreter as opposed to platform default.')
parser.add_argument('-k', '--linker', default=None, help='Try to use given linker executable as opposed to autodetect.')
parser.add_argument('-l', '--library', default=[], action='append', help='Add a library to be linked against.')
parser.add_argument('-L', '--library-directory', default=[], action='append', help='Add a library directory to be searched for libraries when linking.')
parser.add_argument('-m', '--method', default='maximum', choices=('vanilla', 'dlfcn', 'hash', 'maximum'), help="Method to use for decreasing output file size:\n\tvanilla:\n\t\tProduce binary normally, use no tricks except unpack header.\n\tdlfcn:\n\t\tUse dlopen/dlsym to decrease size without dependencies to any specific object format.\n\thash:\n\t\tUse knowledge of object file format to perform 'import by hash' loading, but do not break any specifications.\n\tmaximum:\n\t\tUse all available techniques to decrease output file size. Resulting file may violate object file specification.\n(default: %(default)s)")
parser.add_argument('--march', type=str, help='When compiling code, use given architecture as opposed to autodetect.')
parser.add_argument('--nice-exit', action='store_true', help='Do not use debugger trap, exit with proper system call.')
parser.add_argument('--nice-filedump', action='store_true', help='Do not use dirty tricks in compression header, also remove filedumped binary when done.')
parser.add_argument('--no-glesv2', action='store_true', help='Do not probe for OpenGL ES 2.0, always assume regular GL.')
parser.add_argument('--merge-headers', default='auto', choices=('yes', 'no', 'auto'), help='ELF header merging policy:\n\tno:\n\t\tHeaders concatenated sequentially.\n\tyes:\n\t\tTry to interleave headers to decrease file size.\n\tauto:\n\t\tUse interleaving if target platform allows.\n(default: %(default)s)')
parser.add_argument('--glsl-mode', default='full', choices=('none', 'nosquash', 'full'), help='GLSL crunching mode.\n\tnone:\n\t\tJust remove whitespace.\n\tnosquash:\n\t\tRefrain from squashing statements together, otherwise same as full.\n\tfull:\n\t\tTry to minimize file size by any means necessary.\n(default: %(default)s)')
parser.add_argument('--glsl-inlines', default=-1, type=int, help='Maximum number of inline operations to do for GLSL.\n(default: unlimited)')
parser.add_argument('--glsl-renames', default=-1, type=int, help='Maximum number of rename operations to do for GLSL.\n(default: unlimited)')
parser.add_argument('--glsl-simplifys', default=-1, type=int, help='Maximum number of simplify operations to do for GLSL.\n(default: unlimited)')
parser.add_argument('--linux', action='store_true', help="Try to target Linux if not in Linux. Equal to '-O linux'.")
parser.add_argument('-o', '--output-file', default=[], nargs='*', help='Name of output file to generate\nIf the name specified features a path, it will be used verbatim. Otherwise the binary will be created in the same path as source file(s) compiled.\nIf only processing GLSL files, this parameter can be specified multiple times, but must be specified exactly once per input GLSL file.')
parser.add_argument('-O', '--operating-system', help='Try to target given operating system insofar as cross-compilation is possible.')
parser.add_argument('-P', '--call-prefix', default='dnload_', help='Call prefix to identify desired calls.\n(default: %(default)s)')
parser.add_argument('--preprocessor', default=None, help='Try to use given preprocessor executable as opposed to autodetect.')
parser.add_argument('--rand', default='bsd', choices=('bsd', 'gnu', 'auto'), help='rand() implementation to use.\n\tbsd: FreeBSD libc\n\tgnu: GNU glibc\n\tauto: Autodetect based on compiling platform.\n(default: %(default)s)')
parser.add_argument('--rpath', default=[], action='append', help='Extra rpath locations for linking.')
parser.add_argument('--symtab-mode', default='auto', choices=('auto', 'safe', 'unsafe'), help='Method for scouring DT_SYMTAB:\n\tsafe:\n\t\tMake fewer assumptions about header layout.\n\tunsafe:\n\t\tAssume optimal header layout to decrease code size.\n\tauto:\n\t\tTry to autodetect based on target platform.\n(default: %(default)s)')
parser.add_argument('-s', '--search-path', default=[], action='append', help='Directory to search for the header file to generate. May be specified multiple times. If not given, searches paths of source files to compile. If not given and no source files to compile, current path will be used.')
parser.add_argument('-S', '--strip-binary', default=None, help='Try to use given strip executable as opposed to autodetect.')
parser.add_argument('-t', '--target', default='dnload.h', help='Target header file to look for.\n(default: %(default)s)')
parser.add_argument('-T', '--temporary-directory', default=None, help='Directory to store temporary files in.\n(default: autodetect)')
parser.add_argument('-u', '--unpack-header', default=compression, choices=('lzma', 'xz'), help='Unpack header to use.\n(default: %(default)s)')
parser.add_argument('--verbatim', action='store_true', help='Perform select actions in a more verbatim manner:\n\t* Print GLSL to output without C header formatting.')
parser.add_argument('-v', '--verbose', action='store_true', help='Print more info about what is being done.')
parser.add_argument('-V', '--version', action='store_true', help='Print version and exit.')
parser.add_argument('source', default=[], nargs='*', help='Source file(s) to preprocess and/or compile.')
args = parser.parse_args()
if args.help:
print(parser.format_help().strip())
return 0
if args.version:
print('%s %s' % (VERSION_REVISION, VERSION_DATE))
return 0
abstraction_layer = listify(args.abstraction_layer)
assembler = args.assembler
compiler = args.compiler
definition_ld = args.definition_ld
definitions += args.define
compilation_mode = args.method
compression = args.unpack_header
elfling = args.elfling
glsl_inlines = args.glsl_inlines
glsl_renames = args.glsl_renames
glsl_simplifys = args.glsl_simplifys
glsl_mode = args.glsl_mode
include_directories += args.include_directory
libraries = args.library
library_directories += args.library_directory
linker = args.linker
nice_filedump = args.nice_filedump
no_glesv2 = args.no_glesv2
objcopy = args.objcopy
output_file_list = args.output_file
preprocessor = args.preprocessor
rpath = args.rpath
strip = args.strip_binary
symbol_prefix = args.call_prefix
target = args.target
target_search_path = args.search_path
if args.verbose:
set_verbose(True)
if args.nice_exit:
definitions += ['DNLOAD_NO_DEBUGGER_TRAP']
if args.interp:
interp = args.interp
if not re.match('^\\".*\\"$', interp):
interp = '"%s"' % args.interp
replace_platform_variable('interp', interp)
if not args.source:
raise RuntimeError('no source files to process')
source_files = []
source_files_additional = []
source_files_glsl = []
for ii in args.source:
if re.match('.*\\.(c|cpp)$', ii, re.I):
source_files += [ii]
elif re.match('.*\\.(asm|s)$', ii, re.I):
source_files_additional += [ii]
elif re.match('.*\\.(glsl|vert|geom|frag)$', ii, re.I):
source_files_glsl += [ii]
else:
raise RuntimeError("unknown source file: '%s'" % ii)
if not target_search_path:
for ii in source_files:
(source_path, source_file) = os.path.split(os.path.normpath(ii))
if source_path:
if source_path not in target_search_path:
target_search_path += [source_path]
if source_path not in include_directories:
include_directories += [source_path]
if not target_search_path:
target_search_path = ['.']
if not set_temporary_directory(args.temporary_directory):
if args.temporary_directory:
print("WARNING: supplied temporary directory '%s' not usable, autodetecting" % args.temporary_directory)
regex_tmpdir = re.compile('(build|cmakefiles)', re.I)
found_tmpdir = locate(None, regex_tmpdir)
if not found_tmpdir:
if os.path.exists('/tmp'):
found_tmpdir = '/tmp'
if os.path.exists('/var/tmp'):
found_tmpdir = '/var/tmp'
if set_temporary_directory(found_tmpdir) and is_verbose():
print("Using temporary directory '%s/'." % found_tmpdir)
gles_reason = None
if not no_glesv2:
if os.path.exists(PATH_MALI):
extra_libraries += ['EGL']
definitions += ['DNLOAD_MALI']
gles_reason = "'%s' (Mali)" % PATH_MALI
if os.path.exists(PATH_VIDEOCORE):
definitions += ['DNLOAD_VIDEOCORE']
gles_reason = "'%s' (VideoCore)" % PATH_VIDEOCORE
if 'armv7l' == g_osarch:
replace_osarch('armv6l', 'Workaround (Raspberry Pi): ')
if gles_reason:
definitions += ['DNLOAD_GLESV2']
replace_platform_variable('gl_library', 'GLESv2')
if is_verbose():
print('Assuming OpenGL ES 2.0: %s' % gles_reason)
preprocessor_list = default_preprocessor_list
if os.name == 'nt':
preprocessor_list = ['cl.exe'] + preprocessor_list
preprocessor = Preprocessor(executable_find(preprocessor, preprocessor_list, 'preprocessor'))
preprocessor.set_definitions(definitions)
preprocessor.set_include_dirs(include_directories)
if source_files_glsl:
if source_files or source_files_additional:
raise RuntimeError('cannot combine GLSL source files %s with other source files %s' % (str(source_files_glsl), str(source_files + source_files_additional)))
if output_file_list and len(output_file_list) != len(source_files_glsl):
raise RuntimeError("specified output files '%s' must match input glsl files '%s'" % (str(output_file_list), str(source_files_glsl)))
if output_file_list:
source_files_glsl = zip(source_files_glsl, output_file_list)
glsl_db = Glsl()
for ii in source_files_glsl:
if is_listing(ii):
if 3 == len(ii):
glsl_db.read(preprocessor, definition_ld, ii[0], ii[1], ii[2])
elif 2 == len(ii):
glsl_db.read(preprocessor, definition_ld, ii[0], ii[1])
else:
raise RuntimeError("invalid glsl file listing input: '%s'" % str(ii))
else:
glsl_db.read(preprocessor, definition_ld, ii)
glsl_db.parse()
glsl_db.crunch(glsl_mode, glsl_inlines, glsl_renames, glsl_simplifys)
glsl_db = glsl_db
if output_file_list:
glsl_db.write()
else:
print(glsl_db.generatePrintOutput(args.verbatim))
sys.exit(0)
elif output_file_list:
if len(output_file_list) > 1:
raise RuntimeError('more than one output file specified: %s' % str(output_file_list))
output_file = output_file_list[0]
if g_osarch in ('armv6l', 'armv7l'):
current_interp = str(PlatformVar('interp'))[1:-1]
if os.path.exists('/lib/ld-linux-armhf.so.3') and (not os.path.exists(current_interp)):
replace_osarch(g_osarch + 'hf', 'Workaround (armhf ABI): ')
if args.filedrop_mode == 'auto':
if osarch_is_arm32l():
args.filedrop_mode = 'header'
elif osarch_is_64_bit() and args.m32:
args.filedrop_mode = 'cross'
else:
args.filedrop_mode = 'native'
if is_verbose():
print("Autodetected filedrop mode: '%s'" % args.filedrop_mode)
if args.m32:
if osarch_is_32_bit():
print("WARNING: ignoring 32-bit compile, osarch '%s' already 32-bit" % g_osarch)
elif osarch_is_amd64():
replace_osarch('ia32', 'Cross-compile: ')
extra_assembler_flags = ['--32']
extra_compiler_flags = ['-m32']
if osname_is_freebsd():
extra_linker_flags = ['-melf_i386_fbsd']
else:
extra_linker_flags = ['-melf_i386']
else:
raise RuntimeError("cannot attempt 32-bit compile for osarch '%s'" % g_osarch)
if args.march:
if is_verbose():
print("Using explicit march: '%s'" % args.march)
replace_platform_variable('march', args.march)
if args.linux:
if args.operating_system:
print("WARNING: overriding cross operating system choice with 'linux'")
args.operating_system = 'linux'
if args.operating_system:
new_osname = platform_map(args.operating_system.lower())
replace_osname(new_osname, 'Cross-compile:')
elif osname_is_linux():
replace_platform_variable('ei_osabi', 0)
filedrop_interp = None
interp_needed = False
if args.filedrop_mode == 'cross':
try:
filedrop_interp = str(PlatformVar('interp-cross'))
except ValueError as ee:
filedrop_interp = str(PlatformVar('interp'))
elif args.filedrop_mode == 'native':
filedrop_interp = str(PlatformVar('interp'))
else:
if args.filedrop_mode != 'header':
raise RuntimeError("unknown filedrop mode: '%s'" % args.filedrop_mode)
interp_needed = True
if filedrop_interp:
filedrop_interp = filedrop_interp.strip('"')
if args.symtab_mode == 'auto':
if osname_is_freebsd():
args.symtab_mode = 'safe'
else:
args.symtab_mode = 'unsafe'
if is_verbose():
print("Autodetected symtab mode: '%s'" % args.symtab_mode)
if args.symtab_mode == 'safe':
definitions += ['DNLOAD_SAFE_SYMTAB_HANDLING']
if args.merge_headers == 'auto':
if osname_is_freebsd() and args.filedrop_mode == 'header':
args.merge_headers = 'no'
else:
args.merge_headers = 'yes'
if is_verbose():
print("Autodetected header merge mode: '%s'" % args.merge_headers)
if args.merge_headers == 'yes':
args.merge_headers = True
else:
args.merge_headers = False
if not args.rand or args.rand == 'auto':
if osname_is_linux():
args.rand = 'gnu'
else:
args.rand = 'bsd'
if compilation_mode not in ('vanilla', 'dlfcn', 'hash', 'maximum'):
raise RuntimeError("unknown method '%s'" % compilation_mode)
elif 'hash' == compilation_mode:
definitions += ['DNLOAD_NO_FIXED_R_DEBUG_ADDRESS']
if compilation_mode in ('hash', 'maximum'):
if args.hash_function == 'auto':
args.hash_function = 'sdbm'
if is_verbose():
print("Autodetected hash function: '%s'" % args.hash_function)
if args.hash_function == 'crc32':
if osarch_is_ia32() or osarch_is_amd64():
extra_compiler_flags += ['-msse4.2']
(target_path, target_file) = os.path.split(os.path.normpath(target))
if target_path:
if is_verbose():
print("Using explicit target header file '%s'." % target)
else:
target_file = locate(target_search_path, target)
if target_file:
target = os.path.normpath(target_file)
(target_path, target_file) = os.path.split(target)
if is_verbose():
print("Found header file: '%s'" % target)
else:
raise RuntimeError("no information where to put header file '%s' - not found in path(s) %s" % (target, str(target_search_path)))
if not os.path.exists(target):
if is_verbose():
print("Creating nonexistent file '%s'." % target)
fd = open(target, 'w')
fd.close()
elif not os.path.isfile(target):
raise RuntimeError("'%s' exists but is not a normal file" % target)
if not args.preprocess_only or 'dlfcn' == compilation_mode:
compiler_list = default_compiler_list
if os.name == 'nt':
compiler_list = ['cl.exe'] + compiler_list
compiler = Compiler(executable_find(compiler, compiler_list, 'compiler'))
compiler.set_definitions(definitions)
compiler.set_include_dirs(include_directories)
if extra_compiler_flags:
compiler.add_extra_compiler_flags(extra_compiler_flags)
library_directories = compiler.get_extra_library_directories() + library_directories
linker = Linker(executable_find(linker, default_linker_list, 'linker'))
if extra_linker_flags:
linker.addExtraFlags(extra_linker_flags)
linker.set_library_directories(library_directories)
fd = open(target, 'w')
fd.write('\n')
fd.close()
if is_verbose():
print('Analyzing source files: %s' % str(source_files))
for ii in source_files:
(src_path, src_basename) = os.path.split(ii)
if src_path:
src_path += '/'
fd = open(ii, 'r')
lines = fd.readlines()
fd.close()
filenames = []
glslre = re.compile('#\\s*include [\\<\\"](.*\\.glsl)\\.(h|hh|hpp|hxx)[\\>\\"]\\s*((\\/\\*|\\/\\/)\\s*([^\\*\\/\\s]+))?', re.I)
for ii in lines:
match = glslre.match(ii)
if match:
(glsl_path, glsl_base_filename) = os.path.split(match.group(1))
glsl_filename = locate(src_path + glsl_path, glsl_base_filename)
if not glsl_filename:
glsl_filename = locate(glsl_path, glsl_base_filename)
if not glsl_filename:
raise RuntimeError("could not locate GLSL source '%s'" % glsl_base_filename)
glsl_output_name = glsl_filename + '.' + match.group(2)
glsl_varname = match.group(5)
if glsl_varname:
filenames += [[glsl_filename, glsl_output_name, glsl_varname]]
else:
filenames += [[glsl_filename, glsl_output_name]]
if filenames:
glsl_db = generate_glsl(filenames, preprocessor, definition_ld, glsl_mode, glsl_inlines, glsl_renames, glsl_simplifys)
glsl_db.write()
symbols = set()
for ii in source_files:
source = preprocessor.preprocess(ii)
symbolre = re.compile('[\\s:;&\\|\\<\\>\\=\\^\\+\\-\\*/\\(\\)\\?]%s([a-zA-Z0-9_]+)(?=[\\s\\(])' % symbol_prefix)
results = symbolre.findall(source)
ret = set()
for ii in results:
symbolset = set()
symbolset.add(ii)
ret = ret.union(symbolset)
source_symbols = ret
symbols = symbols.union(source_symbols)
ret = []
for ii in symbols:
ret += [find_symbol(ii)]
symbols = ret
if 'dlfcn' == compilation_mode:
symbols = sorted(symbols)
elif 'maximum' == compilation_mode:
sortable_symbols = []
for ii in symbols:
sortable_symbols += [(ii.get_hash(args.hash_function), ii)]
symbols = []
for ii in sorted(sortable_symbols):
symbols += [ii[1]]
src_found = symbols_has_library(symbols, 'SDL')
dst_found = symbols_has_library(symbols, 'SDL2')
if not (src_found and dst_found):
symbols = symbols
if is_verbose():
print("Resolving library conflict: '%s' => '%s'" % ('SDL', 'SDL2'))
ret = []
dst = find_library_definition('SDL2')
for ii in symbols:
if ii.get_library().get_name() == 'SDL':
replacement = dst.find_symbol(ii.get_name())
if replacement:
ret += [replacement]
else:
new_symbol = ii.create_replacement(dst)
dst.add_symbol(new_symbol)
ret += [new_symbol]
else:
ret += [ii]
symbols = ret
subst = {}
if symbols_has_library(symbols, 'c'):
subst['INCLUDE_C'] = g_template_include_c.format()
if symbols_has_library(symbols, 'fftw3'):
subst['INCLUDE_FFTW'] = g_template_include_fftw.format()
if symbols_has_library(symbols, 'freetype'):
subst['INCLUDE_FREETYPE'] = g_template_include_freetype.format()
if symbols_has_library(symbols, 'm'):
subst['INCLUDE_MATH'] = g_template_include_math.format()
if symbols_has_library(symbols, 'ncurses'):
subst['INCLUDE_NCURSES'] = g_template_include_ncurses.format()
if symbols_has_library(symbols, ('GL', 'GLESv2')):
subst['INCLUDE_OPENGL'] = g_template_include_opengl.format({'DEFINITION_LD': definition_ld})
if symbols_has_library(symbols, 'opus'):
subst['INCLUDE_OPUS'] = g_template_include_opus.format()
if symbols_has_library(symbols, 'opusfile'):
subst['INCLUDE_OPUSFILE'] = g_template_include_opusfile.format()
if symbols_has_library(symbols, 'png'):
subst['INCLUDE_PNG'] = g_template_include_png.format()
if symbols_has_library(symbols, ('SDL', 'SDL2')):
subst['INCLUDE_SDL'] = g_template_include_sdl.format()
if symbols_has_library(symbols, 'sndfile'):
subst['INCLUDE_SNDFILE'] = g_template_include_sndfile.format()
if symbols_has_symbol(symbols, 'rand'):
regex_rand_header = re.compile('%s[-_\\s]+rand\\.h(h|pp|xx)?' % args.rand)
regex_rand_source = re.compile('%s[-_\\s]+rand\\.c(c|pp|xx)?' % args.rand)
header_rand = locate(target_search_path, regex_rand_header)
source_rand = locate(target_search_path, regex_rand_source)
if not header_rand or not source_rand:
raise RuntimeError("could not find rand implementation for '%s'" % args.rand)
(header_rand_path, header_rand) = os.path.split(header_rand)
(source_rand_path, source_rand) = os.path.split(source_rand)
            if is_verbose():
print("Using rand() implementation: '%s'" % header_rand)
replace_platform_variable('function_rand', '%s_rand' % args.rand)
replace_platform_variable('function_srand', '%s_srand' % args.rand)
rand_type_bsd = str(int(args.rand == 'bsd'))
rand_type_gnu = str(int(args.rand == 'gnu'))
subst['INCLUDE_RAND'] = g_template_include_rand.format({'DEFINITION_LD': definition_ld, 'RAND_TYPE_BSD': rand_type_bsd, 'RAND_TYPE_GNU': rand_type_gnu, 'HEADER_RAND': header_rand, 'SOURCE_RAND': source_rand})
real_symbols = list(filter(lambda x: not x.is_verbatim(), symbols))
if is_verbose():
symbol_strings = list(map(lambda x: str(x), symbols))
print('%i symbols found: %s' % (len(symbols), str(symbol_strings)))
verbatim_symbols = list(set(symbols) - set(real_symbols))
if verbatim_symbols and output_file:
verbatim_symbol_strings = list(map(lambda x: str(x), verbatim_symbols))
print('Not loading verbatim symbols: %s' % str(verbatim_symbol_strings))
symbol_definitions_direct = generate_symbol_definitions_direct(symbols, symbol_prefix)
subst['SYMBOL_DEFINITIONS_DIRECT'] = symbol_definitions_direct
if 'vanilla' == compilation_mode:
subst['SYMBOL_DEFINITIONS_TABLE'] = symbol_definitions_direct
else:
symbol_definitions_table = generate_symbol_definitions_table(symbols, symbol_prefix)
symbol_table = generate_symbol_table(compilation_mode, real_symbols, args.hash_function)
subst['SYMBOL_DEFINITIONS_TABLE'] = symbol_definitions_table
subst['SYMBOL_TABLE'] = symbol_table
if 'vanilla' == compilation_mode:
subst['LOADER'] = generate_loader_vanilla()
elif 'dlfcn' == compilation_mode:
subst['LOADER'] = generate_loader_dlfcn(real_symbols, linker)
else:
subst['LOADER'] = generate_loader_hash(real_symbols, args.hash_function)
if 'maximum' != compilation_mode:
subst['UND_SYMBOLS'] = g_template_und_symbols.format()
subst['DEFINITION_LD'] = definition_ld
subst['FILENAME'] = program_name
file_contents = g_template_header.format(subst)
fd = open(target, 'w')
fd.write(file_contents)
fd.close()
if is_verbose():
print("Wrote header file: '%s'" % target)
if args.preprocess_only:
sys.exit(0)
if 1 < len(source_files):
raise RuntimeError('only one source file supported when generating output file')
if elfling:
elfling = executable_search(['elfling-packer', './elfling-packer'], 'elfling-packer')
if elfling:
elfling = Elfling(elfling)
assembler = Assembler(executable_find(assembler, default_assembler_list, 'assembler'))
if extra_assembler_flags:
assembler.addExtraFlags(extra_assembler_flags)
if not abstraction_layer:
if symbols_has_library(symbols, 'SDL'):
abstraction_layer += ['sdl1']
if symbols_has_library(symbols, 'SDL2'):
abstraction_layer += ['sdl2']
if 1 < len(abstraction_layer):
raise RuntimeError('conflicting abstraction layers detected: %s' % str(abstraction_layer))
if 'sdl2' in abstraction_layer:
(sdl_stdout, sdl_stderr) = run_command(['sdl2-config', '--cflags'])
compiler.add_extra_compiler_flags(sdl_stdout.split())
elif 'sdl1' in abstraction_layer:
(sdl_stdout, sdl_stderr) = run_command(['sdl-config', '--cflags'])
compiler.add_extra_compiler_flags(sdl_stdout.split())
if output_file:
output_file = os.path.normpath(output_file)
else:
(output_path, output_basename) = os.path.split(source_files[0])
(output_basename, source_extension) = os.path.splitext(output_basename)
output_file = os.path.normpath(os.path.join(output_path, output_basename))
if is_verbose():
print("Using output file '%s' after source file '%s'." % (output_file, source_file))
source_file = source_files[0]
library_set = set()
for ii in real_symbols:
library_set = library_set.union(set([ii.get_library().get_name()]))
if not libraries:
if 'dlfcn' == compilation_mode:
raise RuntimeError("cannot autodetect libraries for compilation mode '%s'" % compilation_mode)
if extra_libraries:
if is_verbose():
print('Adding extra libraries due to platform: %s' % str(extra_libraries))
library_set = library_set.union(extra_libraries)
libraries = list(library_set)
output_message = 'Autodetected libraries to link against: '
else:
missing_libraries = library_set.difference(set(libraries))
if missing_libraries:
print('WARNING: found symbols suggest libraries: %s' % str(list(missing_libraries)))
output_message = 'Linking against libraries: '
problematic_libraries = ['gcc', 'c', 'm', 'bcm_host']
front = []
for ii in problematic_libraries:
if ii in libraries:
libraries.remove(ii)
front += [ii]
if 'maximum' == compilation_mode:
ret = list(map(lambda x: collect_libraries_rename(x), front + sorted(libraries)))
else:
ret = front + sorted(libraries)
if is_verbose():
print('%s%s' % (output_message, str(ret)))
libraries = ret
compiler.generate_compiler_flags()
compiler.generate_linker_flags()
compiler.set_libraries(libraries)
compiler.set_library_directories(library_directories)
compiler.set_rpath_directories(rpath)
linker.generate_linker_flags()
linker.set_libraries(libraries)
linker.set_rpath_directories(rpath)
if 'maximum' == compilation_mode:
objcopy = executable_find(objcopy, default_objcopy_list, 'objcopy')
output_file_s = generate_temporary_filename(output_file + '.S')
if source_file:
compiler.compile_asm(source_file, output_file_s, True)
segment_ehdr = AssemblerSegment(g_assembler_ehdr)
segment_dynamic = AssemblerSegment(g_assembler_dynamic)
segment_hash = AssemblerSegment(g_assembler_hash)
segment_interp = AssemblerSegment(g_assembler_interp)
segment_strtab = AssemblerSegment(g_assembler_strtab)
segment_symtab = AssemblerSegment(g_assembler_symtab)
if osarch_is_32_bit():
segment_phdr_dynamic = AssemblerSegment(g_assembler_phdr32_dynamic)
segment_phdr_interp = AssemblerSegment(g_assembler_phdr32_interp)
elif osarch_is_64_bit():
segment_phdr_dynamic = AssemblerSegment(g_assembler_phdr64_dynamic)
segment_phdr_interp = AssemblerSegment(g_assembler_phdr64_interp)
else:
raise_unknown_address_size()
und_symbols = get_platform_und_symbols()
if is_listing(und_symbols):
segment_symtab.add_symbol_empty()
for ii in und_symbols:
segment_symtab.add_symbol_und(ii)
for ii in reversed(und_symbols):
segment_strtab.add_strtab(ii)
segment_dynamic.add_dt_symtab('symtab')
segment_dynamic.add_dt_hash('hash')
segment_hash.add_hash(und_symbols)
else:
segment_dynamic.add_dt_symtab(0)
for ii in reversed(libraries):
for jj in listify(linker.get_library_name(ii)):
segment_dynamic.add_dt_needed(jj)
segment_strtab.add_strtab(jj)
output_file_final_s = generate_temporary_filename(output_file + '.final.S')
output_file_final_o = generate_temporary_filename(output_file + '.final.o')
if elfling:
asm = generate_elfling(output_file, compiler, elfling, definition_ld)
else:
asm = AssemblerFile(output_file_s)
if source_files_additional:
for ii in range(len(source_files_additional)):
fname = source_files_additional[ii]
additional_asm = AssemblerFile(fname)
asm.incorporate(additional_asm)
if asm.write(output_file_final_s, assembler):
assembler.assemble(output_file_final_s, output_file_final_o)
extra_symbols = readelf_list_und_symbols(output_file_final_o)
output_file_extra = generate_temporary_filename(output_file + '.extra')
additional_file = g_symbol_sources.compile_asm(compiler, assembler, extra_symbols, output_file_extra)
if additional_file:
additional_asm = AssemblerFile(additional_file)
asm.incorporate(additional_asm, re.sub('[\\/\\.]', '_', output_file + '_extra'))
asm.sort_sections(assembler)
asm.crunch()
phdr_count = 2
segment_phdr_load_bss = None
bss_section = asm.generate_fake_bss(assembler, und_symbols, elfling)
if 0 < bss_section.get_alignment():
if osarch_is_32_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr32_load_double)
segment_phdr_load_bss = AssemblerSegment(g_assembler_phdr32_load_bss)
elif osarch_is_64_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr64_load_double)
segment_phdr_load_bss = AssemblerSegment(g_assembler_phdr64_load_bss)
else:
raise_unknown_address_size()
phdr_count += 1
elif osarch_is_32_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr32_load_single)
elif osarch_is_64_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr64_load_single)
else:
raise_unknown_address_size()
segments_head = [segment_ehdr, segment_phdr_load]
segments_mid = []
if segment_phdr_load_bss:
segments_mid += segment_phdr_load_bss
if interp_needed:
phdr_count += 1
segments_mid += [segment_phdr_interp]
segments_tail = [segment_phdr_dynamic, segment_dynamic]
if is_listing(und_symbols):
segments_tail += [segment_symtab]
if is_listing(und_symbols):
segments_tail += [segment_hash]
if interp_needed:
segments_tail += [segment_interp]
segments_tail += [segment_strtab]
if args.merge_headers:
replace_platform_variable('phdr_count', phdr_count)
replace_platform_variable('e_shentsize', 1)
if osarch_is_64_bit():
replace_platform_variable('phdr64_dynamic_p_align', 21)
replace_platform_variable('e_shstrndx', 7)
else:
replace_platform_variable('phdr32_dynamic_p_flags', 21)
segments = merge_segments(segments_head) + segments_mid + merge_segments(segments_tail)
else:
segments = segments_head + segments_mid + segments_tail
if asm.hasSectionAlignment():
asm.getSectionAlignment().create_content(assembler)
bss_section.create_content(assembler, 'end')
fd = open(output_file_final_s, 'w')
header_sizes = 0
for ii in segments:
ii.write(fd, assembler)
header_sizes += ii.size()
if is_verbose():
print('Size of headers: %i bytes' % header_sizes)
asm.write(fd, assembler)
fd.close()
if is_verbose():
print("Wrote assembler source: '%s'" % output_file_final_s)
assembler.assemble(output_file_final_s, output_file_final_o)
link_files = [output_file_final_o]
output_file_ld = generate_temporary_filename(output_file + '.ld')
output_file_unprocessed = generate_temporary_filename(output_file + '.unprocessed')
output_file_stripped = generate_temporary_filename(output_file + '.stripped')
linker.generate_linker_script(output_file_ld, True)
linker.set_linker_script(output_file_ld)
if not osarch_is_aarch64() and (not osarch_is_arm32l()):
objcopy = None
linker.link_binary(objcopy, link_files, output_file_unprocessed)
if bss_section.get_alignment():
readelf_zero(output_file_unprocessed, output_file_stripped)
else:
readelf_truncate(output_file_unprocessed, output_file_stripped)
if elfling:
output_file_stripped = generate_temporary_filename(output_file + '.stripped')
output_file_extracted = generate_temporary_filename(output_file + '.extracted')
elfling.compress(output_file_stripped, output_file_extracted)
output_file_s = generate_temporary_filename(output_file + '.S')
if None:
compiler.compile_asm(None, output_file_s, True)
segment_ehdr = AssemblerSegment(g_assembler_ehdr)
segment_dynamic = AssemblerSegment(g_assembler_dynamic)
segment_hash = AssemblerSegment(g_assembler_hash)
segment_interp = AssemblerSegment(g_assembler_interp)
segment_strtab = AssemblerSegment(g_assembler_strtab)
segment_symtab = AssemblerSegment(g_assembler_symtab)
if osarch_is_32_bit():
segment_phdr_dynamic = AssemblerSegment(g_assembler_phdr32_dynamic)
segment_phdr_interp = AssemblerSegment(g_assembler_phdr32_interp)
elif osarch_is_64_bit():
segment_phdr_dynamic = AssemblerSegment(g_assembler_phdr64_dynamic)
segment_phdr_interp = AssemblerSegment(g_assembler_phdr64_interp)
else:
raise_unknown_address_size()
und_symbols = get_platform_und_symbols()
if is_listing(und_symbols):
segment_symtab.add_symbol_empty()
for ii in und_symbols:
segment_symtab.add_symbol_und(ii)
for ii in reversed(und_symbols):
segment_strtab.add_strtab(ii)
segment_dynamic.add_dt_symtab('symtab')
segment_dynamic.add_dt_hash('hash')
segment_hash.add_hash(und_symbols)
else:
segment_dynamic.add_dt_symtab(0)
for ii in reversed(libraries):
for jj in listify(linker.get_library_name(ii)):
segment_dynamic.add_dt_needed(jj)
segment_strtab.add_strtab(jj)
output_file_final_s = generate_temporary_filename(output_file + '.final.S')
output_file_final_o = generate_temporary_filename(output_file + '.final.o')
if elfling:
asm = generate_elfling(output_file, compiler, elfling, definition_ld)
else:
asm = AssemblerFile(output_file_s)
if source_files_additional:
for ii in range(len(source_files_additional)):
fname = source_files_additional[ii]
additional_asm = AssemblerFile(fname)
asm.incorporate(additional_asm)
if asm.write(output_file_final_s, assembler):
assembler.assemble(output_file_final_s, output_file_final_o)
extra_symbols = readelf_list_und_symbols(output_file_final_o)
output_file_extra = generate_temporary_filename(output_file + '.extra')
additional_file = g_symbol_sources.compile_asm(compiler, assembler, extra_symbols, output_file_extra)
if additional_file:
additional_asm = AssemblerFile(additional_file)
asm.incorporate(additional_asm, re.sub('[\\/\\.]', '_', output_file + '_extra'))
asm.sort_sections(assembler)
asm.crunch()
phdr_count = 2
segment_phdr_load_bss = None
bss_section = asm.generate_fake_bss(assembler, und_symbols, elfling)
if 0 < bss_section.get_alignment():
if osarch_is_32_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr32_load_double)
segment_phdr_load_bss = AssemblerSegment(g_assembler_phdr32_load_bss)
elif osarch_is_64_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr64_load_double)
segment_phdr_load_bss = AssemblerSegment(g_assembler_phdr64_load_bss)
else:
raise_unknown_address_size()
phdr_count += 1
elif osarch_is_32_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr32_load_single)
elif osarch_is_64_bit():
segment_phdr_load = AssemblerSegment(g_assembler_phdr64_load_single)
else:
raise_unknown_address_size()
segments_head = [segment_ehdr, segment_phdr_load]
segments_mid = []
if segment_phdr_load_bss:
segments_mid += segment_phdr_load_bss
if interp_needed:
phdr_count += 1
segments_mid += [segment_phdr_interp]
segments_tail = [segment_phdr_dynamic, segment_dynamic]
if is_listing(und_symbols):
segments_tail += [segment_symtab]
if is_listing(und_symbols):
segments_tail += [segment_hash]
if interp_needed:
segments_tail += [segment_interp]
segments_tail += [segment_strtab]
if args.merge_headers:
replace_platform_variable('phdr_count', phdr_count)
replace_platform_variable('e_shentsize', 1)
if osarch_is_64_bit():
replace_platform_variable('phdr64_dynamic_p_align', 21)
replace_platform_variable('e_shstrndx', 7)
else:
replace_platform_variable('phdr32_dynamic_p_flags', 21)
segments = merge_segments(segments_head) + segments_mid + merge_segments(segments_tail)
else:
segments = segments_head + segments_mid + segments_tail
if asm.hasSectionAlignment():
asm.getSectionAlignment().create_content(assembler)
bss_section.create_content(assembler, 'end')
fd = open(output_file_final_s, 'w')
header_sizes = 0
for ii in segments:
ii.write(fd, assembler)
header_sizes += ii.size()
if is_verbose():
print('Size of headers: %i bytes' % header_sizes)
asm.write(fd, assembler)
fd.close()
if is_verbose():
print("Wrote assembler source: '%s'" % output_file_final_s)
assembler.assemble(output_file_final_s, output_file_final_o)
link_files = [output_file_final_o]
output_file_ld = generate_temporary_filename(output_file + '.ld')
output_file_unprocessed = generate_temporary_filename(output_file + '.unprocessed')
output_file_stripped = generate_temporary_filename(output_file + '.stripped')
linker.generate_linker_script(output_file_ld, True)
linker.set_linker_script(output_file_ld)
if not osarch_is_aarch64() and (not osarch_is_arm32l()):
objcopy = None
linker.link_binary(objcopy, link_files, output_file_unprocessed)
if bss_section.get_alignment():
readelf_zero(output_file_unprocessed, output_file_stripped)
else:
readelf_truncate(output_file_unprocessed, output_file_stripped)
elif 'hash' == compilation_mode:
output_file_s = generate_temporary_filename(output_file + '.S')
output_file_final_s = generate_temporary_filename(output_file + '.final.S')
output_file_o = generate_temporary_filename(output_file + '.o')
output_file_ld = generate_temporary_filename(output_file + '.ld')
output_file_unprocessed = generate_temporary_filename(output_file + '.unprocessed')
compiler.compile_asm(source_file, output_file_s)
asm = AssemblerFile(output_file_s)
asm.write(output_file_final_s, assembler)
assembler.assemble(output_file_final_s, output_file_o)
linker.generate_linker_script(output_file_ld)
linker.set_linker_script(output_file_ld)
linker.link(output_file_o, output_file_unprocessed)
elif 'dlfcn' == compilation_mode or 'vanilla' == compilation_mode:
output_file_unprocessed = generate_temporary_filename(output_file + '.unprocessed')
compiler.compile_and_link(source_file, output_file_unprocessed)
else:
raise RuntimeError('unknown compilation mode: %s' % str(compilation_mode))
output_file_stripped = generate_temporary_filename(output_file + '.stripped')
if compilation_mode in ('vanilla', 'dlfcn', 'hash'):
strip = executable_find(strip, default_strip_list, 'strip')
shutil.copy(output_file_unprocessed, output_file_stripped)
run_command([strip, '-K', '.bss', '-K', '.text', '-K', '.data', '-R', '.comment', '-R', '.eh_frame', '-R', '.eh_frame_hdr', '-R', '.fini', '-R', '.gnu.hash', '-R', '.gnu.version', '-R', '.jcr', '-R', '.note', '-R', '.note.ABI-tag', '-R', '.note.tag', output_file_stripped])
str_header = PlatformVar('shelldrop_header').get()
str_tail = PlatformVar('shelldrop_tail').get()
str_chmod = ''
str_ld = ''
str_cleanup = ';exit'
if filedrop_interp:
str_ld = filedrop_interp + ' '
if not filedrop_interp or osname_is_freebsd():
str_chmod = ';chmod +x $I'
if nice_filedump:
str_tail = 'tail -n+2'
str_cleanup = ';rm ~;exit'
if 'lzma' == compression:
command = ['xz', '--format=lzma', '--lzma1=preset=9,lc=1,lp=0,nice=273,pb=0', '--stdout']
str_cat = 'lzcat'
elif 'raw' == compression:
command = ['xz', '-9', '--extreme', '--format=raw', '--stdout']
str_cat = 'xzcat -F raw'
elif 'xz' == compression:
command = ['xz', '--format=xz', '--lzma2=preset=9,lc=1,nice=273,pb=0', '--stdout']
str_cat = 'xzcat'
else:
raise RuntimeError("unknown compression format '%s'" % compression)
header = '%sI=/tmp/i;%s $0|%s>$I%s;%s$I%s' % (str_header, str_tail, str_cat, str_chmod, str_ld, str_cleanup)
(compressed, se) = run_command(command + [output_file_stripped], False)
wfd = open(output_file, 'wb')
wfd.write((header + '\n').encode())
wfd.write(compressed)
wfd.close()
make_executable(output_file)
print("Wrote '%s': %i bytes" % (output_file, os.path.getsize(output_file)))
return 0
|
dnload
|
positive
|
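The tail of the dnload row above packs the stripped binary into a self-extracting "shelldrop": the ELF is compressed with xz and a short shell header is prepended so the resulting file unpacks and executes itself. A minimal standalone sketch of that packing step, assuming xz and lzcat are available on PATH; the header text, paths, and function name here are illustrative, not dnload's exact strings.

# Sketch only: compress a stripped binary and prepend a shell header that
# unpacks it to /tmp and runs it, mirroring the pattern in the row above.
import os
import stat
import subprocess

def pack_shelldrop(stripped_path, output_path):
    # Raw LZMA stream to stdout, as the 'lzma' compression branch above does.
    compressed = subprocess.check_output(
        ['xz', '--format=lzma', '--lzma1=preset=9,lc=1,lp=0,nice=273,pb=0',
         '--stdout', stripped_path])
    # Two header lines, so the payload starts on line 3 of the output file.
    header = '#!/bin/sh\nI=/tmp/i;tail -n+3 $0|lzcat>$I;chmod +x $I;$I;rm $I;exit\n'
    with open(output_path, 'wb') as fd:
        fd.write(header.encode())
        fd.write(compressed)
    # Make the dropped file executable.
    mode = os.stat(output_path).st_mode
    os.chmod(output_path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    return os.path.getsize(output_path)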
def requires(self):
<DeepExtract>
if not os.path.exists(pipeline_args.hal):
raise InputMissingException('HAL file not found at {}.'.format(pipeline_args.hal))
for d in [pipeline_args.out_dir, pipeline_args.work_dir]:
if not os.path.exists(d):
if not tools.fileOps.dir_is_writeable(os.path.dirname(d)):
raise UserException('Cannot create directory {}.'.format(d))
elif not tools.fileOps.dir_is_writeable(d):
raise UserException('Directory {} is not writeable.'.format(d))
if not os.path.exists(pipeline_args.annotation):
raise InputMissingException('Annotation file {} not found.'.format(pipeline_args.annotation))
if pipeline_args.ref_genome not in pipeline_args.hal_genomes:
raise InvalidInputException('Reference genome {} not present in HAL.'.format(pipeline_args.ref_genome))
missing_genomes = {g for g in pipeline_args.target_genomes if g not in pipeline_args.hal_genomes}
if len(missing_genomes) > 0:
missing_genomes = ','.join(missing_genomes)
raise InvalidInputException('Target genomes {} not present in HAL.'.format(missing_genomes))
if pipeline_args.ref_genome in pipeline_args.target_genomes:
raise InvalidInputException('A target genome cannot be the reference genome.')
</DeepExtract>
<DeepExtract>
args = tools.misc.PipelineNamespace()
args.set('binary_mode', self.binary_mode, False)
args.set('hal', os.path.abspath(self.hal), True)
args.set('ref_genome', self.ref_genome, True)
args.set('out_dir', os.path.abspath(self.out_dir), True)
args.set('work_dir', os.path.abspath(self.work_dir), True)
args.set('augustus', self.augustus, True)
args.set('augustus_cgp', self.augustus_cgp, True)
args.set('augustus_pb', self.augustus_pb, True)
args.set('augustus_species', self.augustus_species, True)
args.set('tm_cfg', os.path.abspath(self.tm_cfg), True)
args.set('tmr_cfg', os.path.abspath(self.tmr_cfg), True)
args.set('augustus_cgp', self.augustus_cgp, True)
args.set('maf_chunksize', self.maf_chunksize, True)
args.set('maf_overlap', self.maf_overlap, True)
args.set('pb_genome_chunksize', self.pb_genome_chunksize, True)
args.set('pb_genome_overlap', self.pb_genome_overlap, True)
args.set('pb_cfg', os.path.abspath(self.pb_cfg), True)
args.set('augustus_cgp_cfg_template', os.path.abspath(self.augustus_cgp_cfg_template), True)
args.set('augustus_utr_off', self.augustus_utr_off, True)
if self.cgp_param is not None:
args.set('cgp_param', os.path.abspath(self.cgp_param), True)
else:
args.set('cgp_param', None, True)
args.set('cgp_train_num_exons', self.cgp_train_num_exons, True)
args.set('hgm_cpu', self.hgm_cpu, False)
args.set('global_near_best', self.global_near_best, True)
args.set('filter_overlapping_genes', self.filter_overlapping_genes, True)
args.set('overlapping_ignore_bases', self.overlapping_ignore_bases, True)
args.set('intron_rnaseq_support', self.intron_rnaseq_support, False)
args.set('exon_rnaseq_support', self.exon_rnaseq_support, False)
args.set('intron_annot_support', self.intron_annot_support, False)
args.set('exon_annot_support', self.exon_annot_support, False)
args.set('original_intron_support', self.original_intron_support, False)
args.set('denovo_num_introns', self.denovo_num_introns, False)
args.set('denovo_splice_support', self.denovo_splice_support, False)
args.set('denovo_exon_support', self.denovo_exon_support, False)
args.set('denovo_ignore_novel_genes', self.denovo_ignore_novel_genes, False)
args.set('denovo_only_novel_genes', self.denovo_only_novel_genes, False)
args.set('denovo_allow_novel_ends', self.denovo_allow_novel_ends, False)
args.set('denovo_novel_end_distance', self.denovo_novel_end_distance, False)
args.set('denovo_allow_unsupported', self.denovo_allow_unsupported, False)
args.set('denovo_allow_bad_annot_or_tm', self.denovo_allow_bad_annot_or_tm, False)
args.set('require_pacbio_support', self.require_pacbio_support, False)
args.set('in_species_rna_support_only', self.in_species_rna_support_only, False)
args.set('rebuild_consensus', self.rebuild_consensus, False)
args.set('stats_db', os.path.join(args.out_dir, 'databases', 'timing_stats.db'), False)
args.set('assembly_hub', self.assembly_hub, False)
args.set('hub_email', self.hub_email, False)
args.set('annotate_ancestors', self.annotate_ancestors, True)
if not tools.misc.is_exec('halStats'):
raise ToolMissingException('halStats from the HAL tools package not in global path')
args.set('hal_genomes', tools.hal.extract_genomes(args.hal, self.annotate_ancestors), True)
target_genomes = tools.hal.extract_genomes(args.hal, self.annotate_ancestors, self.target_genomes)
target_genomes = tuple((x for x in target_genomes if x != self.ref_genome))
args.set('target_genomes', target_genomes, True)
args.set('cfg', self.parse_cfg(), True)
args.set('dbs', PipelineTask.get_databases(args), True)
args.set('annotation', args.cfg['ANNOTATION'][args.ref_genome], True)
args.set('hints_db', os.path.join(args.work_dir, 'hints_database', 'hints.db'), True)
args.set('rnaseq_genomes', frozenset(set(args.cfg['INTRONBAM'].keys()) | set(args.cfg['BAM'].keys())), True)
args.set('intron_only_genomes', frozenset(set(args.cfg['INTRONBAM'].keys()) - set(args.cfg['BAM'].keys())), True)
args.set('isoseq_genomes', frozenset(list(args.cfg['ISO_SEQ_BAM'].keys())), True)
args.set('annotation_genomes', frozenset(list(args.cfg['ANNOTATION'].keys())), True)
args.set('external_ref_genomes', args.annotation_genomes - {args.ref_genome}, True)
args.set('modes', self.get_modes(args), True)
args.set('augustus_tmr', True if 'augTMR' in args.modes else False, True)
if self.__class__.__name__ in ['RunCat', 'Augustus', 'AugustusCgp', 'AugustusPb']:
self.validate_cfg(args)
pipeline_args = args
</DeepExtract>
for target_genome in pipeline_args.target_genomes:
yield self.clone(AugustusDriverTask, genome=target_genome)
|
def requires(self):
if not os.path.exists(pipeline_args.hal):
raise InputMissingException('HAL file not found at {}.'.format(pipeline_args.hal))
for d in [pipeline_args.out_dir, pipeline_args.work_dir]:
if not os.path.exists(d):
if not tools.fileOps.dir_is_writeable(os.path.dirname(d)):
raise UserException('Cannot create directory {}.'.format(d))
elif not tools.fileOps.dir_is_writeable(d):
raise UserException('Directory {} is not writeable.'.format(d))
if not os.path.exists(pipeline_args.annotation):
raise InputMissingException('Annotation file {} not found.'.format(pipeline_args.annotation))
if pipeline_args.ref_genome not in pipeline_args.hal_genomes:
raise InvalidInputException('Reference genome {} not present in HAL.'.format(pipeline_args.ref_genome))
missing_genomes = {g for g in pipeline_args.target_genomes if g not in pipeline_args.hal_genomes}
if len(missing_genomes) > 0:
missing_genomes = ','.join(missing_genomes)
raise InvalidInputException('Target genomes {} not present in HAL.'.format(missing_genomes))
if pipeline_args.ref_genome in pipeline_args.target_genomes:
raise InvalidInputException('A target genome cannot be the reference genome.')
args = tools.misc.PipelineNamespace()
args.set('binary_mode', self.binary_mode, False)
args.set('hal', os.path.abspath(self.hal), True)
args.set('ref_genome', self.ref_genome, True)
args.set('out_dir', os.path.abspath(self.out_dir), True)
args.set('work_dir', os.path.abspath(self.work_dir), True)
args.set('augustus', self.augustus, True)
args.set('augustus_cgp', self.augustus_cgp, True)
args.set('augustus_pb', self.augustus_pb, True)
args.set('augustus_species', self.augustus_species, True)
args.set('tm_cfg', os.path.abspath(self.tm_cfg), True)
args.set('tmr_cfg', os.path.abspath(self.tmr_cfg), True)
args.set('augustus_cgp', self.augustus_cgp, True)
args.set('maf_chunksize', self.maf_chunksize, True)
args.set('maf_overlap', self.maf_overlap, True)
args.set('pb_genome_chunksize', self.pb_genome_chunksize, True)
args.set('pb_genome_overlap', self.pb_genome_overlap, True)
args.set('pb_cfg', os.path.abspath(self.pb_cfg), True)
args.set('augustus_cgp_cfg_template', os.path.abspath(self.augustus_cgp_cfg_template), True)
args.set('augustus_utr_off', self.augustus_utr_off, True)
if self.cgp_param is not None:
args.set('cgp_param', os.path.abspath(self.cgp_param), True)
else:
args.set('cgp_param', None, True)
args.set('cgp_train_num_exons', self.cgp_train_num_exons, True)
args.set('hgm_cpu', self.hgm_cpu, False)
args.set('global_near_best', self.global_near_best, True)
args.set('filter_overlapping_genes', self.filter_overlapping_genes, True)
args.set('overlapping_ignore_bases', self.overlapping_ignore_bases, True)
args.set('intron_rnaseq_support', self.intron_rnaseq_support, False)
args.set('exon_rnaseq_support', self.exon_rnaseq_support, False)
args.set('intron_annot_support', self.intron_annot_support, False)
args.set('exon_annot_support', self.exon_annot_support, False)
args.set('original_intron_support', self.original_intron_support, False)
args.set('denovo_num_introns', self.denovo_num_introns, False)
args.set('denovo_splice_support', self.denovo_splice_support, False)
args.set('denovo_exon_support', self.denovo_exon_support, False)
args.set('denovo_ignore_novel_genes', self.denovo_ignore_novel_genes, False)
args.set('denovo_only_novel_genes', self.denovo_only_novel_genes, False)
args.set('denovo_allow_novel_ends', self.denovo_allow_novel_ends, False)
args.set('denovo_novel_end_distance', self.denovo_novel_end_distance, False)
args.set('denovo_allow_unsupported', self.denovo_allow_unsupported, False)
args.set('denovo_allow_bad_annot_or_tm', self.denovo_allow_bad_annot_or_tm, False)
args.set('require_pacbio_support', self.require_pacbio_support, False)
args.set('in_species_rna_support_only', self.in_species_rna_support_only, False)
args.set('rebuild_consensus', self.rebuild_consensus, False)
args.set('stats_db', os.path.join(args.out_dir, 'databases', 'timing_stats.db'), False)
args.set('assembly_hub', self.assembly_hub, False)
args.set('hub_email', self.hub_email, False)
args.set('annotate_ancestors', self.annotate_ancestors, True)
if not tools.misc.is_exec('halStats'):
raise ToolMissingException('halStats from the HAL tools package not in global path')
args.set('hal_genomes', tools.hal.extract_genomes(args.hal, self.annotate_ancestors), True)
target_genomes = tools.hal.extract_genomes(args.hal, self.annotate_ancestors, self.target_genomes)
target_genomes = tuple((x for x in target_genomes if x != self.ref_genome))
args.set('target_genomes', target_genomes, True)
args.set('cfg', self.parse_cfg(), True)
args.set('dbs', PipelineTask.get_databases(args), True)
args.set('annotation', args.cfg['ANNOTATION'][args.ref_genome], True)
args.set('hints_db', os.path.join(args.work_dir, 'hints_database', 'hints.db'), True)
args.set('rnaseq_genomes', frozenset(set(args.cfg['INTRONBAM'].keys()) | set(args.cfg['BAM'].keys())), True)
args.set('intron_only_genomes', frozenset(set(args.cfg['INTRONBAM'].keys()) - set(args.cfg['BAM'].keys())), True)
args.set('isoseq_genomes', frozenset(list(args.cfg['ISO_SEQ_BAM'].keys())), True)
args.set('annotation_genomes', frozenset(list(args.cfg['ANNOTATION'].keys())), True)
args.set('external_ref_genomes', args.annotation_genomes - {args.ref_genome}, True)
args.set('modes', self.get_modes(args), True)
args.set('augustus_tmr', True if 'augTMR' in args.modes else False, True)
if self.__class__.__name__ in ['RunCat', 'Augustus', 'AugustusCgp', 'AugustusPb']:
self.validate_cfg(args)
pipeline_args = args
for target_genome in pipeline_args.target_genomes:
yield self.clone(AugustusDriverTask, genome=target_genome)
|
Comparative-Annotation-Toolkit
|
positive
|
def get_ip_by_container(self, container_id):
"""Copied from calicoctl, must use endpoint to get IPs bound to container_id"""
<DeepExtract>
from eru.models.container import Container
container = Container.get_by_container_id(container_id)
hostname = container.host.name
endpoints = _ipam.get_endpoints(hostname=hostname, orchestrator_id='docker', workload_id=container.container_id)
ip_list = [IPAddress(i) for endpoint in endpoints for i in endpoint.ipv4_nets]
</DeepExtract>
pools = [_ipam.get_pool(ip) for ip in ip_list]
return [WrappedIP.from_calico(ip, pool, container_id) for (ip, pool) in zip(ip_list, pools)]
|
def get_ip_by_container(self, container_id):
"""Copied from calicoctl, must use endpoint to get IPs bound to container_id"""
from eru.models.container import Container
container = Container.get_by_container_id(container_id)
hostname = container.host.name
endpoints = _ipam.get_endpoints(hostname=hostname, orchestrator_id='docker', workload_id=container.container_id)
ip_list = [IPAddress(i) for endpoint in endpoints for i in endpoint.ipv4_nets]
pools = [_ipam.get_pool(ip) for ip in ip_list]
return [WrappedIP.from_calico(ip, pool, container_id) for (ip, pool) in zip(ip_list, pools)]
|
eru-core
|
positive
|
def acquire(self, timeout=0, target=None):
<DeepExtract>
old_key = self.client.getset(self.check_exists_key, self.exists_val)
if old_key:
return False
return self._init()
</DeepExtract>
if self.stale_client_timeout is not None:
<DeepExtract>
token = self.client.getset(self.check_release_locks_key, self.exists_val)
if token:
return False
self.client.expire(self.check_release_locks_key, expires)
try:
for (token, looked_at) in self.client.hgetall(self.grabbed_key).items():
timed_out_at = float(looked_at) + self.stale_client_timeout
if timed_out_at < self.current_time:
self.signal(token)
finally:
self.client.delete(self.check_release_locks_key)
</DeepExtract>
if self.blocking:
pair = self.client.blpop(self.available_key, timeout)
if pair is None:
raise NotAvailable
token = pair[1]
else:
token = self.client.lpop(self.available_key)
if token is None:
raise NotAvailable
self._local_tokens.append(token)
self.client.hset(self.grabbed_key, token, self.current_time)
if target is not None:
try:
target(token)
finally:
<DeepExtract>
if token is None:
return None
with self.client.pipeline() as pipe:
pipe.multi()
pipe.hdel(self.grabbed_key, token)
pipe.lpush(self.available_key, token)
pipe.execute()
return token
</DeepExtract>
return token
|
def acquire(self, timeout=0, target=None):
old_key = self.client.getset(self.check_exists_key, self.exists_val)
if old_key:
return False
return self._init()
if self.stale_client_timeout is not None:
token = self.client.getset(self.check_release_locks_key, self.exists_val)
if token:
return False
self.client.expire(self.check_release_locks_key, expires)
try:
for (token, looked_at) in self.client.hgetall(self.grabbed_key).items():
timed_out_at = float(looked_at) + self.stale_client_timeout
if timed_out_at < self.current_time:
self.signal(token)
finally:
self.client.delete(self.check_release_locks_key)
if self.blocking:
pair = self.client.blpop(self.available_key, timeout)
if pair is None:
raise NotAvailable
token = pair[1]
else:
token = self.client.lpop(self.available_key)
if token is None:
raise NotAvailable
self._local_tokens.append(token)
self.client.hset(self.grabbed_key, token, self.current_time)
if target is not None:
try:
target(token)
finally:
if token is None:
return None
with self.client.pipeline() as pipe:
pipe.multi()
pipe.hdel(self.grabbed_key, token)
pipe.lpush(self.available_key, token)
pipe.execute()
return token
return token
|
eoj3
|
positive
|
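The acquire()/signal() pair in the eoj3 row above implements a token-passing semaphore on top of Redis: free tokens live in a list, held tokens in a hash keyed by grab time. A minimal standalone sketch of that pattern, assuming a redis-py client; the key names, token format, and error type are illustrative rather than the project's own.

# Sketch only: Redis-backed counting semaphore via a token list and a hash.
import time
import redis

client = redis.Redis()
AVAILABLE_KEY = 'semaphore:available'
GRABBED_KEY = 'semaphore:grabbed'

def init_semaphore(size):
    # Seed the list of free tokens once.
    client.delete(AVAILABLE_KEY, GRABBED_KEY)
    client.rpush(AVAILABLE_KEY, *[str(i) for i in range(size)])

def acquire(timeout=0):
    # Blocking pop of a free token; record who holds it and since when.
    pair = client.blpop(AVAILABLE_KEY, timeout)
    if pair is None:
        raise RuntimeError('semaphore not available')
    token = pair[1]
    client.hset(GRABBED_KEY, token, time.time())
    return token

def release(token):
    # Return the token to the free list atomically.
    with client.pipeline() as pipe:
        pipe.multi()
        pipe.hdel(GRABBED_KEY, token)
        pipe.lpush(AVAILABLE_KEY, token)
        pipe.execute()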
@pytest.mark.test_version_older_no_envar
@pytest.mark.parametrize('version', ['0.01.0'])
def test_c2r_latest_older_inhibit(convert2rhel, c2r_version, version):
"""
Check if running older version inhibits the conversion.
"""
<DeepExtract>
path_to_version = subprocess.check_output(['find', '/usr/lib/', '-path', '*/convert2rhel/__init__.py', '-printf', '%p']).decode('utf-8')
with open(path_to_version, 'r') as version_file:
old_version_content = version_file.read()
def _update_c2r_version(version):
"""
Modify the Convert2RHEL version value in the __init__.py file.
We want to simulate the running version is older/newer than in the repositories.
"""
with open(path_to_version, 'w') as version_file:
version_pattern = '__version__ = "(\\d+\\.\\d+\\.\\d+)"'
updated_version_content = re.sub(version_pattern, '__version__ = "{}"'.format(version), old_version_content)
version_file.write(updated_version_content)
yield _update_c2r_version
def _restore_c2r_version():
with open(path_to_version, 'w') as version_file:
version_file.write(old_version_content)
_restore_c2r_version()
</DeepExtract>
with convert2rhel('--no-rpm-va --debug') as c2r:
c2r.expect('Continue with the system conversion?')
c2r.sendline('y')
assert c2r.expect('CRITICAL - You are currently running 0.01', timeout=300) == 0
assert c2r.expect('Only the latest version is supported for conversion.', timeout=300) == 0
assert c2r.exitstatus != 0
|
@pytest.mark.test_version_older_no_envar
@pytest.mark.parametrize('version', ['0.01.0'])
def test_c2r_latest_older_inhibit(convert2rhel, c2r_version, version):
"""
Check if running older version inhibits the conversion.
"""
path_to_version = subprocess.check_output(['find', '/usr/lib/', '-path', '*/convert2rhel/__init__.py', '-printf', '%p']).decode('utf-8')
with open(path_to_version, 'r') as version_file:
old_version_content = version_file.read()
def _update_c2r_version(version):
"""
Modify the Convert2RHEL version value in the __init__.py file.
We want to simulate the running version is older/newer than in the repositories.
"""
with open(path_to_version, 'w') as version_file:
version_pattern = '__version__ = "(\\d+\\.\\d+\\.\\d+)"'
updated_version_content = re.sub(version_pattern, '__version__ = "{}"'.format(version), old_version_content)
version_file.write(updated_version_content)
yield _update_c2r_version
def _restore_c2r_version():
with open(path_to_version, 'w') as version_file:
version_file.write(old_version_content)
_restore_c2r_version()
with convert2rhel('--no-rpm-va --debug') as c2r:
c2r.expect('Continue with the system conversion?')
c2r.sendline('y')
assert c2r.expect('CRITICAL - You are currently running 0.01', timeout=300) == 0
assert c2r.expect('Only the latest version is supported for conversion.', timeout=300) == 0
assert c2r.exitstatus != 0
|
convert2rhel
|
positive
|
def get_serializer_formats():
if not _serializers:
<DeepExtract>
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, 'DOCKIT_SERIALIZATION_MODULES'):
for format in settings.DOCKIT_SERIALIZATION_MODULES:
register_serializer(format, settings.DOCKIT_SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
</DeepExtract>
return _serializers.keys()
|
def get_serializer_formats():
if not _serializers:
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, 'DOCKIT_SERIALIZATION_MODULES'):
for format in settings.DOCKIT_SERIALIZATION_MODULES:
register_serializer(format, settings.DOCKIT_SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
return _serializers.keys()
|
django-dockit
|
positive
|
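The django-dockit row above fills a module-level serializer registry lazily, merging built-in entries with optional settings overrides on first access. A small generic sketch of that lazy-registry pattern; it uses the stdlib json module as a stand-in serializer module, so the names here are illustrative.

# Sketch only: lazily populated module-level registry of serializer modules.
import importlib

BUILTIN_SERIALIZERS = {'json': 'json'}
_serializers = {}

def register_serializer(fmt, module_path, registry=None):
    registry = _serializers if registry is None else registry
    registry[fmt] = importlib.import_module(module_path)

def get_serializer_formats(extra_modules=None):
    # Build the cache exactly once, builtins first, then overrides.
    if not _serializers:
        for fmt, path in BUILTIN_SERIALIZERS.items():
            register_serializer(fmt, path)
        for fmt, path in (extra_modules or {}).items():
            register_serializer(fmt, path)
    return list(_serializers.keys())

assert get_serializer_formats() == ['json']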
def get_kfold_partition(x, y, start, end):
<DeepExtract>
assert x[:start]._shape[1] == x[end:]._shape[1], 'The arrays must have the same\n number of columns.'
assert x[:start]._sparse == x[end:]._sparse, 'A sparse and a dense array cannot\n be merged.'
assert x[:start]._reg_shape == x[end:]._reg_shape, 'The array regular blocks must\n have the same shape.'
len_s1 = x[:start].shape[0]
len_s2 = x[end:].shape[0]
if len_s1 == 0:
train_x = x[end:]
if len_s2 == 0:
train_x = x[:start]
reg_shape = x[:start]._reg_shape
reg_rows = reg_shape[0]
top_rows_s1 = x[:start]._top_left_shape[0]
reg_rows_start_s1 = top_rows_s1 if top_rows_s1 != reg_rows else 0
reg_rows_end_s1 = len_s1 - (len_s1 - reg_rows_start_s1) % reg_rows
top_rows_s2 = x[end:]._top_left_shape[0]
reg_rows_start_s2 = top_rows_s2 if top_rows_s2 != reg_rows else 0
reg_rows_end_s2 = len_s2 - (len_s2 - reg_rows_start_s2) % reg_rows
reg_s1 = x[:start][reg_rows_start_s1:reg_rows_end_s1]
reg_s2 = x[end:][reg_rows_start_s2:reg_rows_end_s2]
all_blocks = []
if reg_s1.shape[0]:
all_blocks.extend(reg_s1._blocks)
if reg_s2.shape[0]:
all_blocks.extend(reg_s2._blocks)
extras = []
if reg_rows_start_s1 > 0:
extras.append(x[:start][:reg_rows_start_s1])
if reg_rows_start_s2 > 0:
extras.append(x[:start][:reg_rows_start_s2])
if reg_rows_end_s1 < len_s1:
extras.append(x[:start][reg_rows_end_s1:])
if reg_rows_end_s2 < len_s2:
extras.append(x[end:][reg_rows_end_s2:])
groups = []
current_capacity = 0
for extra in extras:
len_extra = extra.shape[0]
if current_capacity == 0:
current_capacity = reg_rows
groups.append([])
if extra.shape[0] <= current_capacity:
current_capacity -= extra.shape[0]
groups[-1].append(extra)
else:
groups[-1].append(extra[:current_capacity])
groups.append([extra[current_capacity:]])
current_capacity = current_capacity - len_extra + reg_rows
for g in groups:
blocks = []
for a in g:
for row_block in a._blocks:
blocks.append(row_block)
group_blocks = [object() for _ in range(x[:start]._n_blocks[1])]
_merge_rows_keeping_cols(blocks, group_blocks)
all_blocks.append(group_blocks)
train_x = Array(blocks=all_blocks, top_left_shape=reg_shape, reg_shape=reg_shape, shape=(len_s1 + len_s2, x[:start].shape[1]), sparse=x[:start]._sparse)
</DeepExtract>
test_x = x[start:end]
train_y = None
test_y = None
if y is not None:
<DeepExtract>
assert y[:start]._shape[1] == y[end:]._shape[1], 'The arrays must have the same\n number of columns.'
assert y[:start]._sparse == y[end:]._sparse, 'A sparse and a dense array cannot\n be merged.'
assert y[:start]._reg_shape == y[end:]._reg_shape, 'The array regular blocks must\n have the same shape.'
len_s1 = y[:start].shape[0]
len_s2 = y[end:].shape[0]
if len_s1 == 0:
train_y = y[end:]
if len_s2 == 0:
train_y = y[:start]
reg_shape = y[:start]._reg_shape
reg_rows = reg_shape[0]
top_rows_s1 = y[:start]._top_left_shape[0]
reg_rows_start_s1 = top_rows_s1 if top_rows_s1 != reg_rows else 0
reg_rows_end_s1 = len_s1 - (len_s1 - reg_rows_start_s1) % reg_rows
top_rows_s2 = y[end:]._top_left_shape[0]
reg_rows_start_s2 = top_rows_s2 if top_rows_s2 != reg_rows else 0
reg_rows_end_s2 = len_s2 - (len_s2 - reg_rows_start_s2) % reg_rows
reg_s1 = y[:start][reg_rows_start_s1:reg_rows_end_s1]
reg_s2 = y[end:][reg_rows_start_s2:reg_rows_end_s2]
all_blocks = []
if reg_s1.shape[0]:
all_blocks.extend(reg_s1._blocks)
if reg_s2.shape[0]:
all_blocks.extend(reg_s2._blocks)
extras = []
if reg_rows_start_s1 > 0:
extras.append(y[:start][:reg_rows_start_s1])
if reg_rows_start_s2 > 0:
extras.append(y[:start][:reg_rows_start_s2])
if reg_rows_end_s1 < len_s1:
extras.append(y[:start][reg_rows_end_s1:])
if reg_rows_end_s2 < len_s2:
extras.append(y[end:][reg_rows_end_s2:])
groups = []
current_capacity = 0
for extra in extras:
len_extra = extra.shape[0]
if current_capacity == 0:
current_capacity = reg_rows
groups.append([])
if extra.shape[0] <= current_capacity:
current_capacity -= extra.shape[0]
groups[-1].append(extra)
else:
groups[-1].append(extra[:current_capacity])
groups.append([extra[current_capacity:]])
current_capacity = current_capacity - len_extra + reg_rows
for g in groups:
blocks = []
for a in g:
for row_block in a._blocks:
blocks.append(row_block)
group_blocks = [object() for _ in range(y[:start]._n_blocks[1])]
_merge_rows_keeping_cols(blocks, group_blocks)
all_blocks.append(group_blocks)
train_y = Array(blocks=all_blocks, top_left_shape=reg_shape, reg_shape=reg_shape, shape=(len_s1 + len_s2, y[:start].shape[1]), sparse=y[:start]._sparse)
</DeepExtract>
test_y = y[start:end]
return ((train_x, train_y), (test_x, test_y))
|
def get_kfold_partition(x, y, start, end):
assert x[:start]._shape[1] == x[end:]._shape[1], 'The arrays must have the same\n number of columns.'
assert x[:start]._sparse == x[end:]._sparse, 'A sparse and a dense array cannot\n be merged.'
assert x[:start]._reg_shape == x[end:]._reg_shape, 'The array regular blocks must\n have the same shape.'
len_s1 = x[:start].shape[0]
len_s2 = x[end:].shape[0]
if len_s1 == 0:
train_x = x[end:]
if len_s2 == 0:
train_x = x[:start]
reg_shape = x[:start]._reg_shape
reg_rows = reg_shape[0]
top_rows_s1 = x[:start]._top_left_shape[0]
reg_rows_start_s1 = top_rows_s1 if top_rows_s1 != reg_rows else 0
reg_rows_end_s1 = len_s1 - (len_s1 - reg_rows_start_s1) % reg_rows
top_rows_s2 = x[end:]._top_left_shape[0]
reg_rows_start_s2 = top_rows_s2 if top_rows_s2 != reg_rows else 0
reg_rows_end_s2 = len_s2 - (len_s2 - reg_rows_start_s2) % reg_rows
reg_s1 = x[:start][reg_rows_start_s1:reg_rows_end_s1]
reg_s2 = x[end:][reg_rows_start_s2:reg_rows_end_s2]
all_blocks = []
if reg_s1.shape[0]:
all_blocks.extend(reg_s1._blocks)
if reg_s2.shape[0]:
all_blocks.extend(reg_s2._blocks)
extras = []
if reg_rows_start_s1 > 0:
extras.append(x[:start][:reg_rows_start_s1])
if reg_rows_start_s2 > 0:
extras.append(x[:start][:reg_rows_start_s2])
if reg_rows_end_s1 < len_s1:
extras.append(x[:start][reg_rows_end_s1:])
if reg_rows_end_s2 < len_s2:
extras.append(x[end:][reg_rows_end_s2:])
groups = []
current_capacity = 0
for extra in extras:
len_extra = extra.shape[0]
if current_capacity == 0:
current_capacity = reg_rows
groups.append([])
if extra.shape[0] <= current_capacity:
current_capacity -= extra.shape[0]
groups[-1].append(extra)
else:
groups[-1].append(extra[:current_capacity])
groups.append([extra[current_capacity:]])
current_capacity = current_capacity - len_extra + reg_rows
for g in groups:
blocks = []
for a in g:
for row_block in a._blocks:
blocks.append(row_block)
group_blocks = [object() for _ in range(x[:start]._n_blocks[1])]
_merge_rows_keeping_cols(blocks, group_blocks)
all_blocks.append(group_blocks)
train_x = Array(blocks=all_blocks, top_left_shape=reg_shape, reg_shape=reg_shape, shape=(len_s1 + len_s2, x[:start].shape[1]), sparse=x[:start]._sparse)
test_x = x[start:end]
train_y = None
test_y = None
if y is not None:
assert y[:start]._shape[1] == y[end:]._shape[1], 'The arrays must have the same\n number of columns.'
assert y[:start]._sparse == y[end:]._sparse, 'A sparse and a dense array cannot\n be merged.'
assert y[:start]._reg_shape == y[end:]._reg_shape, 'The array regular blocks must\n have the same shape.'
len_s1 = y[:start].shape[0]
len_s2 = y[end:].shape[0]
if len_s1 == 0:
train_y = y[end:]
if len_s2 == 0:
train_y = y[:start]
reg_shape = y[:start]._reg_shape
reg_rows = reg_shape[0]
top_rows_s1 = y[:start]._top_left_shape[0]
reg_rows_start_s1 = top_rows_s1 if top_rows_s1 != reg_rows else 0
reg_rows_end_s1 = len_s1 - (len_s1 - reg_rows_start_s1) % reg_rows
top_rows_s2 = y[end:]._top_left_shape[0]
reg_rows_start_s2 = top_rows_s2 if top_rows_s2 != reg_rows else 0
reg_rows_end_s2 = len_s2 - (len_s2 - reg_rows_start_s2) % reg_rows
reg_s1 = y[:start][reg_rows_start_s1:reg_rows_end_s1]
reg_s2 = y[end:][reg_rows_start_s2:reg_rows_end_s2]
all_blocks = []
if reg_s1.shape[0]:
all_blocks.extend(reg_s1._blocks)
if reg_s2.shape[0]:
all_blocks.extend(reg_s2._blocks)
extras = []
if reg_rows_start_s1 > 0:
extras.append(y[:start][:reg_rows_start_s1])
if reg_rows_start_s2 > 0:
extras.append(y[:start][:reg_rows_start_s2])
if reg_rows_end_s1 < len_s1:
extras.append(y[:start][reg_rows_end_s1:])
if reg_rows_end_s2 < len_s2:
extras.append(y[end:][reg_rows_end_s2:])
groups = []
current_capacity = 0
for extra in extras:
len_extra = extra.shape[0]
if current_capacity == 0:
current_capacity = reg_rows
groups.append([])
if extra.shape[0] <= current_capacity:
current_capacity -= extra.shape[0]
groups[-1].append(extra)
else:
groups[-1].append(extra[:current_capacity])
groups.append([extra[current_capacity:]])
current_capacity = current_capacity - len_extra + reg_rows
for g in groups:
blocks = []
for a in g:
for row_block in a._blocks:
blocks.append(row_block)
group_blocks = [object() for _ in range(y[:start]._n_blocks[1])]
_merge_rows_keeping_cols(blocks, group_blocks)
all_blocks.append(group_blocks)
train_y = Array(blocks=all_blocks, top_left_shape=reg_shape, reg_shape=reg_shape, shape=(len_s1 + len_s2, y[:start].shape[1]), sparse=y[:start]._sparse)
test_y = y[start:end]
return ((train_x, train_y), (test_x, test_y))
|
dislib
|
positive
|
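The dislib row above builds one fold of a k-fold split by merging the rows before `start` with the rows after `end` into the training array, while rows [start, end) become the test fold; all the block bookkeeping exists because ds-arrays are stored as distributed blocks. A plain NumPy sketch of the same partitioning logic, with made-up example shapes.

# Sketch only: one k-fold train/test partition on in-memory arrays.
import numpy as np

def kfold_partition(x, y, start, end):
    # Rows outside [start, end) form the training set; the slice is the test fold.
    train_x = np.concatenate([x[:start], x[end:]], axis=0)
    test_x = x[start:end]
    train_y = test_y = None
    if y is not None:
        train_y = np.concatenate([y[:start], y[end:]], axis=0)
        test_y = y[start:end]
    return (train_x, train_y), (test_x, test_y)

# Example: the second of three folds over 9 samples.
x = np.arange(18).reshape(9, 2)
y = np.arange(9).reshape(9, 1)
train, test = kfold_partition(x, y, 3, 6)
assert train[0].shape == (6, 2) and test[0].shape == (3, 2)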
def test_register_removed_exam(self):
"""
If an active exam is not included in the registration payload
that exam will be disabled
"""
exam_data = [{'course_id': self.course_id, 'content_id': '123aaaa', 'exam_name': 'midterm1', 'due_date': '2026-01-01T00:00:00Z', 'time_limit_mins': 90, 'external_id': '123', 'is_proctored': True, 'is_practice_exam': False, 'is_active': True, 'hide_after_due': False, 'backend': 'null'}]
<DeepExtract>
response = self.client.patch(reverse('edx_proctoring:proctored_exam.register_exams_by_course_id', kwargs={'course_id': self.course_id}), exam_data, content_type='application/json')
</DeepExtract>
<DeepExtract>
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
for result in response.data:
self.assertGreater(result.get('exam_id'), 0)
</DeepExtract>
exams = get_all_exams_for_course(course_id=self.course_id, active_only=True)
expected_content_ids = ['123aaaa']
actual_content_ids = [exam.get('content_id') for exam in exams]
self.assertEqual(expected_content_ids, actual_content_ids)
self.exam.refresh_from_db()
self.assertFalse(self.exam.is_active)
self.assertFalse(self.exam.is_proctored)
|
def test_register_removed_exam(self):
"""
If an active exam is not included in the registration payload
that exam will be disabled
"""
exam_data = [{'course_id': self.course_id, 'content_id': '123aaaa', 'exam_name': 'midterm1', 'due_date': '2026-01-01T00:00:00Z', 'time_limit_mins': 90, 'external_id': '123', 'is_proctored': True, 'is_practice_exam': False, 'is_active': True, 'hide_after_due': False, 'backend': 'null'}]
response = self.client.patch(reverse('edx_proctoring:proctored_exam.register_exams_by_course_id', kwargs={'course_id': self.course_id}), exam_data, content_type='application/json')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
for result in response.data:
self.assertGreater(result.get('exam_id'), 0)
exams = get_all_exams_for_course(course_id=self.course_id, active_only=True)
expected_content_ids = ['123aaaa']
actual_content_ids = [exam.get('content_id') for exam in exams]
self.assertEqual(expected_content_ids, actual_content_ids)
self.exam.refresh_from_db()
self.assertFalse(self.exam.is_active)
self.assertFalse(self.exam.is_proctored)
|
edx-proctoring
|
positive
|
def p_func_definition(p):
"""func_definition : assignment_expression function_try_block
| assignment_expression function_body
| decl_specifier_prefix func_definition
"""
global _parse_info
if p[2] is not None and p[2][0] == '{':
<DeepExtract>
try:
strtypes = str
except:
strtypes = (str, bytes)
result = []
for el in p[1]:
if hasattr(el, '__iter__') and (not isinstance(el, strtypes)):
result.extend(flatten(el))
else:
result.append(el)
decl = result
</DeepExtract>
if decl[-1] == ')':
decl = decl[-3]
else:
decl = decl[-1]
p[0] = decl
if decl != 'operator':
_parse_info.add_function(decl)
else:
p[0] = p[2]
|
def p_func_definition(p):
"""func_definition : assignment_expression function_try_block
| assignment_expression function_body
| decl_specifier_prefix func_definition
"""
global _parse_info
if p[2] is not None and p[2][0] == '{':
try:
strtypes = str
except:
strtypes = (str, bytes)
result = []
for el in p[1]:
if hasattr(el, '__iter__') and (not isinstance(el, strtypes)):
result.extend(flatten(el))
else:
result.append(el)
decl = result
if decl[-1] == ')':
decl = decl[-3]
else:
decl = decl[-1]
p[0] = decl
if decl != 'operator':
_parse_info.add_function(decl)
else:
p[0] = p[2]
|
cxxtest
|
positive
|
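The cxxtest parser action above relies on an inlined flatten() helper that collapses arbitrarily nested token lists while treating strings as atoms, so the declarator can be inspected as a flat sequence. A small self-contained sketch of that helper; the example token values are made up.

# Sketch only: recursive flatten that leaves strings/bytes intact.
def flatten(seq):
    result = []
    for el in seq:
        if hasattr(el, '__iter__') and not isinstance(el, (str, bytes)):
            result.extend(flatten(el))
        else:
            result.append(el)
    return result

# Nested declarator tokens collapse to a flat token list.
assert flatten(['int', ['ns', '::', ['f']], '(', ')']) == ['int', 'ns', '::', 'f', '(', ')']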
@utils._requires('h5py')
def _hdf5_dump(fname, data, compression):
"""Adds dictionary entries recursively to hdf5 file fname.
Parameters
----------
fname : str
Absolute path/name of a HDF5-file, ending in .h5.
(In recursion it is an HDF5-file handle).
data : dict
Dictionary containing the data.
compression : {str, int}
Passed through to h5py.
"""
if isinstance(fname, str):
with h5py.File(fname, 'w') as h5file:
<DeepExtract>
if isinstance(h5file, str):
with h5py.File(h5file, 'w') as h5file:
_hdf5_dump(h5file, data, compression)
else:
for (key, value) in data.items():
if isinstance(value, dict):
_hdf5_dump(h5file.create_group(key, track_order=True), value, compression)
elif np.ndim(value) > 0:
h5file.create_dataset(key, data=value, compression=compression)
else:
h5file.create_dataset(key, data=value)
</DeepExtract>
else:
for (key, value) in data.items():
if isinstance(value, dict):
<DeepExtract>
if isinstance(fname.create_group(key, track_order=True), str):
with h5py.File(fname.create_group(key, track_order=True), 'w') as h5file:
_hdf5_dump(h5file, value, compression)
else:
for (key, value) in value.items():
if isinstance(value, dict):
_hdf5_dump(fname.create_group(key, track_order=True).create_group(key, track_order=True), value, compression)
elif np.ndim(value) > 0:
fname.create_group(key, track_order=True).create_dataset(key, data=value, compression=compression)
else:
fname.create_group(key, track_order=True).create_dataset(key, data=value)
</DeepExtract>
elif np.ndim(value) > 0:
fname.create_dataset(key, data=value, compression=compression)
else:
fname.create_dataset(key, data=value)
|
@utils._requires('h5py')
def _hdf5_dump(fname, data, compression):
"""Adds dictionary entries recursively to hdf5 file fname.
Parameters
----------
fname : str
Absolute path/name of a HDF5-file, ending in .h5.
(In recursion it is an HDF5-file handle).
data : dict
Dictionary containing the data.
compression : {str, int}
Passed through to h5py.
"""
if isinstance(fname, str):
with h5py.File(fname, 'w') as h5file:
if isinstance(h5file, str):
with h5py.File(h5file, 'w') as h5file:
_hdf5_dump(h5file, data, compression)
else:
for (key, value) in data.items():
if isinstance(value, dict):
_hdf5_dump(h5file.create_group(key, track_order=True), value, compression)
elif np.ndim(value) > 0:
h5file.create_dataset(key, data=value, compression=compression)
else:
h5file.create_dataset(key, data=value)
else:
for (key, value) in data.items():
if isinstance(value, dict):
if isinstance(fname.create_group(key, track_order=True), str):
with h5py.File(fname.create_group(key, track_order=True), 'w') as h5file:
_hdf5_dump(h5file, value, compression)
else:
for (key, value) in value.items():
if isinstance(value, dict):
_hdf5_dump(fname.create_group(key, track_order=True).create_group(key, track_order=True), value, compression)
elif np.ndim(value) > 0:
fname.create_group(key, track_order=True).create_dataset(key, data=value, compression=compression)
else:
fname.create_group(key, track_order=True).create_dataset(key, data=value)
elif np.ndim(value) > 0:
fname.create_dataset(key, data=value, compression=compression)
else:
fname.create_dataset(key, data=value)
|
emg3d
|
positive
|
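The emg3d row above writes a nested dictionary into an HDF5 file by recursing into sub-dicts as groups and storing everything else as datasets, compressing only non-scalar values. A minimal sketch of that recursion using h5py; the output file name and sample data are illustrative.

# Sketch only: recursive dict -> HDF5 dump with h5py.
import h5py
import numpy as np

def hdf5_dump(group, data, compression='gzip'):
    for key, value in data.items():
        if isinstance(value, dict):
            # Nested dicts become HDF5 groups.
            hdf5_dump(group.create_group(key), value, compression)
        elif np.ndim(value) > 0:
            group.create_dataset(key, data=value, compression=compression)
        else:
            # Scalars cannot be chunked/compressed.
            group.create_dataset(key, data=value)

with h5py.File('example.h5', 'w') as h5file:
    hdf5_dump(h5file, {'grid': {'hx': np.ones(4)}, 'frequency': 1.0})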
def activate(self, ctx):
<DeepExtract>
global g_bindiff
if g_bindiff is not None:
filename = ask_file(1, '*.diaphora', 'Select the file to store diffing results')
if filename is not None:
g_bindiff.save_results(filename)
</DeepExtract>
return 1
|
def activate(self, ctx):
global g_bindiff
if g_bindiff is not None:
filename = ask_file(1, '*.diaphora', 'Select the file to store diffing results')
if filename is not None:
g_bindiff.save_results(filename)
return 1
|
diaphora
|
positive
|
def evaluate_function(self, aug_function: Callable[..., Image.Image], **kwargs):
<DeepExtract>
ref_img_name = f'test_{aug_function.__name__}.png'
ref_local_path = pathmgr.get_local_path(os.path.join(self.ref_img_dir, ref_img_name))
ref = Image.open(ref_local_path)
</DeepExtract>
with tempfile.NamedTemporaryFile(suffix='.png') as tmpfile:
aug_function(self.local_img_path, output_path=tmpfile.name, **kwargs)
file_dst = Image.open(tmpfile.name)
pil_dst = aug_function(self.img, **kwargs)
self.assertTrue(are_equal_images(pil_dst, ref), 'Expected and outputted images do not match')
self.assertTrue(are_equal_images(file_dst, ref), 'Expected and outputted images do not match')
|
def evaluate_function(self, aug_function: Callable[..., Image.Image], **kwargs):
ref_img_name = f'test_{aug_function.__name__}.png'
ref_local_path = pathmgr.get_local_path(os.path.join(self.ref_img_dir, ref_img_name))
ref = Image.open(ref_local_path)
with tempfile.NamedTemporaryFile(suffix='.png') as tmpfile:
aug_function(self.local_img_path, output_path=tmpfile.name, **kwargs)
file_dst = Image.open(tmpfile.name)
pil_dst = aug_function(self.img, **kwargs)
self.assertTrue(are_equal_images(pil_dst, ref), 'Expected and outputted images do not match')
self.assertTrue(are_equal_images(file_dst, ref), 'Expected and outputted images do not match')
|
AugLy
|
positive
|
def on_save(self, doc_class, collection, doc_id, data):
<DeepExtract>
if not self.pending_indexes:
return
if not db_table_exists(RegisteredIndex._meta.db_table):
return
router = get_index_router()
while self.pending_indexes:
queryset = self.pending_indexes.pop()
document = queryset.document
collection = queryset.document._meta.collection
key = queryset.global_hash()
if collection in router.registered_querysets and key in router.registered_querysets[collection]:
self.index_tasks.register_index(queryset)
</DeepExtract>
self.index_tasks.on_save(collection, doc_id, data)
|
def on_save(self, doc_class, collection, doc_id, data):
if not self.pending_indexes:
return
if not db_table_exists(RegisteredIndex._meta.db_table):
return
router = get_index_router()
while self.pending_indexes:
queryset = self.pending_indexes.pop()
document = queryset.document
collection = queryset.document._meta.collection
key = queryset.global_hash()
if collection in router.registered_querysets and key in router.registered_querysets[collection]:
self.index_tasks.register_index(queryset)
self.index_tasks.on_save(collection, doc_id, data)
|
django-dockit
|
positive
|
def check_file(filename):
if filename == '-':
filename = '<STDIN>'
self.valid_parquet_msg = '%s => Parquet OK' % filename
self.invalid_parquet_msg = '%s => Parquet INVALID' % filename
if filename == '<STDIN>':
try:
tmp = tempfile.NamedTemporaryFile()
log.debug('created tmp file from stdin: %s', tmp.name)
tmp.write(sys.stdin.read())
tmp.seek(0)
<DeepExtract>
stderr = subprocess.PIPE
if self.verbose > 2:
stderr = None
if not which('parquet-cat'):
die('parquet-cat not found in $PATH')
if subprocess.call(['parquet-cat', tmp.name], stdout=subprocess.PIPE, stderr=stderr, shell=False) == 0:
print(self.valid_parquet_msg)
else:
die(self.invalid_parquet_msg)
</DeepExtract>
tmp.close()
except IOError as _:
die('ERROR: %s' % _)
else:
if self.is_excluded(filename):
return
try:
<DeepExtract>
stderr = subprocess.PIPE
if self.verbose > 2:
stderr = None
if not which('parquet-cat'):
die('parquet-cat not found in $PATH')
if subprocess.call(['parquet-cat', filename], stdout=subprocess.PIPE, stderr=stderr, shell=False) == 0:
print(self.valid_parquet_msg)
else:
die(self.invalid_parquet_msg)
</DeepExtract>
except IOError as _:
die('ERROR: %s' % _)
|
def check_file(filename):
if filename == '-':
filename = '<STDIN>'
self.valid_parquet_msg = '%s => Parquet OK' % filename
self.invalid_parquet_msg = '%s => Parquet INVALID' % filename
if filename == '<STDIN>':
try:
tmp = tempfile.NamedTemporaryFile()
log.debug('created tmp file from stdin: %s', tmp.name)
tmp.write(sys.stdin.read())
tmp.seek(0)
stderr = subprocess.PIPE
if self.verbose > 2:
stderr = None
if not which('parquet-cat'):
die('parquet-cat not found in $PATH')
if subprocess.call(['parquet-cat', tmp.name], stdout=subprocess.PIPE, stderr=stderr, shell=False) == 0:
print(self.valid_parquet_msg)
else:
die(self.invalid_parquet_msg)
tmp.close()
except IOError as _:
die('ERROR: %s' % _)
else:
if self.is_excluded(filename):
return
try:
stderr = subprocess.PIPE
if self.verbose > 2:
stderr = None
if not which('parquet-cat'):
die('parquet-cat not found in $PATH')
if subprocess.call(['parquet-cat', filename], stdout=subprocess.PIPE, stderr=stderr, shell=False) == 0:
print(self.valid_parquet_msg)
else:
die(self.invalid_parquet_msg)
except IOError as _:
die('ERROR: %s' % _)
|
DevOps-Python-tools
|
positive
|
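The DevOps-Python-tools record validates Parquet files by shelling out to parquet-cat and checking the exit code. The generic exit-code pattern, assuming the external tool is on $PATH and using an illustrative path, can be sketched as:

import subprocess

def is_valid_parquet(path, tool='parquet-cat'):
    # True when the external validator exits 0; its output is discarded.
    proc = subprocess.run([tool, path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    return proc.returncode == 0

print(is_valid_parquet('data.parquet'))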
def quick_sort(arr, first, last):
""" Quicksort
Complexity: best/avg O(n log(n)), worst O(n^2)
"""
if first < last:
<DeepExtract>
wall = first
for pos in range(first, last):
if arr[pos] < arr[last]:
(arr[pos], arr[wall]) = (arr[wall], arr[pos])
wall += 1
(arr[wall], arr[last]) = (arr[last], arr[wall])
print(wall)
pos = wall
</DeepExtract>
print(arr[first:pos - 1], arr[pos + 1:last])
<DeepExtract>
if first < pos - 1:
pos = partition(arr, first, pos - 1)
print(arr[first:pos - 1], arr[pos + 1:pos - 1])
quick_sort(arr, first, pos - 1)
quick_sort(arr, pos + 1, pos - 1)
</DeepExtract>
<DeepExtract>
if pos + 1 < last:
pos = partition(arr, pos + 1, last)
print(arr[pos + 1:pos - 1], arr[pos + 1:last])
quick_sort(arr, pos + 1, pos - 1)
quick_sort(arr, pos + 1, last)
</DeepExtract>
|
def quick_sort(arr, first, last):
""" Quicksort
Complexity: best/avg O(n log(n)), worst O(n^2)
"""
if first < last:
wall = first
for pos in range(first, last):
if arr[pos] < arr[last]:
(arr[pos], arr[wall]) = (arr[wall], arr[pos])
wall += 1
(arr[wall], arr[last]) = (arr[last], arr[wall])
print(wall)
pos = wall
print(arr[first:pos - 1], arr[pos + 1:last])
if first < pos - 1:
pos = partition(arr, first, pos - 1)
print(arr[first:pos - 1], arr[pos + 1:pos - 1])
quick_sort(arr, first, pos - 1)
quick_sort(arr, pos + 1, pos - 1)
if pos + 1 < last:
pos = partition(arr, pos + 1, last)
print(arr[pos + 1:pos - 1], arr[pos + 1:last])
quick_sort(arr, pos + 1, pos - 1)
quick_sort(arr, pos + 1, last)
|
algorithms
|
positive
|
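The algorithms record inlines a Lomuto-style partition into quick_sort. A compact self-contained version of the same scheme, with illustrative data:

def partition(arr, first, last):
    # Lomuto partition: arr[last] is the pivot; wall marks the split point.
    wall = first
    for pos in range(first, last):
        if arr[pos] < arr[last]:
            arr[pos], arr[wall] = arr[wall], arr[pos]
            wall += 1
    arr[wall], arr[last] = arr[last], arr[wall]
    return wall

def quick_sort(arr, first, last):
    if first < last:
        p = partition(arr, first, last)
        quick_sort(arr, first, p - 1)
        quick_sort(arr, p + 1, last)

data = [5, 2, 9, 1]
quick_sort(data, 0, len(data) - 1)
print(data)  # [1, 2, 5, 9]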
def sub_tick_init(self, dt):
self.cell_dlens_dev[0:self.n_cells].set(dt * self.cell_growth_rates[0:self.n_cells])
self.cell_centers[0:self.n_cells] = self.cell_centers_dev[0:self.n_cells].get()
<DeepExtract>
coords = self.cell_centers.view(numpy.float32).reshape((self.max_cells, 4))
x_coords = coords[:, 0]
min_x_coord = x_coords.min()
max_x_coord = x_coords.max()
self.grid_x_min = int(math.floor(min_x_coord / self.grid_spacing))
self.grid_x_max = int(math.ceil(max_x_coord / self.grid_spacing))
if self.grid_x_min == self.grid_x_max:
self.grid_x_max += 1
y_coords = coords[:, 1]
min_y_coord = y_coords.min()
max_y_coord = y_coords.max()
self.grid_y_min = int(math.floor(min_y_coord / self.grid_spacing))
self.grid_y_max = int(math.ceil(max_y_coord / self.grid_spacing))
if self.grid_y_min == self.grid_y_max:
self.grid_y_max += 1
self.n_sqs = (self.grid_x_max - self.grid_x_min) * (self.grid_y_max - self.grid_y_min)
</DeepExtract>
<DeepExtract>
self.program.bin_cells(self.queue, (self.n_cells,), None, numpy.int32(self.grid_x_min), numpy.int32(self.grid_x_max), numpy.int32(self.grid_y_min), numpy.int32(self.grid_y_max), numpy.float32(self.grid_spacing), self.cell_centers_dev.data, self.cell_sqs_dev.data).wait()
</DeepExtract>
self.cell_sqs = self.cell_sqs_dev[0:self.n_cells].get()
<DeepExtract>
self.sorted_ids.put(numpy.arange(self.n_cells), numpy.argsort(self.cell_sqs[:self.n_cells]))
self.sorted_ids_dev[0:self.n_cells].set(self.sorted_ids[0:self.n_cells])
sorted_sqs = numpy.sort(self.cell_sqs[:self.n_cells])
self.sq_inds.put(numpy.arange(self.n_sqs), numpy.searchsorted(sorted_sqs, numpy.arange(self.n_sqs), side='left'))
self.sq_inds_dev.set(self.sq_inds)
</DeepExtract>
self.sorted_ids_dev.set(self.sorted_ids)
self.sq_inds_dev.set(self.sq_inds)
self.n_cts = 0
self.vcleari(self.cell_n_cts_dev)
self.sub_tick_i = 0
self.sub_tick_initialised = True
|
def sub_tick_init(self, dt):
self.cell_dlens_dev[0:self.n_cells].set(dt * self.cell_growth_rates[0:self.n_cells])
self.cell_centers[0:self.n_cells] = self.cell_centers_dev[0:self.n_cells].get()
coords = self.cell_centers.view(numpy.float32).reshape((self.max_cells, 4))
x_coords = coords[:, 0]
min_x_coord = x_coords.min()
max_x_coord = x_coords.max()
self.grid_x_min = int(math.floor(min_x_coord / self.grid_spacing))
self.grid_x_max = int(math.ceil(max_x_coord / self.grid_spacing))
if self.grid_x_min == self.grid_x_max:
self.grid_x_max += 1
y_coords = coords[:, 1]
min_y_coord = y_coords.min()
max_y_coord = y_coords.max()
self.grid_y_min = int(math.floor(min_y_coord / self.grid_spacing))
self.grid_y_max = int(math.ceil(max_y_coord / self.grid_spacing))
if self.grid_y_min == self.grid_y_max:
self.grid_y_max += 1
self.n_sqs = (self.grid_x_max - self.grid_x_min) * (self.grid_y_max - self.grid_y_min)
self.program.bin_cells(self.queue, (self.n_cells,), None, numpy.int32(self.grid_x_min), numpy.int32(self.grid_x_max), numpy.int32(self.grid_y_min), numpy.int32(self.grid_y_max), numpy.float32(self.grid_spacing), self.cell_centers_dev.data, self.cell_sqs_dev.data).wait()
self.cell_sqs = self.cell_sqs_dev[0:self.n_cells].get()
self.sorted_ids.put(numpy.arange(self.n_cells), numpy.argsort(self.cell_sqs[:self.n_cells]))
self.sorted_ids_dev[0:self.n_cells].set(self.sorted_ids[0:self.n_cells])
sorted_sqs = numpy.sort(self.cell_sqs[:self.n_cells])
self.sq_inds.put(numpy.arange(self.n_sqs), numpy.searchsorted(sorted_sqs, numpy.arange(self.n_sqs), side='left'))
self.sq_inds_dev.set(self.sq_inds)
self.sorted_ids_dev.set(self.sorted_ids)
self.sq_inds_dev.set(self.sq_inds)
self.n_cts = 0
self.vcleari(self.cell_n_cts_dev)
self.sub_tick_i = 0
self.sub_tick_initialised = True
|
CellModeller
|
positive
|
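The CellModeller record derives a bounding grid from cell centres before binning them into squares. The extent computation alone, reduced to numpy and plain math with example coordinates:

import math
import numpy as np

def grid_bounds(coords, spacing):
    # coords: (n, 2) array of x/y positions; returns integer grid extents and square count.
    x_min = int(math.floor(coords[:, 0].min() / spacing))
    x_max = int(math.ceil(coords[:, 0].max() / spacing))
    y_min = int(math.floor(coords[:, 1].min() / spacing))
    y_max = int(math.ceil(coords[:, 1].max() / spacing))
    if x_max == x_min:  # keep at least one square per axis
        x_max += 1
    if y_max == y_min:
        y_max += 1
    return x_min, x_max, y_min, y_max, (x_max - x_min) * (y_max - y_min)

coords = np.array([[0.3, 1.2], [2.7, 0.1], [1.5, 3.8]])
print(grid_bounds(coords, spacing=1.0))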
def diff_transform(X, dtype='float32'):
"""
A helper function that implements discrete differentiation for stacked state observations. See
:func:`diff_transform_matrix` for a detailed description.
.. code:: python
M = diff_transform_matrix(num_frames=X.shape[-1])
X_transformed = np.dot(X, M)
Parameters
----------
X : ndarray
An array whose shape is such that the last axis is the frame-stack axis, i.e.
:code:`X.shape[-1] == num_frames`.
Returns
-------
X_transformed : ndarray
The shape is the same as the input shape, but the last axis are mixed to represent position,
velocity, acceleration, etc.
"""
<DeepExtract>
assert isinstance(X.shape[-1], int) and X.shape[-1] >= 1
s = jnp.diag(jnp.power(-1, jnp.arange(X.shape[-1])))
m = s.dot(pascal(X.shape[-1], kind='upper'))[::-1, ::-1]
M = m.astype(dtype)
</DeepExtract>
return jnp.dot(X, M)
|
def diff_transform(X, dtype='float32'):
"""
A helper function that implements discrete differentiation for stacked state observations. See
:func:`diff_transform_matrix` for a detailed description.
.. code:: python
M = diff_transform_matrix(num_frames=X.shape[-1])
X_transformed = np.dot(X, M)
Parameters
----------
X : ndarray
An array whose shape is such that the last axis is the frame-stack axis, i.e.
:code:`X.shape[-1] == num_frames`.
Returns
-------
X_transformed : ndarray
The shape is the same as the input shape, but the last axis are mixed to represent position,
velocity, acceleration, etc.
"""
assert isinstance(X.shape[-1], int) and X.shape[-1] >= 1
s = jnp.diag(jnp.power(-1, jnp.arange(X.shape[-1])))
m = s.dot(pascal(X.shape[-1], kind='upper'))[::-1, ::-1]
M = m.astype(dtype)
return jnp.dot(X, M)
|
coax
|
positive
|
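The coax record builds its differentiation matrix from a sign-alternated upper Pascal matrix. The construction can be checked in plain numpy/scipy, without JAX:

import numpy as np
from scipy.linalg import pascal

def diff_transform_matrix(num_frames, dtype='float32'):
    s = np.diag(np.power(-1, np.arange(num_frames)))         # alternating signs
    m = s.dot(pascal(num_frames, kind='upper'))[::-1, ::-1]  # flip rows and columns
    return m.astype(dtype)

print(diff_transform_matrix(3))  # rows mix frames into position/velocity/acceleration-like terms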
def killScript(reason=None):
if reason is None:
print(readMe)
sys.exit()
else:
<DeepExtract>
logString = '%s -- %s' % (datetime.datetime.now(), 'ERROR: %s' % reason)
print(logString)
if not filePath is None:
try:
with open(filePath, 'a') as logFile:
logFile.write('%s\n' % logString)
except:
log('ERROR: Unable to append to log file')
</DeepExtract>
sys.exit()
|
def killScript(reason=None):
if reason is None:
print(readMe)
sys.exit()
else:
logString = '%s -- %s' % (datetime.datetime.now(), 'ERROR: %s' % reason)
print(logString)
if not filePath is None:
try:
with open(filePath, 'a') as logFile:
logFile.write('%s\n' % logString)
except:
log('ERROR: Unable to append to log file')
sys.exit()
|
automation-scripts
|
positive
|
def find_nonzero_channels_list(param, param_name):
<DeepExtract>
(num_filters, num_channels) = (param.size(0), param.size(1))
view_2d = param.view(-1, param.size(2) * param.size(3))
kernel_sums = view_2d.abs().sum(dim=1)
k_sums_mat = kernel_sums.view(num_filters, num_channels).t()
nonzero_channels = torch.nonzero(k_sums_mat.abs().sum(dim=1))
if num_channels > nonzero_channels.nelement():
msglogger.info('In tensor %s found %d/%d zero channels', param_name, num_channels - nonzero_channels.nelement(), num_channels)
nnz_channels = nonzero_channels
</DeepExtract>
nnz_channels = nnz_channels.view(nnz_channels.numel())
return nnz_channels.cpu().numpy().tolist()
|
def find_nonzero_channels_list(param, param_name):
(num_filters, num_channels) = (param.size(0), param.size(1))
view_2d = param.view(-1, param.size(2) * param.size(3))
kernel_sums = view_2d.abs().sum(dim=1)
k_sums_mat = kernel_sums.view(num_filters, num_channels).t()
nonzero_channels = torch.nonzero(k_sums_mat.abs().sum(dim=1))
if num_channels > nonzero_channels.nelement():
msglogger.info('In tensor %s found %d/%d zero channels', param_name, num_channels - nonzero_channels.nelement(), num_channels)
nnz_channels = nonzero_channels
nnz_channels = nnz_channels.view(nnz_channels.numel())
return nnz_channels.cpu().numpy().tolist()
|
EagleEye
|
positive
|
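The EagleEye record flags channels whose kernels are entirely zero by summing absolute weights per channel. The same reduction expressed in numpy, on a (filters, channels, kH, kW) array:

import numpy as np

def nonzero_channel_indices(weights):
    # A channel counts as zero when every kernel slice feeding it has zero absolute mass.
    channel_mass = np.abs(weights).sum(axis=(0, 2, 3))
    return np.flatnonzero(channel_mass).tolist()

w = np.zeros((4, 3, 3, 3))
w[:, 1] = 1.0
print(nonzero_channel_indices(w))  # [1]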
@pytest.mark.django_db
def test_validate_quote_bad_price():
<DeepExtract>
usd_stellar = Asset.objects.create(code='usd', issuer=Keypair.random().public_key, sep38_enabled=True)
brl_offchain = OffChainAsset.objects.create(scheme='iso4217', identifier='BRL', country_codes='BRA')
delivery_methods = [DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.buy, name='cash_pickup', description='cash pick-up'), DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.sell, name='cash_dropoff', description='cash drop-off')]
brl_offchain.delivery_methods.add(*delivery_methods)
pair = ExchangePair.objects.create(buy_asset=brl_offchain.asset_identification_format, sell_asset=usd_stellar.asset_identification_format)
data = {'stellar_assets': [usd_stellar], 'offchain_assets': [brl_offchain], 'exchange_pairs': [pair], 'delivery_methods': delivery_methods}
</DeepExtract>
with pytest.raises(ValueError, match='the price saved to Quote.price did not have the correct number of significant decimals'):
validate_quote_provided(Quote(type=Quote.TYPE.firm, buy_asset='stellar:test:test', sell_asset='test:test', buy_amount=Decimal(100), price=Decimal('2.123'), sell_delivery_method=data['offchain_assets'][0].delivery_methods.first(), expires_at=datetime.now(timezone.utc) + timedelta(hours=1)), '', 2)
|
@pytest.mark.django_db
def test_validate_quote_bad_price():
usd_stellar = Asset.objects.create(code='usd', issuer=Keypair.random().public_key, sep38_enabled=True)
brl_offchain = OffChainAsset.objects.create(scheme='iso4217', identifier='BRL', country_codes='BRA')
delivery_methods = [DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.buy, name='cash_pickup', description='cash pick-up'), DeliveryMethod.objects.create(type=DeliveryMethod.TYPE.sell, name='cash_dropoff', description='cash drop-off')]
brl_offchain.delivery_methods.add(*delivery_methods)
pair = ExchangePair.objects.create(buy_asset=brl_offchain.asset_identification_format, sell_asset=usd_stellar.asset_identification_format)
data = {'stellar_assets': [usd_stellar], 'offchain_assets': [brl_offchain], 'exchange_pairs': [pair], 'delivery_methods': delivery_methods}
with pytest.raises(ValueError, match='the price saved to Quote.price did not have the correct number of significant decimals'):
validate_quote_provided(Quote(type=Quote.TYPE.firm, buy_asset='stellar:test:test', sell_asset='test:test', buy_amount=Decimal(100), price=Decimal('2.123'), sell_delivery_method=data['offchain_assets'][0].delivery_methods.first(), expires_at=datetime.now(timezone.utc) + timedelta(hours=1)), '', 2)
|
django-polaris
|
positive
|
def stack_fn(x):
<DeepExtract>
x = block2(x, 64, conv_shortcut=True, name='conv2' + '_block1', trainable=trainable, weight_decay=weight_decay)
for i in range(2, 3):
x = block2(x, 64, name='conv2' + '_block' + str(i))
x = block2(x, 64, stride=stride1, name='conv2' + '_block' + str(3), trainable=trainable, weight_decay=weight_decay)
x = x
</DeepExtract>
<DeepExtract>
x = block2(x, 128, conv_shortcut=True, name='conv3' + '_block1', trainable=trainable, weight_decay=weight_decay)
for i in range(2, 4):
x = block2(x, 128, name='conv3' + '_block' + str(i))
x = block2(x, 128, stride=stride1, name='conv3' + '_block' + str(4), trainable=trainable, weight_decay=weight_decay)
x = x
</DeepExtract>
<DeepExtract>
x = block2(x, 256, conv_shortcut=True, name='conv4' + '_block1', trainable=trainable, weight_decay=weight_decay)
for i in range(2, 6):
x = block2(x, 256, name='conv4' + '_block' + str(i))
x = block2(x, 256, stride=stride1, name='conv4' + '_block' + str(6), trainable=trainable, weight_decay=weight_decay)
x = x
</DeepExtract>
<DeepExtract>
x = block2(x, 512, conv_shortcut=True, name='conv5' + '_block1', trainable=trainable, weight_decay=weight_decay)
for i in range(2, 3):
x = block2(x, 512, name='conv5' + '_block' + str(i))
x = block2(x, 512, stride=1, name='conv5' + '_block' + str(3), trainable=trainable, weight_decay=weight_decay)
x = x
</DeepExtract>
return x
|
def stack_fn(x):
x = block2(x, 64, conv_shortcut=True, name='conv2' + '_block1', trainable=trainable, weight_decay=weight_decay)
for i in range(2, 3):
x = block2(x, 64, name='conv2' + '_block' + str(i))
x = block2(x, 64, stride=stride1, name='conv2' + '_block' + str(3), trainable=trainable, weight_decay=weight_decay)
x = x
x = block2(x, 128, conv_shortcut=True, name='conv3' + '_block1', trainable=trainable, weight_decay=weight_decay)
for i in range(2, 4):
x = block2(x, 128, name='conv3' + '_block' + str(i))
x = block2(x, 128, stride=stride1, name='conv3' + '_block' + str(4), trainable=trainable, weight_decay=weight_decay)
x = x
x = block2(x, 256, conv_shortcut=True, name='conv4' + '_block1', trainable=trainable, weight_decay=weight_decay)
for i in range(2, 6):
x = block2(x, 256, name='conv4' + '_block' + str(i))
x = block2(x, 256, stride=stride1, name='conv4' + '_block' + str(6), trainable=trainable, weight_decay=weight_decay)
x = x
x = block2(x, 512, conv_shortcut=True, name='conv5' + '_block1', trainable=trainable, weight_decay=weight_decay)
for i in range(2, 3):
x = block2(x, 512, name='conv5' + '_block' + str(i))
x = block2(x, 512, stride=1, name='conv5' + '_block' + str(3), trainable=trainable, weight_decay=weight_decay)
x = x
return x
|
deep-learning-models
|
positive
|
def test_match_lines_no_empty_ends(self):
<DeepExtract>
kwargs.setdefault('max_width', 50)
f = util.Formatter(**kwargs)
</DeepExtract>
cols = f.columns()
cols.widths = [4, 4]
cols.wrap = [True, False]
self.assertEqual(list(cols.match_lines(['col1', 'word word word'])), [['col1', 'word word word']])
|
def test_match_lines_no_empty_ends(self):
kwargs.setdefault('max_width', 50)
f = util.Formatter(**kwargs)
cols = f.columns()
cols.widths = [4, 4]
cols.wrap = [True, False]
self.assertEqual(list(cols.match_lines(['col1', 'word word word'])), [['col1', 'word word word']])
|
clize
|
positive
|
@pytest.mark.parametrize('clsDof6', [Generic6DofConstraint, Generic6DofSpringConstraint])
def test_Generic6DofConstraint_emulateSlider_pivot_sim(self, clsDof6):
"""
Same as test_Generic6DofConstraint_emulateP2P_pivot_sim except
that the pivot does not coincide with the center of mass and the
constraint is set up such that it mimics a slider.
"""
pos_a = Vec3(-1, 0, 0)
pos_b = Vec3(1, 0, 0)
<DeepExtract>
t = Transform(Quaternion(0, 0, 0, 1), pos_a)
ms = DefaultMotionState(t)
ci = RigidBodyConstructionInfo(mass, ms, SphereShape(1), Vec3(*inertia))
rb = RigidBody(ci, bodyID)
rb.forceActivationState(4)
rb_a = rb
</DeepExtract>
<DeepExtract>
t = Transform(Quaternion(0, 0, 0, 1), pos_b)
ms = DefaultMotionState(t)
ci = RigidBodyConstructionInfo(mass, ms, BoxShape(Vec3(1, 2, 3)), Vec3(*inertia))
rb = RigidBody(ci, bodyID)
rb.forceActivationState(4)
rb_b = rb
</DeepExtract>
frameInA = Transform(Quaternion(0, 0, 0, 1), pos_b)
frameInB = Transform(Quaternion(0, 0, 0, 1), pos_a)
refIsA = True
dof = clsDof6(rb_a, rb_b, frameInA, frameInB, refIsA)
sliderLimitLo = -1
sliderLimitHi = 1
dof.setLinearLowerLimit(Vec3(sliderLimitLo, 0, 0))
dof.setLinearUpperLimit(Vec3(sliderLimitHi, 0, 0))
bb = BulletBase()
bb.setGravity(Vec3(0, 0, 0))
bb.addRigidBody(rb_a)
bb.addRigidBody(rb_b)
bb.addConstraint(dof)
p_a = rb_a.getCenterOfMassTransform().getOrigin().topy()
p_b = rb_b.getCenterOfMassTransform().getOrigin().topy()
init_pos = (p_a[0], p_b[0])
assert init_pos == (-1, 1)
for ii in range(5):
rb_b.applyCentralForce(Vec3(10, 0, 0))
bb.stepSimulation(10 / 60, 60)
p_a = rb_a.getCenterOfMassTransform().getOrigin().topy()
p_b = rb_b.getCenterOfMassTransform().getOrigin().topy()
if p_b[0] <= init_pos[1] + sliderLimitHi:
assert p_a[0] == init_pos[0]
else:
assert p_a[0] > init_pos[0]
assert p_b[0] > init_pos[1] + sliderLimitHi
|
@pytest.mark.parametrize('clsDof6', [Generic6DofConstraint, Generic6DofSpringConstraint])
def test_Generic6DofConstraint_emulateSlider_pivot_sim(self, clsDof6):
"""
Same as test_Generic6DofConstraint_emulateP2P_pivot_sim except
that the pivot does not coincide with the center of mass and the
constraint is set up such that it mimics a slider.
"""
pos_a = Vec3(-1, 0, 0)
pos_b = Vec3(1, 0, 0)
t = Transform(Quaternion(0, 0, 0, 1), pos_a)
ms = DefaultMotionState(t)
ci = RigidBodyConstructionInfo(mass, ms, SphereShape(1), Vec3(*inertia))
rb = RigidBody(ci, bodyID)
rb.forceActivationState(4)
rb_a = rb
t = Transform(Quaternion(0, 0, 0, 1), pos_b)
ms = DefaultMotionState(t)
ci = RigidBodyConstructionInfo(mass, ms, BoxShape(Vec3(1, 2, 3)), Vec3(*inertia))
rb = RigidBody(ci, bodyID)
rb.forceActivationState(4)
rb_b = rb
frameInA = Transform(Quaternion(0, 0, 0, 1), pos_b)
frameInB = Transform(Quaternion(0, 0, 0, 1), pos_a)
refIsA = True
dof = clsDof6(rb_a, rb_b, frameInA, frameInB, refIsA)
sliderLimitLo = -1
sliderLimitHi = 1
dof.setLinearLowerLimit(Vec3(sliderLimitLo, 0, 0))
dof.setLinearUpperLimit(Vec3(sliderLimitHi, 0, 0))
bb = BulletBase()
bb.setGravity(Vec3(0, 0, 0))
bb.addRigidBody(rb_a)
bb.addRigidBody(rb_b)
bb.addConstraint(dof)
p_a = rb_a.getCenterOfMassTransform().getOrigin().topy()
p_b = rb_b.getCenterOfMassTransform().getOrigin().topy()
init_pos = (p_a[0], p_b[0])
assert init_pos == (-1, 1)
for ii in range(5):
rb_b.applyCentralForce(Vec3(10, 0, 0))
bb.stepSimulation(10 / 60, 60)
p_a = rb_a.getCenterOfMassTransform().getOrigin().topy()
p_b = rb_b.getCenterOfMassTransform().getOrigin().topy()
if p_b[0] <= init_pos[1] + sliderLimitHi:
assert p_a[0] == init_pos[0]
else:
assert p_a[0] > init_pos[0]
assert p_b[0] > init_pos[1] + sliderLimitHi
|
azrael
|
positive
|
def __init__(self, data, filename, view, parent):
super(DisassemblerView, self).__init__(parent)
self.status = ''
self.view = view
self.data = data
for type in ExeFormats:
exe = type(data)
if exe.valid:
self.data = exe
self.view.exe = exe
break
self.analysis = Analysis(self.data)
self.analysis_thread = threading.Thread(None, self.analysis_thread_proc)
self.analysis_thread.daemon = True
self.analysis_thread.start()
if hasattr(self.data, 'entry'):
self.function = self.data.entry()
else:
self.function = None
self.update_id = None
self.ready = False
self.desired_pos = None
self.highlight_token = None
self.cur_instr = None
self.scroll_mode = False
self.blocks = {}
self.show_il = False
self.simulation = None
self.updateTimer = QTimer()
self.updateTimer.setInterval(100)
self.updateTimer.setSingleShot(False)
self.updateTimer.timeout.connect(self.updateTimerEvent)
self.updateTimer.start()
<DeepExtract>
self.font = getMonospaceFont()
self.baseline = int(QFontMetricsF(self.font).ascent())
self.charWidth = QFontMetricsF(self.font).width('X')
self.charHeight = int(QFontMetricsF(self.font).height()) + getExtraFontSpacing()
self.charOffset = getFontVerticalOffset()
</DeepExtract>
self.width = 0
self.height = 0
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.horizontalScrollBar().setSingleStep(self.charWidth)
self.verticalScrollBar().setSingleStep(self.charHeight)
areaSize = self.viewport().size()
<DeepExtract>
self.renderWidth = self.width
self.renderHeight = self.height
self.renderXOfs = 0
self.renderYOfs = 0
if self.renderWidth < areaSize.width():
self.renderXOfs = int((areaSize.width() - self.renderWidth) / 2)
self.renderWidth = areaSize.width()
if self.renderHeight < areaSize.height():
self.renderYOfs = int((areaSize.height() - self.renderHeight) / 2)
self.renderHeight = areaSize.height()
self.horizontalScrollBar().setPageStep(areaSize.width())
self.horizontalScrollBar().setRange(0, self.renderWidth - areaSize.width())
self.verticalScrollBar().setPageStep(areaSize.height())
self.verticalScrollBar().setRange(0, self.renderHeight - areaSize.height())
</DeepExtract>
self.view.register_navigate('disassembler', self, self.navigate)
self.view.register_navigate('make_proc', self, self.make_proc)
self.search_regex = None
self.last_search_type = FindDialog.SEARCH_HEX
|
def __init__(self, data, filename, view, parent):
super(DisassemblerView, self).__init__(parent)
self.status = ''
self.view = view
self.data = data
for type in ExeFormats:
exe = type(data)
if exe.valid:
self.data = exe
self.view.exe = exe
break
self.analysis = Analysis(self.data)
self.analysis_thread = threading.Thread(None, self.analysis_thread_proc)
self.analysis_thread.daemon = True
self.analysis_thread.start()
if hasattr(self.data, 'entry'):
self.function = self.data.entry()
else:
self.function = None
self.update_id = None
self.ready = False
self.desired_pos = None
self.highlight_token = None
self.cur_instr = None
self.scroll_mode = False
self.blocks = {}
self.show_il = False
self.simulation = None
self.updateTimer = QTimer()
self.updateTimer.setInterval(100)
self.updateTimer.setSingleShot(False)
self.updateTimer.timeout.connect(self.updateTimerEvent)
self.updateTimer.start()
self.font = getMonospaceFont()
self.baseline = int(QFontMetricsF(self.font).ascent())
self.charWidth = QFontMetricsF(self.font).width('X')
self.charHeight = int(QFontMetricsF(self.font).height()) + getExtraFontSpacing()
self.charOffset = getFontVerticalOffset()
self.width = 0
self.height = 0
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.horizontalScrollBar().setSingleStep(self.charWidth)
self.verticalScrollBar().setSingleStep(self.charHeight)
areaSize = self.viewport().size()
self.renderWidth = self.width
self.renderHeight = self.height
self.renderXOfs = 0
self.renderYOfs = 0
if self.renderWidth < areaSize.width():
self.renderXOfs = int((areaSize.width() - self.renderWidth) / 2)
self.renderWidth = areaSize.width()
if self.renderHeight < areaSize.height():
self.renderYOfs = int((areaSize.height() - self.renderHeight) / 2)
self.renderHeight = areaSize.height()
self.horizontalScrollBar().setPageStep(areaSize.width())
self.horizontalScrollBar().setRange(0, self.renderWidth - areaSize.width())
self.verticalScrollBar().setPageStep(areaSize.height())
self.verticalScrollBar().setRange(0, self.renderHeight - areaSize.height())
self.view.register_navigate('disassembler', self, self.navigate)
self.view.register_navigate('make_proc', self, self.make_proc)
self.search_regex = None
self.last_search_type = FindDialog.SEARCH_HEX
|
deprecated-binaryninja-python
|
positive
|
def after_train_epoch(self, runner):
"""
Args:
runner (Runner): the controller of the training process
Returns:
"""
self.save_type = 'epoch'
if not self.by_epoch:
return
if self.every_n_epochs(runner, self.interval) or (self.save_last and self.is_last_epoch(runner)):
if self.sync_buffer:
allreduce_params(runner.model.buffers())
if self.save_mode == 'general':
if runner.epoch + 1 > int(runner.max_epochs * self.model_milestone):
runner.logger.info(f'Saving checkpoint at {runner.epoch + 1} epochs')
self._save_checkpoint(runner)
elif self.save_mode == 'lightweight':
<DeepExtract>
if runner.meta is not None:
if self.by_epoch:
cur_ckpt_filename = self.args.get('filename_tmpl', 'epoch_{}.pth').format(runner.epoch + 1)
else:
cur_ckpt_filename = self.args.get('filename_tmpl', 'iter_{}.pth').format(runner.iter + 1)
runner.meta.setdefault('hook_msgs', dict())
runner.meta['hook_msgs']['last_ckpt'] = os.path.join(self.out_dir, cur_ckpt_filename)
if not self.out_dir:
self.out_dir = runner.work_dir
if self.save_type == 'epoch':
self.init_metric = save_best_checkpoint(runner, self.metric, self.init_metric, self.save_type, self.model_milestone, self.out_dir, self.save_optimizer, self.save_last, self.is_last_epoch, self.compare_func)
if self.save_type == 'iter':
self.init_metric = save_best_checkpoint(runner, self.metric, self.init_metric, self.save_type, self.model_milestone, self.out_dir, self.save_optimizer, self.save_last, self.is_last_iter, self.compare_func)
</DeepExtract>
else:
raise NotImplementedError("Only support the ['general', 'lightweight'] save mode!!")
|
def after_train_epoch(self, runner):
"""
Args:
runner (Runner): the controller of the training process
Returns:
"""
self.save_type = 'epoch'
if not self.by_epoch:
return
if self.every_n_epochs(runner, self.interval) or (self.save_last and self.is_last_epoch(runner)):
if self.sync_buffer:
allreduce_params(runner.model.buffers())
if self.save_mode == 'general':
if runner.epoch + 1 > int(runner.max_epochs * self.model_milestone):
runner.logger.info(f'Saving checkpoint at {runner.epoch + 1} epochs')
self._save_checkpoint(runner)
elif self.save_mode == 'lightweight':
if runner.meta is not None:
if self.by_epoch:
cur_ckpt_filename = self.args.get('filename_tmpl', 'epoch_{}.pth').format(runner.epoch + 1)
else:
cur_ckpt_filename = self.args.get('filename_tmpl', 'iter_{}.pth').format(runner.iter + 1)
runner.meta.setdefault('hook_msgs', dict())
runner.meta['hook_msgs']['last_ckpt'] = os.path.join(self.out_dir, cur_ckpt_filename)
if not self.out_dir:
self.out_dir = runner.work_dir
if self.save_type == 'epoch':
self.init_metric = save_best_checkpoint(runner, self.metric, self.init_metric, self.save_type, self.model_milestone, self.out_dir, self.save_optimizer, self.save_last, self.is_last_epoch, self.compare_func)
if self.save_type == 'iter':
self.init_metric = save_best_checkpoint(runner, self.metric, self.init_metric, self.save_type, self.model_milestone, self.out_dir, self.save_optimizer, self.save_last, self.is_last_iter, self.compare_func)
else:
raise NotImplementedError("Only support the ['general', 'lightweight'] save mode!!")
|
DAVAR-Lab-OCR
|
positive
|
def compile(self, srcfile, base_dir, output_dir):
srcfile = os.path.realpath(srcfile)
base_dir = os.path.realpath(base_dir)
output_dir = os.path.realpath(output_dir)
<DeepExtract>
srcfile = os.path.realpath(srcfile)
if srcfile in self.found_files:
files = set()
self.found_files.add(srcfile)
fp = open(srcfile, 'rt')
lines = fp.readlines()
fp.close()
imports = []
for line in lines:
if line.find('import') == -1:
continue
line = line.strip().strip(';')
ps = line.split()
if ps[0] != 'import':
continue
for p in ps[1:]:
p = p.strip(',')
imports.append(p)
for p in imports:
self.find_files(p, base_dir)
files = self.found_files
</DeepExtract>
files.remove(srcfile)
if len(files) > 0:
files = list(files)
files.sort()
(shead, stail) = os.path.split(srcfile)
slen = len(shead)
for f in files:
(head, tail) = os.path.split(f)
rel_dir = head[slen:]
<DeepExtract>
(head, tail) = os.path.split(f)
dstfile = os.path.normpath(output_dir + rel_dir + '/' + tail.split('.')[0] + '.py')
if os.path.exists(dstfile):
src_mtime = os.path.getmtime(f)
dst_mtime = os.path.getmtime(dstfile)
if src_mtime < dst_mtime:
return dstfile
if not os.path.exists(output_dir + rel_dir):
os.makedirs(output_dir + rel_dir)
if not os.path.exists(output_dir + rel_dir + '/__init__.py'):
fp = open(output_dir + rel_dir + '/__init__.py', 'w')
fp.close()
fp = open(f, 'r')
char_stream = antlr3.ANTLRInputStream(fp)
lexer = ExprLexer(char_stream)
tokens = antlr3.CommonTokenStream(lexer)
parser = ExprParser(tokens)
r = parser.prog()
root = r.tree
nodes = antlr3.tree.CommonTreeNodeStream(root)
nodes.setTokenStream(tokens)
from Eval import Eval
eval = Eval(nodes)
cpy = CpyBuilder(dstfile, base_dir, output_dir + rel_dir)
eval.prog(cpy)
return dstfile
</DeepExtract>
<DeepExtract>
(head, tail) = os.path.split(srcfile)
dstfile = os.path.normpath(output_dir + '/' + tail.split('.')[0] + '.py')
if os.path.exists(dstfile):
src_mtime = os.path.getmtime(srcfile)
dst_mtime = os.path.getmtime(dstfile)
if src_mtime < dst_mtime:
dstfile = dstfile
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(output_dir + '/__init__.py'):
fp = open(output_dir + '/__init__.py', 'w')
fp.close()
fp = open(srcfile, 'r')
char_stream = antlr3.ANTLRInputStream(fp)
lexer = ExprLexer(char_stream)
tokens = antlr3.CommonTokenStream(lexer)
parser = ExprParser(tokens)
r = parser.prog()
root = r.tree
nodes = antlr3.tree.CommonTreeNodeStream(root)
nodes.setTokenStream(tokens)
from Eval import Eval
eval = Eval(nodes)
cpy = CpyBuilder(dstfile, base_dir, output_dir)
eval.prog(cpy)
dstfile = dstfile
</DeepExtract>
return dstfile
|
def compile(self, srcfile, base_dir, output_dir):
srcfile = os.path.realpath(srcfile)
base_dir = os.path.realpath(base_dir)
output_dir = os.path.realpath(output_dir)
srcfile = os.path.realpath(srcfile)
if srcfile in self.found_files:
files = set()
self.found_files.add(srcfile)
fp = open(srcfile, 'rt')
lines = fp.readlines()
fp.close()
imports = []
for line in lines:
if line.find('import') == -1:
continue
line = line.strip().strip(';')
ps = line.split()
if ps[0] != 'import':
continue
for p in ps[1:]:
p = p.strip(',')
imports.append(p)
for p in imports:
self.find_files(p, base_dir)
files = self.found_files
files.remove(srcfile)
if len(files) > 0:
files = list(files)
files.sort()
(shead, stail) = os.path.split(srcfile)
slen = len(shead)
for f in files:
(head, tail) = os.path.split(f)
rel_dir = head[slen:]
(head, tail) = os.path.split(f)
dstfile = os.path.normpath(output_dir + rel_dir + '/' + tail.split('.')[0] + '.py')
if os.path.exists(dstfile):
src_mtime = os.path.getmtime(f)
dst_mtime = os.path.getmtime(dstfile)
if src_mtime < dst_mtime:
return dstfile
if not os.path.exists(output_dir + rel_dir):
os.makedirs(output_dir + rel_dir)
if not os.path.exists(output_dir + rel_dir + '/__init__.py'):
fp = open(output_dir + rel_dir + '/__init__.py', 'w')
fp.close()
fp = open(f, 'r')
char_stream = antlr3.ANTLRInputStream(fp)
lexer = ExprLexer(char_stream)
tokens = antlr3.CommonTokenStream(lexer)
parser = ExprParser(tokens)
r = parser.prog()
root = r.tree
nodes = antlr3.tree.CommonTreeNodeStream(root)
nodes.setTokenStream(tokens)
from Eval import Eval
eval = Eval(nodes)
cpy = CpyBuilder(dstfile, base_dir, output_dir + rel_dir)
eval.prog(cpy)
return dstfile
(head, tail) = os.path.split(srcfile)
dstfile = os.path.normpath(output_dir + '/' + tail.split('.')[0] + '.py')
if os.path.exists(dstfile):
src_mtime = os.path.getmtime(srcfile)
dst_mtime = os.path.getmtime(dstfile)
if src_mtime < dst_mtime:
dstfile = dstfile
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(output_dir + '/__init__.py'):
fp = open(output_dir + '/__init__.py', 'w')
fp.close()
fp = open(srcfile, 'r')
char_stream = antlr3.ANTLRInputStream(fp)
lexer = ExprLexer(char_stream)
tokens = antlr3.CommonTokenStream(lexer)
parser = ExprParser(tokens)
r = parser.prog()
root = r.tree
nodes = antlr3.tree.CommonTreeNodeStream(root)
nodes.setTokenStream(tokens)
from Eval import Eval
eval = Eval(nodes)
cpy = CpyBuilder(dstfile, base_dir, output_dir)
eval.prog(cpy)
dstfile = dstfile
return dstfile
|
cpy
|
positive
|
def scheme(self, value):
<DeepExtract>
if value not in Scheme:
raise TypeError('expected value to be one of', str(list(Scheme)))
</DeepExtract>
self._specifier['type'] = value
return self
|
def scheme(self, value):
if value not in Scheme:
raise TypeError('expected value to be one of', str(list(Scheme)))
self._specifier['type'] = value
return self
|
dash
|
positive
|
def test_ricker_gpu(self, gpubenchmark, num_samps, a):
output = gpubenchmark(self.gpu_version, num_samps, a)
<DeepExtract>
key = signal.qmf(num_samps)
</DeepExtract>
array_equal(output, key)
|
def test_ricker_gpu(self, gpubenchmark, num_samps, a):
output = gpubenchmark(self.gpu_version, num_samps, a)
key = signal.qmf(num_samps)
array_equal(output, key)
|
cusignal
|
positive
|
def predict_proposals(self, top_feats):
sampled_boxes = []
bundle = {'l': self.locations, 'o': self.logits_pred, 'r': self.reg_pred, 'c': self.ctrness_pred, 's': self.strides}
if len(top_feats) > 0:
bundle['t'] = top_feats
for (i, instance) in enumerate(zip(*bundle.values())):
instance_dict = dict(zip(bundle.keys(), instance))
l = instance_dict['l']
o = instance_dict['o']
r = instance_dict['r'] * instance_dict['s']
c = instance_dict['c']
t = instance_dict['t'] * instance_dict['s'] if 't' in bundle else None
sampled_boxes.append(self.forward_for_single_feature_map(l, o, r, c, self.image_sizes, t))
boxlists = list(zip(*sampled_boxes))
boxlists = [Instances.cat(boxlist) for boxlist in boxlists]
<DeepExtract>
num_images = len(boxlists)
results = []
for i in range(num_images):
result = ml_nms(boxlists[i], self.nms_thresh)
number_of_detections = len(result)
if number_of_detections > self.fpn_post_nms_top_n > 0:
cls_scores = result.scores
(image_thresh, _) = torch.kthvalue(cls_scores.cpu(), number_of_detections - self.fpn_post_nms_top_n + 1)
keep = cls_scores >= image_thresh.item()
keep = torch.nonzero(keep).squeeze(1)
result = result[keep]
results.append(result)
boxlists = results
</DeepExtract>
return boxlists
|
def predict_proposals(self, top_feats):
sampled_boxes = []
bundle = {'l': self.locations, 'o': self.logits_pred, 'r': self.reg_pred, 'c': self.ctrness_pred, 's': self.strides}
if len(top_feats) > 0:
bundle['t'] = top_feats
for (i, instance) in enumerate(zip(*bundle.values())):
instance_dict = dict(zip(bundle.keys(), instance))
l = instance_dict['l']
o = instance_dict['o']
r = instance_dict['r'] * instance_dict['s']
c = instance_dict['c']
t = instance_dict['t'] * instance_dict['s'] if 't' in bundle else None
sampled_boxes.append(self.forward_for_single_feature_map(l, o, r, c, self.image_sizes, t))
boxlists = list(zip(*sampled_boxes))
boxlists = [Instances.cat(boxlist) for boxlist in boxlists]
num_images = len(boxlists)
results = []
for i in range(num_images):
result = ml_nms(boxlists[i], self.nms_thresh)
number_of_detections = len(result)
if number_of_detections > self.fpn_post_nms_top_n > 0:
cls_scores = result.scores
(image_thresh, _) = torch.kthvalue(cls_scores.cpu(), number_of_detections - self.fpn_post_nms_top_n + 1)
keep = cls_scores >= image_thresh.item()
keep = torch.nonzero(keep).squeeze(1)
result = result[keep]
results.append(result)
boxlists = results
return boxlists
|
dict-guided
|
positive
|
def read_struct(self):
<DeepExtract>
length = struct.calcsize('<L')
(htype,) = struct.unpack('<L', self.f.read(length))
</DeepExtract>
if htype == 0:
count = 0
else:
<DeepExtract>
length = struct.calcsize('<LH')
(_, count) = struct.unpack('<LH', self.f.read(length))
</DeepExtract>
return BinStruct(htype, [self.read_field() for _ in range(count)])
|
def read_struct(self):
length = struct.calcsize('<L')
(htype,) = struct.unpack('<L', self.f.read(length))
if htype == 0:
count = 0
else:
length = struct.calcsize('<LH')
(_, count) = struct.unpack('<LH', self.f.read(length))
return BinStruct(htype, [self.read_field() for _ in range(count)])
|
CDTB
|
positive
|
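The CDTB record reads little-endian headers with struct.calcsize/struct.unpack. The same round trip on an in-memory buffer, with made-up header values:

import io
import struct

buf = io.BytesIO(struct.pack('<LH', 7, 2))  # type=7, count=2
htype, count = struct.unpack('<LH', buf.read(struct.calcsize('<LH')))
print(htype, count)  # 7 2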
def clean_modules(self, *modules):
""" [UNIT]... -- remove the state directories
/// it recognizes --what=all or any of configuration, state, cache, logs, runtime
while an empty value (the default) removes cache and runtime directories"""
found_all = True
units = []
for module in modules:
<DeepExtract>
found = []
for unit in self.match_sysd_units(to_list(module), suffix):
if unit not in found:
found.append(unit)
for unit in self.match_sysd_templates(to_list(module), suffix):
if unit not in found:
found.append(unit)
for unit in self.match_sysv_units(to_list(module), suffix):
if unit not in found:
found.append(unit)
matched = found
</DeepExtract>
if not matched:
logg.error('Unit %s not found.', unit_of(module))
self.error |= NOT_FOUND
found_all = False
continue
for unit in matched:
if unit not in units:
units += [unit]
lines = _log_lines
follow = _force
<DeepExtract>
if not what:
what = _what_kind
ok = True
for unit in units:
ok = self.clean_unit(unit, what) and ok
ok = ok
</DeepExtract>
return ok and found_all
|
def clean_modules(self, *modules):
""" [UNIT]... -- remove the state directories
/// it recognizes --what=all or any of configuration, state, cache, logs, runtime
while an empty value (the default) removes cache and runtime directories"""
found_all = True
units = []
for module in modules:
found = []
for unit in self.match_sysd_units(to_list(module), suffix):
if unit not in found:
found.append(unit)
for unit in self.match_sysd_templates(to_list(module), suffix):
if unit not in found:
found.append(unit)
for unit in self.match_sysv_units(to_list(module), suffix):
if unit not in found:
found.append(unit)
matched = found
if not matched:
logg.error('Unit %s not found.', unit_of(module))
self.error |= NOT_FOUND
found_all = False
continue
for unit in matched:
if unit not in units:
units += [unit]
lines = _log_lines
follow = _force
if not what:
what = _what_kind
ok = True
for unit in units:
ok = self.clean_unit(unit, what) and ok
ok = ok
return ok and found_all
|
docker-systemctl-images
|
positive
|
def compute_all(self):
"""
Computes and prints all the attributes of this solar object. Spatial
averages are printed for numpy array type attributes.
"""
print('=' * 50)
print('Interogation of entire matrix of points.')
print('Some values displayed below are spatial averages')
print('=' * 50)
if self.is_numpy:
print('latitude, longitude \t{0}, {1}'.format(self.lat.mean(), self.lon.mean()))
else:
print('latitude, longitude \t{0}, {1}'.format(self.lat, self.lon))
print('datetime \t\t{0} (GMT)'.format(self.rdt))
print('time zone \t\t{0} (GMT offset)'.format(self.tz))
print('')
print('abs julian day \t\t{0}\t (day)'.format(self.ajd))
print('abs julian century \t{0}\t (cen)'.format(self.ajc))
print('suns geomean long \t{0}\t (deg)'.format(self.get_geomean_long()))
print('suns geomean anom \t{0}\t (deg)'.format(self.get_geomean_anom()))
print('earth eccentricity \t{0}'.format(self.get_earth_eccent()))
print('suns eq of center \t{0}'.format(self.get_sun_eq_of_center()))
print('suns true long \t\t{0}\t (deg)'.format(self.get_true_long()))
print('suns true anom \t\t{0}\t (deg)'.format(self.get_true_anom()))
print('suns apparent long \t{0}\t (deg)'.format(self.get_app_long()))
print('earth obliq mean elip \t{0}\t (deg)'.format(self.get_oblique_mean_elip()))
print('earth obliq correction\t{0}\t (deg)'.format(self.get_oblique_corr()))
print('sun right ascension \t{0}\t (deg)'.format(self.get_right_ascension()))
print('solar declination angle {0}\t (deg)'.format(self.get_declination()))
print('equation of time \t{0}\t (min)'.format(self.get_equation_of_time))
if self.is_numpy:
print('hour angle sunrise\t{0}\t (deg)'.format(self.get_hour_angle_sunrise().mean()))
else:
print('hour angle sunrise\t{0}\t (deg)'.format(self.get_hour_angle_sunrise()))
print('')
<DeepExtract>
if not self.solar_noon is None:
return self.solar_noon
if self.equation_of_time is None:
self.get_equation_of_time()
lon = self.lon
eot = self.equation_of_time
tz = self.tz
self.solar_noon = (720 - 4 * lon - eot + tz * 60) / 1440
if self.is_numpy:
self.solar_noon_time = timedelta(days=self.solar_noon.mean())
else:
self.solar_noon_time = timedelta(days=self.solar_noon)
return self.solar_noon
</DeepExtract>
print('solar noon \t\t{0}\t (HMS - LST)'.format(self.solar_noon_time))
<DeepExtract>
if not self.sunrise is None:
return self.sunrise
if self.solar_noon is None:
self.get_solar_noon()
if self.hour_angle_sunrise is None:
self.get_hour_angle_sunrise()
sn = self.solar_noon
ha = self.hour_angle_sunrise
self.sunrise = (sn * 1440 - ha * 4) / 1440
if self.is_numpy:
self.sunrise_time = timedelta(days=self.sunrise.mean())
else:
self.sunrise_time = timedelta(days=self.sunrise)
return self.sunrise
</DeepExtract>
print('sunrise \t\t{0}\t (HMS - LST)'.format(self.sunrise_time))
<DeepExtract>
if not self.sunset is None:
return self.sunset
if self.solar_noon is None:
self.get_solar_noon()
if self.hour_angle_sunrise is None:
self.get_hour_angle_sunrise()
sn = self.solar_noon
ha = self.hour_angle_sunrise
self.sunset = (sn * 1440 + ha * 4) / 1440
if self.is_numpy:
self.sunset_time = timedelta(days=self.sunset.mean())
else:
self.sunset_time = timedelta(days=self.sunset)
return self.sunset
</DeepExtract>
print('sunset \t\t{0}\t (HMS - LST)'.format(self.sunset_time))
<DeepExtract>
if not self.sunlight is None:
return self.sunlight
if self.hour_angle_sunrise is None:
self.get_hour_angle_sunrise()
self.sunlight = 8 * self.hour_angle_sunrise / (60 * 24)
if self.is_numpy:
self.sunlight_time = timedelta(days=self.sunlight.mean())
else:
self.sunlight_time = timedelta(days=self.sunlight)
return self.sunlight
</DeepExtract>
print('sunlight durration \t{0}\t (HMS)'.format(self.sunlight_time))
<DeepExtract>
if not self.true_solar is None:
return self.true_solar
if self.equation_of_time is None:
self.get_equation_of_time
lon = self.lon
eot = self.equation_of_time
frac_sec = (self.rdt - datetime(self.rdt.year, self.rdt.month, self.rdt.day)).total_seconds()
frac_hr = frac_sec / (60 * 60) + self.tz
frac_day = frac_hr / 24
self.frac_day = frac_day
self.true_solar = (frac_day * 1440 + eot + 4 * lon - 60 * self.tz) % 1440
if self.is_numpy:
self.true_solar_time = timedelta(days=self.true_solar.mean() / (60 * 24))
else:
self.true_solar_time = timedelta(days=self.true_solar / (60 * 24))
return self.true_solar
</DeepExtract>
print('true solar time \t{0}\t (HMS - LST)'.format(self.true_solar_time))
print('')
if self.is_numpy:
print('hour angle \t\t{0}\t (deg)'.format(self.get_hour_angle().mean()))
print('solar zenith angle \t{0}\t (deg)'.format(self.get_zenith().mean()))
print('solar elevation angle \t{0}\t (deg)'.format(self.get_elevation().mean()))
print('solar azimuth angle \t{0}\t (deg)'.format(self.get_azimuth().mean()))
else:
print('hour angle \t\t{0}\t (deg)'.format(self.get_hour_angle()))
print('solar zenith angle \t{0}\t (deg)'.format(self.get_zenith()))
print('solar elevation angle \t{0}\t (deg)'.format(self.get_elevation()))
print('solar azimuth angle \t{0}\t (deg)'.format(self.get_azimuth()))
print('')
print('radiation vector \t{0}\t (AU)'.format(self.get_rad_vector()))
print('earth sun distance \t{0}(m)'.format(self.get_earth_distance()))
print('norm irradiance \t{0}\t (W/m*m)'.format(self.get_norm_irradiance()))
print('=' * 50)
|
def compute_all(self):
"""
Computes and prints all the attributes of this solar object. Spatial
averages are printed for numpy array type attributes.
"""
print('=' * 50)
print('Interogation of entire matrix of points.')
print('Some values displayed below are spatial averages')
print('=' * 50)
if self.is_numpy:
print('latitude, longitude \t{0}, {1}'.format(self.lat.mean(), self.lon.mean()))
else:
print('latitude, longitude \t{0}, {1}'.format(self.lat, self.lon))
print('datetime \t\t{0} (GMT)'.format(self.rdt))
print('time zone \t\t{0} (GMT offset)'.format(self.tz))
print('')
print('abs julian day \t\t{0}\t (day)'.format(self.ajd))
print('abs julian century \t{0}\t (cen)'.format(self.ajc))
print('suns geomean long \t{0}\t (deg)'.format(self.get_geomean_long()))
print('suns geomean anom \t{0}\t (deg)'.format(self.get_geomean_anom()))
print('earth eccentricity \t{0}'.format(self.get_earth_eccent()))
print('suns eq of center \t{0}'.format(self.get_sun_eq_of_center()))
print('suns true long \t\t{0}\t (deg)'.format(self.get_true_long()))
print('suns true anom \t\t{0}\t (deg)'.format(self.get_true_anom()))
print('suns apparent long \t{0}\t (deg)'.format(self.get_app_long()))
print('earth obliq mean elip \t{0}\t (deg)'.format(self.get_oblique_mean_elip()))
print('earth obliq correction\t{0}\t (deg)'.format(self.get_oblique_corr()))
print('sun right ascension \t{0}\t (deg)'.format(self.get_right_ascension()))
print('solar declination angle {0}\t (deg)'.format(self.get_declination()))
print('equation of time \t{0}\t (min)'.format(self.get_equation_of_time))
if self.is_numpy:
print('hour angle sunrise\t{0}\t (deg)'.format(self.get_hour_angle_sunrise().mean()))
else:
print('hour angle sunrise\t{0}\t (deg)'.format(self.get_hour_angle_sunrise()))
print('')
if not self.solar_noon is None:
return self.solar_noon
if self.equation_of_time is None:
self.get_equation_of_time()
lon = self.lon
eot = self.equation_of_time
tz = self.tz
self.solar_noon = (720 - 4 * lon - eot + tz * 60) / 1440
if self.is_numpy:
self.solar_noon_time = timedelta(days=self.solar_noon.mean())
else:
self.solar_noon_time = timedelta(days=self.solar_noon)
return self.solar_noon
print('solar noon \t\t{0}\t (HMS - LST)'.format(self.solar_noon_time))
if not self.sunrise is None:
return self.sunrise
if self.solar_noon is None:
self.get_solar_noon()
if self.hour_angle_sunrise is None:
self.get_hour_angle_sunrise()
sn = self.solar_noon
ha = self.hour_angle_sunrise
self.sunrise = (sn * 1440 - ha * 4) / 1440
if self.is_numpy:
self.sunrise_time = timedelta(days=self.sunrise.mean())
else:
self.sunrise_time = timedelta(days=self.sunrise)
return self.sunrise
print('sunrise \t\t{0}\t (HMS - LST)'.format(self.sunrise_time))
if not self.sunset is None:
return self.sunset
if self.solar_noon is None:
self.get_solar_noon()
if self.hour_angle_sunrise is None:
self.get_hour_angle_sunrise()
sn = self.solar_noon
ha = self.hour_angle_sunrise
self.sunset = (sn * 1440 + ha * 4) / 1440
if self.is_numpy:
self.sunset_time = timedelta(days=self.sunset.mean())
else:
self.sunset_time = timedelta(days=self.sunset)
return self.sunset
print('sunset \t\t{0}\t (HMS - LST)'.format(self.sunset_time))
if not self.sunlight is None:
return self.sunlight
if self.hour_angle_sunrise is None:
self.get_hour_angle_sunrise()
self.sunlight = 8 * self.hour_angle_sunrise / (60 * 24)
if self.is_numpy:
self.sunlight_time = timedelta(days=self.sunlight.mean())
else:
self.sunlight_time = timedelta(days=self.sunlight)
return self.sunlight
print('sunlight durration \t{0}\t (HMS)'.format(self.sunlight_time))
if not self.true_solar is None:
return self.true_solar
if self.equation_of_time is None:
self.get_equation_of_time
lon = self.lon
eot = self.equation_of_time
frac_sec = (self.rdt - datetime(self.rdt.year, self.rdt.month, self.rdt.day)).total_seconds()
frac_hr = frac_sec / (60 * 60) + self.tz
frac_day = frac_hr / 24
self.frac_day = frac_day
self.true_solar = (frac_day * 1440 + eot + 4 * lon - 60 * self.tz) % 1440
if self.is_numpy:
self.true_solar_time = timedelta(days=self.true_solar.mean() / (60 * 24))
else:
self.true_solar_time = timedelta(days=self.true_solar / (60 * 24))
return self.true_solar
print('true solar time \t{0}\t (HMS - LST)'.format(self.true_solar_time))
print('')
if self.is_numpy:
print('hour angle \t\t{0}\t (deg)'.format(self.get_hour_angle().mean()))
print('solar zenith angle \t{0}\t (deg)'.format(self.get_zenith().mean()))
print('solar elevation angle \t{0}\t (deg)'.format(self.get_elevation().mean()))
print('solar azimuth angle \t{0}\t (deg)'.format(self.get_azimuth().mean()))
else:
print('hour angle \t\t{0}\t (deg)'.format(self.get_hour_angle()))
print('solar zenith angle \t{0}\t (deg)'.format(self.get_zenith()))
print('solar elevation angle \t{0}\t (deg)'.format(self.get_elevation()))
print('solar azimuth angle \t{0}\t (deg)'.format(self.get_azimuth()))
print('')
print('radiation vector \t{0}\t (AU)'.format(self.get_rad_vector()))
print('earth sun distance \t{0}(m)'.format(self.get_earth_distance()))
print('norm irradiance \t{0}\t (W/m*m)'.format(self.get_norm_irradiance()))
print('=' * 50)
|
dnppy
|
positive
|
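The dnppy record computes solar noon as (720 - 4*lon - eot + tz*60) / 1440 days and renders it with timedelta. A worked instance of that formula with illustrative inputs:

from datetime import timedelta

lon, tz, eot = -105.0, -7.0, 3.5  # degrees east, hours from GMT, equation of time in minutes (example values)
solar_noon = (720 - 4 * lon - eot + tz * 60) / 1440  # fraction of a day, local standard time
print(timedelta(days=solar_noon))  # 11:56:30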
def inorder_traversal(root, num):
if root:
<DeepExtract>
if root.left:
inorder_traversal(root.left.left, num)
if root.left.val > num:
successors.append(root.left.val)
inorder_traversal(root.left.right, num)
</DeepExtract>
if root.val > num:
successors.append(root.val)
<DeepExtract>
if root.right:
inorder_traversal(root.right.left, num)
if root.right.val > num:
successors.append(root.right.val)
inorder_traversal(root.right.right, num)
</DeepExtract>
|
def inorder_traversal(root, num):
if root:
if root.left:
inorder_traversal(root.left.left, num)
if root.left.val > num:
successors.append(root.left.val)
inorder_traversal(root.left.right, num)
if root.val > num:
successors.append(root.val)
if root.right:
inorder_traversal(root.right.left, num)
if root.right.val > num:
successors.append(root.right.val)
inorder_traversal(root.right.right, num)
|
Competitive-Coding-Platforms
|
positive
|
def mask_coco2voc(coco_masks, im_height, im_width):
voc_masks = np.zeros((len(coco_masks), im_height, im_width))
for (i, ann) in enumerate(coco_masks):
if type(ann) == list:
<DeepExtract>
M = np.zeros((im_height, im_width), dtype=np.bool)
for s in ann:
N = len(s)
(rr, cc) = polygon(np.array(s[1:N:2]).clip(max=im_height - 1), np.array(s[0:N:2]).clip(max=im_width - 1))
M[rr, cc] = 1
m = M
</DeepExtract>
else:
<DeepExtract>
N = len(ann['counts'])
M = np.zeros((ann['size'][0] * ann['size'][1],))
n = 0
val = 1
for pos in range(N):
val = not val
for c in range(ann['counts'][pos]):
ann['counts'][pos]
M[n] = val
n += 1
m = M.reshape(ann['size'], order='F')
</DeepExtract>
voc_masks[i, :, :] = m
return voc_masks
|
def mask_coco2voc(coco_masks, im_height, im_width):
voc_masks = np.zeros((len(coco_masks), im_height, im_width))
for (i, ann) in enumerate(coco_masks):
if type(ann) == list:
M = np.zeros((im_height, im_width), dtype=np.bool)
for s in ann:
N = len(s)
(rr, cc) = polygon(np.array(s[1:N:2]).clip(max=im_height - 1), np.array(s[0:N:2]).clip(max=im_width - 1))
M[rr, cc] = 1
m = M
else:
N = len(ann['counts'])
M = np.zeros((ann['size'][0] * ann['size'][1],))
n = 0
val = 1
for pos in range(N):
val = not val
for c in range(ann['counts'][pos]):
ann['counts'][pos]
M[n] = val
n += 1
m = M.reshape(ann['size'], order='F')
voc_masks[i, :, :] = m
return voc_masks
|
DRN-MXNet
|
positive
|
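The DRN-MXNet record decodes COCO's uncompressed RLE counts into a column-major binary mask. A compact sketch of that loop with a tiny hand-made run-length list:

import numpy as np

def rle_decode(counts, size):
    # counts alternate background/foreground run lengths; size is (height, width).
    mask = np.zeros(size[0] * size[1], dtype=np.uint8)
    val, n = 0, 0
    for run in counts:
        mask[n:n + run] = val
        n += run
        val = 1 - val
    return mask.reshape(size, order='F')  # COCO masks are stored column-major

print(rle_decode([2, 3, 1], (2, 3)))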
def forward(self, data_dict):
if self.training:
nets_names = self.nets_names_train
networks_to_train = self.nets_names_to_train
losses_names = self.losses_names_train
else:
nets_names = self.nets_names_test
networks_to_train = []
losses_names = self.losses_names_test
self.data_dict = data_dict
for net_name in nets_names:
self.data_dict = self.nets[net_name](self.data_dict, networks_to_train, self.nets)
losses_dict = {}
for loss_name in losses_names:
if hasattr(self, 'losses') and loss_name in self.losses.keys():
losses_dict = self.losses[loss_name](self.data_dict, losses_dict)
<DeepExtract>
loss = torch.zeros(1)
if self.args.num_gpus > 0:
loss = loss.cuda()
for (key, value) in losses_dict.items():
if key not in self.losses_history[self.training]:
self.losses_history[self.training][key] = []
self.losses_history[self.training][key] += [value.item()]
loss += value
loss = loss
</DeepExtract>
return loss
|
def forward(self, data_dict):
if self.training:
nets_names = self.nets_names_train
networks_to_train = self.nets_names_to_train
losses_names = self.losses_names_train
else:
nets_names = self.nets_names_test
networks_to_train = []
losses_names = self.losses_names_test
self.data_dict = data_dict
for net_name in nets_names:
self.data_dict = self.nets[net_name](self.data_dict, networks_to_train, self.nets)
losses_dict = {}
for loss_name in losses_names:
if hasattr(self, 'losses') and loss_name in self.losses.keys():
losses_dict = self.losses[loss_name](self.data_dict, losses_dict)
loss = torch.zeros(1)
if self.args.num_gpus > 0:
loss = loss.cuda()
for (key, value) in losses_dict.items():
if key not in self.losses_history[self.training]:
self.losses_history[self.training][key] = []
self.losses_history[self.training][key] += [value.item()]
loss += value
loss = loss
return loss
|
bilayer-model
|
positive
|
def __init__(self, mode: str, serial_nr: Optional[str]=None, i2c_speed: float=100000.0, spi_turbo: bool=False) -> None:
"""Checks the arguments validity, initializes the device and sets the
locks.
Args:
mode (:obj:`str`): The communication mode, can be :
::
'SPI', 'I2C', 'GPIO_only', 'Write_serial_nr'
GPIOs can be driven in any mode, but faster speeds are achievable in
`GPIO_only` mode.
serial_nr (:obj:`str`, optional): The serial number of the FT232H to
drive. In `Write_serial_nr` mode, the serial number to be written.
i2c_speed (:obj:`float`, optional): In I2C mode, the I2C bus clock
frequency in Hz. Available values are :
::
100E3, 400E3, 1E6
or any value between `10kHz` and `100kHz`. Lowering below the default
value may solve I2C clock stretching issues on some devices.
spi_turbo (:obj:`bool`, optional): Increases the achievable bus speed, but
may not work with some devices.
Note:
- **CS pin**:
The CS pin for selecting SPI devices is always `D3`. This pin is
reserved and cannot be used as a GPIO. If you want to drive the CS line
manually, it is possible not to drive the CS pin by setting the SPI
parameter :attr:`no_cs` to :obj:`True` and to drive the CS line from a
GPIO instead.
- ``mode``:
It is not possible to simultaneously control slaves over SPI and I2C,
due to different hardware requirements for the two protocols. Trying to
do so will most likely raise an error or lead to inconsistent behavior.
"""
if mode not in ft232h_modes:
raise ValueError('mode should be in {}'.format(ft232h_modes))
self._ft232h_mode = mode
if mode == 'Write_serial_nr' and serial_nr is None:
raise ValueError('Cannot set serial number if it is not specified !')
if i2c_speed not in ft232h_i2c_speed:
try:
if not 10000.0 <= i2c_speed < 100000.0:
raise ValueError('i2c_speed should be in {} or between 10E3 and 100E3'.format(list(ft232h_i2c_speed.values())))
except TypeError:
raise TypeError('i2c_speed should be a float or an int !')
self._gpio_low = 0
self._gpio_high = 0
self._gpio_dir = 0
self._retry_count = 16
self._usb_write_timeout = 5000
self._usb_read_timeout = 5000
self._serial_nr = serial_nr
self._turbo = spi_turbo
self._i2c_speed = i2c_speed
self._nb_attempt_1 = 8
self._nb_attempt_2 = 8
<DeepExtract>
fifo_sizes = (1024, 1024)
latency = 16
if self._ft232h_mode == 'I2C':
timings = ft232h_i2c_speed[self._i2c_speed if self._i2c_speed in ft232h_i2c_speed else 100000.0]
frequency = self._i2c_speed
self._ck_hd_sta = self._compute_delay_cycles(timings.t_hd_sta)
self._ck_su_sto = self._compute_delay_cycles(timings.t_su_sto)
ck_su_sta = self._compute_delay_cycles(timings.t_su_sta)
ck_buf = self._compute_delay_cycles(timings.t_buf)
self._ck_idle = max(ck_su_sta, ck_buf)
self._ck_delay = ck_buf
self._i2c_mask = ft232h_pins['SCL'] | ft232h_pins['SDAO'] | ft232h_pins['SDAI']
self._i2c_dir = ft232h_pins['SCL'] | ft232h_pins['SDAO']
elif self._ft232h_mode == 'SPI':
frequency = 400000.0
self._bits_per_word = 8
self._cshigh = False
self._no_cs = False
self._loop = False
self._lsbfirst = False
self._max_speed_hz = 400000.0
self._mode = 0
self._threewire = False
self._spi_param_changed = True
self._cs_bit = ft232h_pins['CS']
self._spi_dir = self._cs_bit | ft232h_pins['SCK'] | ft232h_pins['DO']
self._spi_mask = self._cs_bit | ft232h_pins['SCK'] | ft232h_pins['DO'] | ft232h_pins['DI']
else:
frequency = 400000.0
if self._serial_nr is not None and self._ft232h_mode != 'Write_serial_nr':
devices = find(find_all=True, idVendor=Ftdi_vendor_id, idProduct=ft232h_product_id, custom_match=Find_serial_number(self._serial_nr))
else:
devices = find(find_all=True, idVendor=Ftdi_vendor_id, idProduct=ft232h_product_id)
devices = list(devices)
if len(devices) == 0:
raise IOError('No matching ft232h connected')
elif len(devices) > 1:
raise IOError('Several ft232h devices found, please specify a serial_nr')
else:
self._usb_dev = devices[0]
try:
self._serial_nr = self._usb_dev.serial_number
except ValueError:
self._serial_nr = ''
try:
if self._usb_dev.is_kernel_driver_active(0):
self._usb_dev.detach_kernel_driver(0)
self._usb_dev.set_configuration()
except USBError:
print('You may have to install the udev-rules for this USB device, this can be done using the udev_rule_setter utility in the util folder')
raise
config = self._usb_dev.get_active_configuration()
interface = config[0, 0]
self._index = interface.bInterfaceNumber + 1
endpoints = sorted([ep.bEndpointAddress for ep in interface])
(self._in_ep, self._out_ep) = endpoints[:2]
endpoint = interface[0]
self._max_packet_size = endpoint.wMaxPacketSize
self._readoffset = 0
self._readbuffer = bytearray()
self._purge_buffers()
if self._ctrl_transfer_out(ft232h_sio_req['reset'], ft232h_sio_args['reset']):
raise IOError('Unable to reset FTDI device')
self._set_bitmode(0, ft232h.BitMode.RESET)
self._set_latency_timer(latency)
self._writebuffer_chunksize = fifo_sizes[0]
self._readoffset = 0
self._readbuffer = bytearray()
self._readbuffer_chunksize = min(fifo_sizes[0], fifo_sizes[1], self._max_packet_size)
self._set_bitmode(0, ft232h.BitMode.RESET)
self._purge_buffers()
if self._ctrl_transfer_out(ft232h_sio_req['set_event_char'], 0):
raise IOError('Unable to set event char')
if self._ctrl_transfer_out(ft232h_sio_req['set_error_char'], 0):
raise IOError('Unable to set error char')
if self._ft232h_mode == 'GPIO_only':
self._set_bitmode(255, ft232h.BitMode.MPSSE)
else:
self._set_bitmode(self._direction, ft232h.BitMode.MPSSE)
if self._ft232h_mode == 'I2C':
self._set_frequency(3 * frequency / 2)
else:
self._set_frequency(frequency)
if self._ft232h_mode == 'I2C':
cmd = bytearray(self._idle)
cmd.extend((ft232h_cmds['set_bits_high'], 0, 0))
self._write_data(cmd)
elif self._ft232h_mode == 'SPI':
cmd = bytearray((ft232h_cmds['set_bits_low'], self._cs_bit & 255, self._direction & 255))
cmd.extend((ft232h_cmds['set_bits_high'], self._cs_bit >> 8 & 255, self._direction >> 8 & 255))
self._write_data(cmd)
else:
cmd = bytearray((ft232h_cmds['set_bits_low'], 0, 0))
cmd.extend((ft232h_cmds['set_bits_high'], 0, 0))
self._write_data(cmd)
self._write_data(bytearray((ft232h_cmds['loopback_end'],)))
bytes_ = bytes(self._read_data_bytes(2))
if len(bytes_) >= 2 and bytes_[0] == 'ú':
raise IOError('Invalid command @ %d' % bytes_[1])
if self._ft232h_mode == 'I2C':
(self._tx_size, self._rx_size) = fifo_sizes
self._write_data(bytearray([True and ft232h_cmds['enable_clk_3phase'] or ft232h_cmds['disable_clk_3phase']]))
self._write_data(bytearray([ft232h_cmds['drive_zero'], self._i2c_mask & 255, self._i2c_mask >> 8 & 255]))
self._write_data(bytearray([False and ft232h_cmds['enable_clk_adaptative'] or ft232h_cmds['disable_clk_adaptative']]))
</DeepExtract>
if mode == 'Write_serial_nr':
<DeepExtract>
if not isinstance(serial_nr, str):
serial_nr = str(serial_nr)
if any((char in serial_nr for char in ':/')):
raise ValueError('Invalid character : or / in serial number')
word_count = round(ft232h_eeprom_size / 2)
word_addr = 0
data = bytearray()
while word_count:
try:
buf = self._usb_dev.ctrl_transfer(Ftdi_req_in, ft232h_sio_req['read_eeprom'], 0, word_addr, 2, self._usb_read_timeout)
except USBError as exc:
raise IOError('UsbError: %s' % exc) from exc
if not buf:
raise IOError('EEPROM read error @ %d' % (word_addr << 1))
data.extend(buf)
word_count -= 1
word_addr += 1
new_eeprom = data[0:ft232h_eeprom_size]
new_eeprom[ft232h_eeprom['has_serial_pos']] |= 1 << 3
str_descriptors = {'manufacturer': 'FTDI', 'product': 'FT232H', 'serial': serial_nr}
stream = bytearray()
str_pos = ft232h_eeprom['str_position']
tbl_pos = ft232h_eeprom['str_table']
data_pos = str_pos
for name in str_descriptors:
new_str = str_descriptors[name].encode('utf-16le')
length = len(new_str) + 2
stream.append(length)
stream.append(util.DESC_TYPE_STRING)
stream.extend(new_str)
new_eeprom[tbl_pos] = data_pos
tbl_pos += 1
new_eeprom[tbl_pos] = length
tbl_pos += 1
data_pos += length
new_eeprom[str_pos:str_pos + len(stream)] = stream
crc_pos = len(new_eeprom)
rem = crc_pos - (str_pos + len(stream))
new_eeprom[str_pos + len(stream):crc_pos] = bytes(rem)
if len(new_eeprom) != ft232h_eeprom_size:
raise ValueError('Eeprom_size not matching, serial number may be too long, eeprom not written')
checksum = 43690
for idx in range(0, len(new_eeprom[:-2]), 2):
v = (new_eeprom[:-2][idx + 1] << 8) + new_eeprom[:-2][idx] & 65535
checksum = v ^ checksum
checksum = checksum << 1 & 65535 | checksum >> 15 & 65535
new_eeprom[-2] = checksum & 255
new_eeprom[-1] = checksum >> 8
addr = 0
for word in unpack('<%dH' % (len(new_eeprom) // 2), new_eeprom):
out = self._usb_dev.ctrl_transfer(Ftdi_req_out, ft232h_sio_req['write_eeprom'], word, addr >> 1, b'', self._usb_write_timeout)
if out:
raise IOError('EEPROM Write Error @ %d' % addr)
addr += 2
</DeepExtract>
<DeepExtract>
if self._usb_dev:
if bool(self._usb_dev._ctx.handle):
try:
self._set_bitmode(0, ft232h.BitMode.RESET)
util.release_interface(self._usb_dev, self._index - 1)
except (IOError, ValueError, USBError):
pass
try:
self._usb_dev.attach_kernel_driver(self._index - 1)
except (NotImplementedError, USBError):
pass
util.dispose_resources(self._usb_dev)
self._usb_dev = None
</DeepExtract>
|
def __init__(self, mode: str, serial_nr: Optional[str]=None, i2c_speed: float=100000.0, spi_turbo: bool=False) -> None:
"""Checks the arguments validity, initializes the device and sets the
locks.
Args:
mode (:obj:`str`): The communication mode, can be :
::
'SPI', 'I2C', 'GPIO_only', 'Write_serial_nr'
GPIOs can be driven in any mode, but faster speeds are achievable in
`GPIO_only` mode.
serial_nr (:obj:`str`, optional): The serial number of the FT232H to
drive. In `Write_serial_nr` mode, the serial number to be written.
i2c_speed (:obj:`str`, optional): In I2C mode, the I2C bus clock
frequency in Hz. Available values are :
::
100E3, 400E3, 1E6
or any value between `10kHz` and `100kHz`. Lowering below the default
value may solve I2C clock stretching issues on some devices.
spi_turbo (:obj:`str`, optional): Increases the achievable bus speed, but
may not work with some devices.
Note:
- **CS pin**:
The CS pin for selecting SPI devices is always `D3`. This pin is
reserved and cannot be used as a GPIO. If you want to drive the CS line
manually, it is possible not to drive the CS pin by setting the SPI
parameter :attr:`no_cs` to :obj:`True` and to drive the CS line from a
GPIO instead.
- ``mode``:
It is not possible to simultaneously control slaves over SPI and I2C,
due to different hardware requirements for the two protocols. Trying to
do so will most likely raise an error or lead to inconsistent behavior.
"""
if mode not in ft232h_modes:
raise ValueError('mode should be in {}'.format(ft232h_modes))
self._ft232h_mode = mode
if mode == 'Write_serial_nr' and serial_nr is None:
raise ValueError('Cannot set serial number if it is not specified !')
if i2c_speed not in ft232h_i2c_speed:
try:
if not 10000.0 <= i2c_speed < 100000.0:
raise ValueError('i2c_speed should be in {} or between 10E3 and 100E3'.format(list(ft232h_i2c_speed.values())))
except TypeError:
raise TypeError('i2c_speed should be a float or an int !')
self._gpio_low = 0
self._gpio_high = 0
self._gpio_dir = 0
self._retry_count = 16
self._usb_write_timeout = 5000
self._usb_read_timeout = 5000
self._serial_nr = serial_nr
self._turbo = spi_turbo
self._i2c_speed = i2c_speed
self._nb_attempt_1 = 8
self._nb_attempt_2 = 8
fifo_sizes = (1024, 1024)
latency = 16
if self._ft232h_mode == 'I2C':
timings = ft232h_i2c_speed[self._i2c_speed if self._i2c_speed in ft232h_i2c_speed else 100000.0]
frequency = self._i2c_speed
self._ck_hd_sta = self._compute_delay_cycles(timings.t_hd_sta)
self._ck_su_sto = self._compute_delay_cycles(timings.t_su_sto)
ck_su_sta = self._compute_delay_cycles(timings.t_su_sta)
ck_buf = self._compute_delay_cycles(timings.t_buf)
self._ck_idle = max(ck_su_sta, ck_buf)
self._ck_delay = ck_buf
self._i2c_mask = ft232h_pins['SCL'] | ft232h_pins['SDAO'] | ft232h_pins['SDAI']
self._i2c_dir = ft232h_pins['SCL'] | ft232h_pins['SDAO']
elif self._ft232h_mode == 'SPI':
frequency = 400000.0
self._bits_per_word = 8
self._cshigh = False
self._no_cs = False
self._loop = False
self._lsbfirst = False
self._max_speed_hz = 400000.0
self._mode = 0
self._threewire = False
self._spi_param_changed = True
self._cs_bit = ft232h_pins['CS']
self._spi_dir = self._cs_bit | ft232h_pins['SCK'] | ft232h_pins['DO']
self._spi_mask = self._cs_bit | ft232h_pins['SCK'] | ft232h_pins['DO'] | ft232h_pins['DI']
else:
frequency = 400000.0
if self._serial_nr is not None and self._ft232h_mode != 'Write_serial_nr':
devices = find(find_all=True, idVendor=Ftdi_vendor_id, idProduct=ft232h_product_id, custom_match=Find_serial_number(self._serial_nr))
else:
devices = find(find_all=True, idVendor=Ftdi_vendor_id, idProduct=ft232h_product_id)
devices = list(devices)
if len(devices) == 0:
raise IOError('No matching ft232h connected')
elif len(devices) > 1:
raise IOError('Several ft232h devices found, please specify a serial_nr')
else:
self._usb_dev = devices[0]
try:
self._serial_nr = self._usb_dev.serial_number
except ValueError:
self._serial_nr = ''
try:
if self._usb_dev.is_kernel_driver_active(0):
self._usb_dev.detach_kernel_driver(0)
self._usb_dev.set_configuration()
except USBError:
print('You may have to install the udev-rules for this USB device, this can be done using the udev_rule_setter utility in the util folder')
raise
config = self._usb_dev.get_active_configuration()
interface = config[0, 0]
self._index = interface.bInterfaceNumber + 1
endpoints = sorted([ep.bEndpointAddress for ep in interface])
(self._in_ep, self._out_ep) = endpoints[:2]
endpoint = interface[0]
self._max_packet_size = endpoint.wMaxPacketSize
self._readoffset = 0
self._readbuffer = bytearray()
self._purge_buffers()
if self._ctrl_transfer_out(ft232h_sio_req['reset'], ft232h_sio_args['reset']):
raise IOError('Unable to reset FTDI device')
self._set_bitmode(0, ft232h.BitMode.RESET)
self._set_latency_timer(latency)
self._writebuffer_chunksize = fifo_sizes[0]
self._readoffset = 0
self._readbuffer = bytearray()
self._readbuffer_chunksize = min(fifo_sizes[0], fifo_sizes[1], self._max_packet_size)
self._set_bitmode(0, ft232h.BitMode.RESET)
self._purge_buffers()
if self._ctrl_transfer_out(ft232h_sio_req['set_event_char'], 0):
raise IOError('Unable to set event char')
if self._ctrl_transfer_out(ft232h_sio_req['set_error_char'], 0):
raise IOError('Unable to set error char')
if self._ft232h_mode == 'GPIO_only':
self._set_bitmode(255, ft232h.BitMode.MPSSE)
else:
self._set_bitmode(self._direction, ft232h.BitMode.MPSSE)
if self._ft232h_mode == 'I2C':
self._set_frequency(3 * frequency / 2)
else:
self._set_frequency(frequency)
if self._ft232h_mode == 'I2C':
cmd = bytearray(self._idle)
cmd.extend((ft232h_cmds['set_bits_high'], 0, 0))
self._write_data(cmd)
elif self._ft232h_mode == 'SPI':
cmd = bytearray((ft232h_cmds['set_bits_low'], self._cs_bit & 255, self._direction & 255))
cmd.extend((ft232h_cmds['set_bits_high'], self._cs_bit >> 8 & 255, self._direction >> 8 & 255))
self._write_data(cmd)
else:
cmd = bytearray((ft232h_cmds['set_bits_low'], 0, 0))
cmd.extend((ft232h_cmds['set_bits_high'], 0, 0))
self._write_data(cmd)
self._write_data(bytearray((ft232h_cmds['loopback_end'],)))
bytes_ = bytes(self._read_data_bytes(2))
if len(bytes_) >= 2 and bytes_[0] == 'ú':
raise IOError('Invalid command @ %d' % bytes_[1])
if self._ft232h_mode == 'I2C':
(self._tx_size, self._rx_size) = fifo_sizes
self._write_data(bytearray([True and ft232h_cmds['enable_clk_3phase'] or ft232h_cmds['disable_clk_3phase']]))
self._write_data(bytearray([ft232h_cmds['drive_zero'], self._i2c_mask & 255, self._i2c_mask >> 8 & 255]))
self._write_data(bytearray([False and ft232h_cmds['enable_clk_adaptative'] or ft232h_cmds['disable_clk_adaptative']]))
if mode == 'Write_serial_nr':
if not isinstance(serial_nr, str):
serial_nr = str(serial_nr)
if any((char in serial_nr for char in ':/')):
raise ValueError('Invalid character : or / in serial number')
word_count = round(ft232h_eeprom_size / 2)
word_addr = 0
data = bytearray()
while word_count:
try:
buf = self._usb_dev.ctrl_transfer(Ftdi_req_in, ft232h_sio_req['read_eeprom'], 0, word_addr, 2, self._usb_read_timeout)
except USBError as exc:
raise IOError('UsbError: %s' % exc) from exc
if not buf:
raise IOError('EEPROM read error @ %d' % (word_addr << 1))
data.extend(buf)
word_count -= 1
word_addr += 1
new_eeprom = data[0:ft232h_eeprom_size]
new_eeprom[ft232h_eeprom['has_serial_pos']] |= 1 << 3
str_descriptors = {'manufacturer': 'FTDI', 'product': 'FT232H', 'serial': serial_nr}
stream = bytearray()
str_pos = ft232h_eeprom['str_position']
tbl_pos = ft232h_eeprom['str_table']
data_pos = str_pos
for name in str_descriptors:
new_str = str_descriptors[name].encode('utf-16le')
length = len(new_str) + 2
stream.append(length)
stream.append(util.DESC_TYPE_STRING)
stream.extend(new_str)
new_eeprom[tbl_pos] = data_pos
tbl_pos += 1
new_eeprom[tbl_pos] = length
tbl_pos += 1
data_pos += length
new_eeprom[str_pos:str_pos + len(stream)] = stream
crc_pos = len(new_eeprom)
rem = crc_pos - (str_pos + len(stream))
new_eeprom[str_pos + len(stream):crc_pos] = bytes(rem)
if len(new_eeprom) != ft232h_eeprom_size:
raise ValueError('Eeprom_size not matching, serial number may be too long, eeprom not written')
checksum = 43690
for idx in range(0, len(new_eeprom[:-2]), 2):
v = (new_eeprom[:-2][idx + 1] << 8) + new_eeprom[:-2][idx] & 65535
checksum = v ^ checksum
checksum = checksum << 1 & 65535 | checksum >> 15 & 65535
new_eeprom[-2] = checksum & 255
new_eeprom[-1] = checksum >> 8
addr = 0
for word in unpack('<%dH' % (len(new_eeprom) // 2), new_eeprom):
out = self._usb_dev.ctrl_transfer(Ftdi_req_out, ft232h_sio_req['write_eeprom'], word, addr >> 1, b'', self._usb_write_timeout)
if out:
raise IOError('EEPROM Write Error @ %d' % addr)
addr += 2
if self._usb_dev:
if bool(self._usb_dev._ctx.handle):
try:
self._set_bitmode(0, ft232h.BitMode.RESET)
util.release_interface(self._usb_dev, self._index - 1)
except (IOError, ValueError, USBError):
pass
try:
self._usb_dev.attach_kernel_driver(self._index - 1)
except (NotImplementedError, USBError):
pass
util.dispose_resources(self._usb_dev)
self._usb_dev = None
|
crappy
|
positive
|
def test_explicit_get_multi(self):
<DeepExtract>
reg = self._region(config_args={'expiration_time': 0.25})
counter = itertools.count(1)
@reg.cache_multi_on_arguments(namespace=namespace, expiration_time=1, function_multi_key_generator=key_generator)
def go(*args):
val = next(counter)
go = ['%d %s' % (val, arg) for arg in args]
go = go
</DeepExtract>
eq_(go(1, 2), ['1 1', '1 2'])
eq_(go.get(1, 2), ['1 1', '1 2'])
eq_(go.get(3, 1), [NO_VALUE, '1 1'])
eq_(go(3, 1), ['2 3', '1 1'])
eq_(go.get(3, 1), ['2 3', '1 1'])
|
def test_explicit_get_multi(self):
reg = self._region(config_args={'expiration_time': 0.25})
counter = itertools.count(1)
@reg.cache_multi_on_arguments(namespace=namespace, expiration_time=1, function_multi_key_generator=key_generator)
def go(*args):
val = next(counter)
go = ['%d %s' % (val, arg) for arg in args]
go = go
eq_(go(1, 2), ['1 1', '1 2'])
eq_(go.get(1, 2), ['1 1', '1 2'])
eq_(go.get(3, 1), [NO_VALUE, '1 1'])
eq_(go(3, 1), ['2 3', '1 1'])
eq_(go.get(3, 1), ['2 3', '1 1'])
|
dogpile.cache
|
positive
|
def __init__(self, module):
self.module = module
self.fetcher = NitroAPIFetcher(self.module)
self.main_nitro_class = 'service'
self.attribute_config = {'service': {'attributes_list': ['name', 'ip', 'servername', 'servicetype', 'port', 'cleartextport', 'cachetype', 'maxclient', 'healthmonitor', 'maxreq', 'cacheable', 'cip', 'cipheader', 'usip', 'pathmonitor', 'pathmonitorindv', 'useproxyport', 'sc', 'sp', 'rtspsessionidremap', 'clttimeout', 'svrtimeout', 'customserverid', 'serverid', 'cka', 'tcpb', 'cmp', 'maxbandwidth', 'accessdown', 'monthreshold', 'downstateflush', 'tcpprofilename', 'httpprofilename', 'contentinspectionprofilename', 'hashid', 'comment', 'appflowlog', 'netprofile', 'td', 'processlocal', 'dnsprofilename', 'monconnectionclose', 'ipaddress', 'weight', 'monitor_name_svc', 'riseapbrstatsmsgcode', 'delay', 'graceful', 'all', 'Internal'], 'transforms': {'healthmonitor': lambda v: 'YES' if v else 'NO', 'cacheable': lambda v: 'YES' if v else 'NO', 'cip': lambda v: v.upper(), 'usip': lambda v: 'YES' if v else 'NO', 'pathmonitor': lambda v: 'YES' if v else 'NO', 'pathmonitorindv': lambda v: 'YES' if v else 'NO', 'useproxyport': lambda v: 'YES' if v else 'NO', 'sc': lambda v: 'ON' if v else 'OFF', 'sp': lambda v: 'ON' if v else 'OFF', 'rtspsessionidremap': lambda v: 'ON' if v else 'OFF', 'cka': lambda v: 'YES' if v else 'NO', 'tcpb': lambda v: 'YES' if v else 'NO', 'cmp': lambda v: 'YES' if v else 'NO', 'accessdown': lambda v: 'YES' if v else 'NO', 'downstateflush': lambda v: v.upper(), 'appflowlog': lambda v: v.upper(), 'processlocal': lambda v: v.upper(), 'graceful': lambda v: 'YES' if v else 'NO'}, 'get_id_attributes': ['name'], 'delete_id_attributes': ['name'], 'non_updateable_attributes': ['ip', 'servername', 'servicetype', 'port', 'cleartextport', 'cachetype', 'state', 'td', 'riseapbrstatsmsgcode', 'delay', 'graceful', 'all', 'Internal', 'newname']}, 'monitor_bindings': {'attributes_list': ['monitor_name', 'monstate', 'weight', 'passive'], 'transforms': {'monstate': lambda v: v.upper(), 'weight': str}, 'get_id_attributes': ['name'], 'delete_id_attributes': ['monitor_name', 'name']}}
self.module_result = dict(changed=False, failed=False, loglines=loglines)
self.prepared_list = []
<DeepExtract>
log('ModuleExecutor.calculate_configured_service()')
self.configured_service = {}
for attribute in self.attribute_config['service']['attributes_list']:
value = self.module.params.get(attribute)
if value is None:
continue
transform = self.attribute_config['service']['transforms'].get(attribute)
if transform is not None:
value = transform(value)
self.configured_service[attribute] = value
log('calculated configured service%s' % self.configured_service)
</DeepExtract>
<DeepExtract>
log('ModuleExecutor.calculate_configured_monitor_bindings()')
self.configured_monitor_bindings = []
if self.module.params.get('monitor_bindings') is None:
return
for monitor_binding in self.module.params['monitor_bindings']:
member = {}
member['name'] = self.module.params['name']
for attribute in self.attribute_config['monitor_bindings']['attributes_list']:
value = monitor_binding.get(attribute)
if value is None:
continue
transform = self.attribute_config['monitor_bindings']['transforms'].get(attribute)
if transform is not None:
value = transform(value)
member[attribute] = value
self.configured_monitor_bindings.append(member)
log('calculated configured monitor bindings %s' % self.configured_monitor_bindings)
</DeepExtract>
|
def __init__(self, module):
self.module = module
self.fetcher = NitroAPIFetcher(self.module)
self.main_nitro_class = 'service'
self.attribute_config = {'service': {'attributes_list': ['name', 'ip', 'servername', 'servicetype', 'port', 'cleartextport', 'cachetype', 'maxclient', 'healthmonitor', 'maxreq', 'cacheable', 'cip', 'cipheader', 'usip', 'pathmonitor', 'pathmonitorindv', 'useproxyport', 'sc', 'sp', 'rtspsessionidremap', 'clttimeout', 'svrtimeout', 'customserverid', 'serverid', 'cka', 'tcpb', 'cmp', 'maxbandwidth', 'accessdown', 'monthreshold', 'downstateflush', 'tcpprofilename', 'httpprofilename', 'contentinspectionprofilename', 'hashid', 'comment', 'appflowlog', 'netprofile', 'td', 'processlocal', 'dnsprofilename', 'monconnectionclose', 'ipaddress', 'weight', 'monitor_name_svc', 'riseapbrstatsmsgcode', 'delay', 'graceful', 'all', 'Internal'], 'transforms': {'healthmonitor': lambda v: 'YES' if v else 'NO', 'cacheable': lambda v: 'YES' if v else 'NO', 'cip': lambda v: v.upper(), 'usip': lambda v: 'YES' if v else 'NO', 'pathmonitor': lambda v: 'YES' if v else 'NO', 'pathmonitorindv': lambda v: 'YES' if v else 'NO', 'useproxyport': lambda v: 'YES' if v else 'NO', 'sc': lambda v: 'ON' if v else 'OFF', 'sp': lambda v: 'ON' if v else 'OFF', 'rtspsessionidremap': lambda v: 'ON' if v else 'OFF', 'cka': lambda v: 'YES' if v else 'NO', 'tcpb': lambda v: 'YES' if v else 'NO', 'cmp': lambda v: 'YES' if v else 'NO', 'accessdown': lambda v: 'YES' if v else 'NO', 'downstateflush': lambda v: v.upper(), 'appflowlog': lambda v: v.upper(), 'processlocal': lambda v: v.upper(), 'graceful': lambda v: 'YES' if v else 'NO'}, 'get_id_attributes': ['name'], 'delete_id_attributes': ['name'], 'non_updateable_attributes': ['ip', 'servername', 'servicetype', 'port', 'cleartextport', 'cachetype', 'state', 'td', 'riseapbrstatsmsgcode', 'delay', 'graceful', 'all', 'Internal', 'newname']}, 'monitor_bindings': {'attributes_list': ['monitor_name', 'monstate', 'weight', 'passive'], 'transforms': {'monstate': lambda v: v.upper(), 'weight': str}, 'get_id_attributes': ['name'], 'delete_id_attributes': ['monitor_name', 'name']}}
self.module_result = dict(changed=False, failed=False, loglines=loglines)
self.prepared_list = []
log('ModuleExecutor.calculate_configured_service()')
self.configured_service = {}
for attribute in self.attribute_config['service']['attributes_list']:
value = self.module.params.get(attribute)
if value is None:
continue
transform = self.attribute_config['service']['transforms'].get(attribute)
if transform is not None:
value = transform(value)
self.configured_service[attribute] = value
log('calculated configured service%s' % self.configured_service)
log('ModuleExecutor.calculate_configured_monitor_bindings()')
self.configured_monitor_bindings = []
if self.module.params.get('monitor_bindings') is None:
return
for monitor_binding in self.module.params['monitor_bindings']:
member = {}
member['name'] = self.module.params['name']
for attribute in self.attribute_config['monitor_bindings']['attributes_list']:
value = monitor_binding.get(attribute)
if value is None:
continue
transform = self.attribute_config['monitor_bindings']['transforms'].get(attribute)
if transform is not None:
value = transform(value)
member[attribute] = value
self.configured_monitor_bindings.append(member)
log('calculated configured monitor bindings %s' % self.configured_monitor_bindings)
|
citrix-adc-ansible-modules
|
positive
|