| before (stringlengths 0–955k) | after (stringlengths 0–877k) | repo (stringlengths 1–74) | type (stringclasses, 1 value) |
|---|---|---|---|
def generate_aser_to_glucose_dict(head: str, tail: str, full_return=True):
"""
This function generates the replacement dictionary from ASER form to CKGP form. For example, 'I love you' will be
replaced to 'PersonX love PersonY'.
:param head: The head string in ASER form. e.g. I love you
:param tail: The tail string in ASER form. e.g. You love I
:param full_return: Whether to also return the replaced head, replaced tail, and replaced head + tail.
If true, return one dict and three strings. If false, only a dict.
:return: return a dict of replacement rules from ASER to CKGP. If full_return is true, then also return the
replaced head, replaced tail, replaced head + replaced tail, joined by a comma ', '
Note that for objects, 'it' and 'that' are not covered; they will not be reversed.
For place, 'there' is not taken care of, as it's widely used in many ways.
"""
rule = {}
head = ' '.join(['i' if token == 'my' else token for token in head.split()])
tail = ' '.join(['i' if token == 'my' else token for token in tail.split()])
head_subj = head.split()[0]
tail_subj = tail.split()[0]
if head_subj in PP_SINGLE and tail_subj in PP_SINGLE and (head_subj != tail_subj):
<DeepExtract>
for i in ATOMIC_subject_list:
if i not in rule.values():
rule[head_subj] = i
</DeepExtract>
<DeepExtract>
for i in ATOMIC_subject_list:
if i not in rule.values():
rule[tail_subj] = i
</DeepExtract>
tokens = (head.lower() + ' ' + tail.lower()).split(' ')
for t in tokens:
if t in PP_SINGLE:
if t in rule.keys():
continue
else:
<DeepExtract>
for i in ATOMIC_subject_list:
if i not in rule.values():
rule[t] = i
</DeepExtract>
if t in glucose_group_list:
if t in rule.keys():
continue
else:
<DeepExtract>
for i in ATOMIC_group_list:
if i not in rule.values():
rule[t] = i
</DeepExtract>
<DeepExtract>
(head_result, tail_result) = (head.lower(), tail.lower())
for i in rule.keys():
head_result = ' '.join([rule[i] if j == i else j for j in head_result.split(' ')])
tail_result = ' '.join([rule[i] if j == i else j for j in tail_result.split(' ')])
(replaced_head, replaced_tail) = (head_result, tail_result)
</DeepExtract>
if full_return:
return (rule, replaced_head, replaced_tail, replaced_head + ', ' + replaced_tail)
else:
return rule
|
def generate_aser_to_glucose_dict(head: str, tail: str, full_return=True):
"""
This function generates the replacement dictionary from ASER form to CKGP form. For example, 'I love you' will be
replaced to 'PersonX love PersonY'.
:param head: The head string in ASER form. e.g. I love you
:param tail: The tail string in ASER form. e.g. You love I
:param full_return: Whether to also return the replaced head, replaced tail, and replaced head + tail.
If true, return one dict and three strings. If false, only a dict.
:return: return a dict of replacement rules from ASER to CKGP. If full_return is true, then also return the
replaced head, replaced tail, replaced head + replaced tail, joined by a comma ', '
Note that for objects, 'it' and 'that' are not covered; they will not be reversed.
For place, 'there' is not taken care of, as it's widely used in many ways.
"""
rule = {}
head = ' '.join(['i' if token == 'my' else token for token in head.split()])
tail = ' '.join(['i' if token == 'my' else token for token in tail.split()])
head_subj = head.split()[0]
tail_subj = tail.split()[0]
if head_subj in PP_SINGLE and tail_subj in PP_SINGLE and (head_subj != tail_subj):
for i in ATOMIC_subject_list:
if i not in rule.values():
rule[head_subj] = i
for i in ATOMIC_subject_list:
if i not in rule.values():
rule[tail_subj] = i
tokens = (head.lower() + ' ' + tail.lower()).split(' ')
for t in tokens:
if t in PP_SINGLE:
if t in rule.keys():
continue
else:
for i in ATOMIC_subject_list:
if i not in rule.values():
rule[t] = i
if t in glucose_group_list:
if t in rule.keys():
continue
else:
for i in ATOMIC_group_list:
if i not in rule.values():
rule[t] = i
(head_result, tail_result) = (head.lower(), tail.lower())
for i in rule.keys():
head_result = ' '.join([rule[i] if j == i else j for j in head_result.split(' ')])
tail_result = ' '.join([rule[i] if j == i else j for j in tail_result.split(' ')])
(replaced_head, replaced_tail) = (head_result, tail_result)
if full_return:
return (rule, replaced_head, replaced_tail, replaced_head + ', ' + replaced_tail)
else:
return rule
|
ASER
|
positive
|
def testRenderBoolean(self):
<DeepExtract>
def_dict = {'className': 'Foo', 'type': 'boolean'}
prototype = data_types.DataType(def_dict, None, language_model=self.language_model)
dv = data_value.DataValue(True, prototype)
dv = dv
</DeepExtract>
render_method = self.language_model._SUPPORTED_TYPES['boolean']
self.assertEqual('true', render_method(dv))
dv.SetValue(False)
self.assertEqual('false', render_method(dv))
|
def testRenderBoolean(self):
def_dict = {'className': 'Foo', 'type': 'boolean'}
prototype = data_types.DataType(def_dict, None, language_model=self.language_model)
dv = data_value.DataValue(True, prototype)
dv = dv
render_method = self.language_model._SUPPORTED_TYPES['boolean']
self.assertEqual('true', render_method(dv))
dv.SetValue(False)
self.assertEqual('false', render_method(dv))
|
apis-client-generator
|
positive
|
def __init__(self, input_size: int, output_size: int, nr_params: int, d_model: int, nhead: int, num_encoder_layers: int, num_decoder_layers: int, dim_feedforward: int, dropout: float, activation: str, norm_type: Union[str, nn.Module, None]=None, custom_encoder: Optional[nn.Module]=None, custom_decoder: Optional[nn.Module]=None, **kwargs):
"""PyTorch module implementing a Transformer to be used in `TransformerModel`.
PyTorch module implementing a simple encoder-decoder transformer architecture.
Parameters
----------
input_size
The dimensionality of the TimeSeries instances that will be fed to the fit and predict functions.
output_size
The dimensionality of the output time series.
nr_params
The number of parameters of the likelihood (or 1 if no likelihood is used).
d_model
The number of expected features in the transformer encoder/decoder inputs.
nhead
The number of heads in the multiheadattention model.
num_encoder_layers
The number of encoder layers in the encoder.
num_decoder_layers
The number of decoder layers in the decoder.
dim_feedforward
The dimension of the feedforward network model.
dropout
Fraction of neurons affected by Dropout.
activation
The activation function of encoder/decoder intermediate layer.
norm_type: str | nn.Module | None
The type of LayerNorm variant to use.
custom_encoder
A custom transformer encoder provided by the user (default=None).
custom_decoder
A custom transformer decoder provided by the user (default=None).
**kwargs
All parameters required for :class:`darts.model.forecasting_models.PLForecastingModule` base class.
Inputs
------
x of shape `(batch_size, input_chunk_length, input_size)`
Tensor containing the features of the input sequence.
Outputs
-------
y of shape `(batch_size, output_chunk_length, target_size, nr_params)`
Tensor containing the prediction at the last time step of the sequence.
"""
super().__init__(**kwargs)
self.input_size = input_size
self.target_size = output_size
self.nr_params = nr_params
self.target_length = self.output_chunk_length
self.encoder = nn.Linear(input_size, d_model)
self.positional_encoding = _PositionalEncoding(d_model, dropout, self.input_chunk_length)
if isinstance(norm_type, str):
try:
self.layer_norm = getattr(layer_norm_variants, norm_type)
except AttributeError:
raise_log(AttributeError('please provide a valid layer norm type'))
else:
self.layer_norm = norm_type
raise_if_not(activation in FFN, f"'{activation}' is not in {FFN}")
if activation in GLU_FFN:
raise_if(custom_encoder is not None or custom_decoder is not None, f'Cannot use `custom_encoder` or `custom_decoder` along with an `activation` from {GLU_FFN}', logger=logger)
ffn_cls = getattr(glu_variants, activation)
activation = None
<DeepExtract>
ffn = dict(ffn=ffn_cls(d_model=d_model, d_ff=dim_feedforward, dropout=dropout)) if ffn_cls else dict()
layer = CustomFeedForwardEncoderLayer(**ffn, dropout=dropout, d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward)
custom_encoder = nn.TransformerEncoder(layer, num_layers=num_encoder_layers, norm=self.layer_norm if self.layer_norm else nn.LayerNorm(d_model))
</DeepExtract>
<DeepExtract>
ffn = dict(ffn=ffn_cls(d_model=d_model, d_ff=dim_feedforward, dropout=dropout)) if ffn_cls else dict()
layer = CustomFeedForwardDecoderLayer(**ffn, dropout=dropout, d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward)
custom_decoder = nn.TransformerDecoder(layer, num_layers=num_decoder_layers, norm=self.layer_norm if self.layer_norm else nn.LayerNorm(d_model))
</DeepExtract>
if self.layer_norm and custom_decoder is None:
<DeepExtract>
ffn = dict(ffn=None(d_model=d_model, d_ff=dim_feedforward, dropout=dropout)) if None else dict()
layer = nn.TransformerEncoderLayer(**ffn, dropout=dropout, d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward)
custom_encoder = nn.TransformerEncoder(layer, num_layers=num_encoder_layers, norm=self.layer_norm(d_model))
</DeepExtract>
<DeepExtract>
ffn = dict(ffn=None(d_model=d_model, d_ff=dim_feedforward, dropout=dropout)) if None else dict()
layer = nn.TransformerDecoderLayer(**ffn, dropout=dropout, d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward)
custom_decoder = nn.TransformerDecoder(layer, num_layers=num_decoder_layers, norm=self.layer_norm(d_model))
</DeepExtract>
self.transformer = nn.Transformer(d_model=d_model, nhead=nhead, num_encoder_layers=num_encoder_layers, num_decoder_layers=num_decoder_layers, dim_feedforward=dim_feedforward, dropout=dropout, activation=activation, custom_encoder=custom_encoder, custom_decoder=custom_decoder)
self.decoder = nn.Linear(d_model, self.output_chunk_length * self.target_size * self.nr_params)
|
def __init__(self, input_size: int, output_size: int, nr_params: int, d_model: int, nhead: int, num_encoder_layers: int, num_decoder_layers: int, dim_feedforward: int, dropout: float, activation: str, norm_type: Union[str, nn.Module, None]=None, custom_encoder: Optional[nn.Module]=None, custom_decoder: Optional[nn.Module]=None, **kwargs):
"""PyTorch module implementing a Transformer to be used in `TransformerModel`.
PyTorch module implementing a simple encoder-decoder transformer architecture.
Parameters
----------
input_size
The dimensionality of the TimeSeries instances that will be fed to the fit and predict functions.
output_size
The dimensionality of the output time series.
nr_params
The number of parameters of the likelihood (or 1 if no likelihood is used).
d_model
The number of expected features in the transformer encoder/decoder inputs.
nhead
The number of heads in the multiheadattention model.
num_encoder_layers
The number of encoder layers in the encoder.
num_decoder_layers
The number of decoder layers in the decoder.
dim_feedforward
The dimension of the feedforward network model.
dropout
Fraction of neurons affected by Dropout.
activation
The activation function of encoder/decoder intermediate layer.
norm_type: str | nn.Module | None
The type of LayerNorm variant to use.
custom_encoder
A custom transformer encoder provided by the user (default=None).
custom_decoder
A custom transformer decoder provided by the user (default=None).
**kwargs
All parameters required for :class:`darts.model.forecasting_models.PLForecastingModule` base class.
Inputs
------
x of shape `(batch_size, input_chunk_length, input_size)`
Tensor containing the features of the input sequence.
Outputs
-------
y of shape `(batch_size, output_chunk_length, target_size, nr_params)`
Tensor containing the prediction at the last time step of the sequence.
"""
super().__init__(**kwargs)
self.input_size = input_size
self.target_size = output_size
self.nr_params = nr_params
self.target_length = self.output_chunk_length
self.encoder = nn.Linear(input_size, d_model)
self.positional_encoding = _PositionalEncoding(d_model, dropout, self.input_chunk_length)
if isinstance(norm_type, str):
try:
self.layer_norm = getattr(layer_norm_variants, norm_type)
except AttributeError:
raise_log(AttributeError('please provide a valid layer norm type'))
else:
self.layer_norm = norm_type
raise_if_not(activation in FFN, f"'{activation}' is not in {FFN}")
if activation in GLU_FFN:
raise_if(custom_encoder is not None or custom_decoder is not None, f'Cannot use `custom_encoder` or `custom_decoder` along with an `activation` from {GLU_FFN}', logger=logger)
ffn_cls = getattr(glu_variants, activation)
activation = None
ffn = dict(ffn=ffn_cls(d_model=d_model, d_ff=dim_feedforward, dropout=dropout)) if ffn_cls else dict()
layer = CustomFeedForwardEncoderLayer(**ffn, dropout=dropout, d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward)
custom_encoder = nn.TransformerEncoder(layer, num_layers=num_encoder_layers, norm=self.layer_norm if self.layer_norm else nn.LayerNorm(d_model))
ffn = dict(ffn=ffn_cls(d_model=d_model, d_ff=dim_feedforward, dropout=dropout)) if ffn_cls else dict()
layer = CustomFeedForwardDecoderLayer(**ffn, dropout=dropout, d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward)
custom_decoder = nn.TransformerDecoder(layer, num_layers=num_decoder_layers, norm=self.layer_norm if self.layer_norm else nn.LayerNorm(d_model))
if self.layer_norm and custom_decoder is None:
ffn = dict(ffn=None(d_model=d_model, d_ff=dim_feedforward, dropout=dropout)) if None else dict()
layer = nn.TransformerEncoderLayer(**ffn, dropout=dropout, d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward)
custom_encoder = nn.TransformerEncoder(layer, num_layers=num_encoder_layers, norm=self.layer_norm(d_model))
ffn = dict(ffn=None(d_model=d_model, d_ff=dim_feedforward, dropout=dropout)) if None else dict()
layer = nn.TransformerDecoderLayer(**ffn, dropout=dropout, d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward)
custom_decoder = nn.TransformerDecoder(layer, num_layers=num_decoder_layers, norm=self.layer_norm(d_model))
self.transformer = nn.Transformer(d_model=d_model, nhead=nhead, num_encoder_layers=num_encoder_layers, num_decoder_layers=num_decoder_layers, dim_feedforward=dim_feedforward, dropout=dropout, activation=activation, custom_encoder=custom_encoder, custom_decoder=custom_decoder)
self.decoder = nn.Linear(d_model, self.output_chunk_length * self.target_size * self.nr_params)
|
darts
|
positive
|
def success(self):
"""Set MPushButton to SuccessType"""
<DeepExtract>
if MPushButton.SuccessType in [MPushButton.DefaultType, MPushButton.PrimaryType, MPushButton.SuccessType, MPushButton.WarningType, MPushButton.DangerType]:
self._dayu_type = MPushButton.SuccessType
else:
raise ValueError("Input argument 'value' should be one of default/primary/success/warning/danger string.")
self.style().polish(self)
</DeepExtract>
return self
|
def success(self):
"""Set MPushButton to SuccessType"""
if MPushButton.SuccessType in [MPushButton.DefaultType, MPushButton.PrimaryType, MPushButton.SuccessType, MPushButton.WarningType, MPushButton.DangerType]:
self._dayu_type = MPushButton.SuccessType
else:
raise ValueError("Input argument 'value' should be one of default/primary/success/warning/danger string.")
self.style().polish(self)
return self
|
dayu_widgets
|
positive
|
def startShell(self, mnopts=None):
"""Start a shell process for running commands"""
if self.shell:
error('%s: shell is already running\n' % self.name)
return
opts = '-cd' if mnopts is None else mnopts
if self.inNamespace:
opts += 'n'
cmd = ['mnexec', opts, 'env', 'PS1=' + chr(127), 'bash', '--norc', '--noediting', '-is', 'mininet:' + self.name]
(self.master, self.slave) = pty.openpty()
<DeepExtract>
assert self
popen = Popen(cmd, **params)
debug('_popen', cmd, popen.pid)
self.shell = popen
</DeepExtract>
self.stdin = os.fdopen(self.master, 'r')
self.stdout = self.stdin
self.pid = self.shell.pid
self.pollOut = select.poll()
self.pollOut.register(self.stdout)
self.outToNode[self.stdout.fileno()] = self
self.inToNode[self.stdin.fileno()] = self
self.execed = False
self.lastCmd = None
self.lastPid = None
self.readbuf = ''
while True:
<DeepExtract>
count = len(self.readbuf)
if count < 1024:
data = os.read(self.stdout.fileno(), 1024 - count)
self.readbuf += self.decoder.decode(data)
if 1024 >= len(self.readbuf):
result = self.readbuf
self.readbuf = ''
else:
result = self.readbuf[:1024]
self.readbuf = self.readbuf[1024:]
data = result
</DeepExtract>
if data[-1] == chr(127):
break
self.pollOut.poll()
self.waiting = False
<DeepExtract>
verbose = kwargs.get('verbose', False)
log = info if verbose else debug
log('*** %s : %s\n' % (self.name, args))
if self.shell:
self.shell.poll()
if self.shell.returncode is not None:
print('shell died on ', self.name)
self.shell = None
self.startShell()
self.sendCmd(*args, **kwargs)
return self.waitOutput(verbose)
else:
warn('(%s exited - ignoring cmd%s)\n' % (self, args))
return None
</DeepExtract>
|
def startShell(self, mnopts=None):
"""Start a shell process for running commands"""
if self.shell:
error('%s: shell is already running\n' % self.name)
return
opts = '-cd' if mnopts is None else mnopts
if self.inNamespace:
opts += 'n'
cmd = ['mnexec', opts, 'env', 'PS1=' + chr(127), 'bash', '--norc', '--noediting', '-is', 'mininet:' + self.name]
(self.master, self.slave) = pty.openpty()
assert self
popen = Popen(cmd, **params)
debug('_popen', cmd, popen.pid)
self.shell = popen
self.stdin = os.fdopen(self.master, 'r')
self.stdout = self.stdin
self.pid = self.shell.pid
self.pollOut = select.poll()
self.pollOut.register(self.stdout)
self.outToNode[self.stdout.fileno()] = self
self.inToNode[self.stdin.fileno()] = self
self.execed = False
self.lastCmd = None
self.lastPid = None
self.readbuf = ''
while True:
count = len(self.readbuf)
if count < 1024:
data = os.read(self.stdout.fileno(), 1024 - count)
self.readbuf += self.decoder.decode(data)
if 1024 >= len(self.readbuf):
result = self.readbuf
self.readbuf = ''
else:
result = self.readbuf[:1024]
self.readbuf = self.readbuf[1024:]
data = result
if data[-1] == chr(127):
break
self.pollOut.poll()
self.waiting = False
verbose = kwargs.get('verbose', False)
log = info if verbose else debug
log('*** %s : %s\n' % (self.name, args))
if self.shell:
self.shell.poll()
if self.shell.returncode is not None:
print('shell died on ', self.name)
self.shell = None
self.startShell()
self.sendCmd(*args, **kwargs)
return self.waitOutput(verbose)
else:
warn('(%s exited - ignoring cmd%s)\n' % (self, args))
return None
|
containernet
|
positive
|
def __init__(self, *args, **kwargs):
widget = kwargs.get('widget') or self.widget
<DeepExtract>
if isinstance(widget, BaseCSVWidget) or (isinstance(widget, type) and issubclass(widget, BaseCSVWidget)):
kwargs['widget'] = widget
assert isinstance(widget, type), "'%s.widget' must be a widget class, not %s." % (self.__class__.__name__, repr(widget))
bases = (self.base_widget_class, widget)
kwargs['widget'] = type(str('CSV%s' % widget.__name__), bases, {})
</DeepExtract>
super().__init__(*args, **kwargs)
|
def __init__(self, *args, **kwargs):
widget = kwargs.get('widget') or self.widget
if isinstance(widget, BaseCSVWidget) or (isinstance(widget, type) and issubclass(widget, BaseCSVWidget)):
kwargs['widget'] = widget
assert isinstance(widget, type), "'%s.widget' must be a widget class, not %s." % (self.__class__.__name__, repr(widget))
bases = (self.base_widget_class, widget)
kwargs['widget'] = type(str('CSV%s' % widget.__name__), bases, {})
super().__init__(*args, **kwargs)
|
django-filter
|
positive
|
def flush_batch_metrics(self, step=None):
avg_metrics = {metric_name: mean(values) for (metric_name, values) in self.sub_batch_metrics.items()}
<DeepExtract>
for (metric_name, value) in avg_metrics.items():
self.metrics_logger.log_scalar(metric_name, value, step=step)
</DeepExtract>
self.sub_batch_metrics.clear()
if self.time_last_flush is not None:
time_since_last_flush = time() - self.time_last_flush
if self.last_step is not None and step is not None and (step > self.last_step):
time_since_last_flush = time_since_last_flush / (step - self.last_step)
<DeepExtract>
for (metric_name, value) in {'time_per_sample': time_since_last_flush}.items():
self.metrics_logger.log_scalar(metric_name, value, step=step)
</DeepExtract>
self.last_step = step
self.time_last_flush = time()
self.metrics_logger.flush()
|
def flush_batch_metrics(self, step=None):
avg_metrics = {metric_name: mean(values) for (metric_name, values) in self.sub_batch_metrics.items()}
for (metric_name, value) in avg_metrics.items():
self.metrics_logger.log_scalar(metric_name, value, step=step)
self.sub_batch_metrics.clear()
if self.time_last_flush is not None:
time_since_last_flush = time() - self.time_last_flush
if self.last_step is not None and step is not None and (step > self.last_step):
time_since_last_flush = time_since_last_flush / (step - self.last_step)
for (metric_name, value) in {'time_per_sample': time_since_last_flush}.items():
self.metrics_logger.log_scalar(metric_name, value, step=step)
self.last_step = step
self.time_last_flush = time()
self.metrics_logger.flush()
|
code-transformer
|
positive
|
def interp_surgery(lay):
"""
Set parameters s.t. deconvolutional layers compute bilinear interpolation
Only for deconvolution without groups
"""
(m, k, h, w) = lay.weight.data.size()
if m != k:
print('input + output channels need to be the same')
raise ValueError
if h != w:
print('filters need to be square')
raise ValueError
<DeepExtract>
factor = (h + 1) // 2
if h % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:h, :h]
filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
</DeepExtract>
for i in range(m):
lay.weight[i, i, :, :].data.copy_(torch.from_numpy(filt))
return lay.weight.data
|
def interp_surgery(lay):
"""
Set parameters s.t. deconvolutional layers compute bilinear interpolation
Only for deconvolution without groups
"""
(m, k, h, w) = lay.weight.data.size()
if m != k:
print('input + output channels need to be the same')
raise ValueError
if h != w:
print('filters need to be square')
raise ValueError
factor = (h + 1) // 2
if h % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:h, :h]
filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
for i in range(m):
lay.weight[i, i, :, :].data.copy_(torch.from_numpy(filt))
return lay.weight.data
|
astmt
|
positive
|
def CheckChange(self):
<DeepExtract>
d1 = libtcod.random_get_int(0, 1, 6)
d2 = libtcod.random_get_int(0, 1, 6)
(d1, d2, roll) = (d1, d2, d1 + d2)
</DeepExtract>
month = campaign.current_date[1]
if self.precip != 'None':
if roll <= 3:
if self.precip == 'Rain':
<DeepExtract>
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('The rain stops.', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
</DeepExtract>
else:
<DeepExtract>
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('The snow stops falling.', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
</DeepExtract>
self.precip = 'None'
return
elif self.clouds == 'Overcast':
if roll <= 3:
if month <= 2 or month == 12:
self.precip = 'Snow'
<DeepExtract>
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('Snow starts falling', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
</DeepExtract>
elif 5 <= month <= 9:
self.precip = 'Rain'
<DeepExtract>
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('Rain starts falling', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
</DeepExtract>
else:
<DeepExtract>
d1 = libtcod.random_get_int(0, 1, 6)
d2 = libtcod.random_get_int(0, 1, 6)
(d1, d2, roll) = (d1, d2, d1 + d2)
</DeepExtract>
if roll >= 11:
self.precip = 'Snow'
<DeepExtract>
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('Snow starts falling', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
</DeepExtract>
else:
self.precip = 'Rain'
<DeepExtract>
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('Rain starts falling', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
</DeepExtract>
return
<DeepExtract>
d1 = libtcod.random_get_int(0, 1, 6)
d2 = libtcod.random_get_int(0, 1, 6)
(d1, d2, roll) = (d1, d2, d1 + d2)
</DeepExtract>
if self.clouds == 'Clear':
if roll <= 3:
self.clouds = 'Overcast'
<DeepExtract>
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('Clouds roll in and the weather turns overcast', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
</DeepExtract>
return
else:
if roll <= 5:
if self.fog:
self.fog = False
<DeepExtract>
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('The fog lifts.', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
</DeepExtract>
return
self.clouds = 'Clear'
self.precip = 'None'
<DeepExtract>
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('The sky clears.', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
</DeepExtract>
return
<DeepExtract>
d1 = libtcod.random_get_int(0, 1, 6)
d2 = libtcod.random_get_int(0, 1, 6)
(d1, d2, roll) = (d1, d2, d1 + d2)
</DeepExtract>
if roll <= 3 and (not self.fog):
self.fog = True
<DeepExtract>
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('Fog rolls in.', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
</DeepExtract>
|
def CheckChange(self):
d1 = libtcod.random_get_int(0, 1, 6)
d2 = libtcod.random_get_int(0, 1, 6)
(d1, d2, roll) = (d1, d2, d1 + d2)
month = campaign.current_date[1]
if self.precip != 'None':
if roll <= 3:
if self.precip == 'Rain':
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('The rain stops.', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
else:
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('The snow stops falling.', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
self.precip = 'None'
return
elif self.clouds == 'Overcast':
if roll <= 3:
if month <= 2 or month == 12:
self.precip = 'Snow'
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('Snow starts falling', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
elif 5 <= month <= 9:
self.precip = 'Rain'
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('Rain starts falling', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
else:
d1 = libtcod.random_get_int(0, 1, 6)
d2 = libtcod.random_get_int(0, 1, 6)
(d1, d2, roll) = (d1, d2, d1 + d2)
if roll >= 11:
self.precip = 'Snow'
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('Snow starts falling', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
else:
self.precip = 'Rain'
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('Rain starts falling', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
return
d1 = libtcod.random_get_int(0, 1, 6)
d2 = libtcod.random_get_int(0, 1, 6)
(d1, d2, roll) = (d1, d2, d1 + d2)
if self.clouds == 'Clear':
if roll <= 3:
self.clouds = 'Overcast'
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('Clouds roll in and the weather turns overcast', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
return
else:
if roll <= 5:
if self.fog:
self.fog = False
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('The fog lifts.', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
return
self.clouds = 'Clear'
self.precip = 'None'
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('The sky clears.', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
return
d1 = libtcod.random_get_int(0, 1, 6)
d2 = libtcod.random_get_int(0, 1, 6)
(d1, d2, roll) = (d1, d2, d1 + d2)
if roll <= 3 and (not self.fog):
self.fog = True
libtcod.console_clear(con)
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0, 0.0, 0.7)
lines = wrap('Fog rolls in.', 30)
y = 25
libtcod.console_print_frame(0, SCREEN_XM - 17, y - 2, 34, len(lines) + 6, clear=True, flag=libtcod.BKGND_SET, fmt=0)
for line in lines:
libtcod.console_print_ex(0, SCREEN_XM, y, libtcod.BKGND_NONE, libtcod.CENTER, line)
y += 1
if confirm:
text = '[%cy%c] or [%cN%c]' % (libtcod.COLCTRL_1, libtcod.COLCTRL_STOP, libtcod.COLCTRL_1, libtcod.COLCTRL_STOP)
else:
text = '[%cEnter%c] to continue' % HIGHLIGHT
libtcod.console_print_ex(0, SCREEN_XM, y + 1, libtcod.BKGND_NONE, libtcod.CENTER, text)
choice = False
exit_menu = False
while not exit_menu:
libtcod.sys_check_for_event(libtcod.EVENT_KEY_PRESS | libtcod.EVENT_MOUSE, key, mouse)
key_char = chr(key.c)
if libtcod.console_is_window_closed():
sys.exit()
if confirm:
if key.vk == libtcod.KEY_ENTER or key.vk == libtcod.KEY_ESCAPE or key_char in ['n', 'N']:
exit_menu = True
elif key_char in ['y', 'Y']:
choice = True
exit_menu = True
elif key.vk == libtcod.KEY_ENTER:
exit_menu = True
libtcod.console_flush()
if skip_update:
return choice
if battle is not None:
RenderEncounter(no_flush=True)
elif campaign.day_in_progress:
RenderCampaign(no_flush=True)
return choice
|
armcom
|
positive
|
def make_scene_renderings(objects, cameras, urdf_ds_name, distance=1.5, theta=np.pi / 4, angles=[0], object_scale=1.0, camera_scale=1.5, background_color=(242, 231, 191), show_cameras=False, resolution=(640, 480), colormap_rgb=None, object_id_ref=0, gui=False, use_nms3d=True, camera_color=(0.2, 0.2, 0.2, 1.0)):
renderer = BulletSceneRenderer([urdf_ds_name, 'camera'], background_color=background_color, gui=gui)
urdf_ds = renderer.body_cache.urdf_ds
is_camera = np.array(['camera' in label for label in urdf_ds.index['label']])
urdf_ds.index.loc[~is_camera, 'scale'] = object_scale * 0.001
urdf_ds.index.loc[is_camera, 'scale'] = camera_scale
if use_nms3d:
<DeepExtract>
(TCO, TCO_infos) = (getattr(objects, 'TWO').cpu(), objects.infos)
is_tested = set()
TCO = np.array(TCO)
scores = TCO_infos['score'].values
all_t = TCO[:, :3, -1]
argsort = np.argsort(-scores)
keep = []
for (idx, TCO_n) in zip(argsort, TCO[argsort]):
if idx in is_tested:
continue
t_n = TCO_n[:3, -1]
dists = np.linalg.norm(t_n - all_t, axis=-1)
dists[idx] = np.inf
ids_merge = np.where(dists <= 0.04)[0]
for id_merge in ids_merge:
is_tested.add(id_merge)
keep.append(idx)
TCO = TCO[keep]
TCO_infos = TCO_infos.loc[keep].reset_index(drop=True)
new_preds = objects.clone()
new_preds.infos = TCO_infos
new_preds.poses = torch.as_tensor(TCO)
objects = new_preds
</DeepExtract>
objects = objects.cpu()
objects.TWO = objects.poses
if colormap_rgb is None:
<DeepExtract>
colors_hex = sns.color_palette(n_colors=len(objects.infos['label'])).as_hex()
colormap_hex = {label: color for (label, color) in zip(objects.infos['label'], colors_hex)}
colormap_rgb = {k: [int(h[1:][i:i + 2], 16) / 255.0 for i in (0, 2, 4)] + [1.0] for (k, h) in colormap_hex.items()}
(colormap_rgb, _) = (colormap_rgb, colormap_hex)
</DeepExtract>
objects.infos['color'] = objects.infos['label'].apply(lambda k: colormap_rgb[k])
cameras = cameras.cpu()
TWWB = objects.poses[object_id_ref]
cam = cameras[[0]]
TCWB = invert_T(cam.TWC.squeeze(0)) @ TWWB
TWBC = invert_T(TCWB)
if TWBC[2, -1] < 0:
quat = euler2quat([np.pi, 0, 0])
TWWB = Transform(TWWB.numpy()) * Transform(quat, np.zeros(3))
TWWB = TWWB.toHomogeneousMatrix()
TWWB = np.asarray(TWWB)
list_objects = []
for obj_id in range(len(objects)):
TWO = np.linalg.inv(TWWB) @ objects.TWO[obj_id].numpy()
TWO[:3, -1] *= object_scale
obj = dict(name=objects.infos.loc[obj_id, 'label'], color=objects.infos.loc[obj_id, 'color'], TWO=TWO)
list_objects.append(obj)
target = np.mean(np.stack([obj['TWO'][:3, -1] for obj in list_objects]), axis=0)
if show_cameras:
for cam_id in range(len(cameras)):
obj = dict(name='camera', color=camera_color, TWO=np.linalg.inv(TWWB) @ cameras.TWC[cam_id].numpy())
list_objects.append(obj)
(fx, fy) = (515, 515)
(w, h) = resolution
K = np.array([[fx, 0, w / 2], [0, fy, h / 2], [0, 0, 1]])
list_cameras = []
for phi in angles:
x = distance * np.sin(theta) * np.cos(phi)
y = distance * np.sin(theta) * np.sin(phi)
z = distance * np.cos(theta)
t = np.array([x, y, z])
R = transforms3d.euler.euler2mat(np.pi, theta, phi, axes='sxyz')
R = R @ transforms3d.euler.euler2mat(0, 0, -np.pi / 2, axes='sxyz')
t += np.array(target)
TWC = Transform(R, t).toHomogeneousMatrix()
TWBC = TWWB @ TWC
list_cameras.append(dict(K=K, TWC=TWC, resolution=(w, h)))
renders = renderer.render_scene(list_objects, list_cameras)
images = np.stack([render['rgb'] for render in renders])
if gui:
time.sleep(100)
renderer.disconnect()
return images
|
def make_scene_renderings(objects, cameras, urdf_ds_name, distance=1.5, theta=np.pi / 4, angles=[0], object_scale=1.0, camera_scale=1.5, background_color=(242, 231, 191), show_cameras=False, resolution=(640, 480), colormap_rgb=None, object_id_ref=0, gui=False, use_nms3d=True, camera_color=(0.2, 0.2, 0.2, 1.0)):
renderer = BulletSceneRenderer([urdf_ds_name, 'camera'], background_color=background_color, gui=gui)
urdf_ds = renderer.body_cache.urdf_ds
is_camera = np.array(['camera' in label for label in urdf_ds.index['label']])
urdf_ds.index.loc[~is_camera, 'scale'] = object_scale * 0.001
urdf_ds.index.loc[is_camera, 'scale'] = camera_scale
if use_nms3d:
(TCO, TCO_infos) = (getattr(objects, 'TWO').cpu(), objects.infos)
is_tested = set()
TCO = np.array(TCO)
scores = TCO_infos['score'].values
all_t = TCO[:, :3, -1]
argsort = np.argsort(-scores)
keep = []
for (idx, TCO_n) in zip(argsort, TCO[argsort]):
if idx in is_tested:
continue
t_n = TCO_n[:3, -1]
dists = np.linalg.norm(t_n - all_t, axis=-1)
dists[idx] = np.inf
ids_merge = np.where(dists <= 0.04)[0]
for id_merge in ids_merge:
is_tested.add(id_merge)
keep.append(idx)
TCO = TCO[keep]
TCO_infos = TCO_infos.loc[keep].reset_index(drop=True)
new_preds = objects.clone()
new_preds.infos = TCO_infos
new_preds.poses = torch.as_tensor(TCO)
objects = new_preds
objects = objects.cpu()
objects.TWO = objects.poses
if colormap_rgb is None:
colors_hex = sns.color_palette(n_colors=len(objects.infos['label'])).as_hex()
colormap_hex = {label: color for (label, color) in zip(objects.infos['label'], colors_hex)}
colormap_rgb = {k: [int(h[1:][i:i + 2], 16) / 255.0 for i in (0, 2, 4)] + [1.0] for (k, h) in colormap_hex.items()}
(colormap_rgb, _) = (colormap_rgb, colormap_hex)
objects.infos['color'] = objects.infos['label'].apply(lambda k: colormap_rgb[k])
cameras = cameras.cpu()
TWWB = objects.poses[object_id_ref]
cam = cameras[[0]]
TCWB = invert_T(cam.TWC.squeeze(0)) @ TWWB
TWBC = invert_T(TCWB)
if TWBC[2, -1] < 0:
quat = euler2quat([np.pi, 0, 0])
TWWB = Transform(TWWB.numpy()) * Transform(quat, np.zeros(3))
TWWB = TWWB.toHomogeneousMatrix()
TWWB = np.asarray(TWWB)
list_objects = []
for obj_id in range(len(objects)):
TWO = np.linalg.inv(TWWB) @ objects.TWO[obj_id].numpy()
TWO[:3, -1] *= object_scale
obj = dict(name=objects.infos.loc[obj_id, 'label'], color=objects.infos.loc[obj_id, 'color'], TWO=TWO)
list_objects.append(obj)
target = np.mean(np.stack([obj['TWO'][:3, -1] for obj in list_objects]), axis=0)
if show_cameras:
for cam_id in range(len(cameras)):
obj = dict(name='camera', color=camera_color, TWO=np.linalg.inv(TWWB) @ cameras.TWC[cam_id].numpy())
list_objects.append(obj)
(fx, fy) = (515, 515)
(w, h) = resolution
K = np.array([[fx, 0, w / 2], [0, fy, h / 2], [0, 0, 1]])
list_cameras = []
for phi in angles:
x = distance * np.sin(theta) * np.cos(phi)
y = distance * np.sin(theta) * np.sin(phi)
z = distance * np.cos(theta)
t = np.array([x, y, z])
R = transforms3d.euler.euler2mat(np.pi, theta, phi, axes='sxyz')
R = R @ transforms3d.euler.euler2mat(0, 0, -np.pi / 2, axes='sxyz')
t += np.array(target)
TWC = Transform(R, t).toHomogeneousMatrix()
TWBC = TWWB @ TWC
list_cameras.append(dict(K=K, TWC=TWC, resolution=(w, h)))
renders = renderer.render_scene(list_objects, list_cameras)
images = np.stack([render['rgb'] for render in renders])
if gui:
time.sleep(100)
renderer.disconnect()
return images
|
cosypose
|
positive
|
def find_anaconda():
path = Path.home() / 'anaconda3'
if path.exists():
return path
try:
info = subprocess.check_output('conda info', shell=True).decode('utf-8')
<DeepExtract>
info = info.strip('\n').replace(' ', '')
info_dict = {}
latest_key = ''
for line in info.splitlines():
if split in line:
pair = line.split(split)
info_dict[pair[0]] = pair[1]
latest_key = pair[0]
else:
if not isinstance(info_dict[latest_key], list):
info_dict[latest_key] = [info_dict[latest_key]]
info_dict[latest_key].append(line)
info_dict = info_dict
</DeepExtract>
return info_dict['activeenvlocation']
except subprocess.CalledProcessError:
        raise RuntimeError('find anaconda failed')
|
def find_anaconda():
path = Path.home() / 'anaconda3'
if path.exists():
return path
try:
info = subprocess.check_output('conda info', shell=True).decode('utf-8')
info = info.strip('\n').replace(' ', '')
info_dict = {}
latest_key = ''
for line in info.splitlines():
if split in line:
pair = line.split(split)
info_dict[pair[0]] = pair[1]
latest_key = pair[0]
else:
if not isinstance(info_dict[latest_key], list):
info_dict[latest_key] = [info_dict[latest_key]]
info_dict[latest_key].append(line)
info_dict = info_dict
return info_dict['activeenvlocation']
except subprocess.CalledProcessError:
        raise RuntimeError('find anaconda failed')
|
3D-CVF
|
positive
|
def main(env):
taskcluster.auth()
hooks = taskcluster.get_service('hooks')
<DeepExtract>
url = TC_INDEX_URL.format(env)
resp = requests.get(url)
resp.raise_for_status()
all_tasks = list(map(lambda t: t['data'], resp.json()['tasks']))
</DeepExtract>
skip_phids = [t['diff_phid'] for t in filter(is_not_error, all_tasks)]
tasks = list(filter(is_mach_failure, all_tasks))
total = 0
for task in tasks:
phid = task['diff_phid']
print('Triggering {} > {}'.format(phid, task['title']))
if phid in skip_phids:
print('>> Skipping, phid {} has already a non-erroneous task'.format(phid))
continue
extra_env = {'ANALYSIS_SOURCE': 'phabricator', 'ANALYSIS_ID': phid}
task = hooks.triggerHook('project-relman', 'code-review-{}'.format(env), extra_env)
print('>> New task {}'.format(task['status']['taskId']))
total += 1
print('Triggered {} tasks'.format(total))
|
def main(env):
taskcluster.auth()
hooks = taskcluster.get_service('hooks')
url = TC_INDEX_URL.format(env)
resp = requests.get(url)
resp.raise_for_status()
all_tasks = list(map(lambda t: t['data'], resp.json()['tasks']))
skip_phids = [t['diff_phid'] for t in filter(is_not_error, all_tasks)]
tasks = list(filter(is_mach_failure, all_tasks))
total = 0
for task in tasks:
phid = task['diff_phid']
print('Triggering {} > {}'.format(phid, task['title']))
if phid in skip_phids:
print('>> Skipping, phid {} has already a non-erroneous task'.format(phid))
continue
extra_env = {'ANALYSIS_SOURCE': 'phabricator', 'ANALYSIS_ID': phid}
task = hooks.triggerHook('project-relman', 'code-review-{}'.format(env), extra_env)
print('>> New task {}'.format(task['status']['taskId']))
total += 1
print('Triggered {} tasks'.format(total))
|
code-review
|
positive
|
def _get_dl(self, dataset):
<DeepExtract>
if not hasattr(self, 'tokenizer'):
self.tokenizer = AbstractGeneratorTokenizer(tokenizer_model_path=self.hparams.tokenizer_model_path, extra_data_path=self.hparams.extra_data_path, lowercase=self.hparams.lowercase)
</DeepExtract>
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
loader = torch.utils.data.DataLoader(dataset=dataset, sampler=sampler, batch_size=self.hparams.batch_size, collate_fn=self._collate)
return loader
|
def _get_dl(self, dataset):
if not hasattr(self, 'tokenizer'):
self.tokenizer = AbstractGeneratorTokenizer(tokenizer_model_path=self.hparams.tokenizer_model_path, extra_data_path=self.hparams.extra_data_path, lowercase=self.hparams.lowercase)
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
loader = torch.utils.data.DataLoader(dataset=dataset, sampler=sampler, batch_size=self.hparams.batch_size, collate_fn=self._collate)
return loader
|
agatha
|
positive
|
def __init__(self, block_data):
if len(block_data) != 16384:
raise ShadowBlockException('Invalid application information block size: {} bytes'.format(len(block_data)))
super(ApplicationInformation, self).__init__(block_data)
if self.get_type() != SHADOW_BLOCK_TYPE_UNKNOWN_NAME_4:
raise ShadowBlockException('Invalid type: {}'.format(self.get_type()))
<DeepExtract>
offset = struct.unpack('<Q', self.shadow_block_data[32:40])[0]
</DeepExtract>
    if offset == 0 or offset % 16384 != 0:
raise ShadowBlockException('Invalid offset: {}'.format(offset))
|
def __init__(self, block_data):
if len(block_data) != 16384:
raise ShadowBlockException('Invalid application information block size: {} bytes'.format(len(block_data)))
super(ApplicationInformation, self).__init__(block_data)
if self.get_type() != SHADOW_BLOCK_TYPE_UNKNOWN_NAME_4:
raise ShadowBlockException('Invalid type: {}'.format(self.get_type()))
offset = struct.unpack('<Q', self.shadow_block_data[32:40])[0]
    if offset == 0 or offset % 16384 != 0:
raise ShadowBlockException('Invalid offset: {}'.format(offset))
|
dfir_ntfs
|
positive
|
def execute(self, params, **kwargs):
print('Enterprise name: {0}'.format(params.enterprise['enterprise_name']))
user_managed_nodes = set(self.get_user_managed_nodes(params))
node_scope = set()
if kwargs.get('node'):
subnode = kwargs.get('node').lower()
root_nodes = [x['node_id'] for x in self.resolve_nodes(params, subnode) if x['node_id'] in user_managed_nodes]
if len(root_nodes) == 0:
logging.warning('Node "%s" not found', subnode)
return
if len(root_nodes) > 1:
logging.warning('More than one node "%s" found. Use Node ID.', subnode)
return
logging.info('Output is limited to "%s" node', subnode)
node_tree = {}
for node in params.enterprise['nodes']:
parent_id = node.get('parent_id')
if parent_id not in node_tree:
node_tree[parent_id] = []
node_tree[parent_id].append(node['node_id'])
nl = [x for x in root_nodes]
pos = 0
while pos < len(nl):
if nl[pos] in node_tree:
nl.extend(node_tree[nl[pos]])
pos += 1
if pos > 100:
break
node_scope.update([x for x in nl if x in user_managed_nodes])
else:
node_scope.update((x['node_id'] for x in params.enterprise['nodes'] if x['node_id'] in user_managed_nodes))
root_nodes = list(self.get_user_root_nodes(params))
nodes = {}
for node in params.enterprise['nodes']:
node_id = node['node_id']
if node_id not in node_scope:
continue
nodes[node_id] = {'node_id': node_id, 'parent_id': node.get('parent_id') or 0, 'name': node['data'].get('displayname') or '', 'isolated': node.get('restrict_visibility') or False, 'users': [], 'teams': [], 'queued_teams': [], 'roles': [], 'children': []}
for node in nodes:
parent_id = nodes[node]['parent_id']
if parent_id in nodes:
nodes[parent_id]['children'].append(node)
users = {}
if 'users' in params.enterprise:
for user in params.enterprise['users']:
node_id = user['node_id']
if node_id not in node_scope:
continue
user_id = user['enterprise_user_id']
u = {'id': user_id, 'node_id': node_id, 'username': user['username'] if 'username' in user else '[none]', 'name': user['data'].get('displayname') or '', 'status': user['status'], 'lock': user['lock']}
if 'account_share_expiration' in user:
u['account_share_expiration'] = user['account_share_expiration']
users[user_id] = u
if node_id in nodes:
nodes[node_id]['users'].append(user_id)
teams = {}
if 'teams' in params.enterprise:
for team in params.enterprise['teams']:
node_id = team['node_id']
if node_id not in node_scope:
continue
team_id = team['team_uid']
teams[team_id] = {'id': team_id, 'node_id': node_id, 'name': team['name'], 'restrict_sharing': team['restrict_sharing'], 'restrict_edit': team['restrict_edit'], 'restrict_view': team['restrict_view'], 'users': [], 'queued_users': []}
if node_id in nodes:
nodes[node_id]['teams'].append(team_id)
if 'team_users' in params.enterprise:
for tu in params.enterprise['team_users']:
team_uid = tu['team_uid']
if tu['team_uid'] in teams:
user_id = tu['enterprise_user_id']
teams[team_uid]['users'].append(user_id)
queued_teams = {}
if 'queued_teams' in params.enterprise:
for queued_team in params.enterprise['queued_teams']:
node_id = queued_team['node_id']
if node_id not in node_scope:
continue
team_id = queued_team['team_uid']
queued_teams[team_id] = {'id': team_id, 'node_id': node_id, 'name': queued_team['name'], 'queued_users': []}
if node_id in nodes:
nodes[node_id]['queued_teams'].append(team_id)
if 'queued_team_users' in params.enterprise:
for tu in params.enterprise['queued_team_users']:
if tu['team_uid'] in queued_teams:
queued_teams[tu['team_uid']]['queued_users'].extend(tu['users'])
elif tu['team_uid'] in teams:
teams[tu['team_uid']]['queued_users'].extend(tu['users'])
roles = {}
if 'roles' in params.enterprise:
for role in params.enterprise['roles']:
node_id = role['node_id']
if node_id not in node_scope:
continue
role_id = role['role_id']
roles[role_id] = {'id': role_id, 'node_id': node_id, 'name': role['data'].get('displayname') or '', 'visible_below': role['visible_below'], 'new_user_inherit': role['new_user_inherit'], 'is_admin': False, 'users': []}
if node_id in nodes:
nodes[node_id]['roles'].append(role_id)
if 'role_users' in params.enterprise:
for ru in params.enterprise['role_users']:
role_id = ru['role_id']
if role_id in roles:
roles[role_id]['users'].append(ru['enterprise_user_id'])
if 'managed_nodes' in params.enterprise:
for mn in params.enterprise['managed_nodes']:
role_id = mn['role_id']
if role_id in roles:
roles[role_id]['is_admin'] = True
show_nodes = kwargs.get('nodes') or False
show_users = kwargs.get('users') or False
show_teams = kwargs.get('teams') or False
show_roles = kwargs.get('roles') or False
def user_email(user_id):
if user_id in users:
return users[user_id]['username']
else:
return str(user_id)
def restricts(team):
rs = ''
rs += 'R ' if team['restrict_view'] else ' '
rs += 'W ' if team['restrict_edit'] else ' '
rs += 'S' if team['restrict_sharing'] else ' '
return rs
if not show_users and (not show_teams) and (not show_roles) and (not show_nodes):
def tree_node(node):
children = [nodes[x] for x in node['children']]
children.sort(key=lambda x: x['name'])
n = OD()
for ch in children:
name = ch['name']
if kwargs.get('verbose'):
name += ' ({0})'.format(ch['node_id'])
<DeepExtract>
children = [nodes[x] for x in ch['children']]
children.sort(key=lambda x: x['name'])
n = OD()
for ch in children:
name = ch['name']
if kwargs.get('verbose'):
name += ' ({0})'.format(ch['node_id'])
n['[{0}]{1}'.format(name, ' |Isolated| ' if ch.get('isolated') else '')] = tree_node(ch)
if len(ch['users']) > 0:
if kwargs.get('verbose'):
logging.debug('users: %s' % json.dumps(users, indent=4, sort_keys=True))
us = [users[x] for x in ch['users']]
us.sort(key=lambda x: x['username'] if 'username' in x else 'a')
ud = OD()
for u in us:
ud['{0} ({1})'.format(u['username'] if 'username' in u else '[none]', u['id'])] = {}
n['User(s)'] = ud
else:
n['{0} user(s)'.format(len(ch['users']))] = {}
if len(ch['roles']) > 0:
if kwargs.get('verbose'):
ts = [roles[x] for x in ch['roles']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Role(s)'.format(len(ts) - i)] = {}
break
n['Role(s)'] = td
else:
n['{0} role(s)'.format(len(ch['roles']))] = {}
if len(ch['teams']) > 0:
if kwargs.get('verbose'):
ts = [teams[x] for x in ch['teams']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Team(s)'.format(len(ts) - i)] = {}
break
n['Teams(s)'] = td
else:
n['{0} team(s)'.format(len(ch['teams']))] = {}
if len(ch['queued_teams']) > 0:
if kwargs.get('verbose'):
ts = [queued_teams[x] for x in ch['queued_teams']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Queued Team(s)'.format(len(ts) - i)] = {}
break
n['Queued Teams(s)'] = td
else:
n['{0} queued team(s)'.format(len(ch['queued_teams']))] = {}
n['[{0}]{1}'.format(name, ' |Isolated| ' if ch.get('isolated') else '')] = n
</DeepExtract>
if len(node['users']) > 0:
if kwargs.get('verbose'):
logging.debug('users: %s' % json.dumps(users, indent=4, sort_keys=True))
us = [users[x] for x in node['users']]
us.sort(key=lambda x: x['username'] if 'username' in x else 'a')
ud = OD()
for u in us:
ud['{0} ({1})'.format(u['username'] if 'username' in u else '[none]', u['id'])] = {}
n['User(s)'] = ud
else:
n['{0} user(s)'.format(len(node['users']))] = {}
if len(node['roles']) > 0:
if kwargs.get('verbose'):
ts = [roles[x] for x in node['roles']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Role(s)'.format(len(ts) - i)] = {}
break
n['Role(s)'] = td
else:
n['{0} role(s)'.format(len(node['roles']))] = {}
if len(node['teams']) > 0:
if kwargs.get('verbose'):
ts = [teams[x] for x in node['teams']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Team(s)'.format(len(ts) - i)] = {}
break
n['Teams(s)'] = td
else:
n['{0} team(s)'.format(len(node['teams']))] = {}
if len(node['queued_teams']) > 0:
if kwargs.get('verbose'):
ts = [queued_teams[x] for x in node['queued_teams']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Queued Team(s)'.format(len(ts) - i)] = {}
break
n['Queued Teams(s)'] = td
else:
n['{0} queued team(s)'.format(len(node['queued_teams']))] = {}
return n
tree = OD()
for node_id in root_nodes:
r = nodes[node_id]
root_name = r['name']
if not r['parent_id'] and root_name == '':
root_name = params.enterprise['enterprise_name']
if kwargs.get('verbose'):
root_name += ' ({0})'.format(r['node_id'])
<DeepExtract>
children = [nodes[x] for x in r['children']]
children.sort(key=lambda x: x['name'])
n = OD()
for ch in children:
name = ch['name']
if kwargs.get('verbose'):
name += ' ({0})'.format(ch['node_id'])
n['[{0}]{1}'.format(name, ' |Isolated| ' if ch.get('isolated') else '')] = tree_node(ch)
if len(r['users']) > 0:
if kwargs.get('verbose'):
logging.debug('users: %s' % json.dumps(users, indent=4, sort_keys=True))
us = [users[x] for x in r['users']]
us.sort(key=lambda x: x['username'] if 'username' in x else 'a')
ud = OD()
for u in us:
ud['{0} ({1})'.format(u['username'] if 'username' in u else '[none]', u['id'])] = {}
n['User(s)'] = ud
else:
n['{0} user(s)'.format(len(r['users']))] = {}
if len(r['roles']) > 0:
if kwargs.get('verbose'):
ts = [roles[x] for x in r['roles']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Role(s)'.format(len(ts) - i)] = {}
break
n['Role(s)'] = td
else:
n['{0} role(s)'.format(len(r['roles']))] = {}
if len(r['teams']) > 0:
if kwargs.get('verbose'):
ts = [teams[x] for x in r['teams']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Team(s)'.format(len(ts) - i)] = {}
break
n['Teams(s)'] = td
else:
n['{0} team(s)'.format(len(r['teams']))] = {}
if len(r['queued_teams']) > 0:
if kwargs.get('verbose'):
ts = [queued_teams[x] for x in r['queued_teams']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Queued Team(s)'.format(len(ts) - i)] = {}
break
n['Queued Teams(s)'] = td
else:
n['{0} queued team(s)'.format(len(r['queued_teams']))] = {}
tree['{0} {1}'.format(root_name, ' |Isolated| ' if r.get('isolated') else '')] = n
</DeepExtract>
if len(root_nodes) > 1:
tree = OD([('', tree)])
else:
print('')
tr = LeftAligned()
print(tr(tree))
else:
columns = set()
if kwargs.get('columns'):
columns.update((x.strip() for x in kwargs.get('columns').split(',')))
pattern = (kwargs.get('pattern') or '').lower()
if show_nodes:
supported_columns = SUPPORTED_NODE_COLUMNS
if len(columns) == 0:
columns.update(('parent_node', 'user_count', 'team_count', 'role_count'))
else:
wc = columns.difference(supported_columns)
if len(wc) > 0:
logging.warning('\n\nSupported node columns: %s\n', ', '.join(supported_columns))
has_provisioning = 'provisioning' in columns
if has_provisioning:
columns.remove('provisioning')
email_provisioning = None
scim_provisioning = None
bridge_provisioning = None
sso_provisioning = None
displayed_columns = [x for x in supported_columns if x in columns]
if has_provisioning:
if 'email_provision' in params.enterprise:
email_provisioning = {x['node_id']: x['domain'] for x in params.enterprise['email_provision']}
if len(email_provisioning) > 0:
displayed_columns.append('email_provisioning')
else:
email_provisioning = None
if 'bridges' in params.enterprise:
bridge_provisioning = {x['node_id']: x['status'] for x in params.enterprise['bridges']}
if len(bridge_provisioning) > 0:
displayed_columns.append('bridge_provisioning')
else:
bridge_provisioning = None
if 'scims' in params.enterprise:
scim_provisioning = {x['node_id']: x['status'] for x in params.enterprise['scims']}
if len(scim_provisioning) > 0:
displayed_columns.append('scim_provisioning')
else:
scim_provisioning = None
if 'sso_services' in params.enterprise:
sso_provisioning = {x['node_id']: x['name'] for x in params.enterprise['sso_services']}
if len(sso_provisioning) > 0:
displayed_columns.append('sso_provisioning')
else:
sso_provisioning = None
rows = []
for n in nodes.values():
node_id = n['node_id']
row = [node_id, n['name']]
for column in displayed_columns:
if column == 'parent_node':
parent_id = n.get('parent_id', 0)
row.append(self.get_node_path(params, parent_id) if parent_id > 0 else '')
elif column == 'user_count':
us = n.get('users', [])
row.append(len(us))
elif column == 'users':
us = n.get('users', [])
user_names = [users[x]['username'] for x in us if x in users]
row.append(user_names)
elif column == 'team_count':
ts = n.get('teams', [])
row.append(len(ts))
elif column == 'teams':
ts = n.get('teams', [])
team_names = [teams[x]['name'] for x in ts if x in teams]
row.append(team_names)
elif column == 'role_count':
rs = n.get('roles', [])
row.append(len(rs))
elif column == 'roles':
rs = n.get('roles', [])
role_names = [roles[x]['name'] for x in rs if x in roles]
row.append(role_names)
elif column == 'email_provisioning':
status = email_provisioning.get(node_id) if email_provisioning else None
row.append(status)
elif column == 'bridge_provisioning':
status = bridge_provisioning.get(node_id) if bridge_provisioning else None
row.append(status)
elif column == 'scim_provisioning':
status = scim_provisioning.get(node_id) if scim_provisioning else None
row.append(status)
elif column == 'sso_provisioning':
status = sso_provisioning.get(node_id) if sso_provisioning else None
row.append(status)
else:
row.append(None)
if pattern:
if not any((1 for x in row if x and str(x).lower().find(pattern) >= 0)):
continue
rows.append(row)
rows.sort(key=lambda x: x[1])
print('')
headers = ['node_id', 'name']
headers.extend(displayed_columns)
if kwargs.get('format') != 'json':
headers = [string.capwords(x.replace('_', ' ')) for x in headers]
return dump_report_data(rows, headers, fmt=kwargs.get('format'), filename=kwargs.get('output'))
elif show_users:
supported_columns = SUPPORTED_USER_COLUMNS
if len(columns) == 0:
columns.update(('name', 'status', 'transfer_status', 'node'))
else:
wc = columns.difference(supported_columns)
if len(wc) > 0:
logging.warning('\n\nSupported user columns: %s\n', ', '.join(supported_columns))
displayed_columns = [x for x in supported_columns if x in columns]
rows = []
for u in users.values():
<DeepExtract>
def lock_text(lock):
user_status_dict = 'Locked' if lock == 1 else 'Disabled' if lock == 2 else ''
account_status = 'Invited' if u['status'] == 'invited' else 'Active'
if u['lock'] > 0:
account_status = lock_text(u['lock'])
acct_transfer_status = ''
if 'account_share_expiration' in u:
expire_at = datetime.datetime.fromtimestamp(u['account_share_expiration'] / 1000.0)
if expire_at < datetime.datetime.now():
acct_transfer_status = 'Blocked'
else:
acct_transfer_status = 'Pending Transfer'
user_status_dict = {'acct_status': account_status, 'acct_transfer_status': acct_transfer_status}
</DeepExtract>
user_id = u['id']
row = [user_id, u['username']]
for column in displayed_columns:
if column == 'name':
row.append(u['name'])
elif column == 'status':
row.append(user_status_dict['acct_status'])
elif column == 'transfer_status':
row.append(user_status_dict['acct_transfer_status'])
elif column == 'node':
row.append(self.get_node_path(params, u['node_id']))
elif column == 'team_count':
row.append(len([1 for t in teams.values() if t['users'] and user_id in t['users']]))
elif column == 'teams':
team_names = [t['name'] for t in teams.values() if t['users'] and user_id in t['users']]
row.append(team_names)
elif column == 'role_count':
row.append(len([1 for r in roles.values() if r['users'] and user_id in r['users']]))
elif column == 'roles':
role_names = [r['name'] for r in roles.values() if r['users'] and user_id in r['users']]
row.append(role_names)
if pattern:
if not any((1 for x in row if x and str(x).lower().find(pattern) >= 0)):
continue
rows.append(row)
rows.sort(key=lambda x: x[1])
print('')
headers = ['user_id', 'email']
headers.extend(displayed_columns)
if kwargs.get('format') != 'json':
headers = [string.capwords(x.replace('_', ' ')) for x in headers]
return dump_report_data(rows, headers, fmt=kwargs.get('format'), filename=kwargs.get('output'))
if show_teams:
supported_columns = SUPPORTED_TEAM_COLUMNS
if len(columns) == 0:
columns.update(('restricts', 'node', 'user_count'))
if 'queued_team_users' in params.enterprise:
if len(params.enterprise['queued_team_users']) > 0:
columns.update(('queued_user_count',))
else:
wc = columns.difference(supported_columns)
if len(wc) > 0:
logging.warning('\n\nSupported team columns: %s\n', ', '.join(supported_columns))
displayed_columns = [x for x in supported_columns if x in columns]
rows = []
for t in teams.values():
row = [t['id'], t['name']]
for column in displayed_columns:
if column == 'restricts':
row.append(restricts(t))
elif column == 'node':
row.append(self.get_node_path(params, t['node_id']))
elif column == 'user_count':
row.append(len(t['users']))
elif column == 'users':
row.append([user_email(x) for x in t['users']])
elif column == 'queued_user_count':
row.append(len(t['queued_users']))
elif column == 'queued_users':
row.append([user_email(x) for x in t['queued_users']])
if pattern:
if not any((1 for x in row if x and str(x).lower().find(pattern) >= 0)):
continue
rows.append(row)
for t in queued_teams.values():
row = [t['id'], t['name']]
for column in displayed_columns:
if column == 'restricts':
row.append('Queued')
elif column == 'node':
row.append(self.get_node_path(params, t['node_id']))
elif column in {'user_count', 'users'}:
row.append('')
elif column == 'queued_user_count':
row.append(len(t['queued_users']))
elif column == 'queued_users':
row.append([user_email(x) for x in t['queued_users']])
if pattern:
if not any((1 for x in row if x and str(x).lower().find(pattern) >= 0)):
continue
rows.append(row)
rows.sort(key=lambda x: x[1])
print('')
headers = ['team_uid', 'name']
headers.extend(displayed_columns)
if kwargs.get('format') != 'json':
headers = [string.capwords(x.replace('_', ' ')) for x in headers]
return dump_report_data(rows, headers, fmt=kwargs.get('format'), filename=kwargs.get('output'))
if show_roles:
supported_columns = SUPPORTED_ROLE_COLUMNS
if len(columns) == 0:
columns.update(('default_role', 'admin', 'node', 'user_count'))
else:
wc = columns.difference(supported_columns)
if len(wc) > 0:
logging.warning('\n\nSupported role columns: %s\n', ', '.join(supported_columns))
displayed_columns = [x for x in supported_columns if x in columns]
rows = []
for r in roles.values():
row = [r['id'], r['name']]
for column in displayed_columns:
if column == 'visible_below':
row.append('Y' if r['visible_below'] else '')
elif column == 'default_role':
row.append('Y' if r['new_user_inherit'] else '')
elif column == 'admin':
row.append('Y' if r['is_admin'] else '')
elif column == 'node':
row.append(self.get_node_path(params, r['node_id']))
elif column == 'user_count':
row.append(len(r['users']))
elif column == 'users':
row.append([user_email(x) for x in r['users']])
if pattern:
if not any((1 for x in row if x and str(x).lower().find(pattern) >= 0)):
continue
rows.append(row)
rows.sort(key=lambda x: x[1])
print('')
headers = ['role_id', 'name']
headers.extend(displayed_columns)
if kwargs.get('format') != 'json':
headers = [string.capwords(x.replace('_', ' ')) for x in headers]
return dump_report_data(rows, headers, fmt=kwargs.get('format'), filename=kwargs.get('output'))
print('')
|
def execute(self, params, **kwargs):
print('Enterprise name: {0}'.format(params.enterprise['enterprise_name']))
user_managed_nodes = set(self.get_user_managed_nodes(params))
node_scope = set()
if kwargs.get('node'):
subnode = kwargs.get('node').lower()
root_nodes = [x['node_id'] for x in self.resolve_nodes(params, subnode) if x['node_id'] in user_managed_nodes]
if len(root_nodes) == 0:
logging.warning('Node "%s" not found', subnode)
return
if len(root_nodes) > 1:
logging.warning('More than one node "%s" found. Use Node ID.', subnode)
return
logging.info('Output is limited to "%s" node', subnode)
node_tree = {}
for node in params.enterprise['nodes']:
parent_id = node.get('parent_id')
if parent_id not in node_tree:
node_tree[parent_id] = []
node_tree[parent_id].append(node['node_id'])
nl = [x for x in root_nodes]
pos = 0
while pos < len(nl):
if nl[pos] in node_tree:
nl.extend(node_tree[nl[pos]])
pos += 1
if pos > 100:
break
node_scope.update([x for x in nl if x in user_managed_nodes])
else:
node_scope.update((x['node_id'] for x in params.enterprise['nodes'] if x['node_id'] in user_managed_nodes))
root_nodes = list(self.get_user_root_nodes(params))
nodes = {}
for node in params.enterprise['nodes']:
node_id = node['node_id']
if node_id not in node_scope:
continue
nodes[node_id] = {'node_id': node_id, 'parent_id': node.get('parent_id') or 0, 'name': node['data'].get('displayname') or '', 'isolated': node.get('restrict_visibility') or False, 'users': [], 'teams': [], 'queued_teams': [], 'roles': [], 'children': []}
for node in nodes:
parent_id = nodes[node]['parent_id']
if parent_id in nodes:
nodes[parent_id]['children'].append(node)
users = {}
if 'users' in params.enterprise:
for user in params.enterprise['users']:
node_id = user['node_id']
if node_id not in node_scope:
continue
user_id = user['enterprise_user_id']
u = {'id': user_id, 'node_id': node_id, 'username': user['username'] if 'username' in user else '[none]', 'name': user['data'].get('displayname') or '', 'status': user['status'], 'lock': user['lock']}
if 'account_share_expiration' in user:
u['account_share_expiration'] = user['account_share_expiration']
users[user_id] = u
if node_id in nodes:
nodes[node_id]['users'].append(user_id)
teams = {}
if 'teams' in params.enterprise:
for team in params.enterprise['teams']:
node_id = team['node_id']
if node_id not in node_scope:
continue
team_id = team['team_uid']
teams[team_id] = {'id': team_id, 'node_id': node_id, 'name': team['name'], 'restrict_sharing': team['restrict_sharing'], 'restrict_edit': team['restrict_edit'], 'restrict_view': team['restrict_view'], 'users': [], 'queued_users': []}
if node_id in nodes:
nodes[node_id]['teams'].append(team_id)
if 'team_users' in params.enterprise:
for tu in params.enterprise['team_users']:
team_uid = tu['team_uid']
if tu['team_uid'] in teams:
user_id = tu['enterprise_user_id']
teams[team_uid]['users'].append(user_id)
queued_teams = {}
if 'queued_teams' in params.enterprise:
for queued_team in params.enterprise['queued_teams']:
node_id = queued_team['node_id']
if node_id not in node_scope:
continue
team_id = queued_team['team_uid']
queued_teams[team_id] = {'id': team_id, 'node_id': node_id, 'name': queued_team['name'], 'queued_users': []}
if node_id in nodes:
nodes[node_id]['queued_teams'].append(team_id)
if 'queued_team_users' in params.enterprise:
for tu in params.enterprise['queued_team_users']:
if tu['team_uid'] in queued_teams:
queued_teams[tu['team_uid']]['queued_users'].extend(tu['users'])
elif tu['team_uid'] in teams:
teams[tu['team_uid']]['queued_users'].extend(tu['users'])
roles = {}
if 'roles' in params.enterprise:
for role in params.enterprise['roles']:
node_id = role['node_id']
if node_id not in node_scope:
continue
role_id = role['role_id']
roles[role_id] = {'id': role_id, 'node_id': node_id, 'name': role['data'].get('displayname') or '', 'visible_below': role['visible_below'], 'new_user_inherit': role['new_user_inherit'], 'is_admin': False, 'users': []}
if node_id in nodes:
nodes[node_id]['roles'].append(role_id)
if 'role_users' in params.enterprise:
for ru in params.enterprise['role_users']:
role_id = ru['role_id']
if role_id in roles:
roles[role_id]['users'].append(ru['enterprise_user_id'])
if 'managed_nodes' in params.enterprise:
for mn in params.enterprise['managed_nodes']:
role_id = mn['role_id']
if role_id in roles:
roles[role_id]['is_admin'] = True
show_nodes = kwargs.get('nodes') or False
show_users = kwargs.get('users') or False
show_teams = kwargs.get('teams') or False
show_roles = kwargs.get('roles') or False
def user_email(user_id):
if user_id in users:
return users[user_id]['username']
else:
return str(user_id)
def restricts(team):
rs = ''
rs += 'R ' if team['restrict_view'] else ' '
rs += 'W ' if team['restrict_edit'] else ' '
rs += 'S' if team['restrict_sharing'] else ' '
return rs
if not show_users and (not show_teams) and (not show_roles) and (not show_nodes):
def tree_node(node):
children = [nodes[x] for x in node['children']]
children.sort(key=lambda x: x['name'])
n = OD()
for ch in children:
name = ch['name']
if kwargs.get('verbose'):
name += ' ({0})'.format(ch['node_id'])
children = [nodes[x] for x in ch['children']]
children.sort(key=lambda x: x['name'])
n = OD()
for ch in children:
name = ch['name']
if kwargs.get('verbose'):
name += ' ({0})'.format(ch['node_id'])
n['[{0}]{1}'.format(name, ' |Isolated| ' if ch.get('isolated') else '')] = tree_node(ch)
if len(ch['users']) > 0:
if kwargs.get('verbose'):
logging.debug('users: %s' % json.dumps(users, indent=4, sort_keys=True))
us = [users[x] for x in ch['users']]
us.sort(key=lambda x: x['username'] if 'username' in x else 'a')
ud = OD()
for u in us:
ud['{0} ({1})'.format(u['username'] if 'username' in u else '[none]', u['id'])] = {}
n['User(s)'] = ud
else:
n['{0} user(s)'.format(len(ch['users']))] = {}
if len(ch['roles']) > 0:
if kwargs.get('verbose'):
ts = [roles[x] for x in ch['roles']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Role(s)'.format(len(ts) - i)] = {}
break
n['Role(s)'] = td
else:
n['{0} role(s)'.format(len(ch['roles']))] = {}
if len(ch['teams']) > 0:
if kwargs.get('verbose'):
ts = [teams[x] for x in ch['teams']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Team(s)'.format(len(ts) - i)] = {}
break
n['Teams(s)'] = td
else:
n['{0} team(s)'.format(len(ch['teams']))] = {}
if len(ch['queued_teams']) > 0:
if kwargs.get('verbose'):
ts = [queued_teams[x] for x in ch['queued_teams']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Queued Team(s)'.format(len(ts) - i)] = {}
break
n['Queued Teams(s)'] = td
else:
n['{0} queued team(s)'.format(len(ch['queued_teams']))] = {}
n['[{0}]{1}'.format(name, ' |Isolated| ' if ch.get('isolated') else '')] = n
if len(node['users']) > 0:
if kwargs.get('verbose'):
logging.debug('users: %s' % json.dumps(users, indent=4, sort_keys=True))
us = [users[x] for x in node['users']]
us.sort(key=lambda x: x['username'] if 'username' in x else 'a')
ud = OD()
for u in us:
ud['{0} ({1})'.format(u['username'] if 'username' in u else '[none]', u['id'])] = {}
n['User(s)'] = ud
else:
n['{0} user(s)'.format(len(node['users']))] = {}
if len(node['roles']) > 0:
if kwargs.get('verbose'):
ts = [roles[x] for x in node['roles']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Role(s)'.format(len(ts) - i)] = {}
break
n['Role(s)'] = td
else:
n['{0} role(s)'.format(len(node['roles']))] = {}
if len(node['teams']) > 0:
if kwargs.get('verbose'):
ts = [teams[x] for x in node['teams']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Team(s)'.format(len(ts) - i)] = {}
break
n['Teams(s)'] = td
else:
n['{0} team(s)'.format(len(node['teams']))] = {}
if len(node['queued_teams']) > 0:
if kwargs.get('verbose'):
ts = [queued_teams[x] for x in node['queued_teams']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Queued Team(s)'.format(len(ts) - i)] = {}
break
n['Queued Teams(s)'] = td
else:
n['{0} queued team(s)'.format(len(node['queued_teams']))] = {}
return n
tree = OD()
for node_id in root_nodes:
r = nodes[node_id]
root_name = r['name']
if not r['parent_id'] and root_name == '':
root_name = params.enterprise['enterprise_name']
if kwargs.get('verbose'):
root_name += ' ({0})'.format(r['node_id'])
children = [nodes[x] for x in r['children']]
children.sort(key=lambda x: x['name'])
n = OD()
for ch in children:
name = ch['name']
if kwargs.get('verbose'):
name += ' ({0})'.format(ch['node_id'])
n['[{0}]{1}'.format(name, ' |Isolated| ' if ch.get('isolated') else '')] = tree_node(ch)
if len(r['users']) > 0:
if kwargs.get('verbose'):
logging.debug('users: %s' % json.dumps(users, indent=4, sort_keys=True))
us = [users[x] for x in r['users']]
us.sort(key=lambda x: x['username'] if 'username' in x else 'a')
ud = OD()
for u in us:
ud['{0} ({1})'.format(u['username'] if 'username' in u else '[none]', u['id'])] = {}
n['User(s)'] = ud
else:
n['{0} user(s)'.format(len(r['users']))] = {}
if len(r['roles']) > 0:
if kwargs.get('verbose'):
ts = [roles[x] for x in r['roles']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Role(s)'.format(len(ts) - i)] = {}
break
n['Role(s)'] = td
else:
n['{0} role(s)'.format(len(r['roles']))] = {}
if len(r['teams']) > 0:
if kwargs.get('verbose'):
ts = [teams[x] for x in r['teams']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Team(s)'.format(len(ts) - i)] = {}
break
n['Teams(s)'] = td
else:
n['{0} team(s)'.format(len(r['teams']))] = {}
if len(r['queued_teams']) > 0:
if kwargs.get('verbose'):
ts = [queued_teams[x] for x in r['queued_teams']]
ts.sort(key=lambda x: x['name'])
td = OD()
for (i, t) in enumerate(ts):
td['{0} ({1})'.format(t['name'], t['id'])] = {}
if i >= 50:
td['{0} More Queued Team(s)'.format(len(ts) - i)] = {}
break
n['Queued Teams(s)'] = td
else:
n['{0} queued team(s)'.format(len(r['queued_teams']))] = {}
tree['{0} {1}'.format(root_name, ' |Isolated| ' if r.get('isolated') else '')] = n
if len(root_nodes) > 1:
tree = OD([('', tree)])
else:
print('')
tr = LeftAligned()
print(tr(tree))
else:
columns = set()
if kwargs.get('columns'):
columns.update((x.strip() for x in kwargs.get('columns').split(',')))
pattern = (kwargs.get('pattern') or '').lower()
if show_nodes:
supported_columns = SUPPORTED_NODE_COLUMNS
if len(columns) == 0:
columns.update(('parent_node', 'user_count', 'team_count', 'role_count'))
else:
wc = columns.difference(supported_columns)
if len(wc) > 0:
logging.warning('\n\nSupported node columns: %s\n', ', '.join(supported_columns))
has_provisioning = 'provisioning' in columns
if has_provisioning:
columns.remove('provisioning')
email_provisioning = None
scim_provisioning = None
bridge_provisioning = None
sso_provisioning = None
displayed_columns = [x for x in supported_columns if x in columns]
if has_provisioning:
if 'email_provision' in params.enterprise:
email_provisioning = {x['node_id']: x['domain'] for x in params.enterprise['email_provision']}
if len(email_provisioning) > 0:
displayed_columns.append('email_provisioning')
else:
email_provisioning = None
if 'bridges' in params.enterprise:
bridge_provisioning = {x['node_id']: x['status'] for x in params.enterprise['bridges']}
if len(bridge_provisioning) > 0:
displayed_columns.append('bridge_provisioning')
else:
bridge_provisioning = None
if 'scims' in params.enterprise:
scim_provisioning = {x['node_id']: x['status'] for x in params.enterprise['scims']}
if len(scim_provisioning) > 0:
displayed_columns.append('scim_provisioning')
else:
scim_provisioning = None
if 'sso_services' in params.enterprise:
sso_provisioning = {x['node_id']: x['name'] for x in params.enterprise['sso_services']}
if len(sso_provisioning) > 0:
displayed_columns.append('sso_provisioning')
else:
sso_provisioning = None
rows = []
for n in nodes.values():
node_id = n['node_id']
row = [node_id, n['name']]
for column in displayed_columns:
if column == 'parent_node':
parent_id = n.get('parent_id', 0)
row.append(self.get_node_path(params, parent_id) if parent_id > 0 else '')
elif column == 'user_count':
us = n.get('users', [])
row.append(len(us))
elif column == 'users':
us = n.get('users', [])
user_names = [users[x]['username'] for x in us if x in users]
row.append(user_names)
elif column == 'team_count':
ts = n.get('teams', [])
row.append(len(ts))
elif column == 'teams':
ts = n.get('teams', [])
team_names = [teams[x]['name'] for x in ts if x in teams]
row.append(team_names)
elif column == 'role_count':
rs = n.get('roles', [])
row.append(len(rs))
elif column == 'roles':
rs = n.get('roles', [])
role_names = [roles[x]['name'] for x in rs if x in roles]
row.append(role_names)
elif column == 'email_provisioning':
status = email_provisioning.get(node_id) if email_provisioning else None
row.append(status)
elif column == 'bridge_provisioning':
status = bridge_provisioning.get(node_id) if bridge_provisioning else None
row.append(status)
elif column == 'scim_provisioning':
status = scim_provisioning.get(node_id) if scim_provisioning else None
row.append(status)
elif column == 'sso_provisioning':
status = sso_provisioning.get(node_id) if sso_provisioning else None
row.append(status)
else:
row.append(None)
if pattern:
if not any((1 for x in row if x and str(x).lower().find(pattern) >= 0)):
continue
rows.append(row)
rows.sort(key=lambda x: x[1])
print('')
headers = ['node_id', 'name']
headers.extend(displayed_columns)
if kwargs.get('format') != 'json':
headers = [string.capwords(x.replace('_', ' ')) for x in headers]
return dump_report_data(rows, headers, fmt=kwargs.get('format'), filename=kwargs.get('output'))
elif show_users:
supported_columns = SUPPORTED_USER_COLUMNS
if len(columns) == 0:
columns.update(('name', 'status', 'transfer_status', 'node'))
else:
wc = columns.difference(supported_columns)
if len(wc) > 0:
logging.warning('\n\nSupported user columns: %s\n', ', '.join(supported_columns))
displayed_columns = [x for x in supported_columns if x in columns]
rows = []
for u in users.values():
def lock_text(lock):
user_status_dict = 'Locked' if lock == 1 else 'Disabled' if lock == 2 else ''
account_status = 'Invited' if u['status'] == 'invited' else 'Active'
if u['lock'] > 0:
account_status = lock_text(u['lock'])
acct_transfer_status = ''
if 'account_share_expiration' in u:
expire_at = datetime.datetime.fromtimestamp(u['account_share_expiration'] / 1000.0)
if expire_at < datetime.datetime.now():
acct_transfer_status = 'Blocked'
else:
acct_transfer_status = 'Pending Transfer'
user_status_dict = {'acct_status': account_status, 'acct_transfer_status': acct_transfer_status}
user_id = u['id']
row = [user_id, u['username']]
for column in displayed_columns:
if column == 'name':
row.append(u['name'])
elif column == 'status':
row.append(user_status_dict['acct_status'])
elif column == 'transfer_status':
row.append(user_status_dict['acct_transfer_status'])
elif column == 'node':
row.append(self.get_node_path(params, u['node_id']))
elif column == 'team_count':
row.append(len([1 for t in teams.values() if t['users'] and user_id in t['users']]))
elif column == 'teams':
team_names = [t['name'] for t in teams.values() if t['users'] and user_id in t['users']]
row.append(team_names)
elif column == 'role_count':
row.append(len([1 for r in roles.values() if r['users'] and user_id in r['users']]))
elif column == 'roles':
role_names = [r['name'] for r in roles.values() if r['users'] and user_id in r['users']]
row.append(role_names)
if pattern:
if not any((1 for x in row if x and str(x).lower().find(pattern) >= 0)):
continue
rows.append(row)
rows.sort(key=lambda x: x[1])
print('')
headers = ['user_id', 'email']
headers.extend(displayed_columns)
if kwargs.get('format') != 'json':
headers = [string.capwords(x.replace('_', ' ')) for x in headers]
return dump_report_data(rows, headers, fmt=kwargs.get('format'), filename=kwargs.get('output'))
if show_teams:
supported_columns = SUPPORTED_TEAM_COLUMNS
if len(columns) == 0:
columns.update(('restricts', 'node', 'user_count'))
if 'queued_team_users' in params.enterprise:
if len(params.enterprise['queued_team_users']) > 0:
columns.update(('queued_user_count',))
else:
wc = columns.difference(supported_columns)
if len(wc) > 0:
logging.warning('\n\nSupported team columns: %s\n', ', '.join(supported_columns))
displayed_columns = [x for x in supported_columns if x in columns]
rows = []
for t in teams.values():
row = [t['id'], t['name']]
for column in displayed_columns:
if column == 'restricts':
row.append(restricts(t))
elif column == 'node':
row.append(self.get_node_path(params, t['node_id']))
elif column == 'user_count':
row.append(len(t['users']))
elif column == 'users':
row.append([user_email(x) for x in t['users']])
elif column == 'queued_user_count':
row.append(len(t['queued_users']))
elif column == 'queued_users':
row.append([user_email(x) for x in t['queued_users']])
if pattern:
if not any((1 for x in row if x and str(x).lower().find(pattern) >= 0)):
continue
rows.append(row)
for t in queued_teams.values():
row = [t['id'], t['name']]
for column in displayed_columns:
if column == 'restricts':
row.append('Queued')
elif column == 'node':
row.append(self.get_node_path(params, t['node_id']))
elif column in {'user_count', 'users'}:
row.append('')
elif column == 'queued_user_count':
row.append(len(t['queued_users']))
elif column == 'queued_users':
row.append([user_email(x) for x in t['queued_users']])
if pattern:
if not any((1 for x in row if x and str(x).lower().find(pattern) >= 0)):
continue
rows.append(row)
rows.sort(key=lambda x: x[1])
print('')
headers = ['team_uid', 'name']
headers.extend(displayed_columns)
if kwargs.get('format') != 'json':
headers = [string.capwords(x.replace('_', ' ')) for x in headers]
return dump_report_data(rows, headers, fmt=kwargs.get('format'), filename=kwargs.get('output'))
if show_roles:
supported_columns = SUPPORTED_ROLE_COLUMNS
if len(columns) == 0:
columns.update(('default_role', 'admin', 'node', 'user_count'))
else:
wc = columns.difference(supported_columns)
if len(wc) > 0:
logging.warning('\n\nSupported role columns: %s\n', ', '.join(supported_columns))
displayed_columns = [x for x in supported_columns if x in columns]
rows = []
for r in roles.values():
row = [r['id'], r['name']]
for column in displayed_columns:
if column == 'visible_below':
row.append('Y' if r['visible_below'] else '')
elif column == 'default_role':
row.append('Y' if r['new_user_inherit'] else '')
elif column == 'admin':
row.append('Y' if r['is_admin'] else '')
elif column == 'node':
row.append(self.get_node_path(params, r['node_id']))
elif column == 'user_count':
row.append(len(r['users']))
elif column == 'users':
row.append([user_email(x) for x in r['users']])
if pattern:
if not any((1 for x in row if x and str(x).lower().find(pattern) >= 0)):
continue
rows.append(row)
rows.sort(key=lambda x: x[1])
print('')
headers = ['role_id', 'name']
headers.extend(displayed_columns)
if kwargs.get('format') != 'json':
headers = [string.capwords(x.replace('_', ' ')) for x in headers]
return dump_report_data(rows, headers, fmt=kwargs.get('format'), filename=kwargs.get('output'))
print('')
|
Commander
|
positive
|
def select(self, x, prng):
relation = x
seed = prng.randint(10000.0, 1000000000.0)
<DeepExtract>
config = relation.config
config_str = ''
for column in config:
(d_left, d_right) = config[column]['domain']
if config[column]['type'] == 'continuous':
config_str += 'C '
config_str += str(d_left)
config_str += ' '
config_str += str(d_right)
config_str += ' '
elif config[column]['type'] == 'discrete':
config_str += 'D '
for i in range(d_left, d_right):
config_str += str(i)
config_str += ' '
config_str += '\n'
config_str = config_str
</DeepExtract>
model_str = privBayesSelect.py_get_model(np.ascontiguousarray(relation.df.astype(np.int32)), config_str.encode('utf-8'), self.eps, self.theta, seed)
model = PrivBayesSelect.make_models(model_str.decode('utf-8'))
M = PrivBayesSelect.get_measurements(model, self.domain_shape)
return EkteloMatrix(M)
|
def select(self, x, prng):
relation = x
seed = prng.randint(10000.0, 1000000000.0)
config = relation.config
config_str = ''
for column in config:
(d_left, d_right) = config[column]['domain']
if config[column]['type'] == 'continuous':
config_str += 'C '
config_str += str(d_left)
config_str += ' '
config_str += str(d_right)
config_str += ' '
elif config[column]['type'] == 'discrete':
config_str += 'D '
for i in range(d_left, d_right):
config_str += str(i)
config_str += ' '
config_str += '\n'
config_str = config_str
model_str = privBayesSelect.py_get_model(np.ascontiguousarray(relation.df.astype(np.int32)), config_str.encode('utf-8'), self.eps, self.theta, seed)
model = PrivBayesSelect.make_models(model_str.decode('utf-8'))
M = PrivBayesSelect.get_measurements(model, self.domain_shape)
return EkteloMatrix(M)
|
ektelo
|
positive
|
def _increment_negative_power_in_einsum_r(formula, x, exponent, args1, args2, args3):
(in_formulas, out_formula) = split_einsum_formula(formula)
<DeepExtract>
new_formula = '{}->{}'.format(','.join(in_formulas[:len(args1) + 1 + len(args2)] + in_formulas[len(args1) + 2 + len(args2):]), out_formula)
</DeepExtract>
return np.einsum(new_formula, *args1 + (x ** (exponent + 1),) + args2 + args3)
|
def _increment_negative_power_in_einsum_r(formula, x, exponent, args1, args2, args3):
(in_formulas, out_formula) = split_einsum_formula(formula)
new_formula = '{}->{}'.format(','.join(in_formulas[:len(args1) + 1 + len(args2)] + in_formulas[len(args1) + 2 + len(args2):]), out_formula)
return np.einsum(new_formula, *args1 + (x ** (exponent + 1),) + args2 + args3)
|
autoconj
|
positive
|
def __getitem__(self, item):
if self.env is None:
self.env = self.init_lmdb(self.seq_dir)
key = self.keys[item]
(idx, (tot_frm, h, w), cur_frm) = self.parse_lmdb_key(key)
c = 3 if self.data_type.lower() == 'rgb' else 1
frms = []
if self.moving_first_frame and random.uniform(0, 1) > self.moving_factor:
frm = self.read_lmdb_frame(self.env, key, size=(h, w, c))
frm = frm.transpose(2, 0, 1)
offsets = np.floor(np.random.uniform(-3.5, 4.5, size=(self.tempo_extent, 2)))
offsets = offsets.astype(np.int32)
pos = np.cumsum(offsets, axis=0)
min_pos = np.min(pos, axis=0)
topleft_pos = pos - min_pos
range_pos = np.max(pos, axis=0) - min_pos
(c_h, c_w) = (h - range_pos[0], w - range_pos[1])
for i in range(self.tempo_extent):
(top, left) = topleft_pos[i]
frms.append(frm[:, top:top + c_h, left:left + c_w].copy())
else:
for i in range(cur_frm, cur_frm + self.tempo_extent):
if i >= tot_frm:
key = '{}_{}x{}x{}_{:04d}'.format(idx, tot_frm, h, w, 2 * tot_frm - i - 2)
else:
key = '{}_{}x{}x{}_{:04d}'.format(idx, tot_frm, h, w, i)
frm = self.read_lmdb_frame(self.env, key, size=(h, w, c))
frm = frm.transpose(2, 0, 1)
frms.append(frm)
frms = np.stack(frms)
<DeepExtract>
csz = self.crop_size
(h, w) = frms.shape[-2:]
assert csz <= h and csz <= w, 'the crop size is larger than the image size'
top = random.randint(0, h - csz)
left = random.randint(0, w - csz)
pats = frms[..., top:top + csz, left:left + csz]
pats = pats
</DeepExtract>
<DeepExtract>
axis = random.randint(1, 3)
if axis > 1:
pats = np.flip(pats, axis)
axis = random.randint(0, 1)
if axis < 1:
pats = np.flip(pats, axis)
k = random.randint(0, 3)
pats = np.rot90(pats, k, (2, 3))
pats = pats
</DeepExtract>
tsr = torch.FloatTensor(np.ascontiguousarray(pats)) / 255.0
return {'gt': tsr}
|
def __getitem__(self, item):
if self.env is None:
self.env = self.init_lmdb(self.seq_dir)
key = self.keys[item]
(idx, (tot_frm, h, w), cur_frm) = self.parse_lmdb_key(key)
c = 3 if self.data_type.lower() == 'rgb' else 1
frms = []
if self.moving_first_frame and random.uniform(0, 1) > self.moving_factor:
frm = self.read_lmdb_frame(self.env, key, size=(h, w, c))
frm = frm.transpose(2, 0, 1)
offsets = np.floor(np.random.uniform(-3.5, 4.5, size=(self.tempo_extent, 2)))
offsets = offsets.astype(np.int32)
pos = np.cumsum(offsets, axis=0)
min_pos = np.min(pos, axis=0)
topleft_pos = pos - min_pos
range_pos = np.max(pos, axis=0) - min_pos
(c_h, c_w) = (h - range_pos[0], w - range_pos[1])
for i in range(self.tempo_extent):
(top, left) = topleft_pos[i]
frms.append(frm[:, top:top + c_h, left:left + c_w].copy())
else:
for i in range(cur_frm, cur_frm + self.tempo_extent):
if i >= tot_frm:
key = '{}_{}x{}x{}_{:04d}'.format(idx, tot_frm, h, w, 2 * tot_frm - i - 2)
else:
key = '{}_{}x{}x{}_{:04d}'.format(idx, tot_frm, h, w, i)
frm = self.read_lmdb_frame(self.env, key, size=(h, w, c))
frm = frm.transpose(2, 0, 1)
frms.append(frm)
frms = np.stack(frms)
csz = self.crop_size
(h, w) = frms.shape[-2:]
assert csz <= h and csz <= w, 'the crop size is larger than the image size'
top = random.randint(0, h - csz)
left = random.randint(0, w - csz)
pats = frms[..., top:top + csz, left:left + csz]
pats = pats
axis = random.randint(1, 3)
if axis > 1:
pats = np.flip(pats, axis)
axis = random.randint(0, 1)
if axis < 1:
pats = np.flip(pats, axis)
k = random.randint(0, 3)
pats = np.rot90(pats, k, (2, 3))
pats = pats
tsr = torch.FloatTensor(np.ascontiguousarray(pats)) / 255.0
return {'gt': tsr}
|
EGVSR
|
positive
|
def boot_and_connect(self):
<DeepExtract>
if not getattr(self, 'conn', None):
self.conn = self._get_connection()
conn = self.conn
</DeepExtract>
<DeepExtract>
conn = self.conn
boot_new = True
last_instance_path = None
if 'use_existing_instance' in self._driver_options():
boot_new = False
instance_id = self._driver_options()['use_existing_instance']
if instance_id == '__auto__':
last_instance_path = '.vmlauncher_last_instance_%s' % self.driver_options_key
if not os.path.exists(last_instance_path):
boot_new = True
else:
instance_id = open(last_instance_path, 'r').read()
if not boot_new:
nodes = conn.list_nodes()
nodes_with_id = [node for node in nodes if node.uuid == instance_id]
if not nodes_with_id:
err_msg_template = 'Specified use_existing_instance with instance id %s, but no such instance found.'
raise Exception(err_msg_template % instance_id)
node = nodes_with_id[0]
if boot_new:
node = self._boot_new(conn)
if last_instance_path:
open(last_instance_path, 'w').write(node.uuid)
node = node
</DeepExtract>
self.conn = conn
self.node = node
self.uuid = node.uuid
<DeepExtract>
i = 0
while i < tries:
try:
ssh_client = self.__get_ssh_client()
conn._ssh_client_connect(ssh_client=ssh_client, timeout=60)
return
except:
i = i + 1
</DeepExtract>
|
def boot_and_connect(self):
if not getattr(self, 'conn', None):
self.conn = self._get_connection()
conn = self.conn
conn = self.conn
boot_new = True
last_instance_path = None
if 'use_existing_instance' in self._driver_options():
boot_new = False
instance_id = self._driver_options()['use_existing_instance']
if instance_id == '__auto__':
last_instance_path = '.vmlauncher_last_instance_%s' % self.driver_options_key
if not os.path.exists(last_instance_path):
boot_new = True
else:
instance_id = open(last_instance_path, 'r').read()
if not boot_new:
nodes = conn.list_nodes()
nodes_with_id = [node for node in nodes if node.uuid == instance_id]
if not nodes_with_id:
err_msg_template = 'Specified use_existing_instance with instance id %s, but no such instance found.'
raise Exception(err_msg_template % instance_id)
node = nodes_with_id[0]
if boot_new:
node = self._boot_new(conn)
if last_instance_path:
open(last_instance_path, 'w').write(node.uuid)
node = node
self.conn = conn
self.node = node
self.uuid = node.uuid
i = 0
while i < tries:
try:
ssh_client = self.__get_ssh_client()
conn._ssh_client_connect(ssh_client=ssh_client, timeout=60)
return
except:
i = i + 1
|
cloudbiolinux
|
positive
|
def __init__(self, allow_cookie_auth=None, blocked_regions=None):
"""Constructor for ApiAuth, authentication information for an API.
Args:
allow_cookie_auth: boolean, whether cooking auth is allowed. By
default, API methods do not allow cookie authentication, and
require the use of OAuth2 or ID tokens. Setting this field to
True will allow cookies to be used to access the API, with
potentially dangerous results. Please be very cautious in enabling
this setting, and make sure to require appropriate XSRF tokens to
protect your API.
blocked_regions: list of Strings, a list of 2-letter ISO region codes
to block.
"""
<DeepExtract>
if allow_cookie_auth is None and allow_none:
return
if not isinstance(allow_cookie_auth, bool):
raise TypeError("%s type doesn't match %s." % ('allow_cookie_auth', bool))
</DeepExtract>
<DeepExtract>
if blocked_regions is None:
if not allow_none:
raise TypeError('%s is None, which is not allowed.' % 'blocked_regions')
return blocked_regions
if not isinstance(blocked_regions, (tuple, list)):
raise TypeError('%s is not a list.' % 'blocked_regions')
if not all((isinstance(i, basestring) for i in blocked_regions)):
type_list = list(set((type(setting) for setting in blocked_regions)))
raise TypeError("%s contains types that don't match %s: %s" % ('blocked_regions', basestring.__name__, type_list))
return blocked_regions
</DeepExtract>
self.__allow_cookie_auth = allow_cookie_auth
self.__blocked_regions = blocked_regions
|
def __init__(self, allow_cookie_auth=None, blocked_regions=None):
"""Constructor for ApiAuth, authentication information for an API.
Args:
allow_cookie_auth: boolean, whether cooking auth is allowed. By
default, API methods do not allow cookie authentication, and
require the use of OAuth2 or ID tokens. Setting this field to
True will allow cookies to be used to access the API, with
potentially dangerous results. Please be very cautious in enabling
this setting, and make sure to require appropriate XSRF tokens to
protect your API.
blocked_regions: list of Strings, a list of 2-letter ISO region codes
to block.
"""
if allow_cookie_auth is None and allow_none:
return
if not isinstance(allow_cookie_auth, bool):
raise TypeError("%s type doesn't match %s." % ('allow_cookie_auth', bool))
if blocked_regions is None:
if not allow_none:
raise TypeError('%s is None, which is not allowed.' % 'blocked_regions')
return blocked_regions
if not isinstance(blocked_regions, (tuple, list)):
raise TypeError('%s is not a list.' % 'blocked_regions')
if not all((isinstance(i, basestring) for i in blocked_regions)):
type_list = list(set((type(setting) for setting in blocked_regions)))
raise TypeError("%s contains types that don't match %s: %s" % ('blocked_regions', basestring.__name__, type_list))
return blocked_regions
self.__allow_cookie_auth = allow_cookie_auth
self.__blocked_regions = blocked_regions
|
AndroidGCMTutorial
|
positive
|
def test_given_specific_random_seed_when_estimate_shapley_values_with_early_stopping_then_returns_deterministic_result(preserve_random_generator_state):
<DeepExtract>
(X, coefficients) = (np.random.normal(0, 1, (1000, 15)), np.random.choice(20, 15) - 10)
</DeepExtract>
def model(x):
return np.sum(coefficients * x, axis=1)
shapley_config = ShapleyConfig(approximation_method=ShapleyApproximationMethods.EARLY_STOPPING)
assert estimate_shapley_values(lambda subset: _set_function_for_aggregated_feature_attribution(subset, X, model), X.shape[1], shapley_config) != approx(estimate_shapley_values(lambda subset: _set_function_for_aggregated_feature_attribution(subset, X, model), X.shape[1], shapley_config), abs=0)
np.random.seed(0)
shapley_values_1 = estimate_shapley_values(lambda subset: _set_function_for_aggregated_feature_attribution(subset, X, model), X.shape[1], shapley_config)
np.random.seed(0)
shapley_values_2 = estimate_shapley_values(lambda subset: _set_function_for_aggregated_feature_attribution(subset, X, model), X.shape[1], shapley_config)
assert shapley_values_1 == approx(shapley_values_2, abs=0)
|
def test_given_specific_random_seed_when_estimate_shapley_values_with_early_stopping_then_returns_deterministic_result(preserve_random_generator_state):
(X, coefficients) = (np.random.normal(0, 1, (1000, 15)), np.random.choice(20, 15) - 10)
def model(x):
return np.sum(coefficients * x, axis=1)
shapley_config = ShapleyConfig(approximation_method=ShapleyApproximationMethods.EARLY_STOPPING)
assert estimate_shapley_values(lambda subset: _set_function_for_aggregated_feature_attribution(subset, X, model), X.shape[1], shapley_config) != approx(estimate_shapley_values(lambda subset: _set_function_for_aggregated_feature_attribution(subset, X, model), X.shape[1], shapley_config), abs=0)
np.random.seed(0)
shapley_values_1 = estimate_shapley_values(lambda subset: _set_function_for_aggregated_feature_attribution(subset, X, model), X.shape[1], shapley_config)
np.random.seed(0)
shapley_values_2 = estimate_shapley_values(lambda subset: _set_function_for_aggregated_feature_attribution(subset, X, model), X.shape[1], shapley_config)
assert shapley_values_1 == approx(shapley_values_2, abs=0)
|
dowhy
|
positive
|
def mujoco_arg_parser():
"""
Create an argparse.ArgumentParser for run_mujoco.py.
"""
<DeepExtract>
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
</DeepExtract>
parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(1000000.0))
return parser
|
def mujoco_arg_parser():
"""
Create an argparse.ArgumentParser for run_mujoco.py.
"""
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(1000000.0))
return parser
|
CHER
|
positive
|
def extract_from_parsed_result(self, parsed_result, para_eventualities, output_format='Relation', in_order=True, **kw):
if output_format not in ['Relation', 'triplet']:
raise NotImplementedError('Error: extract_from_parsed_result only supports Relation or triplet.')
connective_dict = kw.get('connective_dict', SEED_CONNECTIVE_DICT)
para_relations = list()
for (sent_parsed_result, eventualities) in zip(parsed_result, para_eventualities):
relations_in_sent = list()
for head_eventuality in eventualities:
for tail_eventuality in eventualities:
if not head_eventuality.position < tail_eventuality.position:
continue
heid = head_eventuality.eid
teid = tail_eventuality.eid
<DeepExtract>
extracted_senses = ['Co_Occurrence']
for sense in relation_senses:
for connective_words in connective_dict[sense]:
if self._verify_connective_in_one_sentence(connective_words, head_eventuality, tail_eventuality, sent_parsed_result['dependencies'], sent_parsed_result['tokens']):
extracted_senses.append(sense)
break
extracted_senses = extracted_senses
</DeepExtract>
if len(extracted_senses) > 0:
relations_in_sent.append(Relation(heid, teid, extracted_senses))
para_relations.append(relations_in_sent)
for i in range(len(parsed_result) - 1):
(eventualities1, eventualities2) = (para_eventualities[i], para_eventualities[i + 1])
relations_between_sents = list()
if len(eventualities1) == 1 and len(eventualities2) == 1:
(s1_tokens, s2_tokens) = (parsed_result[i]['tokens'], parsed_result[i + 1]['tokens'])
(s1_eventuality, s2_eventuality) = (eventualities1[0], eventualities2[0])
(heid, teid) = (s1_eventuality.eid, s2_eventuality.eid)
<DeepExtract>
extracted_senses = list()
for sense in relation_senses:
for connective_words in connective_dict[sense]:
if self._verify_connective_in_two_sentence(connective_words, s1_eventuality, s2_eventuality, s1_tokens, s2_tokens):
extracted_senses.append(sense)
break
extracted_senses = extracted_senses
</DeepExtract>
if len(extracted_senses) > 0:
relations_between_sents.append(Relation(heid, teid, extracted_senses))
para_relations.append(relations_between_sents)
if in_order:
if output_format == 'triplet':
para_relations = [sorted(chain.from_iterable([r.to_triplets() for r in relations])) for relations in para_relations]
return para_relations
else:
if output_format == 'Relation':
rid2relation = dict()
for relation in chain(*para_relations):
if relation.rid not in rid2relation:
rid2relation[relation.rid] = deepcopy(relation)
else:
rid2relation[relation.rid].update(relation)
relations = sorted(rid2relation.values(), key=lambda r: r.rid)
elif output_format == 'triplet':
relations = sorted([r.to_triplets() for relations in para_relations for r in relations])
return relations
|
def extract_from_parsed_result(self, parsed_result, para_eventualities, output_format='Relation', in_order=True, **kw):
if output_format not in ['Relation', 'triplet']:
raise NotImplementedError('Error: extract_from_parsed_result only supports Relation or triplet.')
connective_dict = kw.get('connective_dict', SEED_CONNECTIVE_DICT)
para_relations = list()
for (sent_parsed_result, eventualities) in zip(parsed_result, para_eventualities):
relations_in_sent = list()
for head_eventuality in eventualities:
for tail_eventuality in eventualities:
if not head_eventuality.position < tail_eventuality.position:
continue
heid = head_eventuality.eid
teid = tail_eventuality.eid
extracted_senses = ['Co_Occurrence']
for sense in relation_senses:
for connective_words in connective_dict[sense]:
if self._verify_connective_in_one_sentence(connective_words, head_eventuality, tail_eventuality, sent_parsed_result['dependencies'], sent_parsed_result['tokens']):
extracted_senses.append(sense)
break
extracted_senses = extracted_senses
if len(extracted_senses) > 0:
relations_in_sent.append(Relation(heid, teid, extracted_senses))
para_relations.append(relations_in_sent)
for i in range(len(parsed_result) - 1):
(eventualities1, eventualities2) = (para_eventualities[i], para_eventualities[i + 1])
relations_between_sents = list()
if len(eventualities1) == 1 and len(eventualities2) == 1:
(s1_tokens, s2_tokens) = (parsed_result[i]['tokens'], parsed_result[i + 1]['tokens'])
(s1_eventuality, s2_eventuality) = (eventualities1[0], eventualities2[0])
(heid, teid) = (s1_eventuality.eid, s2_eventuality.eid)
extracted_senses = list()
for sense in relation_senses:
for connective_words in connective_dict[sense]:
if self._verify_connective_in_two_sentence(connective_words, s1_eventuality, s2_eventuality, s1_tokens, s2_tokens):
extracted_senses.append(sense)
break
extracted_senses = extracted_senses
if len(extracted_senses) > 0:
relations_between_sents.append(Relation(heid, teid, extracted_senses))
para_relations.append(relations_between_sents)
if in_order:
if output_format == 'triplet':
para_relations = [sorted(chain.from_iterable([r.to_triplets() for r in relations])) for relations in para_relations]
return para_relations
else:
if output_format == 'Relation':
rid2relation = dict()
for relation in chain(*para_relations):
if relation.rid not in rid2relation:
rid2relation[relation.rid] = deepcopy(relation)
else:
rid2relation[relation.rid].update(relation)
relations = sorted(rid2relation.values(), key=lambda r: r.rid)
elif output_format == 'triplet':
relations = sorted([r.to_triplets() for relations in para_relations for r in relations])
return relations
|
ASER
|
positive
|
def countGroups(related):
<DeepExtract>
n = len(related)
g = Graph(n)
for i in range(n):
for j in range(n):
if j > i and related[i][j] == '1':
g.addEdge(i, j)
graph = g
</DeepExtract>
groups = graph.countGroups()
return len(groups)
|
def countGroups(related):
n = len(related)
g = Graph(n)
for i in range(n):
for j in range(n):
if j > i and related[i][j] == '1':
g.addEdge(i, j)
graph = g
groups = graph.countGroups()
return len(groups)
|
Competitive_Programming
|
positive
|
def save_mov_txt(mov, fname, mjd=False):
"""Save movie data to series of text files.
Args:
mov (Movie): movie object
fname (str): basename of output text file
mjd (int): MJD of saved movie
Returns:
"""
if mjd is False:
mjd = mov.mjd
for i in range(mov.nframes):
time_frame = mov.times[i]
fname_frame = fname + '%05d' % i
print('saving file ' + fname_frame)
frame_im = mov.get_frame(i)
<DeepExtract>
if frame_im.polrep != 'stokes' or frame_im.pol_prim != 'I':
frame_im = frame_im.switch_polrep(polrep_out='stokes', pol_prim_out=None)
pdimas = frame_im.psize / ehc.RADPERAS
xs = np.array([[j for j in range(frame_im.xdim)] for i in range(frame_im.ydim)]).reshape(frame_im.xdim * frame_im.ydim, 1)
xs = pdimas * (xs[::-1] - frame_im.xdim / 2.0)
ys = np.array([[i for j in range(frame_im.xdim)] for i in range(frame_im.ydim)]).reshape(frame_im.xdim * frame_im.ydim, 1)
ys = pdimas * (ys[::-1] - frame_im.xdim / 2.0)
if len(frame_im.vvec) and (not len(frame_im.qvec)):
frame_im.qvec = 0 * frame_im.vvec
frame_im.uvec = 0 * frame_im.vvec
if len(frame_im.qvec) and len(frame_im.vvec):
outdata = np.hstack((xs, ys, frame_im.imvec.reshape(frame_im.xdim * frame_im.ydim, 1), frame_im.qvec.reshape(frame_im.xdim * frame_im.ydim, 1), frame_im.uvec.reshape(frame_im.xdim * frame_im.ydim, 1), frame_im.vvec.reshape(frame_im.xdim * frame_im.ydim, 1)))
hf = 'x (as) y (as) I (Jy/pixel) Q (Jy/pixel) U (Jy/pixel) V (Jy/pixel)'
fmts = '%10.10f %10.10f %10.10f %10.10f %10.10f %10.10f'
elif len(frame_im.qvec):
outdata = np.hstack((xs, ys, frame_im.imvec.reshape(frame_im.xdim * frame_im.ydim, 1), frame_im.qvec.reshape(frame_im.xdim * frame_im.ydim, 1), frame_im.uvec.reshape(frame_im.xdim * frame_im.ydim, 1)))
hf = 'x (as) y (as) I (Jy/pixel) Q (Jy/pixel) U (Jy/pixel)'
fmts = '%10.10f %10.10f %10.10f %10.10f %10.10f'
else:
outdata = np.hstack((xs, ys, frame_im.imvec.reshape(frame_im.xdim * frame_im.ydim, 1)))
hf = 'x (as) y (as) I (Jy/pixel)'
fmts = '%10.10f %10.10f %10.10f'
if not mjd:
mjd = float(frame_im.mjd)
if not time_frame:
time_frame = frame_im.time
mjd += time_frame / 24.0
head = 'SRC: %s \n' % frame_im.source + 'RA: ' + obsh.rastring(frame_im.ra) + '\n' + 'DEC: ' + obsh.decstring(frame_im.dec) + '\n' + 'MJD: %.6f \n' % float(mjd) + 'RF: %.4f GHz \n' % (frame_im.rf / 1000000000.0) + 'FOVX: %i pix %f as \n' % (frame_im.xdim, pdimas * frame_im.xdim) + 'FOVY: %i pix %f as \n' % (frame_im.ydim, pdimas * frame_im.ydim) + '------------------------------------\n' + hf
np.savetxt(fname_frame, outdata, header=head, fmt=fmts)
return
</DeepExtract>
return
|
def save_mov_txt(mov, fname, mjd=False):
"""Save movie data to series of text files.
Args:
mov (Movie): movie object
fname (str): basename of output text file
mjd (int): MJD of saved movie
Returns:
"""
if mjd is False:
mjd = mov.mjd
for i in range(mov.nframes):
time_frame = mov.times[i]
fname_frame = fname + '%05d' % i
print('saving file ' + fname_frame)
frame_im = mov.get_frame(i)
if frame_im.polrep != 'stokes' or frame_im.pol_prim != 'I':
frame_im = frame_im.switch_polrep(polrep_out='stokes', pol_prim_out=None)
pdimas = frame_im.psize / ehc.RADPERAS
xs = np.array([[j for j in range(frame_im.xdim)] for i in range(frame_im.ydim)]).reshape(frame_im.xdim * frame_im.ydim, 1)
xs = pdimas * (xs[::-1] - frame_im.xdim / 2.0)
ys = np.array([[i for j in range(frame_im.xdim)] for i in range(frame_im.ydim)]).reshape(frame_im.xdim * frame_im.ydim, 1)
ys = pdimas * (ys[::-1] - frame_im.xdim / 2.0)
if len(frame_im.vvec) and (not len(frame_im.qvec)):
frame_im.qvec = 0 * frame_im.vvec
frame_im.uvec = 0 * frame_im.vvec
if len(frame_im.qvec) and len(frame_im.vvec):
outdata = np.hstack((xs, ys, frame_im.imvec.reshape(frame_im.xdim * frame_im.ydim, 1), frame_im.qvec.reshape(frame_im.xdim * frame_im.ydim, 1), frame_im.uvec.reshape(frame_im.xdim * frame_im.ydim, 1), frame_im.vvec.reshape(frame_im.xdim * frame_im.ydim, 1)))
hf = 'x (as) y (as) I (Jy/pixel) Q (Jy/pixel) U (Jy/pixel) V (Jy/pixel)'
fmts = '%10.10f %10.10f %10.10f %10.10f %10.10f %10.10f'
elif len(frame_im.qvec):
outdata = np.hstack((xs, ys, frame_im.imvec.reshape(frame_im.xdim * frame_im.ydim, 1), frame_im.qvec.reshape(frame_im.xdim * frame_im.ydim, 1), frame_im.uvec.reshape(frame_im.xdim * frame_im.ydim, 1)))
hf = 'x (as) y (as) I (Jy/pixel) Q (Jy/pixel) U (Jy/pixel)'
fmts = '%10.10f %10.10f %10.10f %10.10f %10.10f'
else:
outdata = np.hstack((xs, ys, frame_im.imvec.reshape(frame_im.xdim * frame_im.ydim, 1)))
hf = 'x (as) y (as) I (Jy/pixel)'
fmts = '%10.10f %10.10f %10.10f'
if not mjd:
mjd = float(frame_im.mjd)
if not time_frame:
time_frame = frame_im.time
mjd += time_frame / 24.0
head = 'SRC: %s \n' % frame_im.source + 'RA: ' + obsh.rastring(frame_im.ra) + '\n' + 'DEC: ' + obsh.decstring(frame_im.dec) + '\n' + 'MJD: %.6f \n' % float(mjd) + 'RF: %.4f GHz \n' % (frame_im.rf / 1000000000.0) + 'FOVX: %i pix %f as \n' % (frame_im.xdim, pdimas * frame_im.xdim) + 'FOVY: %i pix %f as \n' % (frame_im.ydim, pdimas * frame_im.ydim) + '------------------------------------\n' + hf
np.savetxt(fname_frame, outdata, header=head, fmt=fmts)
return
return
|
eht-imaging
|
positive
|
def union(self, types: Sequence[AnyType]) -> DeserializationMethodFactory:
discriminator = get_inherited_discriminator(types)
if discriminator is not None:
return self.discriminate(discriminator, types)
alt_factories = self._union_results(types)
if len(alt_factories) == 1:
return alt_factories[0]
def factory(constraints: Optional[Constraints], _) -> DeserializationMethod:
<DeepExtract>
elt_factories = [self.visit(tp) for tp in (fact.merge(constraints).method for fact in alt_factories)]
def factory(constraints: Optional[Constraints], _) -> DeserializationMethod:
def len_error(constraints: Constraints) -> Union[str, Callable[[Any], str]]:
alt_methods = constraints_validators(constraints)[list][0].error
alt_methods = TupleMethod(constraints_validators(constraints)[list], len_error(Constraints(min_items=len((fact.merge(constraints).method for fact in alt_factories)))), len_error(Constraints(max_items=len((fact.merge(constraints).method for fact in alt_factories)))), tuple((fact.method for fact in elt_factories)))
alt_methods = self._factory(factory, list)
</DeepExtract>
method_by_cls = dict(zip((f.cls for f in alt_factories if f.cls is not None), alt_methods))
if NoneType in types and len(alt_methods) == 2:
value_method = next((meth for (fact, meth) in zip(alt_factories, alt_methods) if fact.cls is not NoneType))
return OptionalMethod(value_method, self.coercer)
elif len(method_by_cls) == len(alt_factories) and (not any((isinstance(x, CoercerMethod) for x in alt_methods))):
return UnionByTypeMethod(method_by_cls)
else:
return UnionMethod(alt_methods)
return self._factory(factory)
|
def union(self, types: Sequence[AnyType]) -> DeserializationMethodFactory:
discriminator = get_inherited_discriminator(types)
if discriminator is not None:
return self.discriminate(discriminator, types)
alt_factories = self._union_results(types)
if len(alt_factories) == 1:
return alt_factories[0]
def factory(constraints: Optional[Constraints], _) -> DeserializationMethod:
elt_factories = [self.visit(tp) for tp in (fact.merge(constraints).method for fact in alt_factories)]
def factory(constraints: Optional[Constraints], _) -> DeserializationMethod:
def len_error(constraints: Constraints) -> Union[str, Callable[[Any], str]]:
alt_methods = constraints_validators(constraints)[list][0].error
alt_methods = TupleMethod(constraints_validators(constraints)[list], len_error(Constraints(min_items=len((fact.merge(constraints).method for fact in alt_factories)))), len_error(Constraints(max_items=len((fact.merge(constraints).method for fact in alt_factories)))), tuple((fact.method for fact in elt_factories)))
alt_methods = self._factory(factory, list)
method_by_cls = dict(zip((f.cls for f in alt_factories if f.cls is not None), alt_methods))
if NoneType in types and len(alt_methods) == 2:
value_method = next((meth for (fact, meth) in zip(alt_factories, alt_methods) if fact.cls is not NoneType))
return OptionalMethod(value_method, self.coercer)
elif len(method_by_cls) == len(alt_factories) and (not any((isinstance(x, CoercerMethod) for x in alt_methods))):
return UnionByTypeMethod(method_by_cls)
else:
return UnionMethod(alt_methods)
return self._factory(factory)
|
apischema
|
positive
|
def apply_phab(self, hg, phabricator_deployment, diff_id):
if phabricator_deployment == PHAB_PROD:
api_key = get_secret('PHABRICATOR_TOKEN')
url = get_secret('PHABRICATOR_URL')
else:
api_key = get_secret('PHABRICATOR_DEV_TOKEN')
url = get_secret('PHABRICATOR_DEV_URL')
phabricator_api = PhabricatorAPI(api_key=api_key, url=url)
stack = phabricator_api.load_patches_stack(diff_id)
assert len(stack) > 0, 'No patches to apply'
needed_stack = []
revisions = {}
for patch in reversed(stack):
needed_stack.insert(0, patch)
if self.has_revision(hg, patch.base_revision):
logger.info(f'Stopping at diff {patch.id} and revision {patch.base_revision}')
break
if not needed_stack:
logger.info('All the patches are already applied')
return
diffs = phabricator_api.search_diffs(diff_phid=[p.phid for p in stack])
revisions = {diff['phid']: phabricator_api.load_revision(rev_phid=diff['revisionPHID'], attachments={'reviewers': True}) for diff in diffs}
hg_base = needed_stack[0].base_revision
if not self.has_revision(hg, hg_base):
logger.warning('Missing base revision {} from Phabricator'.format(hg_base))
hg_base = 'tip'
if hg_base:
hg.update(rev=hg_base, clean=True)
logger.info('Updated repo to %s', hg_base)
if self.git_repo_dir and hg_base != 'tip':
try:
self.git_base = tuple(vcs_map.mercurial_to_git(self.git_repo_dir, [hg_base]))[0]
subprocess.run(['git', 'checkout', '-b', 'analysis_branch', self.git_base], check=True, cwd=self.git_repo_dir)
logger.info('Updated git repo to %s', self.git_base)
except Exception as e:
logger.info('Updating git repo to Mercurial %s failed: %s', hg_base, e)
def load_user(phid):
if phid.startswith('PHID-USER'):
return phabricator_api.load_user(user_phid=phid)
elif phid.startswith('PHID-PROJ'):
logger.info('Skipping group reviewer %s', phid)
else:
raise ValueError(f'Unsupported reviewer {phid}')
for patch in needed_stack:
revision = revisions[patch.phid]
message = '{}\n\n{}'.format(revision['fields']['title'], revision['fields']['summary'])
author_name = None
author_email = None
if patch.commits:
author_name = patch.commits[0]['author']['name']
author_email = patch.commits[0]['author']['email']
if author_name is None:
<DeepExtract>
if revision['fields']['authorPHID'].startswith('PHID-USER'):
author = phabricator_api.load_user(user_phid=revision['fields']['authorPHID'])
elif revision['fields']['authorPHID'].startswith('PHID-PROJ'):
logger.info('Skipping group reviewer %s', revision['fields']['authorPHID'])
else:
raise ValueError(f"Unsupported reviewer {revision['fields']['authorPHID']}")
</DeepExtract>
author_name = author['fields']['realName']
author_email = author['fields']['username']
reviewers = list(filter(None, (load_user(reviewer['reviewerPHID']) for reviewer in revision['attachments']['reviewers']['reviewers'])))
reviewers = set((reviewer['fields']['username'] for reviewer in reviewers))
if len(reviewers):
<DeepExtract>
if not reviewers:
reviewers_str = ''
else:
reviewers_str = 'r=' + ','.join(reviewers)
if message == '':
message = reviewers_str
message = message.splitlines()
commit_summary = message.pop(0)
message = '\n'.join(message)
if not R_SPECIFIER_RE.search(commit_summary):
commit_summary += ' ' + reviewers_str
else:
d = {'first': True}
def replace_first_reviewer(matchobj):
if R_SPECIFIER_RE.match(matchobj.group(2)):
if d['first']:
d['first'] = False
message = matchobj.group(1) + reviewers_str
else:
message = '\x00'
else:
message = matchobj.group(0)
commit_summary = re.sub(REVIEWERS_RE, replace_first_reviewer, commit_summary)
commit_summary = re.sub(LIST + '\x00', '', commit_summary)
commit_summary = re.sub('\x00', '', commit_summary)
if message == '':
message = commit_summary.strip()
else:
message = commit_summary.strip() + '\n' + message
</DeepExtract>
logger.info(f"Applying {patch.phid} from revision {revision['id']}: {message}")
hg.import_(patches=io.BytesIO(patch.patch.encode('utf-8')), message=message.encode('utf-8'), user=f'{author_name} <{author_email}>'.encode('utf-8'))
if self.git_repo_dir:
patch_proc = subprocess.Popen(['patch', '-p1', '--no-backup-if-mismatch', '--force'], stdin=subprocess.PIPE, cwd=self.git_repo_dir)
patch_proc.communicate(patch.patch.encode('utf-8'))
assert patch_proc.returncode == 0, 'Failed to apply patch'
subprocess.run(['git', '-c', f'user.name={author_name}', '-c', f'user.email={author_email}', 'commit', '-am', message], check=True, cwd=self.git_repo_dir)
|
def apply_phab(self, hg, phabricator_deployment, diff_id):
if phabricator_deployment == PHAB_PROD:
api_key = get_secret('PHABRICATOR_TOKEN')
url = get_secret('PHABRICATOR_URL')
else:
api_key = get_secret('PHABRICATOR_DEV_TOKEN')
url = get_secret('PHABRICATOR_DEV_URL')
phabricator_api = PhabricatorAPI(api_key=api_key, url=url)
stack = phabricator_api.load_patches_stack(diff_id)
assert len(stack) > 0, 'No patches to apply'
needed_stack = []
revisions = {}
for patch in reversed(stack):
needed_stack.insert(0, patch)
if self.has_revision(hg, patch.base_revision):
logger.info(f'Stopping at diff {patch.id} and revision {patch.base_revision}')
break
if not needed_stack:
logger.info('All the patches are already applied')
return
diffs = phabricator_api.search_diffs(diff_phid=[p.phid for p in stack])
revisions = {diff['phid']: phabricator_api.load_revision(rev_phid=diff['revisionPHID'], attachments={'reviewers': True}) for diff in diffs}
hg_base = needed_stack[0].base_revision
if not self.has_revision(hg, hg_base):
logger.warning('Missing base revision {} from Phabricator'.format(hg_base))
hg_base = 'tip'
if hg_base:
hg.update(rev=hg_base, clean=True)
logger.info('Updated repo to %s', hg_base)
if self.git_repo_dir and hg_base != 'tip':
try:
self.git_base = tuple(vcs_map.mercurial_to_git(self.git_repo_dir, [hg_base]))[0]
subprocess.run(['git', 'checkout', '-b', 'analysis_branch', self.git_base], check=True, cwd=self.git_repo_dir)
logger.info('Updated git repo to %s', self.git_base)
except Exception as e:
logger.info('Updating git repo to Mercurial %s failed: %s', hg_base, e)
def load_user(phid):
if phid.startswith('PHID-USER'):
return phabricator_api.load_user(user_phid=phid)
elif phid.startswith('PHID-PROJ'):
logger.info('Skipping group reviewer %s', phid)
else:
raise ValueError(f'Unsupported reviewer {phid}')
for patch in needed_stack:
revision = revisions[patch.phid]
message = '{}\n\n{}'.format(revision['fields']['title'], revision['fields']['summary'])
author_name = None
author_email = None
if patch.commits:
author_name = patch.commits[0]['author']['name']
author_email = patch.commits[0]['author']['email']
if author_name is None:
if revision['fields']['authorPHID'].startswith('PHID-USER'):
author = phabricator_api.load_user(user_phid=revision['fields']['authorPHID'])
elif revision['fields']['authorPHID'].startswith('PHID-PROJ'):
logger.info('Skipping group reviewer %s', revision['fields']['authorPHID'])
else:
raise ValueError(f"Unsupported reviewer {revision['fields']['authorPHID']}")
author_name = author['fields']['realName']
author_email = author['fields']['username']
reviewers = list(filter(None, (load_user(reviewer['reviewerPHID']) for reviewer in revision['attachments']['reviewers']['reviewers'])))
reviewers = set((reviewer['fields']['username'] for reviewer in reviewers))
if len(reviewers):
if not reviewers:
reviewers_str = ''
else:
reviewers_str = 'r=' + ','.join(reviewers)
if message == '':
message = reviewers_str
message = message.splitlines()
commit_summary = message.pop(0)
message = '\n'.join(message)
if not R_SPECIFIER_RE.search(commit_summary):
commit_summary += ' ' + reviewers_str
else:
d = {'first': True}
def replace_first_reviewer(matchobj):
if R_SPECIFIER_RE.match(matchobj.group(2)):
if d['first']:
d['first'] = False
message = matchobj.group(1) + reviewers_str
else:
message = '\x00'
else:
message = matchobj.group(0)
commit_summary = re.sub(REVIEWERS_RE, replace_first_reviewer, commit_summary)
commit_summary = re.sub(LIST + '\x00', '', commit_summary)
commit_summary = re.sub('\x00', '', commit_summary)
if message == '':
message = commit_summary.strip()
else:
message = commit_summary.strip() + '\n' + message
logger.info(f"Applying {patch.phid} from revision {revision['id']}: {message}")
hg.import_(patches=io.BytesIO(patch.patch.encode('utf-8')), message=message.encode('utf-8'), user=f'{author_name} <{author_email}>'.encode('utf-8'))
if self.git_repo_dir:
patch_proc = subprocess.Popen(['patch', '-p1', '--no-backup-if-mismatch', '--force'], stdin=subprocess.PIPE, cwd=self.git_repo_dir)
patch_proc.communicate(patch.patch.encode('utf-8'))
assert patch_proc.returncode == 0, 'Failed to apply patch'
subprocess.run(['git', '-c', f'user.name={author_name}', '-c', f'user.email={author_email}', 'commit', '-am', message], check=True, cwd=self.git_repo_dir)
|
bugbug
|
positive
|
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
(name, value) = item.split('=', 1)
if value[:1] == value[-1:] == '"':
<DeepExtract>
if value[1:-1] and value[1:-1][0] == value[1:-1][-1] == '"':
value[1:-1] = value[1:-1][1:-1]
if not is_filename or value[1:-1][:2] != '\\\\':
value[1:-1] = value[1:-1].replace('\\\\', '\\').replace('\\"', '"')
value[1:-1] = value[1:-1]
</DeepExtract>
result[name] = value
return result
|
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
(name, value) = item.split('=', 1)
if value[:1] == value[-1:] == '"':
if value[1:-1] and value[1:-1][0] == value[1:-1][-1] == '"':
value[1:-1] = value[1:-1][1:-1]
if not is_filename or value[1:-1][:2] != '\\\\':
value[1:-1] = value[1:-1].replace('\\\\', '\\').replace('\\"', '"')
value[1:-1] = value[1:-1]
result[name] = value
return result
|
AdvancedCloudFormation
|
positive
|
def get_info_about_model(inputs):
<DeepExtract>
model_folder_full_name = get_input_name_from_role(inputs, 'model_folder')
model_folder = dataiku.Folder(model_folder_full_name).get_path()
model_folder = model_folder
</DeepExtract>
model_info = utils.get_model_info(model_folder, goal=constants.SCORING)
config = utils.get_config(model_folder)
return add_can_use_gpu_to_resp({'layers': model_info['layers'], 'summary': model_info['summary'], 'default_layer_index': config['extract_layer_default_index']})
|
def get_info_about_model(inputs):
model_folder_full_name = get_input_name_from_role(inputs, 'model_folder')
model_folder = dataiku.Folder(model_folder_full_name).get_path()
model_folder = model_folder
model_info = utils.get_model_info(model_folder, goal=constants.SCORING)
config = utils.get_config(model_folder)
return add_can_use_gpu_to_resp({'layers': model_info['layers'], 'summary': model_info['summary'], 'default_layer_index': config['extract_layer_default_index']})
|
dataiku-contrib
|
positive
|
def _get_ground_truth(self):
num_loc_list = [len(loc) for loc in self.locations]
self.num_loc_list = num_loc_list
loc_to_size_range = []
for (l, loc_per_level) in enumerate(self.locations):
loc_to_size_range_per_level = loc_per_level.new_tensor(self.sizes_of_interest[l])
loc_to_size_range.append(loc_to_size_range_per_level[None].expand(num_loc_list[l], -1))
loc_to_size_range = torch.cat(loc_to_size_range, dim=0)
locations = torch.cat(self.locations, dim=0)
<DeepExtract>
labels = []
reg_targets = []
bezier_targets = []
(xs, ys) = (locations[:, 0], locations[:, 1])
num_targets = 0
for im_i in range(len(self.gt_instances)):
targets_per_im = self.gt_instances[im_i]
bboxes = targets_per_im.gt_boxes.tensor
labels_per_im = targets_per_im.gt_classes
if bboxes.numel() == 0:
labels.append(labels_per_im.new_zeros(locations.size(0)) + self.num_classes)
reg_targets.append(locations.new_zeros((locations.size(0), 4)))
bezier_targets.append(locations.new_zeros((locations.size(0), 16)))
continue
area = targets_per_im.gt_boxes.area()
l = xs[:, None] - bboxes[:, 0][None]
t = ys[:, None] - bboxes[:, 1][None]
r = bboxes[:, 2][None] - xs[:, None]
b = bboxes[:, 3][None] - ys[:, None]
reg_targets_per_im = torch.stack([l, t, r, b], dim=2)
bezier_pts = targets_per_im.beziers.view(-1, 8, 2)
x_targets = bezier_pts[:, :, 0][None] - xs[:, None, None]
y_targets = bezier_pts[:, :, 1][None] - ys[:, None, None]
bezier_targets_per_im = torch.stack((x_targets, y_targets), dim=3)
bezier_targets_per_im = bezier_targets_per_im.view(xs.size(0), bboxes.size(0), 16)
if self.center_sample:
is_in_boxes = self.get_sample_region(bboxes, self.strides, self.num_loc_list, xs, ys, radius=self.radius)
else:
is_in_boxes = reg_targets_per_im.min(dim=2)[0] > 0
max_reg_targets_per_im = reg_targets_per_im.max(dim=2)[0]
is_cared_in_the_level = (max_reg_targets_per_im >= loc_to_size_range[:, [0]]) & (max_reg_targets_per_im <= loc_to_size_range[:, [1]])
locations_to_gt_area = area[None].repeat(len(locations), 1)
locations_to_gt_area[is_in_boxes == 0] = INF
locations_to_gt_area[is_cared_in_the_level == 0] = INF
(locations_to_min_area, locations_to_gt_inds) = locations_to_gt_area.min(dim=1)
reg_targets_per_im = reg_targets_per_im[range(len(locations)), locations_to_gt_inds]
bezier_targets_per_im = bezier_targets_per_im[range(len(locations)), locations_to_gt_inds]
labels_per_im = labels_per_im[locations_to_gt_inds]
labels_per_im[locations_to_min_area == INF] = self.num_classes
labels.append(labels_per_im)
reg_targets.append(reg_targets_per_im)
bezier_targets.append(bezier_targets_per_im)
training_targets = {'labels': labels, 'reg_targets': reg_targets, 'bezier_targets': bezier_targets}
</DeepExtract>
training_targets = {k: self._transpose(v, num_loc_list) for (k, v) in training_targets.items()}
reg_targets = training_targets['reg_targets']
bezier_targets = training_targets['bezier_targets']
for l in range(len(reg_targets)):
reg_targets[l] = reg_targets[l] / float(self.strides[l])
bezier_targets[l] = bezier_targets[l] / float(self.strides[l])
return training_targets
|
def _get_ground_truth(self):
num_loc_list = [len(loc) for loc in self.locations]
self.num_loc_list = num_loc_list
loc_to_size_range = []
for (l, loc_per_level) in enumerate(self.locations):
loc_to_size_range_per_level = loc_per_level.new_tensor(self.sizes_of_interest[l])
loc_to_size_range.append(loc_to_size_range_per_level[None].expand(num_loc_list[l], -1))
loc_to_size_range = torch.cat(loc_to_size_range, dim=0)
locations = torch.cat(self.locations, dim=0)
labels = []
reg_targets = []
bezier_targets = []
(xs, ys) = (locations[:, 0], locations[:, 1])
num_targets = 0
for im_i in range(len(self.gt_instances)):
targets_per_im = self.gt_instances[im_i]
bboxes = targets_per_im.gt_boxes.tensor
labels_per_im = targets_per_im.gt_classes
if bboxes.numel() == 0:
labels.append(labels_per_im.new_zeros(locations.size(0)) + self.num_classes)
reg_targets.append(locations.new_zeros((locations.size(0), 4)))
bezier_targets.append(locations.new_zeros((locations.size(0), 16)))
continue
area = targets_per_im.gt_boxes.area()
l = xs[:, None] - bboxes[:, 0][None]
t = ys[:, None] - bboxes[:, 1][None]
r = bboxes[:, 2][None] - xs[:, None]
b = bboxes[:, 3][None] - ys[:, None]
reg_targets_per_im = torch.stack([l, t, r, b], dim=2)
bezier_pts = targets_per_im.beziers.view(-1, 8, 2)
x_targets = bezier_pts[:, :, 0][None] - xs[:, None, None]
y_targets = bezier_pts[:, :, 1][None] - ys[:, None, None]
bezier_targets_per_im = torch.stack((x_targets, y_targets), dim=3)
bezier_targets_per_im = bezier_targets_per_im.view(xs.size(0), bboxes.size(0), 16)
if self.center_sample:
is_in_boxes = self.get_sample_region(bboxes, self.strides, self.num_loc_list, xs, ys, radius=self.radius)
else:
is_in_boxes = reg_targets_per_im.min(dim=2)[0] > 0
max_reg_targets_per_im = reg_targets_per_im.max(dim=2)[0]
is_cared_in_the_level = (max_reg_targets_per_im >= loc_to_size_range[:, [0]]) & (max_reg_targets_per_im <= loc_to_size_range[:, [1]])
locations_to_gt_area = area[None].repeat(len(locations), 1)
locations_to_gt_area[is_in_boxes == 0] = INF
locations_to_gt_area[is_cared_in_the_level == 0] = INF
(locations_to_min_area, locations_to_gt_inds) = locations_to_gt_area.min(dim=1)
reg_targets_per_im = reg_targets_per_im[range(len(locations)), locations_to_gt_inds]
bezier_targets_per_im = bezier_targets_per_im[range(len(locations)), locations_to_gt_inds]
labels_per_im = labels_per_im[locations_to_gt_inds]
labels_per_im[locations_to_min_area == INF] = self.num_classes
labels.append(labels_per_im)
reg_targets.append(reg_targets_per_im)
bezier_targets.append(bezier_targets_per_im)
training_targets = {'labels': labels, 'reg_targets': reg_targets, 'bezier_targets': bezier_targets}
training_targets = {k: self._transpose(v, num_loc_list) for (k, v) in training_targets.items()}
reg_targets = training_targets['reg_targets']
bezier_targets = training_targets['bezier_targets']
for l in range(len(reg_targets)):
reg_targets[l] = reg_targets[l] / float(self.strides[l])
bezier_targets[l] = bezier_targets[l] / float(self.strides[l])
return training_targets
|
dict-guided
|
positive
|
def main(self):
game_state = game.GameState()
<DeepExtract>
action = np.zeros([self.Num_action])
(state, _, _) = game_state.frame_step(action)
state = self.reshape_input(state)
for i in range(self.Num_skipping * self.Num_stacking):
self.state_set.append(state)
state = state
</DeepExtract>
<DeepExtract>
self.state_set.append(state)
state_in = np.zeros((self.img_size, self.img_size, self.Num_colorChannel * self.Num_stacking))
for stack_frame in range(self.Num_stacking):
state_in[:, :, self.Num_colorChannel * stack_frame:self.Num_colorChannel * (stack_frame + 1)] = self.state_set[-1 - self.Num_skipping * stack_frame]
del self.state_set[0]
state_in = np.uint8(state_in)
stacked_state = state_in
</DeepExtract>
while True:
<DeepExtract>
progress = ''
if self.step <= self.Num_Exploration:
progress = 'Exploring'
elif self.step <= self.Num_Exploration + self.Num_Training:
progress = 'Training'
elif self.step <= self.Num_Exploration + self.Num_Training + self.Num_Testing:
progress = 'Testing'
else:
progress = 'Finished'
self.progress = progress
</DeepExtract>
<DeepExtract>
action = np.zeros([self.Num_action])
action_index = 0
if self.progress == 'Exploring':
action_index = random.randint(0, self.Num_action - 1)
action[action_index] = 1
elif self.progress == 'Training':
if random.random() < self.epsilon:
action_index = random.randint(0, self.Num_action - 1)
action[action_index] = 1
else:
Q_value = self.Q_action.eval(feed_dict={self.input: [stacked_state], self.tau_min: self.sample_min, self.tau_max: self.sample_max, self.num_sample: self.Num_quantile})
action_index = np.argmax(Q_value)
action[action_index] = 1
self.maxQ = np.max(Q_value)
if self.epsilon > self.final_epsilon:
self.epsilon -= self.first_epsilon / self.Num_Training
elif self.progress == 'Testing':
Q_value = self.Q_action.eval(feed_dict={self.input: [stacked_state], self.tau_min: self.sample_min, self.tau_max: self.sample_max, self.num_sample: self.Num_quantile})
action_index = np.argmax(Q_value)
action[action_index] = 1
self.maxQ = np.max(Q_value)
self.epsilon = 0
action = action
</DeepExtract>
(next_state, reward, terminal) = game_state.frame_step(action)
<DeepExtract>
state_out = cv2.resize(next_state, (self.img_size, self.img_size))
if self.Num_colorChannel == 1:
state_out = cv2.cvtColor(state_out, cv2.COLOR_BGR2GRAY)
state_out = np.reshape(state_out, (self.img_size, self.img_size, 1))
state_out = np.uint8(state_out)
next_state = state_out
</DeepExtract>
<DeepExtract>
self.state_set.append(next_state)
state_in = np.zeros((self.img_size, self.img_size, self.Num_colorChannel * self.Num_stacking))
for stack_frame in range(self.Num_stacking):
state_in[:, :, self.Num_colorChannel * stack_frame:self.Num_colorChannel * (stack_frame + 1)] = self.state_set[-1 - self.Num_skipping * stack_frame]
del self.state_set[0]
state_in = np.uint8(state_in)
stacked_next_state = state_in
</DeepExtract>
<DeepExtract>
if len(self.replay_memory) >= self.Num_replay_memory:
del self.replay_memory[0]
self.replay_memory.append([stacked_state, action, reward, stacked_next_state, terminal])
</DeepExtract>
if self.progress == 'Training':
if self.step % self.Num_update_target == 0:
<DeepExtract>
trainable_variables = tf.trainable_variables()
trainable_variables_network = [var for var in trainable_variables if var.name.startswith('network')]
trainable_variables_target = [var for var in trainable_variables if var.name.startswith('target')]
for i in range(len(trainable_variables_network)):
self.sess.run(tf.assign(trainable_variables_target[i], trainable_variables_network[i]))
</DeepExtract>
<DeepExtract>
minibatch = random.sample(self.replay_memory, self.Num_batch)
state_batch = [batch[0] for batch in minibatch]
action_batch = [batch[1] for batch in minibatch]
reward_batch = [batch[2] for batch in minibatch]
next_state_batch = [batch[3] for batch in minibatch]
terminal_batch = [batch[4] for batch in minibatch]
Q_batch = self.Q_action.eval(feed_dict={self.input: next_state_batch, self.tau_min: 0.0, self.tau_max: 1.0, self.num_sample: self.Num_quantile})
theta_batch = self.logits_target.eval(feed_dict={self.input_target: next_state_batch, self.tau_min_target: 0.0, self.tau_max_target: 1.0, self.num_sample_target: self.Num_quantile_target})
theta_target = []
for i in range(len(minibatch)):
theta_target.append([])
for j in range(self.Num_quantile_target):
if terminal_batch[i] == True:
theta_target[i].append(reward_batch[i])
else:
theta_target[i].append(reward_batch[i] + self.gamma * theta_batch[j, i, np.argmax(Q_batch[i])])
action_binary = np.zeros([self.Num_quantile, self.Num_batch, self.Num_action])
for i in range(len(action_batch)):
action_batch_max = np.argmax(action_batch[i])
action_binary[:, i, action_batch_max] = 1
(_, self.loss) = self.sess.run([self.train_step, self.loss_train], feed_dict={self.input: state_batch, self.theta_target: theta_target, self.action_binary_loss: action_binary, self.tau_min: 0.0, self.tau_max: 1.0, self.num_sample: self.Num_quantile})
</DeepExtract>
<DeepExtract>
if self.step == self.Num_Exploration + self.Num_Training:
save_path = self.saver.save(self.sess, 'saved_networks/' + self.game_name + '/' + self.date_time + '_' + self.algorithm + '/model.ckpt')
print('Model saved in file: %s' % save_path)
</DeepExtract>
stacked_state = stacked_next_state
self.score += reward
self.step += 1
<DeepExtract>
if self.progress != 'Exploring':
if terminal:
self.score_board += self.score
self.maxQ_board += self.maxQ
self.loss_board += self.loss
if self.episode % self.Num_plot_episode == 0 and self.episode != 0 and terminal or self.progress == 'Finished':
diff_step = self.step - self.step_old
diff_episode = self.episode - self.episode_old
tensorboard_info = [self.score_board / diff_episode, self.maxQ_board / diff_step, self.loss_board / diff_step]
for i in range(len(tensorboard_info)):
self.sess.run(self.update_ops[i], feed_dict={self.summary_placeholders[i]: float(tensorboard_info[i])})
summary_str = self.sess.run(self.summary_op)
self.summary_writer.add_summary(summary_str, self.step)
self.score_board = 0
self.maxQ_board = 0
self.loss_board = 0
self.step_old = self.step
self.episode_old = self.episode
else:
self.step_old = self.step
</DeepExtract>
if terminal:
<DeepExtract>
print('Step: ' + str(self.step) + ' / ' + 'Episode: ' + str(self.episode) + ' / ' + 'Progress: ' + self.progress + ' / ' + 'Epsilon: ' + str(self.epsilon) + ' / ' + 'Score: ' + str(self.score))
if self.progress != 'Exploring':
self.episode += 1
self.score = 0
state = self.initialization(game_state)
stacked_state = self.skip_and_stack_frame(state)
stacked_state = stacked_state
</DeepExtract>
if self.progress == 'Finished':
print('Finished!')
break
|
def main(self):
game_state = game.GameState()
action = np.zeros([self.Num_action])
(state, _, _) = game_state.frame_step(action)
state = self.reshape_input(state)
for i in range(self.Num_skipping * self.Num_stacking):
self.state_set.append(state)
state = state
self.state_set.append(state)
state_in = np.zeros((self.img_size, self.img_size, self.Num_colorChannel * self.Num_stacking))
for stack_frame in range(self.Num_stacking):
state_in[:, :, self.Num_colorChannel * stack_frame:self.Num_colorChannel * (stack_frame + 1)] = self.state_set[-1 - self.Num_skipping * stack_frame]
del self.state_set[0]
state_in = np.uint8(state_in)
stacked_state = state_in
while True:
progress = ''
if self.step <= self.Num_Exploration:
progress = 'Exploring'
elif self.step <= self.Num_Exploration + self.Num_Training:
progress = 'Training'
elif self.step <= self.Num_Exploration + self.Num_Training + self.Num_Testing:
progress = 'Testing'
else:
progress = 'Finished'
self.progress = progress
action = np.zeros([self.Num_action])
action_index = 0
if self.progress == 'Exploring':
action_index = random.randint(0, self.Num_action - 1)
action[action_index] = 1
elif self.progress == 'Training':
if random.random() < self.epsilon:
action_index = random.randint(0, self.Num_action - 1)
action[action_index] = 1
else:
Q_value = self.Q_action.eval(feed_dict={self.input: [stacked_state], self.tau_min: self.sample_min, self.tau_max: self.sample_max, self.num_sample: self.Num_quantile})
action_index = np.argmax(Q_value)
action[action_index] = 1
self.maxQ = np.max(Q_value)
if self.epsilon > self.final_epsilon:
self.epsilon -= self.first_epsilon / self.Num_Training
elif self.progress == 'Testing':
Q_value = self.Q_action.eval(feed_dict={self.input: [stacked_state], self.tau_min: self.sample_min, self.tau_max: self.sample_max, self.num_sample: self.Num_quantile})
action_index = np.argmax(Q_value)
action[action_index] = 1
self.maxQ = np.max(Q_value)
self.epsilon = 0
action = action
(next_state, reward, terminal) = game_state.frame_step(action)
state_out = cv2.resize(next_state, (self.img_size, self.img_size))
if self.Num_colorChannel == 1:
state_out = cv2.cvtColor(state_out, cv2.COLOR_BGR2GRAY)
state_out = np.reshape(state_out, (self.img_size, self.img_size, 1))
state_out = np.uint8(state_out)
next_state = state_out
self.state_set.append(next_state)
state_in = np.zeros((self.img_size, self.img_size, self.Num_colorChannel * self.Num_stacking))
for stack_frame in range(self.Num_stacking):
state_in[:, :, self.Num_colorChannel * stack_frame:self.Num_colorChannel * (stack_frame + 1)] = self.state_set[-1 - self.Num_skipping * stack_frame]
del self.state_set[0]
state_in = np.uint8(state_in)
stacked_next_state = state_in
if len(self.replay_memory) >= self.Num_replay_memory:
del self.replay_memory[0]
self.replay_memory.append([stacked_state, action, reward, stacked_next_state, terminal])
if self.progress == 'Training':
if self.step % self.Num_update_target == 0:
trainable_variables = tf.trainable_variables()
trainable_variables_network = [var for var in trainable_variables if var.name.startswith('network')]
trainable_variables_target = [var for var in trainable_variables if var.name.startswith('target')]
for i in range(len(trainable_variables_network)):
self.sess.run(tf.assign(trainable_variables_target[i], trainable_variables_network[i]))
minibatch = random.sample(self.replay_memory, self.Num_batch)
state_batch = [batch[0] for batch in minibatch]
action_batch = [batch[1] for batch in minibatch]
reward_batch = [batch[2] for batch in minibatch]
next_state_batch = [batch[3] for batch in minibatch]
terminal_batch = [batch[4] for batch in minibatch]
Q_batch = self.Q_action.eval(feed_dict={self.input: next_state_batch, self.tau_min: 0.0, self.tau_max: 1.0, self.num_sample: self.Num_quantile})
theta_batch = self.logits_target.eval(feed_dict={self.input_target: next_state_batch, self.tau_min_target: 0.0, self.tau_max_target: 1.0, self.num_sample_target: self.Num_quantile_target})
theta_target = []
for i in range(len(minibatch)):
theta_target.append([])
for j in range(self.Num_quantile_target):
if terminal_batch[i] == True:
theta_target[i].append(reward_batch[i])
else:
theta_target[i].append(reward_batch[i] + self.gamma * theta_batch[j, i, np.argmax(Q_batch[i])])
action_binary = np.zeros([self.Num_quantile, self.Num_batch, self.Num_action])
for i in range(len(action_batch)):
action_batch_max = np.argmax(action_batch[i])
action_binary[:, i, action_batch_max] = 1
(_, self.loss) = self.sess.run([self.train_step, self.loss_train], feed_dict={self.input: state_batch, self.theta_target: theta_target, self.action_binary_loss: action_binary, self.tau_min: 0.0, self.tau_max: 1.0, self.num_sample: self.Num_quantile})
if self.step == self.Num_Exploration + self.Num_Training:
save_path = self.saver.save(self.sess, 'saved_networks/' + self.game_name + '/' + self.date_time + '_' + self.algorithm + '/model.ckpt')
print('Model saved in file: %s' % save_path)
stacked_state = stacked_next_state
self.score += reward
self.step += 1
if self.progress != 'Exploring':
if terminal:
self.score_board += self.score
self.maxQ_board += self.maxQ
self.loss_board += self.loss
if self.episode % self.Num_plot_episode == 0 and self.episode != 0 and terminal or self.progress == 'Finished':
diff_step = self.step - self.step_old
diff_episode = self.episode - self.episode_old
tensorboard_info = [self.score_board / diff_episode, self.maxQ_board / diff_step, self.loss_board / diff_step]
for i in range(len(tensorboard_info)):
self.sess.run(self.update_ops[i], feed_dict={self.summary_placeholders[i]: float(tensorboard_info[i])})
summary_str = self.sess.run(self.summary_op)
self.summary_writer.add_summary(summary_str, self.step)
self.score_board = 0
self.maxQ_board = 0
self.loss_board = 0
self.step_old = self.step
self.episode_old = self.episode
else:
self.step_old = self.step
if terminal:
print('Step: ' + str(self.step) + ' / ' + 'Episode: ' + str(self.episode) + ' / ' + 'Progress: ' + self.progress + ' / ' + 'Epsilon: ' + str(self.epsilon) + ' / ' + 'Score: ' + str(self.score))
if self.progress != 'Exploring':
self.episode += 1
self.score = 0
state = self.initialization(game_state)
stacked_state = self.skip_and_stack_frame(state)
stacked_state = stacked_state
if self.progress == 'Finished':
print('Finished!')
break
|
DRL
|
positive
|
@dxpy.entry_point('main')
def main(rep1_ta, ctl1_ta, rep1_xcor, rep1_paired_end, chrom_sizes, genomesize, narrowpeak_as, gappedpeak_as, broadpeak_as, rep2_ta=None, ctl2_ta=None, rep2_xcor=None, rep2_paired_end=None, fragment_length=None):
rep1_ta_file = dxpy.DXFile(rep1_ta)
dxpy.download_dxfile(rep1_ta_file.get_id(), rep1_ta_file.name)
rep1_ta_filename = rep1_ta_file.name
ntags_rep1 = common.count_lines(rep1_ta_filename)
simplicate_experiment = rep1_ta and (not rep2_ta)
if simplicate_experiment:
logger.info('No rep2 tags specified so processing as a simplicate experiment.')
else:
logger.info('Rep1 and rep2 tags specified so processing as a replicated experiment.')
if not simplicate_experiment:
assert rep1_paired_end == rep2_paired_end, 'Mixed PE/SE not supported'
rep2_ta_file = dxpy.DXFile(rep2_ta)
dxpy.download_dxfile(rep2_ta_file.get_id(), rep2_ta_file.name)
rep2_ta_filename = rep2_ta_file.name
ntags_rep2 = common.count_lines(rep2_ta_filename)
paired_end = rep1_paired_end
unary_control = ctl1_ta == ctl2_ta or not ctl2_ta
ctl1_ta_file = dxpy.DXFile(ctl1_ta)
dxpy.download_dxfile(ctl1_ta_file.get_id(), ctl1_ta_file.name)
ctl1_ta_filename = ctl1_ta_file.name
if not unary_control:
ctl2_ta_file = dxpy.DXFile(ctl2_ta)
dxpy.download_dxfile(ctl2_ta_file.get_id(), ctl2_ta_file.name)
ctl2_ta_filename = ctl2_ta_file.name
else:
ctl2_ta_file = ctl1_ta_file
ctl2_ta_filename = ctl1_ta_file.name
ntags_ctl1 = common.count_lines(ctl1_ta_filename)
ntags_ctl2 = common.count_lines(ctl2_ta_filename)
rep1_control = ctl1_ta
rep1_ctl_msg = 'control rep1'
rep2_control = ctl2_ta
rep2_ctl_msg = 'control rep2'
rep_info = [(ntags_rep1, 'replicate 1', rep1_ta_filename)]
if not simplicate_experiment:
rep_info.append((ntags_rep2, 'replicate 2', rep2_ta_filename))
rep_info.extend([(ntags_ctl1, 'control 1', ctl1_ta_filename), (ntags_ctl2, 'control 2', ctl2_ta_filename)])
for (n, name, filename) in rep_info:
logger.info('Found %d tags in %s file %s' % (n, name, filename))
subprocess.check_output('ls -l', shell=True, stderr=subprocess.STDOUT)
if not simplicate_experiment:
pool_applet = dxpy.find_one_data_object(classname='applet', name='pool', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
pool_replicates_subjob = pool_applet.run({'inputs': [rep1_ta, rep2_ta], 'prefix': 'pooled_reps'}, name='Pool replicates')
pooled_replicates = pool_replicates_subjob.get_output_ref('pooled')
<DeepExtract>
xcor_only_applet = dxpy.find_one_data_object(classname='applet', name='xcor_only', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
pooled_replicates_xcor_subjob = xcor_only_applet.run({'input_tagAlign': pooled_replicates, 'paired_end': paired_end}, name='Pool cross-correlation')
</DeepExtract>
if unary_control:
logger.info('Only one control supplied.')
if not simplicate_experiment:
logger.info('Using one control for both replicate 1 and 2 and for the pool.')
rep2_control = rep1_control
control_for_pool = rep1_control
pool_ctl_msg = 'one control'
else:
pool_controls_subjob = pool_applet.run({'inputs': [ctl1_ta, ctl2_ta], 'prefix': 'PL_ctls'}, name='Pool controls')
pooled_controls = pool_controls_subjob.get_output_ref('pooled')
control_for_pool = pooled_controls
pool_ctl_msg = 'pooled controls'
ratio_ctl_reads = float(ntags_ctl1) / float(ntags_ctl2)
if ratio_ctl_reads < 1:
ratio_ctl_reads = 1 / ratio_ctl_reads
ratio_cutoff = 1.2
if ratio_ctl_reads > ratio_cutoff:
logger.info('Number of reads in controls differ by > factor of %f. Using pooled controls.' % ratio_cutoff)
rep1_control = pooled_controls
rep2_control = pooled_controls
elif ntags_ctl1 < ntags_rep1:
logger.info('Fewer reads in control replicate 1 than experiment replicate 1. Using pooled controls for replicate 1.')
rep1_control = pooled_controls
rep1_ctl_msg = 'pooled controls'
elif not simplicate_experiment and ntags_ctl2 < ntags_rep2:
logger.info('Fewer reads in control replicate 2 than experiment replicate 2. Using pooled controls for replicate 2.')
rep2_control = pooled_controls
rep2_ctl_msg = 'pooled controls'
else:
logger.info('Using distinct controls for replicate 1 and 2.')
rep1_control = ctl1_ta
rep2_control = ctl2_ta
rep1_ctl_msg = 'control rep1'
rep2_ctl_msg = 'control rep2'
pseudoreplicator_applet = dxpy.find_one_data_object(classname='applet', name='pseudoreplicator', zero_ok=False, more_ok=False, return_handler=True)
rep1_pr_subjob = pseudoreplicator_applet.run({'input_tags': rep1_ta})
if not simplicate_experiment:
rep2_pr_subjob = pseudoreplicator_applet.run({'input_tags': rep2_ta})
pool_pr1_subjob = pool_applet.run({'inputs': [rep1_pr_subjob.get_output_ref('pseudoreplicate1'), rep2_pr_subjob.get_output_ref('pseudoreplicate1')], 'prefix': 'PPR1'})
pool_pr2_subjob = pool_applet.run({'inputs': [rep1_pr_subjob.get_output_ref('pseudoreplicate2'), rep2_pr_subjob.get_output_ref('pseudoreplicate2')], 'prefix': 'PPR2'})
common_args = {'chrom_sizes': chrom_sizes, 'genomesize': genomesize, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as}
if fragment_length is not None:
common_args.update({'fragment_length': fragment_length})
common_args.update({'prefix': 'r1'})
<DeepExtract>
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': rep1_ta, 'control': rep1_control, 'xcor_scores_input': rep1_xcor, 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
rep1_peaks_subjob = macs2_applet.run(macs2_input, name=name)
</DeepExtract>
common_args.update({'prefix': 'r1pr1'})
<DeepExtract>
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': rep1_pr_subjob.get_output_ref('pseudoreplicate1'), 'control': rep1_control, 'xcor_scores_input': rep1_xcor, 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
rep1pr1_peaks_subjob = macs2_applet.run(macs2_input, name=name)
</DeepExtract>
common_args.update({'prefix': 'r1pr2'})
<DeepExtract>
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': rep1_pr_subjob.get_output_ref('pseudoreplicate2'), 'control': rep1_control, 'xcor_scores_input': rep1_xcor, 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
rep1pr2_peaks_subjob = macs2_applet.run(macs2_input, name=name)
</DeepExtract>
if not simplicate_experiment:
common_args.update({'prefix': 'r2'})
<DeepExtract>
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': rep2_ta, 'control': rep2_control, 'xcor_scores_input': rep2_xcor, 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
rep2_peaks_subjob = macs2_applet.run(macs2_input, name=name)
</DeepExtract>
common_args.update({'prefix': 'r2pr1'})
<DeepExtract>
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': rep2_pr_subjob.get_output_ref('pseudoreplicate1'), 'control': rep2_control, 'xcor_scores_input': rep2_xcor, 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
rep2pr1_peaks_subjob = macs2_applet.run(macs2_input, name=name)
</DeepExtract>
common_args.update({'prefix': 'r2pr2'})
<DeepExtract>
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': rep2_pr_subjob.get_output_ref('pseudoreplicate2'), 'control': rep2_control, 'xcor_scores_input': rep2_xcor, 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
rep2pr2_peaks_subjob = macs2_applet.run(macs2_input, name=name)
</DeepExtract>
common_args.update({'prefix': 'pool'})
<DeepExtract>
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': pooled_replicates, 'control': control_for_pool, 'xcor_scores_input': pooled_replicates_xcor_subjob.get_output_ref('CC_scores_file'), 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
pooled_peaks_subjob = macs2_applet.run(macs2_input, name=name)
</DeepExtract>
common_args.update({'prefix': 'ppr1'})
<DeepExtract>
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': pool_pr1_subjob.get_output_ref('pooled'), 'control': control_for_pool, 'xcor_scores_input': pooled_replicates_xcor_subjob.get_output_ref('CC_scores_file'), 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
pooledpr1_peaks_subjob = macs2_applet.run(macs2_input, name=name)
</DeepExtract>
common_args.update({'prefix': 'ppr2'})
<DeepExtract>
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': pool_pr2_subjob.get_output_ref('pooled'), 'control': control_for_pool, 'xcor_scores_input': pooled_replicates_xcor_subjob.get_output_ref('CC_scores_file'), 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
pooledpr2_peaks_subjob = macs2_applet.run(macs2_input, name=name)
</DeepExtract>
output = {'rep1_narrowpeaks': rep1_peaks_subjob.get_output_ref('narrowpeaks'), 'rep1_gappedpeaks': rep1_peaks_subjob.get_output_ref('gappedpeaks'), 'rep1_broadpeaks': rep1_peaks_subjob.get_output_ref('broadpeaks'), 'rep1_narrowpeaks_bb': rep1_peaks_subjob.get_output_ref('narrowpeaks_bb'), 'rep1_gappedpeaks_bb': rep1_peaks_subjob.get_output_ref('gappedpeaks_bb'), 'rep1_broadpeaks_bb': rep1_peaks_subjob.get_output_ref('broadpeaks_bb'), 'rep1_fc_signal': rep1_peaks_subjob.get_output_ref('fc_signal'), 'rep1_pvalue_signal': rep1_peaks_subjob.get_output_ref('pvalue_signal'), 'rep1pr1_narrowpeaks': rep1pr1_peaks_subjob.get_output_ref('narrowpeaks'), 'rep1pr1_gappedpeaks': rep1pr1_peaks_subjob.get_output_ref('gappedpeaks'), 'rep1pr1_broadpeaks': rep1pr1_peaks_subjob.get_output_ref('broadpeaks'), 'rep1pr1_fc_signal': rep1pr1_peaks_subjob.get_output_ref('fc_signal'), 'rep1pr1_pvalue_signal': rep1pr1_peaks_subjob.get_output_ref('pvalue_signal'), 'rep1pr2_narrowpeaks': rep1pr2_peaks_subjob.get_output_ref('narrowpeaks'), 'rep1pr2_gappedpeaks': rep1pr2_peaks_subjob.get_output_ref('gappedpeaks'), 'rep1pr2_broadpeaks': rep1pr2_peaks_subjob.get_output_ref('broadpeaks'), 'rep1pr2_fc_signal': rep1pr2_peaks_subjob.get_output_ref('fc_signal'), 'rep1pr2_pvalue_signal': rep1pr2_peaks_subjob.get_output_ref('pvalue_signal')}
if not simplicate_experiment:
output.update({'rep2_narrowpeaks': rep2_peaks_subjob.get_output_ref('narrowpeaks'), 'rep2_gappedpeaks': rep2_peaks_subjob.get_output_ref('gappedpeaks'), 'rep2_broadpeaks': rep2_peaks_subjob.get_output_ref('broadpeaks'), 'rep2_narrowpeaks_bb': rep2_peaks_subjob.get_output_ref('narrowpeaks_bb'), 'rep2_gappedpeaks_bb': rep2_peaks_subjob.get_output_ref('gappedpeaks_bb'), 'rep2_broadpeaks_bb': rep2_peaks_subjob.get_output_ref('broadpeaks_bb'), 'rep2_fc_signal': rep2_peaks_subjob.get_output_ref('fc_signal'), 'rep2_pvalue_signal': rep2_peaks_subjob.get_output_ref('pvalue_signal'), 'rep2pr1_narrowpeaks': rep2pr1_peaks_subjob.get_output_ref('narrowpeaks'), 'rep2pr1_gappedpeaks': rep2pr1_peaks_subjob.get_output_ref('gappedpeaks'), 'rep2pr1_broadpeaks': rep2pr1_peaks_subjob.get_output_ref('broadpeaks'), 'rep2pr1_fc_signal': rep2pr1_peaks_subjob.get_output_ref('fc_signal'), 'rep2pr1_pvalue_signal': rep2pr1_peaks_subjob.get_output_ref('pvalue_signal'), 'rep2pr2_narrowpeaks': rep2pr2_peaks_subjob.get_output_ref('narrowpeaks'), 'rep2pr2_gappedpeaks': rep2pr2_peaks_subjob.get_output_ref('gappedpeaks'), 'rep2pr2_broadpeaks': rep2pr2_peaks_subjob.get_output_ref('broadpeaks'), 'rep2pr2_fc_signal': rep2pr2_peaks_subjob.get_output_ref('fc_signal'), 'rep2pr2_pvalue_signal': rep2pr2_peaks_subjob.get_output_ref('pvalue_signal'), 'pooled_narrowpeaks': pooled_peaks_subjob.get_output_ref('narrowpeaks'), 'pooled_gappedpeaks': pooled_peaks_subjob.get_output_ref('gappedpeaks'), 'pooled_broadpeaks': pooled_peaks_subjob.get_output_ref('broadpeaks'), 'pooled_narrowpeaks_bb': pooled_peaks_subjob.get_output_ref('narrowpeaks_bb'), 'pooled_gappedpeaks_bb': pooled_peaks_subjob.get_output_ref('gappedpeaks_bb'), 'pooled_broadpeaks_bb': pooled_peaks_subjob.get_output_ref('broadpeaks_bb'), 'pooled_fc_signal': pooled_peaks_subjob.get_output_ref('fc_signal'), 'pooled_pvalue_signal': pooled_peaks_subjob.get_output_ref('pvalue_signal'), 'pooledpr1_narrowpeaks': pooledpr1_peaks_subjob.get_output_ref('narrowpeaks'), 'pooledpr1_gappedpeaks': pooledpr1_peaks_subjob.get_output_ref('gappedpeaks'), 'pooledpr1_broadpeaks': pooledpr1_peaks_subjob.get_output_ref('broadpeaks'), 'pooledpr1_fc_signal': pooledpr1_peaks_subjob.get_output_ref('fc_signal'), 'pooledpr1_pvalue_signal': pooledpr1_peaks_subjob.get_output_ref('pvalue_signal'), 'pooledpr2_narrowpeaks': pooledpr2_peaks_subjob.get_output_ref('narrowpeaks'), 'pooledpr2_gappedpeaks': pooledpr2_peaks_subjob.get_output_ref('gappedpeaks'), 'pooledpr2_broadpeaks': pooledpr2_peaks_subjob.get_output_ref('broadpeaks'), 'pooledpr2_fc_signal': pooledpr2_peaks_subjob.get_output_ref('fc_signal'), 'pooledpr2_pvalue_signal': pooledpr2_peaks_subjob.get_output_ref('pvalue_signal')})
return output
|
@dxpy.entry_point('main')
def main(rep1_ta, ctl1_ta, rep1_xcor, rep1_paired_end, chrom_sizes, genomesize, narrowpeak_as, gappedpeak_as, broadpeak_as, rep2_ta=None, ctl2_ta=None, rep2_xcor=None, rep2_paired_end=None, fragment_length=None):
rep1_ta_file = dxpy.DXFile(rep1_ta)
dxpy.download_dxfile(rep1_ta_file.get_id(), rep1_ta_file.name)
rep1_ta_filename = rep1_ta_file.name
ntags_rep1 = common.count_lines(rep1_ta_filename)
simplicate_experiment = rep1_ta and (not rep2_ta)
if simplicate_experiment:
logger.info('No rep2 tags specified so processing as a simplicate experiment.')
else:
logger.info('Rep1 and rep2 tags specified so processing as a replicated experiment.')
if not simplicate_experiment:
assert rep1_paired_end == rep2_paired_end, 'Mixed PE/SE not supported'
rep2_ta_file = dxpy.DXFile(rep2_ta)
dxpy.download_dxfile(rep2_ta_file.get_id(), rep2_ta_file.name)
rep2_ta_filename = rep2_ta_file.name
ntags_rep2 = common.count_lines(rep2_ta_filename)
paired_end = rep1_paired_end
unary_control = ctl1_ta == ctl2_ta or not ctl2_ta
ctl1_ta_file = dxpy.DXFile(ctl1_ta)
dxpy.download_dxfile(ctl1_ta_file.get_id(), ctl1_ta_file.name)
ctl1_ta_filename = ctl1_ta_file.name
if not unary_control:
ctl2_ta_file = dxpy.DXFile(ctl2_ta)
dxpy.download_dxfile(ctl2_ta_file.get_id(), ctl2_ta_file.name)
ctl2_ta_filename = ctl2_ta_file.name
else:
ctl2_ta_file = ctl1_ta_file
ctl2_ta_filename = ctl1_ta_file.name
ntags_ctl1 = common.count_lines(ctl1_ta_filename)
ntags_ctl2 = common.count_lines(ctl2_ta_filename)
rep1_control = ctl1_ta
rep1_ctl_msg = 'control rep1'
rep2_control = ctl2_ta
rep2_ctl_msg = 'control rep2'
rep_info = [(ntags_rep1, 'replicate 1', rep1_ta_filename)]
if not simplicate_experiment:
rep_info.append((ntags_rep2, 'replicate 2', rep2_ta_filename))
rep_info.extend([(ntags_ctl1, 'control 1', ctl1_ta_filename), (ntags_ctl2, 'control 2', ctl2_ta_filename)])
for (n, name, filename) in rep_info:
logger.info('Found %d tags in %s file %s' % (n, name, filename))
subprocess.check_output('ls -l', shell=True, stderr=subprocess.STDOUT)
if not simplicate_experiment:
pool_applet = dxpy.find_one_data_object(classname='applet', name='pool', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
pool_replicates_subjob = pool_applet.run({'inputs': [rep1_ta, rep2_ta], 'prefix': 'pooled_reps'}, name='Pool replicates')
pooled_replicates = pool_replicates_subjob.get_output_ref('pooled')
xcor_only_applet = dxpy.find_one_data_object(classname='applet', name='xcor_only', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
pooled_replicates_xcor_subjob = xcor_only_applet.run({'input_tagAlign': pooled_replicates, 'paired_end': paired_end}, name='Pool cross-correlation')
if unary_control:
logger.info('Only one control supplied.')
if not simplicate_experiment:
logger.info('Using one control for both replicate 1 and 2 and for the pool.')
rep2_control = rep1_control
control_for_pool = rep1_control
pool_ctl_msg = 'one control'
else:
pool_controls_subjob = pool_applet.run({'inputs': [ctl1_ta, ctl2_ta], 'prefix': 'PL_ctls'}, name='Pool controls')
pooled_controls = pool_controls_subjob.get_output_ref('pooled')
control_for_pool = pooled_controls
pool_ctl_msg = 'pooled controls'
ratio_ctl_reads = float(ntags_ctl1) / float(ntags_ctl2)
if ratio_ctl_reads < 1:
ratio_ctl_reads = 1 / ratio_ctl_reads
ratio_cutoff = 1.2
if ratio_ctl_reads > ratio_cutoff:
logger.info('Number of reads in controls differ by > factor of %f. Using pooled controls.' % ratio_cutoff)
rep1_control = pooled_controls
rep2_control = pooled_controls
elif ntags_ctl1 < ntags_rep1:
logger.info('Fewer reads in control replicate 1 than experiment replicate 1. Using pooled controls for replicate 1.')
rep1_control = pooled_controls
rep1_ctl_msg = 'pooled controls'
elif not simplicate_experiment and ntags_ctl2 < ntags_rep2:
logger.info('Fewer reads in control replicate 2 than experiment replicate 2. Using pooled controls for replicate 2.')
rep2_control = pooled_controls
rep2_ctl_msg = 'pooled controls'
else:
logger.info('Using distinct controls for replicate 1 and 2.')
rep1_control = ctl1_ta
rep2_control = ctl2_ta
rep1_ctl_msg = 'control rep1'
rep2_ctl_msg = 'control rep2'
pseudoreplicator_applet = dxpy.find_one_data_object(classname='applet', name='pseudoreplicator', zero_ok=False, more_ok=False, return_handler=True)
rep1_pr_subjob = pseudoreplicator_applet.run({'input_tags': rep1_ta})
if not simplicate_experiment:
rep2_pr_subjob = pseudoreplicator_applet.run({'input_tags': rep2_ta})
pool_pr1_subjob = pool_applet.run({'inputs': [rep1_pr_subjob.get_output_ref('pseudoreplicate1'), rep2_pr_subjob.get_output_ref('pseudoreplicate1')], 'prefix': 'PPR1'})
pool_pr2_subjob = pool_applet.run({'inputs': [rep1_pr_subjob.get_output_ref('pseudoreplicate2'), rep2_pr_subjob.get_output_ref('pseudoreplicate2')], 'prefix': 'PPR2'})
common_args = {'chrom_sizes': chrom_sizes, 'genomesize': genomesize, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as}
if fragment_length is not None:
common_args.update({'fragment_length': fragment_length})
common_args.update({'prefix': 'r1'})
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': rep1_ta, 'control': rep1_control, 'xcor_scores_input': rep1_xcor, 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
rep1_peaks_subjob = macs2_applet.run(macs2_input, name=name)
common_args.update({'prefix': 'r1pr1'})
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': rep1_pr_subjob.get_output_ref('pseudoreplicate1'), 'control': rep1_control, 'xcor_scores_input': rep1_xcor, 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
rep1pr1_peaks_subjob = macs2_applet.run(macs2_input, name=name)
common_args.update({'prefix': 'r1pr2'})
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': rep1_pr_subjob.get_output_ref('pseudoreplicate2'), 'control': rep1_control, 'xcor_scores_input': rep1_xcor, 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
rep1pr2_peaks_subjob = macs2_applet.run(macs2_input, name=name)
if not simplicate_experiment:
common_args.update({'prefix': 'r2'})
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': rep2_ta, 'control': rep2_control, 'xcor_scores_input': rep2_xcor, 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
rep2_peaks_subjob = macs2_applet.run(macs2_input, name=name)
common_args.update({'prefix': 'r2pr1'})
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': rep2_pr_subjob.get_output_ref('pseudoreplicate1'), 'control': rep2_control, 'xcor_scores_input': rep2_xcor, 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
rep2pr1_peaks_subjob = macs2_applet.run(macs2_input, name=name)
common_args.update({'prefix': 'r2pr2'})
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': rep2_pr_subjob.get_output_ref('pseudoreplicate2'), 'control': rep2_control, 'xcor_scores_input': rep2_xcor, 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
rep2pr2_peaks_subjob = macs2_applet.run(macs2_input, name=name)
common_args.update({'prefix': 'pool'})
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': pooled_replicates, 'control': control_for_pool, 'xcor_scores_input': pooled_replicates_xcor_subjob.get_output_ref('CC_scores_file'), 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
pooled_peaks_subjob = macs2_applet.run(macs2_input, name=name)
common_args.update({'prefix': 'ppr1'})
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': pool_pr1_subjob.get_output_ref('pooled'), 'control': control_for_pool, 'xcor_scores_input': pooled_replicates_xcor_subjob.get_output_ref('CC_scores_file'), 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
pooledpr1_peaks_subjob = macs2_applet.run(macs2_input, name=name)
common_args.update({'prefix': 'ppr2'})
macs2_applet = dxpy.find_one_data_object(classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID, zero_ok=False, more_ok=False, return_handler=True)
macs2_input = {'experiment': pool_pr2_subjob.get_output_ref('pooled'), 'control': control_for_pool, 'xcor_scores_input': pooled_replicates_xcor_subjob.get_output_ref('CC_scores_file'), 'chrom_sizes': chrom_sizes, 'narrowpeak_as': narrowpeak_as, 'gappedpeak_as': gappedpeak_as, 'broadpeak_as': broadpeak_as, 'genomesize': genomesize}
if prefix:
macs2_input.update({'prefix': prefix})
if fragment_length is not None:
macs2_input.update({'fragment_length': fragment_length})
pooledpr2_peaks_subjob = macs2_applet.run(macs2_input, name=name)
output = {'rep1_narrowpeaks': rep1_peaks_subjob.get_output_ref('narrowpeaks'), 'rep1_gappedpeaks': rep1_peaks_subjob.get_output_ref('gappedpeaks'), 'rep1_broadpeaks': rep1_peaks_subjob.get_output_ref('broadpeaks'), 'rep1_narrowpeaks_bb': rep1_peaks_subjob.get_output_ref('narrowpeaks_bb'), 'rep1_gappedpeaks_bb': rep1_peaks_subjob.get_output_ref('gappedpeaks_bb'), 'rep1_broadpeaks_bb': rep1_peaks_subjob.get_output_ref('broadpeaks_bb'), 'rep1_fc_signal': rep1_peaks_subjob.get_output_ref('fc_signal'), 'rep1_pvalue_signal': rep1_peaks_subjob.get_output_ref('pvalue_signal'), 'rep1pr1_narrowpeaks': rep1pr1_peaks_subjob.get_output_ref('narrowpeaks'), 'rep1pr1_gappedpeaks': rep1pr1_peaks_subjob.get_output_ref('gappedpeaks'), 'rep1pr1_broadpeaks': rep1pr1_peaks_subjob.get_output_ref('broadpeaks'), 'rep1pr1_fc_signal': rep1pr1_peaks_subjob.get_output_ref('fc_signal'), 'rep1pr1_pvalue_signal': rep1pr1_peaks_subjob.get_output_ref('pvalue_signal'), 'rep1pr2_narrowpeaks': rep1pr2_peaks_subjob.get_output_ref('narrowpeaks'), 'rep1pr2_gappedpeaks': rep1pr2_peaks_subjob.get_output_ref('gappedpeaks'), 'rep1pr2_broadpeaks': rep1pr2_peaks_subjob.get_output_ref('broadpeaks'), 'rep1pr2_fc_signal': rep1pr2_peaks_subjob.get_output_ref('fc_signal'), 'rep1pr2_pvalue_signal': rep1pr2_peaks_subjob.get_output_ref('pvalue_signal')}
if not simplicate_experiment:
output.update({'rep2_narrowpeaks': rep2_peaks_subjob.get_output_ref('narrowpeaks'), 'rep2_gappedpeaks': rep2_peaks_subjob.get_output_ref('gappedpeaks'), 'rep2_broadpeaks': rep2_peaks_subjob.get_output_ref('broadpeaks'), 'rep2_narrowpeaks_bb': rep2_peaks_subjob.get_output_ref('narrowpeaks_bb'), 'rep2_gappedpeaks_bb': rep2_peaks_subjob.get_output_ref('gappedpeaks_bb'), 'rep2_broadpeaks_bb': rep2_peaks_subjob.get_output_ref('broadpeaks_bb'), 'rep2_fc_signal': rep2_peaks_subjob.get_output_ref('fc_signal'), 'rep2_pvalue_signal': rep2_peaks_subjob.get_output_ref('pvalue_signal'), 'rep2pr1_narrowpeaks': rep2pr1_peaks_subjob.get_output_ref('narrowpeaks'), 'rep2pr1_gappedpeaks': rep2pr1_peaks_subjob.get_output_ref('gappedpeaks'), 'rep2pr1_broadpeaks': rep2pr1_peaks_subjob.get_output_ref('broadpeaks'), 'rep2pr1_fc_signal': rep2pr1_peaks_subjob.get_output_ref('fc_signal'), 'rep2pr1_pvalue_signal': rep2pr1_peaks_subjob.get_output_ref('pvalue_signal'), 'rep2pr2_narrowpeaks': rep2pr2_peaks_subjob.get_output_ref('narrowpeaks'), 'rep2pr2_gappedpeaks': rep2pr2_peaks_subjob.get_output_ref('gappedpeaks'), 'rep2pr2_broadpeaks': rep2pr2_peaks_subjob.get_output_ref('broadpeaks'), 'rep2pr2_fc_signal': rep2pr2_peaks_subjob.get_output_ref('fc_signal'), 'rep2pr2_pvalue_signal': rep2pr2_peaks_subjob.get_output_ref('pvalue_signal'), 'pooled_narrowpeaks': pooled_peaks_subjob.get_output_ref('narrowpeaks'), 'pooled_gappedpeaks': pooled_peaks_subjob.get_output_ref('gappedpeaks'), 'pooled_broadpeaks': pooled_peaks_subjob.get_output_ref('broadpeaks'), 'pooled_narrowpeaks_bb': pooled_peaks_subjob.get_output_ref('narrowpeaks_bb'), 'pooled_gappedpeaks_bb': pooled_peaks_subjob.get_output_ref('gappedpeaks_bb'), 'pooled_broadpeaks_bb': pooled_peaks_subjob.get_output_ref('broadpeaks_bb'), 'pooled_fc_signal': pooled_peaks_subjob.get_output_ref('fc_signal'), 'pooled_pvalue_signal': pooled_peaks_subjob.get_output_ref('pvalue_signal'), 'pooledpr1_narrowpeaks': pooledpr1_peaks_subjob.get_output_ref('narrowpeaks'), 'pooledpr1_gappedpeaks': pooledpr1_peaks_subjob.get_output_ref('gappedpeaks'), 'pooledpr1_broadpeaks': pooledpr1_peaks_subjob.get_output_ref('broadpeaks'), 'pooledpr1_fc_signal': pooledpr1_peaks_subjob.get_output_ref('fc_signal'), 'pooledpr1_pvalue_signal': pooledpr1_peaks_subjob.get_output_ref('pvalue_signal'), 'pooledpr2_narrowpeaks': pooledpr2_peaks_subjob.get_output_ref('narrowpeaks'), 'pooledpr2_gappedpeaks': pooledpr2_peaks_subjob.get_output_ref('gappedpeaks'), 'pooledpr2_broadpeaks': pooledpr2_peaks_subjob.get_output_ref('broadpeaks'), 'pooledpr2_fc_signal': pooledpr2_peaks_subjob.get_output_ref('fc_signal'), 'pooledpr2_pvalue_signal': pooledpr2_peaks_subjob.get_output_ref('pvalue_signal')})
return output
|
chip-seq-pipeline
|
positive
|
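The inlined version above repeats the same macs2 applet call once per prefix. A minimal sketch of how those calls could be driven from a single loop, assuming the same dxpy applet interface shown in the entry; run_macs2_batch and the layout of the calls dicts are illustrative names, not part of the pipeline.

import dxpy

def run_macs2_batch(calls, chrom_sizes, genomesize, narrowpeak_as,
                    gappedpeak_as, broadpeak_as, fragment_length=None):
    # calls: iterable of dicts with keys 'prefix', 'experiment', 'control', 'xcor'.
    macs2_applet = dxpy.find_one_data_object(
        classname='applet', name='macs2', project=dxpy.PROJECT_CONTEXT_ID,
        zero_ok=False, more_ok=False, return_handler=True)
    subjobs = {}
    for call in calls:
        macs2_input = {
            'experiment': call['experiment'],
            'control': call['control'],
            'xcor_scores_input': call['xcor'],
            'chrom_sizes': chrom_sizes,
            'narrowpeak_as': narrowpeak_as,
            'gappedpeak_as': gappedpeak_as,
            'broadpeak_as': broadpeak_as,
            'genomesize': genomesize,
            'prefix': call['prefix'],
        }
        if fragment_length is not None:
            macs2_input['fragment_length'] = fragment_length
        # One subjob per prefix, mirroring the r1/r1pr1/.../ppr2 calls above.
        subjobs[call['prefix']] = macs2_applet.run(
            macs2_input, name='MACS2 %s' % call['prefix'])
    return subjobs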
def _parse_channel_open_success(self, m):
chanid = m.get_int()
server_chanid = m.get_int()
server_window_size = m.get_int()
server_max_packet_size = m.get_int()
chan = self._channels.get(chanid)
if chan is None:
<DeepExtract>
if issubclass(type('Success for unrequested channel! [??]'), list):
for m in 'Success for unrequested channel! [??]':
self.logger.log(WARNING, m)
else:
self.logger.log(WARNING, 'Success for unrequested channel! [??]', *args)
</DeepExtract>
return
self.lock.acquire()
try:
chan._set_remote_channel(server_chanid, server_window_size, server_max_packet_size)
<DeepExtract>
if issubclass(type('Secsh channel {:d} opened.'.format(chanid)), list):
for m in 'Secsh channel {:d} opened.'.format(chanid):
self.logger.log(DEBUG, m)
else:
self.logger.log(DEBUG, 'Secsh channel {:d} opened.'.format(chanid), *args)
</DeepExtract>
if chanid in self.channel_events:
self.channel_events[chanid].set()
del self.channel_events[chanid]
finally:
self.lock.release()
return
|
def _parse_channel_open_success(self, m):
chanid = m.get_int()
server_chanid = m.get_int()
server_window_size = m.get_int()
server_max_packet_size = m.get_int()
chan = self._channels.get(chanid)
if chan is None:
if issubclass(type('Success for unrequested channel! [??]'), list):
for m in 'Success for unrequested channel! [??]':
self.logger.log(WARNING, m)
else:
self.logger.log(WARNING, 'Success for unrequested channel! [??]', *args)
return
self.lock.acquire()
try:
chan._set_remote_channel(server_chanid, server_window_size, server_max_packet_size)
if issubclass(type('Secsh channel {:d} opened.'.format(chanid)), list):
for m in 'Secsh channel {:d} opened.'.format(chanid):
self.logger.log(DEBUG, m)
else:
self.logger.log(DEBUG, 'Secsh channel {:d} opened.'.format(chanid), *args)
if chanid in self.channel_events:
self.channel_events[chanid].set()
del self.channel_events[chanid]
finally:
self.lock.release()
return
|
cerbrutus
|
positive
|
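The inlined log blocks in the entry above all follow one shape: log a single message, or log each element when a list is passed. A minimal sketch of such a helper, assuming only the standard logging module; _log is an illustrative name.

import logging

def _log(logger, level, msg, *args):
    # Accept either a single message (with optional %-style args) or a list of messages.
    if isinstance(msg, list):
        for m in msg:
            logger.log(level, m)
    else:
        logger.log(level, msg, *args)

For example, _log(logging.getLogger('transport'), logging.DEBUG, 'Secsh channel %d opened.', 42) emits one line, while passing a list emits one line per element.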
def eval_isophase_phaseswitch(isophase_vcf, config_file, out_f, name='NA'):
<DeepExtract>
for line in open(config_file):
(a, b) = line.strip().split('=')
if a == 'ref_chr':
_chr = b
elif a == 'ref_strand':
_strand = b
elif a == 'ref_start':
_start = b
elif a == 'ref_end':
_end = b
(_chr, _start, _end, _strand) = (_chr, _start, _end, _strand)
</DeepExtract>
reader = vcf.VCFReader(open(isophase_vcf))
prev = {}
r = next(reader)
for c in r.samples:
prev[c.sample] = c.data.GT
num_switch = 0
for r in reader:
for c in r.samples:
if c.data.GT.find('|') == -1:
continue
(a, b) = c.data.GT.split('|')
if a == b:
continue
if prev[c.sample] != c.data.GT:
num_switch += 1
prev[c.sample] = c.data.GT
out_f.write('{name}\t{chrom}\t{start}\t{end}\t{strand}\t{num_iso}\t{num_switch}\n'.format(name=name, chrom=_chr, start=_start, end=_end, strand=_strand, num_iso=len(r.samples), num_switch=num_switch))
|
def eval_isophase_phaseswitch(isophase_vcf, config_file, out_f, name='NA'):
for line in open(config_file):
(a, b) = line.strip().split('=')
if a == 'ref_chr':
_chr = b
elif a == 'ref_strand':
_strand = b
elif a == 'ref_start':
_start = b
elif a == 'ref_end':
_end = b
(_chr, _start, _end, _strand) = (_chr, _start, _end, _strand)
reader = vcf.VCFReader(open(isophase_vcf))
prev = {}
r = next(reader)
for c in r.samples:
prev[c.sample] = c.data.GT
num_switch = 0
for r in reader:
for c in r.samples:
if c.data.GT.find('|') == -1:
continue
(a, b) = c.data.GT.split('|')
if a == b:
continue
if prev[c.sample] != c.data.GT:
num_switch += 1
prev[c.sample] = c.data.GT
out_f.write('{name}\t{chrom}\t{start}\t{end}\t{strand}\t{num_iso}\t{num_switch}\n'.format(name=name, chrom=_chr, start=_start, end=_end, strand=_strand, num_iso=len(r.samples), num_switch=num_switch))
|
cDNA_Cupcake
|
positive
|
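The loop above counts a phase switch whenever a sample's phased genotype changes between consecutive VCF records. A minimal sketch of that counting rule for a single sample, assuming genotypes are given as '0|1'-style strings; count_phase_switches is an illustrative name.

def count_phase_switches(phased_genotypes):
    # Skip unphased ('/'-separated) and homozygous calls, as in the entry above.
    num_switch = 0
    prev = None
    for gt in phased_genotypes:
        if '|' not in gt:
            continue
        a, b = gt.split('|')
        if a == b:
            continue
        if prev is not None and prev != gt:
            num_switch += 1
        prev = gt
    return num_switch

count_phase_switches(['0|1', '0|1', '1|0', '1|0'])  # -> 1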
@wrap_input(0, 1)
def resample_pointdata(source, target, data, is_sphere=False, source_mask=None, target_mask=None, red_func='mean', k=3, fill=0, n_jobs=1, append=False, key=None):
"""Resample point data in source to target surface.
Parameters
----------
source : vtkPolyData or BSPolyData
Source surface.
target : vtkPolyData or BSPolyData
Target surface.
    data : str, 1D ndarray or list of str and ndarray
Point data in source surface to resample.
is_sphere : bool, optional
If True, assume source and target are provided as spheres that are
aligned. Default is False.
source_mask : str or 1D ndarray, optional
Boolean mask. If str, it must be in the point data attributes of
`source`. Default is None. If specified, only consider points within
the mask.
target_mask : str or 1D ndarray, optional
Boolean mask. If str, it must be in the point data attributes of
`target`. Default is None. If specified, only consider points within
the mask.
red_func : {'mean', 'weighted_mean', 'mode', 'weighted_mode'}, optional
Reduction function. Default is 'mean'.
k : int, optional
Number of closest points to consider during resampling.
Only used when ``is_sphere==False``. Default is 3.
fill : int or float, optional
Value used for entries out of the mask. Only used if the
`target_mask` is provided. Default is 0.
n_jobs : int, optional
Number of parallel jobs. Only used when ``is_sphere==False``.
Default is 1.
append: bool, optional
If True, append array to point data attributes of target surface and
return surface. Otherwise, only return resampled arrays.
Default is False.
key : str or list of str, optional
Array names to append to target's point data attributes. Only used if
``append == True``. If None, use names in `source_name`.
Default is None.
Returns
-------
output : vtkPolyData, BSPolyData or list of ndarray
Resampled point data. Return ndarray or list of ndarray if
``append == False``. Otherwise, return target surface with the
new arrays.
Notes
-----
This function is meant for the same source and target surfaces but with
different number of points. For other types of resampling, see
vtkResampleWithDataSet.
"""
opt = ['mean', 'mode', 'weighted_mean', 'weighted_mode']
if n_jobs != 1:
warnings.warn('The n_jobs parameter is deprecated and will be removed in a future version', DeprecationWarning)
is_list = True
if not isinstance(data, list):
data = [data]
is_list = False
if isinstance(red_func, str):
red_func = [red_func] * len(data)
if isinstance(source_mask, str):
source_mask = source.PointData[source_mask]
if isinstance(target_mask, str):
target_mask = source.PointData[target_mask]
if not is_sphere:
use_weights = False
if k > 1 and np.isin(red_func, opt[2:]).any():
use_weights = True
<DeepExtract>
sp = me.get_points(source, mask=source_mask)
tp = me.get_points(target, mask=target_mask)
tree = KDTree(sp, leafsize=20)
(dist, pids) = tree.query(tp, k=k, eps=0)
if use_weights:
pids = (pids, 1 / dist)
pids = pids
</DeepExtract>
if use_weights:
(pids, w) = pids
else:
<DeepExtract>
c = vtkGenericCell()
(close_pt, pcoord) = np.empty((2, 3))
(cid, subcid, dist) = [vtk_mutable(0) for _ in range(3)]
if source_mask is not None:
gids = np.arange(source.n_points)
name_ids = source.append_array(gids, at='p')
source_masked = mask_points(source, source_mask)
source.remove_array(name_ids)
source = source_masked
if source.n_points != np.count_nonzero(source_mask):
raise ValueError('Source mask is not fully connected.')
celoc = vtkCellLocator()
celoc.SetDataSet(source.VTKObject)
celoc.BuildLocator()
tp = me.get_points(target, mask=target_mask)
n_pts = tp.shape[0]
weights = np.empty((n_pts, 3))
pids = np.empty((n_pts, 3), dtype=np.int64)
for (i, p) in enumerate(tp):
celoc.FindClosestPoint(p, close_pt, c, cid, subcid, dist)
c.EvaluatePosition(close_pt, close_pt, subcid, pcoord, dist, weights[i])
pids[i] = [c.GetPointIds().GetId(k) for k in range(3)]
if source_mask is not None:
gids = source.get_array(name_ids, at='p')
pids = np.unique(gids, return_inverse=True)[1][pids]
(pids, w) = (pids, weights)
</DeepExtract>
k = None
for (i, rf) in enumerate(red_func):
if rf in ['mean', 'mode']:
red_func[i] = 'weighted_%s' % rf
resampled = [None] * len(data)
for (i, d) in enumerate(data):
if isinstance(d, str):
d = source.PointData[d]
if source_mask is not None:
d = d[source_mask]
if k == 1:
feat = d[pids]
elif red_func[i] == 'mean':
feat = np.nanmean(d[pids], axis=1)
elif red_func[i] == 'weighted_mean':
feat = np.average(d[pids], weights=w, axis=1)
elif red_func[i] == 'mode':
feat = mode(d[pids], axis=1)[0].squeeze()
elif red_func[i] == 'weighted_mode':
feat = weighted_mode(d[pids], w, axis=1)[0].squeeze()
feat = feat.astype(d.dtype)
else:
raise ValueError('Unknown red_func: {0}'.format(red_func[i]))
if target_mask is not None:
feat = map_to_mask(feat, mask=target_mask, axis=1, fill=fill)
resampled[i] = feat
if append and key is not None:
for (i, feat) in enumerate(resampled):
target.append_array(feat, name=key[i], at='p')
return resampled if is_list else resampled[0]
|
@wrap_input(0, 1)
def resample_pointdata(source, target, data, is_sphere=False, source_mask=None, target_mask=None, red_func='mean', k=3, fill=0, n_jobs=1, append=False, key=None):
"""Resample point data in source to target surface.
Parameters
----------
source : vtkPolyData or BSPolyData
Source surface.
target : vtkPolyData or BSPolyData
Target surface.
    data : str, 1D ndarray or list of str and ndarray
Point data in source surface to resample.
is_sphere : bool, optional
If True, assume source and target are provided as spheres that are
aligned. Default is False.
source_mask : str or 1D ndarray, optional
Boolean mask. If str, it must be in the point data attributes of
`source`. Default is None. If specified, only consider points within
the mask.
target_mask : str or 1D ndarray, optional
Boolean mask. If str, it must be in the point data attributes of
`target`. Default is None. If specified, only consider points within
the mask.
red_func : {'mean', 'weighted_mean', 'mode', 'weighted_mode'}, optional
Reduction function. Default is 'mean'.
k : int, optional
Number of closest points to consider during resampling.
Only used when ``is_sphere==False``. Default is 3.
fill : int or float, optional
Value used for entries out of the mask. Only used if the
`target_mask` is provided. Default is 0.
n_jobs : int, optional
Number of parallel jobs. Only used when ``is_sphere==False``.
Default is 1.
append: bool, optional
If True, append array to point data attributes of target surface and
return surface. Otherwise, only return resampled arrays.
Default is False.
key : str or list of str, optional
Array names to append to target's point data attributes. Only used if
``append == True``. If None, use names in `source_name`.
Default is None.
Returns
-------
output : vtkPolyData, BSPolyData or list of ndarray
Resampled point data. Return ndarray or list of ndarray if
``append == False``. Otherwise, return target surface with the
new arrays.
Notes
-----
This function is meant for the same source and target surfaces but with
different number of points. For other types of resampling, see
vtkResampleWithDataSet.
"""
opt = ['mean', 'mode', 'weighted_mean', 'weighted_mode']
if n_jobs != 1:
warnings.warn('The n_jobs parameter is deprecated and will be removed in a future version', DeprecationWarning)
is_list = True
if not isinstance(data, list):
data = [data]
is_list = False
if isinstance(red_func, str):
red_func = [red_func] * len(data)
if isinstance(source_mask, str):
source_mask = source.PointData[source_mask]
if isinstance(target_mask, str):
target_mask = source.PointData[target_mask]
if not is_sphere:
use_weights = False
if k > 1 and np.isin(red_func, opt[2:]).any():
use_weights = True
sp = me.get_points(source, mask=source_mask)
tp = me.get_points(target, mask=target_mask)
tree = KDTree(sp, leafsize=20)
(dist, pids) = tree.query(tp, k=k, eps=0)
if use_weights:
pids = (pids, 1 / dist)
pids = pids
if use_weights:
(pids, w) = pids
else:
c = vtkGenericCell()
(close_pt, pcoord) = np.empty((2, 3))
(cid, subcid, dist) = [vtk_mutable(0) for _ in range(3)]
if source_mask is not None:
gids = np.arange(source.n_points)
name_ids = source.append_array(gids, at='p')
source_masked = mask_points(source, source_mask)
source.remove_array(name_ids)
source = source_masked
if source.n_points != np.count_nonzero(source_mask):
raise ValueError('Source mask is not fully connected.')
celoc = vtkCellLocator()
celoc.SetDataSet(source.VTKObject)
celoc.BuildLocator()
tp = me.get_points(target, mask=target_mask)
n_pts = tp.shape[0]
weights = np.empty((n_pts, 3))
pids = np.empty((n_pts, 3), dtype=np.int64)
for (i, p) in enumerate(tp):
celoc.FindClosestPoint(p, close_pt, c, cid, subcid, dist)
c.EvaluatePosition(close_pt, close_pt, subcid, pcoord, dist, weights[i])
pids[i] = [c.GetPointIds().GetId(k) for k in range(3)]
if source_mask is not None:
gids = source.get_array(name_ids, at='p')
pids = np.unique(gids, return_inverse=True)[1][pids]
(pids, w) = (pids, weights)
k = None
for (i, rf) in enumerate(red_func):
if rf in ['mean', 'mode']:
red_func[i] = 'weighted_%s' % rf
resampled = [None] * len(data)
for (i, d) in enumerate(data):
if isinstance(d, str):
d = source.PointData[d]
if source_mask is not None:
d = d[source_mask]
if k == 1:
feat = d[pids]
elif red_func[i] == 'mean':
feat = np.nanmean(d[pids], axis=1)
elif red_func[i] == 'weighted_mean':
feat = np.average(d[pids], weights=w, axis=1)
elif red_func[i] == 'mode':
feat = mode(d[pids], axis=1)[0].squeeze()
elif red_func[i] == 'weighted_mode':
feat = weighted_mode(d[pids], w, axis=1)[0].squeeze()
feat = feat.astype(d.dtype)
else:
raise ValueError('Unknown red_func: {0}'.format(red_func[i]))
if target_mask is not None:
feat = map_to_mask(feat, mask=target_mask, axis=1, fill=fill)
resampled[i] = feat
if append and key is not None:
for (i, feat) in enumerate(resampled):
target.append_array(feat, name=key[i], at='p')
return resampled if is_list else resampled[0]
|
BrainSpace
|
positive
|
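The non-sphere branch above boils down to a k-nearest-neighbour lookup followed by a (possibly inverse-distance weighted) reduction. A minimal sketch of that core with SciPy, assuming 1-D per-point values; resample_knn is an illustrative name, and masks, mode reductions and appending to the surface are left out.

import numpy as np
from scipy.spatial import cKDTree

def resample_knn(source_points, source_values, target_points, k=3):
    # source_points: (n_src, 3), source_values: (n_src,), target_points: (n_tgt, 3)
    tree = cKDTree(source_points)
    dist, idx = tree.query(target_points, k=k)
    if k == 1:
        return source_values[idx]
    w = 1.0 / np.maximum(dist, np.finfo(float).eps)  # guard against zero distances
    return np.average(source_values[idx], weights=w, axis=1)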
def test_estimated_departure_matches_real(self):
<DeepExtract>
if True:
allowable: List[np.ndarray] = [np.array([0, 32])] * 2
else:
allowable: List[np.ndarray] = [np.array([0] + list(range(8, 33)))] * 2
if remaining_energy is None:
remaining_energy: List[float] = [3.3, 3.3]
network: InfrastructureDict = single_phase_single_constraint(2, 100, allowable_pilots=allowable, is_continuous=np.array([True] * 2))
sessions: List[SessionDict] = session_generator(num_sessions=2, arrivals=[0] * 2, departures=[11, 12], estimated_departures=[11, 12], requested_energy=[3.3] * 2, remaining_energy=remaining_energy, min_rates=[session_min_rate] * 2, max_rates=[32] * 2)
data = {'active_sessions': sessions, 'infrastructure_info': network, 'current_time': CURRENT_TIME, 'period': PERIOD}
interface = TestingInterface(data)
</DeepExtract>
sorted_sessions = least_laxity_first(interface.active_sessions(), interface)
self.assertEqual(sorted_sessions[0].session_id, '0')
self.assertEqual(sorted_sessions[1].session_id, '1')
|
def test_estimated_departure_matches_real(self):
if True:
allowable: List[np.ndarray] = [np.array([0, 32])] * 2
else:
allowable: List[np.ndarray] = [np.array([0] + list(range(8, 33)))] * 2
if remaining_energy is None:
remaining_energy: List[float] = [3.3, 3.3]
network: InfrastructureDict = single_phase_single_constraint(2, 100, allowable_pilots=allowable, is_continuous=np.array([True] * 2))
sessions: List[SessionDict] = session_generator(num_sessions=2, arrivals=[0] * 2, departures=[11, 12], estimated_departures=[11, 12], requested_energy=[3.3] * 2, remaining_energy=remaining_energy, min_rates=[session_min_rate] * 2, max_rates=[32] * 2)
data = {'active_sessions': sessions, 'infrastructure_info': network, 'current_time': CURRENT_TIME, 'period': PERIOD}
interface = TestingInterface(data)
sorted_sessions = least_laxity_first(interface.active_sessions(), interface)
self.assertEqual(sorted_sessions[0].session_id, '0')
self.assertEqual(sorted_sessions[1].session_id, '1')
|
acnportal
|
positive
|
def _stringify_header(self, options):
bits = []
<DeepExtract>
if options['left_padding_width'] is not None:
lpad = options['left_padding_width']
else:
lpad = options['padding_width']
if options['right_padding_width'] is not None:
rpad = options['right_padding_width']
else:
rpad = options['padding_width']
(lpad, rpad) = (lpad, rpad)
</DeepExtract>
if options['border']:
if options['hrules'] in (ALL, FRAME):
bits.append(self._hrule)
bits.append('\n')
if options['vrules'] in (ALL, FRAME):
bits.append(options['vertical_char'])
else:
bits.append(' ')
if not self._field_names:
if options['vrules'] in (ALL, FRAME):
bits.append(options['vertical_char'])
else:
bits.append(' ')
for (field, width) in zip(self._field_names, self._widths):
if options['fields'] and field not in options['fields']:
continue
if self._header_style == 'cap':
fieldname = field.capitalize()
elif self._header_style == 'title':
fieldname = field.title()
elif self._header_style == 'upper':
fieldname = field.upper()
elif self._header_style == 'lower':
fieldname = field.lower()
else:
fieldname = field
bits.append(' ' * lpad + self._justify(fieldname, width, self._align[field]) + ' ' * rpad)
if options['border']:
if options['vrules'] == ALL:
bits.append(options['vertical_char'])
else:
bits.append(' ')
if options['border'] and options['vrules'] == FRAME:
bits.pop()
bits.append(options['vertical_char'])
if options['border'] and options['hrules'] != NONE:
bits.append('\n')
bits.append(self._hrule)
return ''.join(bits)
|
def _stringify_header(self, options):
bits = []
if options['left_padding_width'] is not None:
lpad = options['left_padding_width']
else:
lpad = options['padding_width']
if options['right_padding_width'] is not None:
rpad = options['right_padding_width']
else:
rpad = options['padding_width']
(lpad, rpad) = (lpad, rpad)
if options['border']:
if options['hrules'] in (ALL, FRAME):
bits.append(self._hrule)
bits.append('\n')
if options['vrules'] in (ALL, FRAME):
bits.append(options['vertical_char'])
else:
bits.append(' ')
if not self._field_names:
if options['vrules'] in (ALL, FRAME):
bits.append(options['vertical_char'])
else:
bits.append(' ')
for (field, width) in zip(self._field_names, self._widths):
if options['fields'] and field not in options['fields']:
continue
if self._header_style == 'cap':
fieldname = field.capitalize()
elif self._header_style == 'title':
fieldname = field.title()
elif self._header_style == 'upper':
fieldname = field.upper()
elif self._header_style == 'lower':
fieldname = field.lower()
else:
fieldname = field
bits.append(' ' * lpad + self._justify(fieldname, width, self._align[field]) + ' ' * rpad)
if options['border']:
if options['vrules'] == ALL:
bits.append(options['vertical_char'])
else:
bits.append(' ')
if options['border'] and options['vrules'] == FRAME:
bits.pop()
bits.append(options['vertical_char'])
if options['border'] and options['hrules'] != NONE:
bits.append('\n')
bits.append(self._hrule)
return ''.join(bits)
|
C--Compiler
|
positive
|
def write(self, data):
<DeepExtract>
if self.file.tell() + len(data) > self.max_file_size:
self.close()
self.file = self.open(next(self.nextFile))
</DeepExtract>
self.file.write(data)
|
def write(self, data):
if self.file.tell() + len(data) > self.max_file_size:
self.close()
self.file = self.open(next(self.nextFile))
self.file.write(data)
|
DistillBERT
|
positive
|
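The write method above implements size-based file rotation: if the pending write would push the current file past max_file_size, close it and open the next file from a generator. A self-contained sketch of the same idea with illustrative names; the entry's class defines its own open, close and nextFile elsewhere.

class RotatingWriter:
    def __init__(self, paths, max_file_size):
        self.next_path = iter(paths)
        self.max_file_size = max_file_size
        self.file = open(next(self.next_path), 'wb')

    def write(self, data):
        # Rotate before the write that would exceed the size limit.
        if self.file.tell() + len(data) > self.max_file_size:
            self.file.close()
            self.file = open(next(self.next_path), 'wb')
        self.file.write(data)

    def close(self):
        self.file.close()

# w = RotatingWriter(['part0.bin', 'part1.bin'], max_file_size=1 << 20)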
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == ' ':
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = ''.join(ns_chars)
return (ns_text, ns_to_s_map)
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = ' '.join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if FLAGS.verbose_logging:
tf.logging.info("Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
<DeepExtract>
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(orig_text):
if c == ' ':
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = ''.join(ns_chars)
(orig_ns_text, orig_ns_to_s_map) = (ns_text, ns_to_s_map)
</DeepExtract>
<DeepExtract>
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(tok_text):
if c == ' ':
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = ''.join(ns_chars)
(tok_ns_text, tok_ns_to_s_map) = (ns_text, ns_to_s_map)
</DeepExtract>
if len(orig_ns_text) != len(tok_ns_text):
if FLAGS.verbose_logging:
tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text)
return orig_text
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:orig_end_position + 1]
return output_text
|
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == ' ':
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = ''.join(ns_chars)
return (ns_text, ns_to_s_map)
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = ' '.join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if FLAGS.verbose_logging:
tf.logging.info("Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(orig_text):
if c == ' ':
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = ''.join(ns_chars)
(orig_ns_text, orig_ns_to_s_map) = (ns_text, ns_to_s_map)
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(tok_text):
if c == ' ':
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = ''.join(ns_chars)
(tok_ns_text, tok_ns_to_s_map) = (ns_text, ns_to_s_map)
if len(orig_ns_text) != len(tok_ns_text):
if FLAGS.verbose_logging:
tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text)
return orig_text
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:orig_end_position + 1]
return output_text
|
coref
|
positive
|
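get_final_text above relies on a map from positions in a whitespace-stripped string back to positions in the original string. A small usage sketch of that mapping, reusing the same _strip_spaces logic; the helper name and sample strings are illustrative.

import collections

def strip_spaces_with_map(text):
    ns_chars = []
    ns_to_s_map = collections.OrderedDict()
    for i, c in enumerate(text):
        if c == ' ':
            continue
        ns_to_s_map[len(ns_chars)] = i  # stripped index -> original index
        ns_chars.append(c)
    return ''.join(ns_chars), ns_to_s_map

orig = 'a  b c'
ns_text, ns_to_s = strip_spaces_with_map(orig)  # ns_text == 'abc'
start, end = 1, 2                               # span 'bc' in the stripped text
print(orig[ns_to_s[start]:ns_to_s[end] + 1])    # -> 'b c'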
def get_public_key():
<DeepExtract>
keys = cache.get('RSA_KEYS')
if keys is None:
keys = generate_RSA()
cache.set('RSA_KEYS', keys, 3600)
(_, pub) = keys
</DeepExtract>
return pub.decode()
|
def get_public_key():
keys = cache.get('RSA_KEYS')
if keys is None:
keys = generate_RSA()
cache.set('RSA_KEYS', keys, 3600)
(_, pub) = keys
return pub.decode()
|
eoj3
|
positive
|
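The entry above is the common cache-or-generate pattern: look the value up, and only compute and store it on a miss. A minimal sketch with an illustrative helper name, assuming a Django-style cache object exposing get/set; newer Django versions also ship cache.get_or_set with this behaviour.

def get_or_generate(cache, key, factory, timeout=3600):
    value = cache.get(key)
    if value is None:
        value = factory()              # e.g. generate_RSA() in the entry above
        cache.set(key, value, timeout)
    return value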
def test_date_search_from_items(self):
<DeepExtract>
response = self.app.get('collections/{}/items?bbox=0,43,1,44'.format(self.tested_product_type), follow_redirects=True)
self.assertEqual(200, response.status_code)
result1 = geojson.loads(response.data.decode('utf-8'))
</DeepExtract>
<DeepExtract>
response = self.app.get('collections/{}/items?bbox=0,43,1,44&datetime=2018-01-20/2018-01-25'.format(self.tested_product_type), follow_redirects=True)
self.assertEqual(200, response.status_code)
result2 = geojson.loads(response.data.decode('utf-8'))
</DeepExtract>
self.assertGreaterEqual(len(result1.features), len(result2.features))
|
def test_date_search_from_items(self):
response = self.app.get('collections/{}/items?bbox=0,43,1,44'.format(self.tested_product_type), follow_redirects=True)
self.assertEqual(200, response.status_code)
result1 = geojson.loads(response.data.decode('utf-8'))
response = self.app.get('collections/{}/items?bbox=0,43,1,44&datetime=2018-01-20/2018-01-25'.format(self.tested_product_type), follow_redirects=True)
self.assertEqual(200, response.status_code)
result2 = geojson.loads(response.data.decode('utf-8'))
self.assertGreaterEqual(len(result1.features), len(result2.features))
|
eodag
|
positive
|
def median_filter(tr, multiplier=10, windowlength=0.5, interp_len=0.05):
"""
Filter out spikes in data above a multiple of MAD of the data.
    Currently only has the ability to replace spikes with linear
interpolation. In the future we would aim to fill the gap with something
more appropriate. Works in-place on data.
:type tr: obspy.core.trace.Trace
:param tr: trace to despike
:type multiplier: float
:param multiplier:
median absolute deviation multiplier to find spikes above.
:type windowlength: float
    :param windowlength: Length of window to look for spikes in, in seconds.
:type interp_len: float
:param interp_len: Length in seconds to interpolate around spikes.
:returns: :class:`obspy.core.trace.Trace`
.. warning::
Not particularly effective, and may remove earthquake signals, use with
caution.
"""
num_cores = cpu_count()
filt = tr.copy()
filt.detrend('linear')
try:
filt.filter('bandpass', freqmin=10.0, freqmax=tr.stats.sampling_rate / 2 - 1)
except Exception as e:
Logger.error('Could not filter due to error: {0}'.format(e))
data = filt.data
del filt
_windowlength = int(windowlength * tr.stats.sampling_rate)
_interp_len = int(interp_len * tr.stats.sampling_rate)
peaks = []
with Timer() as t:
pool = Pool(processes=num_cores)
results = [pool.apply_async(_median_window, args=(data[chunk * _windowlength:(chunk + 1) * _windowlength], chunk * _windowlength, multiplier, tr.stats.starttime + windowlength, tr.stats.sampling_rate)) for chunk in range(int(len(data) / _windowlength))]
pool.close()
for p in results:
peaks += p.get()
pool.join()
for peak in peaks:
<DeepExtract>
start_loc = peak[1] - int(0.5 * _interp_len)
end_loc = peak[1] + int(0.5 * _interp_len)
if start_loc < 0:
start_loc = 0
if end_loc > len(tr.data) - 1:
end_loc = len(tr.data) - 1
fill = np.linspace(tr.data[start_loc], tr.data[end_loc], end_loc - start_loc)
tr.data[start_loc:end_loc] = fill
tr.data = tr.data
</DeepExtract>
Logger.debug('Despiking took: %s s' % t.secs)
return tr
|
def median_filter(tr, multiplier=10, windowlength=0.5, interp_len=0.05):
"""
Filter out spikes in data above a multiple of MAD of the data.
    Currently only has the ability to replace spikes with linear
interpolation. In the future we would aim to fill the gap with something
more appropriate. Works in-place on data.
:type tr: obspy.core.trace.Trace
:param tr: trace to despike
:type multiplier: float
:param multiplier:
median absolute deviation multiplier to find spikes above.
:type windowlength: float
    :param windowlength: Length of window to look for spikes in, in seconds.
:type interp_len: float
:param interp_len: Length in seconds to interpolate around spikes.
:returns: :class:`obspy.core.trace.Trace`
.. warning::
Not particularly effective, and may remove earthquake signals, use with
caution.
"""
num_cores = cpu_count()
filt = tr.copy()
filt.detrend('linear')
try:
filt.filter('bandpass', freqmin=10.0, freqmax=tr.stats.sampling_rate / 2 - 1)
except Exception as e:
Logger.error('Could not filter due to error: {0}'.format(e))
data = filt.data
del filt
_windowlength = int(windowlength * tr.stats.sampling_rate)
_interp_len = int(interp_len * tr.stats.sampling_rate)
peaks = []
with Timer() as t:
pool = Pool(processes=num_cores)
results = [pool.apply_async(_median_window, args=(data[chunk * _windowlength:(chunk + 1) * _windowlength], chunk * _windowlength, multiplier, tr.stats.starttime + windowlength, tr.stats.sampling_rate)) for chunk in range(int(len(data) / _windowlength))]
pool.close()
for p in results:
peaks += p.get()
pool.join()
for peak in peaks:
start_loc = peak[1] - int(0.5 * _interp_len)
end_loc = peak[1] + int(0.5 * _interp_len)
if start_loc < 0:
start_loc = 0
if end_loc > len(tr.data) - 1:
end_loc = len(tr.data) - 1
fill = np.linspace(tr.data[start_loc], tr.data[end_loc], end_loc - start_loc)
tr.data[start_loc:end_loc] = fill
tr.data = tr.data
Logger.debug('Despiking took: %s s' % t.secs)
return tr
|
EQcorrscan
|
positive
|
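The despiking above flags samples whose deviation from the window median exceeds a multiple of the median absolute deviation (MAD) and patches them with a linear ramp. A single-window, single-process sketch of that idea with NumPy; despike_mad and its defaults are illustrative.

import numpy as np

def despike_mad(data, multiplier=10, interp_len=10):
    data = np.asarray(data, dtype=float).copy()
    med = np.median(data)
    mad = np.median(np.abs(data - med))
    peaks = np.flatnonzero(np.abs(data - med) > multiplier * mad)
    half = interp_len // 2
    for p in peaks:
        start = max(p - half, 0)
        end = min(p + half, len(data) - 1)
        # Replace the spike neighbourhood with a straight line between its edges.
        data[start:end] = np.linspace(data[start], data[end], end - start)
    return data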
def code_complete(self, contents_filename, original_filename, line, column, complete_macros=False, complete_lang_constructs=False, opts=None):
(tunit, tunit_build_flags, tunit_timestamp) = self.tunit_cache.fetch(original_filename)
if tunit is None:
build_flags = self.compiler_args.get(original_filename)
<DeepExtract>
try:
tunit = self.index.parse(path=contents_filename, args=build_flags, options=self.default_parsing_flags() if opts is None else opts)
except:
logging.error(sys.exc_info())
</DeepExtract>
if tunit:
self.tunit_cache.insert(original_filename, tunit, build_flags, os.path.getmtime(original_filename))
else:
logging.error('Unable to parse TUnit!')
with open(contents_filename) as f:
return tunit.codeComplete(tunit.spelling, line, column + 1, include_macros=complete_macros, include_code_patterns=complete_lang_constructs, unsaved_files=[(original_filename, f.read())])
|
def code_complete(self, contents_filename, original_filename, line, column, complete_macros=False, complete_lang_constructs=False, opts=None):
(tunit, tunit_build_flags, tunit_timestamp) = self.tunit_cache.fetch(original_filename)
if tunit is None:
build_flags = self.compiler_args.get(original_filename)
try:
tunit = self.index.parse(path=contents_filename, args=build_flags, options=self.default_parsing_flags() if opts is None else opts)
except:
logging.error(sys.exc_info())
if tunit:
self.tunit_cache.insert(original_filename, tunit, build_flags, os.path.getmtime(original_filename))
else:
logging.error('Unable to parse TUnit!')
with open(contents_filename) as f:
return tunit.codeComplete(tunit.spelling, line, column + 1, include_macros=complete_macros, include_code_patterns=complete_lang_constructs, unsaved_files=[(original_filename, f.read())])
|
cxxd
|
positive
|
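The entry above wraps libclang's completion API. A minimal sketch of calling it directly through clang.cindex, assuming build_flags already holds the right compiler arguments; complete_at is an illustrative name, and libclang expects 1-based line/column positions.

from clang.cindex import Index

def complete_at(filename, contents, line, column, build_flags=()):
    index = Index.create()
    tu = index.parse(path=filename, args=list(build_flags),
                     unsaved_files=[(filename, contents)])
    # Mirrors the entry's column + 1: codeComplete takes 1-based positions.
    return tu.codeComplete(filename, line, column,
                           unsaved_files=[(filename, contents)])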
def _filter_event_tensor(self, events_tensor: torch.Tensor, segments_tensor: torch.Tensor, segment_label_name: str):
"""
Filter segment tensor
:param events_tensor:
:param segment_label_name:
"""
if segment_label_name is not None and events_tensor.size(0) > 0:
for event_i in range(events_tensor.size(0)):
<DeepExtract>
segment_label_index = self.segment_label_name_to_index(segment_label_name)
time_pos = 0
for segment_i in range(segments_tensor.size(0)):
segment_start = segments_tensor[segment_i, 0]
segment_end = segments_tensor[segment_i, 1]
segment_label = segments_tensor[segment_i, 2]
segment_length = segment_end - segment_start
if segment_label == segment_label_index:
if segment_start <= events_tensor[event_i][0] <= segment_end and segment_start <= events_tensor[event_i][1] <= segment_end:
events_tensor[event_i][0] = time_pos + (events_tensor[event_i][0] - segment_start)
events_tensor[event_i][1] = time_pos + (events_tensor[event_i][1] - segment_start)
events_tensor[event_i] = events_tensor[event_i]
time_pos += segment_length
events_tensor[event_i][2] = -1
events_tensor[event_i] = events_tensor[event_i]
</DeepExtract>
return events_tensor[events_tensor[:, 2] != -1]
else:
return events_tensor
|
def _filter_event_tensor(self, events_tensor: torch.Tensor, segments_tensor: torch.Tensor, segment_label_name: str):
"""
Filter segment tensor
:param events_tensor:
:param segment_label_name:
"""
if segment_label_name is not None and events_tensor.size(0) > 0:
for event_i in range(events_tensor.size(0)):
segment_label_index = self.segment_label_name_to_index(segment_label_name)
time_pos = 0
for segment_i in range(segments_tensor.size(0)):
segment_start = segments_tensor[segment_i, 0]
segment_end = segments_tensor[segment_i, 1]
segment_label = segments_tensor[segment_i, 2]
segment_length = segment_end - segment_start
if segment_label == segment_label_index:
if segment_start <= events_tensor[event_i][0] <= segment_end and segment_start <= events_tensor[event_i][1] <= segment_end:
events_tensor[event_i][0] = time_pos + (events_tensor[event_i][0] - segment_start)
events_tensor[event_i][1] = time_pos + (events_tensor[event_i][1] - segment_start)
events_tensor[event_i] = events_tensor[event_i]
time_pos += segment_length
events_tensor[event_i][2] = -1
events_tensor[event_i] = events_tensor[event_i]
return events_tensor[events_tensor[:, 2] != -1]
else:
return events_tensor
|
EchoTorch
|
positive
|
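The inner loop above rewrites an event's (start, end) into the concatenated timeline of the segments that carry a given label, and discards events falling outside such segments. A per-event sketch of that remapping; remap_event_to_segments is an illustrative name, and segments is any iterable of (start, end, label) rows.

def remap_event_to_segments(event_start, event_end, segments, label_index):
    time_pos = 0
    for seg_start, seg_end, seg_label in segments:
        if seg_label != label_index:
            continue
        if seg_start <= event_start <= seg_end and seg_start <= event_end <= seg_end:
            # Shift the event into the concatenated time of matching segments.
            return (time_pos + (event_start - seg_start),
                    time_pos + (event_end - seg_start))
        time_pos += seg_end - seg_start
    return None  # the entry marks such events with -1 and filters them out

remap_event_to_segments(12, 14, [(0, 10, 0), (10, 20, 1)], label_index=1)  # -> (2, 4)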
def MessageSetItemDecoder(extensions_by_number):
"""Returns a decoder for a MessageSet item.
The parameter is the _extensions_by_number map for the message class.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_ReadTag = ReadTag
local_DecodeVarint = _DecodeVarint
local_SkipField = SkipField
def DecodeItem(buffer, pos, end, message, field_dict):
message_set_item_start = pos
type_id = -1
message_start = -1
message_end = -1
while 1:
(tag_bytes, pos) = local_ReadTag(buffer, pos)
if tag_bytes == type_id_tag_bytes:
(type_id, pos) = local_DecodeVarint(buffer, pos)
elif tag_bytes == message_tag_bytes:
(size, message_start) = local_DecodeVarint(buffer, pos)
pos = message_end = message_start + size
elif tag_bytes == item_end_tag_bytes:
break
else:
<DeepExtract>
wire_type = ord(tag_bytes[0:1]) & wiretype_mask
pos = WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)
</DeepExtract>
if pos == -1:
raise _DecodeError('Missing group end tag.')
if pos > end:
raise _DecodeError('Truncated message.')
if type_id == -1:
raise _DecodeError('MessageSet item missing type_id.')
if message_start == -1:
raise _DecodeError('MessageSet item missing message.')
extension = extensions_by_number.get(type_id)
if extension is not None:
value = field_dict.get(extension)
if value is None:
value = field_dict.setdefault(extension, extension.message_type._concrete_class())
if value._InternalParse(buffer, message_start, message_end) != message_end:
raise _DecodeError('Unexpected end-group tag.')
else:
if not message._unknown_fields:
message._unknown_fields = []
message._unknown_fields.append((MESSAGE_SET_ITEM_TAG, buffer[message_set_item_start:pos]))
return pos
return DecodeItem
|
def MessageSetItemDecoder(extensions_by_number):
"""Returns a decoder for a MessageSet item.
The parameter is the _extensions_by_number map for the message class.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_ReadTag = ReadTag
local_DecodeVarint = _DecodeVarint
local_SkipField = SkipField
def DecodeItem(buffer, pos, end, message, field_dict):
message_set_item_start = pos
type_id = -1
message_start = -1
message_end = -1
while 1:
(tag_bytes, pos) = local_ReadTag(buffer, pos)
if tag_bytes == type_id_tag_bytes:
(type_id, pos) = local_DecodeVarint(buffer, pos)
elif tag_bytes == message_tag_bytes:
(size, message_start) = local_DecodeVarint(buffer, pos)
pos = message_end = message_start + size
elif tag_bytes == item_end_tag_bytes:
break
else:
wire_type = ord(tag_bytes[0:1]) & wiretype_mask
pos = WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)
if pos == -1:
raise _DecodeError('Missing group end tag.')
if pos > end:
raise _DecodeError('Truncated message.')
if type_id == -1:
raise _DecodeError('MessageSet item missing type_id.')
if message_start == -1:
raise _DecodeError('MessageSet item missing message.')
extension = extensions_by_number.get(type_id)
if extension is not None:
value = field_dict.get(extension)
if value is None:
value = field_dict.setdefault(extension, extension.message_type._concrete_class())
if value._InternalParse(buffer, message_start, message_end) != message_end:
raise _DecodeError('Unexpected end-group tag.')
else:
if not message._unknown_fields:
message._unknown_fields = []
message._unknown_fields.append((MESSAGE_SET_ITEM_TAG, buffer[message_set_item_start:pos]))
return pos
return DecodeItem
|
botchallenge
|
positive
|
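An aside on the pair above: the three tag-byte constants that MessageSetItemDecoder precomputes follow directly from the protobuf wire format, where a tag is (field_number << 3) | wire_type encoded as a varint. A minimal standalone sketch (plain Python, no protobuf import; the helper name tag_byte is ours, not the library's) that reproduces those byte values:

WIRETYPE_VARINT = 0
WIRETYPE_LENGTH_DELIMITED = 2
WIRETYPE_END_GROUP = 4

def tag_byte(field_number: int, wire_type: int) -> bytes:
    # A single byte suffices here because these tag values are all < 128.
    return bytes([(field_number << 3) | wire_type])

assert tag_byte(2, WIRETYPE_VARINT) == b"\x10"            # type_id_tag_bytes
assert tag_byte(3, WIRETYPE_LENGTH_DELIMITED) == b"\x1a"  # message_tag_bytes
assert tag_byte(1, WIRETYPE_END_GROUP) == b"\x0c"         # item_end_tag_bytes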
def _rps(forecast: xr.Dataset, verif: xr.Dataset, dim: dimType=None, **metric_kwargs: metric_kwargsType) -> xr.Dataset:
"""Ranked Probability Score.
.. math::
RPS(p, k) = \\sum_{m=1}^{M} [(\\sum_{k=1}^{m} p_k) - (\\sum_{k=1}^{m} \\
o_k)]^{2}
Args:
forecast: Forecasts.
verif: Verification.
dim: Dimensions to aggregate.
**metric_kwargs, see :py:func:`.xskillscore.rps`
.. note::
If ``category_edges`` is xr.Dataset or tuple of xr.Datasets, climpred will
broadcast the grouped dimensions ``season``, ``month``, ``weekofyear``,
        ``dayofyear`` onto the dimensions ``init`` for forecast and ``time`` for
observations. see ``climpred.utils.broadcast_time_grouped_to_time``.
Notes:
+-----------------+-----------+
| **minimum** | 0.0 |
+-----------------+-----------+
| **maximum** | ∞ |
+-----------------+-----------+
| **perfect** | 0.0 |
+-----------------+-----------+
| **orientation** | negative |
+-----------------+-----------+
See also:
* :py:func:`.xskillscore.rps`
Example:
>>> category_edges = np.array([-0.5, 0.0, 0.5, 1.0])
>>> HindcastEnsemble.verify(
... metric="rps",
... comparison="m2o",
... dim=["member", "init"],
... alignment="same_verifs",
... category_edges=category_edges,
... )
<xarray.Dataset>
Dimensions: (lead: 10)
Coordinates:
* lead (lead) int32 1 2 3 4 5 6 7 8 9 10
observations_category_edge <U67 '[-np.inf, -0.5), [-0.5, 0.0), [0.0, 0.5...
forecasts_category_edge <U67 '[-np.inf, -0.5), [-0.5, 0.0), [0.0, 0.5...
skill <U11 'initialized'
Data variables:
SST (lead) float64 0.115 0.1123 ... 0.1687 0.1875
Attributes:
prediction_skill_software: climpred https://climpred.readthedocs.io/
skill_calculated_by_function: HindcastEnsemble.verify()
number_of_initializations: 64
number_of_members: 10
alignment: same_verifs
metric: rps
comparison: m2o
dim: ['member', 'init']
reference: []
category_edges: [-0.5 0. 0.5 1. ]
Provide ``category_edges`` as :py:class:`xarray.Dataset` for category edges
varying along dimensions.
>>> category_edges = (
... xr.DataArray([9.5, 10.0, 10.5, 11.0], dims="category_edge")
... .assign_coords(category_edge=[9.5, 10.0, 10.5, 11.0])
... .to_dataset(name="tos")
... )
>>> # category_edges = np.array([9.5, 10., 10.5, 11.]) # identical
>>> PerfectModelEnsemble.verify(
... metric="rps",
... comparison="m2c",
... dim=["member", "init"],
... category_edges=category_edges,
... )
<xarray.Dataset>
Dimensions: (lead: 20)
Coordinates:
* lead (lead) int64 1 2 3 4 5 6 7 ... 15 16 17 18 19 20
observations_category_edge <U71 '[-np.inf, 9.5), [9.5, 10.0), [10.0, 10....
forecasts_category_edge <U71 '[-np.inf, 9.5), [9.5, 10.0), [10.0, 10....
Data variables:
tos (lead) float64 0.08951 0.1615 ... 0.1399 0.2274
Attributes:
prediction_skill_software: climpred https://climpred.readthedocs.io/
skill_calculated_by_function: PerfectModelEnsemble.verify()
number_of_initializations: 12
number_of_members: 10
metric: rps
comparison: m2c
dim: ['member', 'init']
reference: []
category_edges: <xarray.Dataset>\\nDimensions: (cate...
Provide ``category_edges`` as tuple for different category edges to categorize
forecasts and observations.
>>> q = [1 / 3, 2 / 3] # terciles by month
>>> forecast_edges = (
... HindcastEnsemble.get_initialized()
... .groupby("init.month")
... .quantile(q=q, dim=["init", "member"])
... .rename({"quantile": "category_edge"})
... )
>>> obs_edges = (
... HindcastEnsemble.get_observations()
... .groupby("time.month")
... .quantile(q=q, dim="time")
... .rename({"quantile": "category_edge"})
... )
>>> category_edges = (obs_edges, forecast_edges)
>>> HindcastEnsemble.verify(
... metric="rps",
... comparison="m2o",
... dim=["member", "init"],
... alignment="same_verifs",
... category_edges=category_edges,
... )
<xarray.Dataset>
Dimensions: (lead: 10)
Coordinates:
* lead (lead) int32 1 2 3 4 5 6 7 8 9 10
observations_category_edge <U101 '[-np.inf, 0.3333333333333333), [0.3333...
forecasts_category_edge <U101 '[-np.inf, 0.3333333333333333), [0.3333...
skill <U11 'initialized'
Data variables:
SST (lead) float64 0.1248 0.1756 ... 0.3081 0.3413
Attributes:
prediction_skill_software: climpred https://climpred.readthedocs.io/
skill_calculated_by_function: HindcastEnsemble.verify()
number_of_initializations: 64
number_of_members: 10
alignment: same_verifs
metric: rps
comparison: m2o
dim: ['member', 'init']
reference: []
category_edges: (<xarray.Dataset>\\nDimensions: (mon...
"""
if 'category_edges' in metric_kwargs:
category_edges = metric_kwargs.pop('category_edges')
else:
category_edges = None
if category_edges is not None and 'member' in forecast.dims:
<DeepExtract>
dim = _preprocess_dims(dim)
if 'member' in dim:
dim = dim.copy()
dim.remove('member')
else:
raise ValueError(f'Expected to find `member` in `dim`, found {dim}')
dim = dim
</DeepExtract>
elif 'category' in forecast.dims and 'category' in verif.dims:
pass
else:
raise ValueError(f'rps either expects multiple forecast members and `category_edges` or `category` in both forecast and observations. Found: category_edges={category_edges}, forecast.dims = {forecast.dims}, observations.dims = {verif.dims}')
if 'lead' not in forecast.dims and 'lead' in forecast.coords and 'lead':
if isinstance(category_edges, tuple):
if 'lead' in category_edges[1].dims:
forecast_edges = category_edges[1].sel(lead=forecast.lead).rename({'init': 'time'})
from climpred.utils import my_shift
forecast_edges['time'] = my_shift(forecast_edges.time, forecast.lead)
forecast_edges = forecast_edges.sel(time=forecast.time)
forecast_edges = forecast_edges.assign_coords(time=forecast.time)
verif_edges = category_edges[0]
category_edges = (verif_edges, forecast_edges)
return rps(verif, forecast, category_edges, dim=dim, **metric_kwargs)
|
def _rps(forecast: xr.Dataset, verif: xr.Dataset, dim: dimType=None, **metric_kwargs: metric_kwargsType) -> xr.Dataset:
"""Ranked Probability Score.
.. math::
RPS(p, k) = \\sum_{m=1}^{M} [(\\sum_{k=1}^{m} p_k) - (\\sum_{k=1}^{m} \\
o_k)]^{2}
Args:
forecast: Forecasts.
verif: Verification.
dim: Dimensions to aggregate.
**metric_kwargs, see :py:func:`.xskillscore.rps`
.. note::
If ``category_edges`` is xr.Dataset or tuple of xr.Datasets, climpred will
broadcast the grouped dimensions ``season``, ``month``, ``weekofyear``,
        ``dayofyear`` onto the dimensions ``init`` for forecast and ``time`` for
observations. see ``climpred.utils.broadcast_time_grouped_to_time``.
Notes:
+-----------------+-----------+
| **minimum** | 0.0 |
+-----------------+-----------+
| **maximum** | ∞ |
+-----------------+-----------+
| **perfect** | 0.0 |
+-----------------+-----------+
| **orientation** | negative |
+-----------------+-----------+
See also:
* :py:func:`.xskillscore.rps`
Example:
>>> category_edges = np.array([-0.5, 0.0, 0.5, 1.0])
>>> HindcastEnsemble.verify(
... metric="rps",
... comparison="m2o",
... dim=["member", "init"],
... alignment="same_verifs",
... category_edges=category_edges,
... )
<xarray.Dataset>
Dimensions: (lead: 10)
Coordinates:
* lead (lead) int32 1 2 3 4 5 6 7 8 9 10
observations_category_edge <U67 '[-np.inf, -0.5), [-0.5, 0.0), [0.0, 0.5...
forecasts_category_edge <U67 '[-np.inf, -0.5), [-0.5, 0.0), [0.0, 0.5...
skill <U11 'initialized'
Data variables:
SST (lead) float64 0.115 0.1123 ... 0.1687 0.1875
Attributes:
prediction_skill_software: climpred https://climpred.readthedocs.io/
skill_calculated_by_function: HindcastEnsemble.verify()
number_of_initializations: 64
number_of_members: 10
alignment: same_verifs
metric: rps
comparison: m2o
dim: ['member', 'init']
reference: []
category_edges: [-0.5 0. 0.5 1. ]
Provide ``category_edges`` as :py:class:`xarray.Dataset` for category edges
varying along dimensions.
>>> category_edges = (
... xr.DataArray([9.5, 10.0, 10.5, 11.0], dims="category_edge")
... .assign_coords(category_edge=[9.5, 10.0, 10.5, 11.0])
... .to_dataset(name="tos")
... )
>>> # category_edges = np.array([9.5, 10., 10.5, 11.]) # identical
>>> PerfectModelEnsemble.verify(
... metric="rps",
... comparison="m2c",
... dim=["member", "init"],
... category_edges=category_edges,
... )
<xarray.Dataset>
Dimensions: (lead: 20)
Coordinates:
* lead (lead) int64 1 2 3 4 5 6 7 ... 15 16 17 18 19 20
observations_category_edge <U71 '[-np.inf, 9.5), [9.5, 10.0), [10.0, 10....
forecasts_category_edge <U71 '[-np.inf, 9.5), [9.5, 10.0), [10.0, 10....
Data variables:
tos (lead) float64 0.08951 0.1615 ... 0.1399 0.2274
Attributes:
prediction_skill_software: climpred https://climpred.readthedocs.io/
skill_calculated_by_function: PerfectModelEnsemble.verify()
number_of_initializations: 12
number_of_members: 10
metric: rps
comparison: m2c
dim: ['member', 'init']
reference: []
category_edges: <xarray.Dataset>\\nDimensions: (cate...
Provide ``category_edges`` as tuple for different category edges to categorize
forecasts and observations.
>>> q = [1 / 3, 2 / 3] # terciles by month
>>> forecast_edges = (
... HindcastEnsemble.get_initialized()
... .groupby("init.month")
... .quantile(q=q, dim=["init", "member"])
... .rename({"quantile": "category_edge"})
... )
>>> obs_edges = (
... HindcastEnsemble.get_observations()
... .groupby("time.month")
... .quantile(q=q, dim="time")
... .rename({"quantile": "category_edge"})
... )
>>> category_edges = (obs_edges, forecast_edges)
>>> HindcastEnsemble.verify(
... metric="rps",
... comparison="m2o",
... dim=["member", "init"],
... alignment="same_verifs",
... category_edges=category_edges,
... )
<xarray.Dataset>
Dimensions: (lead: 10)
Coordinates:
* lead (lead) int32 1 2 3 4 5 6 7 8 9 10
observations_category_edge <U101 '[-np.inf, 0.3333333333333333), [0.3333...
forecasts_category_edge <U101 '[-np.inf, 0.3333333333333333), [0.3333...
skill <U11 'initialized'
Data variables:
SST (lead) float64 0.1248 0.1756 ... 0.3081 0.3413
Attributes:
prediction_skill_software: climpred https://climpred.readthedocs.io/
skill_calculated_by_function: HindcastEnsemble.verify()
number_of_initializations: 64
number_of_members: 10
alignment: same_verifs
metric: rps
comparison: m2o
dim: ['member', 'init']
reference: []
category_edges: (<xarray.Dataset>\\nDimensions: (mon...
"""
if 'category_edges' in metric_kwargs:
category_edges = metric_kwargs.pop('category_edges')
else:
category_edges = None
if category_edges is not None and 'member' in forecast.dims:
dim = _preprocess_dims(dim)
if 'member' in dim:
dim = dim.copy()
dim.remove('member')
else:
raise ValueError(f'Expected to find `member` in `dim`, found {dim}')
dim = dim
elif 'category' in forecast.dims and 'category' in verif.dims:
pass
else:
raise ValueError(f'rps either expects multiple forecast members and `category_edges` or `category` in both forecast and observations. Found: category_edges={category_edges}, forecast.dims = {forecast.dims}, observations.dims = {verif.dims}')
if 'lead' not in forecast.dims and 'lead' in forecast.coords and 'lead':
if isinstance(category_edges, tuple):
if 'lead' in category_edges[1].dims:
forecast_edges = category_edges[1].sel(lead=forecast.lead).rename({'init': 'time'})
from climpred.utils import my_shift
forecast_edges['time'] = my_shift(forecast_edges.time, forecast.lead)
forecast_edges = forecast_edges.sel(time=forecast.time)
forecast_edges = forecast_edges.assign_coords(time=forecast.time)
verif_edges = category_edges[0]
category_edges = (verif_edges, forecast_edges)
return rps(verif, forecast, category_edges, dim=dim, **metric_kwargs)
|
climpred
|
positive
|
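The line-wrapped formula quoted in the _rps docstring above reads more cleanly as a single LaTeX expression (p_k is the forecast probability of category k, o_k the observed indicator, M the number of categories):

RPS(p, k) = \sum_{m=1}^{M} \left[ \left( \sum_{k=1}^{m} p_k \right) - \left( \sum_{k=1}^{m} o_k \right) \right]^{2}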
@pytest.mark.parametrize('created_at', [None, now()])
@pytest.mark.parametrize('sequence_number', ['2', '140'])
def test_heap_item_clock(created_at, sequence_number):
"""heap_item guarantees total ordering, even for identical items."""
<DeepExtract>
shard = Mock(spec=Shard)
</DeepExtract>
<DeepExtract>
x = 0
def call():
nonlocal x
x += 1
clock = x
clock = call
</DeepExtract>
record = local_record(created_at, sequence_number)
first_item = heap_item(clock, record, shard)
second_item = heap_item(clock, record, shard)
assert first_item < second_item
assert first_item[1] == second_item[1]
assert clock() == 3
|
@pytest.mark.parametrize('created_at', [None, now()])
@pytest.mark.parametrize('sequence_number', ['2', '140'])
def test_heap_item_clock(created_at, sequence_number):
"""heap_item guarantees total ordering, even for identical items."""
shard = Mock(spec=Shard)
x = 0
def call():
nonlocal x
x += 1
clock = x
clock = call
record = local_record(created_at, sequence_number)
first_item = heap_item(clock, record, shard)
second_item = heap_item(clock, record, shard)
assert first_item < second_item
assert first_item[1] == second_item[1]
assert clock() == 3
|
bloop
|
positive
|
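In the pair above, the flattened DeepExtract block ends up assigning the counter to clock instead of returning it; the behaviour the test actually asserts (two heap items consume two ticks, the third call returns 3) corresponds to a closure-based counter along these lines. This is a sketch of that intent, not code from the repo:

def make_clock():
    x = 0
    def call():
        nonlocal x
        x += 1
        return x   # each call yields the next strictly increasing tick
    return call

clock = make_clock()
first, second = clock(), clock()   # e.g. consumed by two heap_item() calls
assert (first, second, clock()) == (1, 2, 3)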
@patch('download.sys.exit')
@patch('download.os.path.exists')
@patch('download.open')
@patch('download.pycurl.Curl')
def test_download_write_fail_fatal(self, test_curl, test_open, test_path, test_exit):
"""
Test fatal failure to write to dest after successful GET request.
"""
<DeepExtract>
instance = test_curl.return_value
instance.URL = MockOpts.URL
instance.FOLLOWLOCATION = MockOpts.FOLLOWLOCATION
instance.FAILONERROR = MockOpts.FAILONERROR
instance.WRITEDATA = MockOpts.WRITEDATA
instance.POSTFIELDS = MockOpts.POSTFIELDS
instance = instance
</DeepExtract>
instance.setopt.side_effect = test_opts
test_open.side_effect = IOError
test_path.return_value = None
data = download.do_curl('foo', 'testdest', is_fatal=True)
test_exit.assert_called_once_with(1)
|
@patch('download.sys.exit')
@patch('download.os.path.exists')
@patch('download.open')
@patch('download.pycurl.Curl')
def test_download_write_fail_fatal(self, test_curl, test_open, test_path, test_exit):
"""
Test fatal failure to write to dest after successful GET request.
"""
instance = test_curl.return_value
instance.URL = MockOpts.URL
instance.FOLLOWLOCATION = MockOpts.FOLLOWLOCATION
instance.FAILONERROR = MockOpts.FAILONERROR
instance.WRITEDATA = MockOpts.WRITEDATA
instance.POSTFIELDS = MockOpts.POSTFIELDS
instance = instance
instance.setopt.side_effect = test_opts
test_open.side_effect = IOError
test_path.return_value = None
data = download.do_curl('foo', 'testdest', is_fatal=True)
test_exit.assert_called_once_with(1)
|
autospec
|
positive
|
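The fatal-write test above hinges on a unittest.mock behaviour: setting side_effect to an exception class makes the patched callable raise when invoked, which is what drives do_curl into its exit path. A self-contained illustration of that mechanism (no relation to the download module):

from unittest.mock import Mock

fake_open = Mock(side_effect=IOError)   # mirrors `test_open.side_effect = IOError`
raised = False
try:
    fake_open("testdest", "wb")
except IOError:
    raised = True
assert raised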
def get_psf_sky2pix(self, ra, dec):
"""
Determine the psf (a,b,pa) at a given sky location. The psf is in pixel
coordinates.
Parameters
----------
ra, dec : float
The sky position (degrees).
Returns
-------
a, b, pa : (float, float, float)
The psf semi-major axis (pixels), semi-minor axis (pixels), and
rotation angle (degrees). If a psf is defined then it is the psf
that is returned, otherwise the image restoring beam is returned.
"""
if self.psf_file is None:
return (self._psf_a, self._psf_b, self._psf_theta)
<DeepExtract>
if self.psf_file is None:
(x, y) = self.sky2pix((ra, dec))
(_, _, a, b, pa) = self.pix2sky_ellipse((x, y), self._psf_a, self._psf_b, self._psf_theta)
psf_sky = (a, b, pa)
(x, y) = self.psf_sky2pix((ra, dec))
log.debug('sky2sky {0}, {1}, {2}, {3}'.format(ra, dec, x, y))
x = int(np.clip(x, 0, self.psf_map.shape[1] - 1))
y = int(np.clip(y, 0, self.psf_map.shape[2] - 1))
psf_sky = self.psf_map[:3, x, y]
psf_sky = psf_sky
</DeepExtract>
psf_pix = self.sky2pix_ellipse((ra, dec), psf_sky[0], psf_sky[1], psf_sky[2])[2:]
return psf_pix
|
def get_psf_sky2pix(self, ra, dec):
"""
Determine the psf (a,b,pa) at a given sky location. The psf is in pixel
coordinates.
Parameters
----------
ra, dec : float
The sky position (degrees).
Returns
-------
a, b, pa : (float, float, float)
The psf semi-major axis (pixels), semi-minor axis (pixels), and
rotation angle (degrees). If a psf is defined then it is the psf
that is returned, otherwise the image restoring beam is returned.
"""
if self.psf_file is None:
return (self._psf_a, self._psf_b, self._psf_theta)
if self.psf_file is None:
(x, y) = self.sky2pix((ra, dec))
(_, _, a, b, pa) = self.pix2sky_ellipse((x, y), self._psf_a, self._psf_b, self._psf_theta)
psf_sky = (a, b, pa)
(x, y) = self.psf_sky2pix((ra, dec))
log.debug('sky2sky {0}, {1}, {2}, {3}'.format(ra, dec, x, y))
x = int(np.clip(x, 0, self.psf_map.shape[1] - 1))
y = int(np.clip(y, 0, self.psf_map.shape[2] - 1))
psf_sky = self.psf_map[:3, x, y]
psf_sky = psf_sky
psf_pix = self.sky2pix_ellipse((ra, dec), psf_sky[0], psf_sky[1], psf_sky[2])[2:]
return psf_pix
|
Aegean
|
positive
|
def add_node(info):
try:
assert info['port'] > 0 and info['cpus'] > 0
except Exception:
return
node = self._nodes.get(info['ip_addr'], None)
if node is None:
logger.debug('Discovered %s:%s (%s) with %s cpus', info['ip_addr'], info['port'], info['name'], info['cpus'])
node = _Node(info['ip_addr'], info['port'], info['cpus'], info['sign'], self.node_secret, platform=info['platform'], keyfile=self.node_keyfile, certfile=self.node_certfile)
node.name = info['name']
node.avail_info = info['avail_info']
self._nodes[node.ip_addr] = node
else:
node.last_pulse = time.time()
auth = auth_code(self.node_secret, info['sign'])
if info['cpus'] > 0:
node.avail_cpus = info['cpus']
node.cpus = min(node.cpus, node.avail_cpus)
else:
logger.warning('Invalid "cpus" %s from %s ignored', info['cpus'], info['ip_addr'])
if node.port == info['port'] and node.auth == auth:
return
logger.debug('Node %s rediscovered', info['ip_addr'])
node.port = info['port']
if node.auth is not None:
dead_jobs = [_job for _job in self._sched_jobs.itervalues() if _job.node is not None and _job.node.ip_addr == node.ip_addr]
node.busy = 0
node.auth = auth
clusters = list(node.clusters)
node.clusters.clear()
for cluster in clusters:
dispy_node = cluster._dispy_nodes.pop(node.ip_addr, None)
if not dispy_node:
continue
Task(self.send_node_status, cluster, dispy_node, DispyNode.Closed)
<DeepExtract>
if not dead_jobs:
return
for _job in dead_jobs:
self._sched_jobs.pop(_job.uid, None)
cluster = self._clusters.get(_job.compute_id, None)
if not cluster:
continue
if cluster._compute.reentrant and (not _job.pinned):
logger.debug('Rescheduling job %s from %s', _job.uid, _job.node.ip_addr)
_job.job.status = DispyJob.Created
_job.hash = os.urandom(10).encode('hex')
cluster._jobs.append(_job)
else:
logger.debug('Terminating job %s scheduled on %s', _job.uid, _job.node.ip_addr)
reply = _JobReply(_job, _job.node.ip_addr, status=DispyJob.Abandoned)
reply.result = serialize(None)
cluster.pending_jobs -= 1
if cluster.pending_jobs == 0:
cluster.end_time = time.time()
self.done_jobs[_job.uid] = _job
Task(self.send_job_result, _job.uid, cluster, reply, resending=False)
self._sched_event.set()
</DeepExtract>
node.auth = auth
setup_computations = []
node.name = info['name']
node.scheduler_ip_addr = info['scheduler_ip_addr']
for cluster in self._clusters.itervalues():
if cluster in node.clusters:
continue
compute = cluster._compute
for node_alloc in cluster._node_allocs:
cpus = node_alloc.allocate(cluster, node.ip_addr, node.name, node.avail_cpus)
if cpus <= 0:
continue
if cluster.exclusive or self.cooperative:
node.cpus = min(node.avail_cpus, cpus)
setup_computations.append((node_alloc.depends, node_alloc.setup_args, compute))
break
if setup_computations:
Task(self.setup_node, node, setup_computations)
|
def add_node(info):
try:
assert info['port'] > 0 and info['cpus'] > 0
except Exception:
return
node = self._nodes.get(info['ip_addr'], None)
if node is None:
logger.debug('Discovered %s:%s (%s) with %s cpus', info['ip_addr'], info['port'], info['name'], info['cpus'])
node = _Node(info['ip_addr'], info['port'], info['cpus'], info['sign'], self.node_secret, platform=info['platform'], keyfile=self.node_keyfile, certfile=self.node_certfile)
node.name = info['name']
node.avail_info = info['avail_info']
self._nodes[node.ip_addr] = node
else:
node.last_pulse = time.time()
auth = auth_code(self.node_secret, info['sign'])
if info['cpus'] > 0:
node.avail_cpus = info['cpus']
node.cpus = min(node.cpus, node.avail_cpus)
else:
logger.warning('Invalid "cpus" %s from %s ignored', info['cpus'], info['ip_addr'])
if node.port == info['port'] and node.auth == auth:
return
logger.debug('Node %s rediscovered', info['ip_addr'])
node.port = info['port']
if node.auth is not None:
dead_jobs = [_job for _job in self._sched_jobs.itervalues() if _job.node is not None and _job.node.ip_addr == node.ip_addr]
node.busy = 0
node.auth = auth
clusters = list(node.clusters)
node.clusters.clear()
for cluster in clusters:
dispy_node = cluster._dispy_nodes.pop(node.ip_addr, None)
if not dispy_node:
continue
Task(self.send_node_status, cluster, dispy_node, DispyNode.Closed)
if not dead_jobs:
return
for _job in dead_jobs:
self._sched_jobs.pop(_job.uid, None)
cluster = self._clusters.get(_job.compute_id, None)
if not cluster:
continue
if cluster._compute.reentrant and (not _job.pinned):
logger.debug('Rescheduling job %s from %s', _job.uid, _job.node.ip_addr)
_job.job.status = DispyJob.Created
_job.hash = os.urandom(10).encode('hex')
cluster._jobs.append(_job)
else:
logger.debug('Terminating job %s scheduled on %s', _job.uid, _job.node.ip_addr)
reply = _JobReply(_job, _job.node.ip_addr, status=DispyJob.Abandoned)
reply.result = serialize(None)
cluster.pending_jobs -= 1
if cluster.pending_jobs == 0:
cluster.end_time = time.time()
self.done_jobs[_job.uid] = _job
Task(self.send_job_result, _job.uid, cluster, reply, resending=False)
self._sched_event.set()
node.auth = auth
setup_computations = []
node.name = info['name']
node.scheduler_ip_addr = info['scheduler_ip_addr']
for cluster in self._clusters.itervalues():
if cluster in node.clusters:
continue
compute = cluster._compute
for node_alloc in cluster._node_allocs:
cpus = node_alloc.allocate(cluster, node.ip_addr, node.name, node.avail_cpus)
if cpus <= 0:
continue
if cluster.exclusive or self.cooperative:
node.cpus = min(node.avail_cpus, cpus)
setup_computations.append((node_alloc.depends, node_alloc.setup_args, compute))
break
if setup_computations:
Task(self.setup_node, node, setup_computations)
|
dispy
|
positive
|
def __init__(self, *, mtype=None, mid=None, code=None, payload=b'', token=b'', uri=None, transport_tuning=None, **kwargs):
self.version = 1
if mtype is None:
self.mtype = None
else:
self.mtype = Type(mtype)
self.mid = mid
if code is None:
self.code = None
else:
self.code = Code(code)
self.token = token
self.payload = payload
self.opt = Options()
self.remote = None
self.transport_tuning = transport_tuning or TransportTuning()
if self.payload is None:
raise TypeError('Payload must not be None. Use empty string instead.')
if uri:
<DeepExtract>
parsed = urllib.parse.urlparse(uri)
if parsed.fragment:
raise ValueError('Fragment identifiers can not be set on a request URI')
if parsed.scheme not in coap_schemes:
self.opt.proxy_uri = uri
return
if parsed.username or parsed.password:
raise ValueError('User name and password not supported.')
if parsed.path not in ('', '/'):
self.opt.uri_path = [urllib.parse.unquote(x) for x in parsed.path.split('/')[1:]]
else:
self.opt.uri_path = []
if parsed.query:
self.opt.uri_query = [urllib.parse.unquote(x) for x in parsed.query.split('&')]
else:
self.opt.uri_query = []
self.remote = UndecidedRemote(parsed.scheme, parsed.netloc)
is_ip_literal = parsed.netloc.startswith('[') or (parsed.hostname.count('.') == 3 and all((c in '0123456789.' for c in parsed.hostname)) and all((int(x) <= 255 for x in parsed.hostname.split('.'))))
if set_uri_host and (not is_ip_literal):
self.opt.uri_host = urllib.parse.unquote(parsed.hostname).translate(_ascii_lowercase)
</DeepExtract>
for (k, v) in kwargs.items():
setattr(self.opt, k, v)
|
def __init__(self, *, mtype=None, mid=None, code=None, payload=b'', token=b'', uri=None, transport_tuning=None, **kwargs):
self.version = 1
if mtype is None:
self.mtype = None
else:
self.mtype = Type(mtype)
self.mid = mid
if code is None:
self.code = None
else:
self.code = Code(code)
self.token = token
self.payload = payload
self.opt = Options()
self.remote = None
self.transport_tuning = transport_tuning or TransportTuning()
if self.payload is None:
raise TypeError('Payload must not be None. Use empty string instead.')
if uri:
parsed = urllib.parse.urlparse(uri)
if parsed.fragment:
raise ValueError('Fragment identifiers can not be set on a request URI')
if parsed.scheme not in coap_schemes:
self.opt.proxy_uri = uri
return
if parsed.username or parsed.password:
raise ValueError('User name and password not supported.')
if parsed.path not in ('', '/'):
self.opt.uri_path = [urllib.parse.unquote(x) for x in parsed.path.split('/')[1:]]
else:
self.opt.uri_path = []
if parsed.query:
self.opt.uri_query = [urllib.parse.unquote(x) for x in parsed.query.split('&')]
else:
self.opt.uri_query = []
self.remote = UndecidedRemote(parsed.scheme, parsed.netloc)
is_ip_literal = parsed.netloc.startswith('[') or (parsed.hostname.count('.') == 3 and all((c in '0123456789.' for c in parsed.hostname)) and all((int(x) <= 255 for x in parsed.hostname.split('.'))))
if set_uri_host and (not is_ip_literal):
self.opt.uri_host = urllib.parse.unquote(parsed.hostname).translate(_ascii_lowercase)
for (k, v) in kwargs.items():
setattr(self.opt, k, v)
|
aiocoap
|
positive
|
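The URI handling inlined above reduces to stdlib urllib.parse plus two splits; a standalone illustration (toy URI, no aiocoap import) of how path segments and query parameters become option values:

import urllib.parse

parsed = urllib.parse.urlparse("coap://example.com/sensors/temp?unit=c&raw")
uri_path = [urllib.parse.unquote(x) for x in parsed.path.split("/")[1:]]
uri_query = [urllib.parse.unquote(x) for x in parsed.query.split("&")]
assert uri_path == ["sensors", "temp"]
assert uri_query == ["unit=c", "raw"]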
def parse_contest_results(cvr_xml: ET.ElementTree):
results = defaultdict(set)
<DeepExtract>
contests = find(cvr_xml, 'Contests').findall(f"{{{namespace}}}{'Contest'}")
</DeepExtract>
for contest in contests:
contest_name = find(contest, 'Name').text
<DeepExtract>
choices = find(contest, 'Options').findall(f"{{{namespace}}}{'Option'}")
</DeepExtract>
for choice in choices:
if find(choice, 'WriteInData'):
choice_name = 'Write-In'
else:
choice_name = find(choice, 'Name').text
results[contest_name].add(choice_name)
return results
|
def parse_contest_results(cvr_xml: ET.ElementTree):
results = defaultdict(set)
contests = find(cvr_xml, 'Contests').findall(f"{{{namespace}}}{'Contest'}")
for contest in contests:
contest_name = find(contest, 'Name').text
choices = find(contest, 'Options').findall(f"{{{namespace}}}{'Option'}")
for choice in choices:
if find(choice, 'WriteInData'):
choice_name = 'Write-In'
else:
choice_name = find(choice, 'Name').text
results[contest_name].add(choice_name)
return results
|
arlo
|
positive
|
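parse_contest_results above relies on ElementTree's namespace-qualified tag syntax, where every lookup must spell the child tag as "{namespace}Tag". A toy example of the same findall pattern (hypothetical namespace URI and XML, not taken from the repo):

import xml.etree.ElementTree as ET

namespace = "http://example.com/cvr"   # hypothetical URI for illustration
xml = f'<Contests xmlns="{namespace}"><Contest><Name>Mayor</Name></Contest></Contests>'
root = ET.fromstring(xml)
contests = root.findall(f"{{{namespace}}}Contest")
assert [c.find(f"{{{namespace}}}Name").text for c in contests] == ["Mayor"]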
def get_output(layer_or_layers, inputs=None, **kwargs):
accepted_kwargs = {'deterministic'}
treat_as_input = list(inputs.keys()) if isinstance(inputs, dict) else []
<DeepExtract>
try:
queue = deque(layer_or_layers)
except TypeError:
queue = deque([layer_or_layers])
seen = set()
done = set()
result = []
if treat_as_input is not None:
seen.update(treat_as_input)
while queue:
layer_or_layers = queue[0]
if layer_or_layers is None:
queue.popleft()
elif layer_or_layers not in seen:
seen.add(layer_or_layers)
if hasattr(layer_or_layers, 'input_layers'):
queue.extendleft(reversed(layer_or_layers.input_layers))
elif hasattr(layer_or_layers, 'input_layer'):
queue.appendleft(layer_or_layers.input_layer)
else:
queue.popleft()
if layer_or_layers not in done:
result.append(layer_or_layers)
done.add(layer_or_layers)
all_layers = result
</DeepExtract>
all_outputs = dict(((layer, layer.input_var) for layer in all_layers if isinstance(layer, InputLayer) and layer not in treat_as_input))
if isinstance(inputs, dict):
all_outputs.update(((layer, tf.convert_to_tensor(expr)) for (layer, expr) in list(inputs.items())))
elif inputs is not None:
if len(all_outputs) > 1:
raise ValueError('get_output() was called with a single input expression on a network with multiple input layers. Please call it with a dictionary of input expressions instead.')
for input_layer in all_outputs:
all_outputs[input_layer] = tf.convert_to_tensor(inputs)
for layer in all_layers:
if layer not in all_outputs:
try:
if isinstance(layer, MergeLayer):
layer_inputs = [all_outputs[input_layer] for input_layer in layer.input_layers]
else:
layer_inputs = all_outputs[layer.input_layer]
except KeyError:
raise ValueError('get_output() was called without giving an input expression for the free-floating layer %r. Please call it with a dictionary mapping this layer to an input expression.' % layer)
all_outputs[layer] = layer.get_output_for(layer_inputs, **kwargs)
try:
(names, _, _, defaults, _, _, _) = getfullargspec(layer.get_output_for)
except TypeError:
pass
else:
if defaults is not None:
accepted_kwargs |= set(names[-len(defaults):])
accepted_kwargs |= set(layer.get_output_kwargs)
unused_kwargs = set(kwargs.keys()) - accepted_kwargs
if unused_kwargs:
suggestions = []
for kwarg in unused_kwargs:
suggestion = get_close_matches(kwarg, accepted_kwargs)
if suggestion:
suggestions.append('%s (perhaps you meant %s)' % (kwarg, suggestion[0]))
else:
suggestions.append(kwarg)
warn('get_output() was called with unused kwargs:\n\t%s' % '\n\t'.join(suggestions))
try:
return [all_outputs[layer] for layer in layer_or_layers]
except TypeError:
return all_outputs[layer_or_layers]
|
def get_output(layer_or_layers, inputs=None, **kwargs):
accepted_kwargs = {'deterministic'}
treat_as_input = list(inputs.keys()) if isinstance(inputs, dict) else []
try:
queue = deque(layer_or_layers)
except TypeError:
queue = deque([layer_or_layers])
seen = set()
done = set()
result = []
if treat_as_input is not None:
seen.update(treat_as_input)
while queue:
layer_or_layers = queue[0]
if layer_or_layers is None:
queue.popleft()
elif layer_or_layers not in seen:
seen.add(layer_or_layers)
if hasattr(layer_or_layers, 'input_layers'):
queue.extendleft(reversed(layer_or_layers.input_layers))
elif hasattr(layer_or_layers, 'input_layer'):
queue.appendleft(layer_or_layers.input_layer)
else:
queue.popleft()
if layer_or_layers not in done:
result.append(layer_or_layers)
done.add(layer_or_layers)
all_layers = result
all_outputs = dict(((layer, layer.input_var) for layer in all_layers if isinstance(layer, InputLayer) and layer not in treat_as_input))
if isinstance(inputs, dict):
all_outputs.update(((layer, tf.convert_to_tensor(expr)) for (layer, expr) in list(inputs.items())))
elif inputs is not None:
if len(all_outputs) > 1:
raise ValueError('get_output() was called with a single input expression on a network with multiple input layers. Please call it with a dictionary of input expressions instead.')
for input_layer in all_outputs:
all_outputs[input_layer] = tf.convert_to_tensor(inputs)
for layer in all_layers:
if layer not in all_outputs:
try:
if isinstance(layer, MergeLayer):
layer_inputs = [all_outputs[input_layer] for input_layer in layer.input_layers]
else:
layer_inputs = all_outputs[layer.input_layer]
except KeyError:
raise ValueError('get_output() was called without giving an input expression for the free-floating layer %r. Please call it with a dictionary mapping this layer to an input expression.' % layer)
all_outputs[layer] = layer.get_output_for(layer_inputs, **kwargs)
try:
(names, _, _, defaults, _, _, _) = getfullargspec(layer.get_output_for)
except TypeError:
pass
else:
if defaults is not None:
accepted_kwargs |= set(names[-len(defaults):])
accepted_kwargs |= set(layer.get_output_kwargs)
unused_kwargs = set(kwargs.keys()) - accepted_kwargs
if unused_kwargs:
suggestions = []
for kwarg in unused_kwargs:
suggestion = get_close_matches(kwarg, accepted_kwargs)
if suggestion:
suggestions.append('%s (perhaps you meant %s)' % (kwarg, suggestion[0]))
else:
suggestions.append(kwarg)
warn('get_output() was called with unused kwargs:\n\t%s' % '\n\t'.join(suggestions))
try:
return [all_outputs[layer] for layer in layer_or_layers]
except TypeError:
return all_outputs[layer_or_layers]
|
Conditional_Density_Estimation
|
positive
|
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
<DeepExtract>
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
dxsdk_dir = _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
cmd = ['reg.exe', 'query', 'HKLM\\Software\\Microsoft\\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + '\\'
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
dxsdk_dir = dxsdk_dir
</DeepExtract>
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
|
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
dxsdk_dir = _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
cmd = ['reg.exe', 'query', 'HKLM\\Software\\Microsoft\\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + '\\'
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
dxsdk_dir = dxsdk_dir
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
|
brackets-shell
|
positive
|
def get_trigg_freq(self) -> float:
mc = 10350000
<DeepExtract>
r = self.cap.serialWrite(add_crc('@R1B0'))
if not check_crc(r) or r[1] != 'Y':
print('WARNING! Incorrect reply!')
p_lsb = r[2:4]
</DeepExtract>
<DeepExtract>
r = self.cap.serialWrite(add_crc('@R1B1'))
if not check_crc(r) or r[1] != 'Y':
print('WARNING! Incorrect reply!')
p_mid = r[2:4]
</DeepExtract>
<DeepExtract>
r = self.cap.serialWrite(add_crc('@R1B2'))
if not check_crc(r) or r[1] != 'Y':
print('WARNING! Incorrect reply!')
p_msb = r[2:4]
</DeepExtract>
p = int(p_msb + p_mid + p_lsb, 16)
return mc / p
|
def get_trigg_freq(self) -> float:
mc = 10350000
r = self.cap.serialWrite(add_crc('@R1B0'))
if not check_crc(r) or r[1] != 'Y':
print('WARNING! Incorrect reply!')
p_lsb = r[2:4]
r = self.cap.serialWrite(add_crc('@R1B1'))
if not check_crc(r) or r[1] != 'Y':
print('WARNING! Incorrect reply!')
p_mid = r[2:4]
r = self.cap.serialWrite(add_crc('@R1B2'))
if not check_crc(r) or r[1] != 'Y':
print('WARNING! Incorrect reply!')
p_msb = r[2:4]
p = int(p_msb + p_mid + p_lsb, 16)
return mc / p
|
crappy
|
positive
|
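The frequency computation in get_trigg_freq above concatenates three two-character hex reads, most significant byte first, and divides the 10 350 000 count master clock by the resulting period value. A worked example with hypothetical register replies:

mc = 10_350_000
p_lsb, p_mid, p_msb = "40", "42", "0f"   # hypothetical serial replies r[2:4]
p = int(p_msb + p_mid + p_lsb, 16)       # "0f4240" -> 1_000_000
assert p == 1_000_000
print(mc / p)                            # 10.35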
def main():
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('input_prefix', help='Input prefix (ex: test.collapsed.min_fl_2)')
args = parser.parse_args()
output_prefix = args.input_prefix + '.nomono'
<DeepExtract>
group_filename = args.input_prefix + '.group.txt'
count_filename = args.input_prefix + '.abundance.txt'
gff_filename = args.input_prefix + '.gff'
rep_filename = args.input_prefix + '.rep.fq'
if not os.path.exists(count_filename):
print('File {0} does not exist. Abort!'.format(count_filename), file=sys.stderr)
sys.exit(-1)
if not os.path.exists(gff_filename):
print('File {0} does not exist. Abort!'.format(gff_filename), file=sys.stderr)
sys.exit(-1)
if not os.path.exists(rep_filename):
print('File {0} does not exist. Abort!'.format(rep_filename), file=sys.stderr)
sys.exit(-1)
pbids1 = set([r.id for r in SeqIO.parse(open(rep_filename), 'fastq')])
pbids2 = set([r.seqid for r in GFF.collapseGFFReader(gff_filename)])
pbids3 = set(read_count_file(count_filename)[0].keys())
if len(pbids1) != len(pbids2) or len(pbids2) != len(pbids3) or len(pbids1) != len(pbids3):
print('The number of PBID records in the files disagree! Sanity check failed.', file=sys.stderr)
print('# of PBIDs in {0}: {1}'.format(rep_filename, len(pbids1)), file=sys.stderr)
print('# of PBIDs in {0}: {1}'.format(gff_filename, len(pbids2)), file=sys.stderr)
print('# of PBIDs in {0}: {1}'.format(count_filename, len(pbids3)), file=sys.stderr)
sys.exit(-1)
(count_filename, gff_filename, rep_filename) = (count_filename, gff_filename, rep_filename)
</DeepExtract>
good = []
f = open(output_prefix + '.gff', 'w')
reader = GFF.collapseGFFReader(gff_filename)
for r in reader:
assert r.seqid.startswith('PB.')
if len(r.ref_exons) > 1:
good.append(r.seqid)
GFF.write_collapseGFF_format(f, r)
<DeepExtract>
f = open(count_filename)
count_header = ''
while True:
cur_pos = f.tell()
line = f.readline()
if not line.startswith('#'):
f.seek(cur_pos)
break
else:
count_header += line
d = dict(((r['pbid'], r) for r in DictReader(f, delimiter='\t')))
f.close()
(d, count_header) = (d, count_header)
</DeepExtract>
f = open(output_prefix + '.rep.fq', 'w')
for r in SeqIO.parse(open(rep_filename), 'fastq'):
if r.name.split('|')[0] in good:
SeqIO.write(r, f, 'fastq')
f.close()
f = open(output_prefix + '.abundance.txt', 'w')
f.write(count_header)
writer = DictWriter(f, fieldnames=['pbid', 'count_fl', 'count_nfl', 'count_nfl_amb', 'norm_fl', 'norm_nfl', 'norm_nfl_amb'], delimiter='\t', lineterminator='\n')
writer.writeheader()
for k in good:
r = d[k]
writer.writerow(r)
f.close()
print('Output written to:', output_prefix + '.gff', file=sys.stderr)
print('Output written to:', output_prefix + '.rep.fq', file=sys.stderr)
print('Output written to:', output_prefix + '.gff', file=sys.stderr)
|
def main():
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('input_prefix', help='Input prefix (ex: test.collapsed.min_fl_2)')
args = parser.parse_args()
output_prefix = args.input_prefix + '.nomono'
group_filename = args.input_prefix + '.group.txt'
count_filename = args.input_prefix + '.abundance.txt'
gff_filename = args.input_prefix + '.gff'
rep_filename = args.input_prefix + '.rep.fq'
if not os.path.exists(count_filename):
print('File {0} does not exist. Abort!'.format(count_filename), file=sys.stderr)
sys.exit(-1)
if not os.path.exists(gff_filename):
print('File {0} does not exist. Abort!'.format(gff_filename), file=sys.stderr)
sys.exit(-1)
if not os.path.exists(rep_filename):
print('File {0} does not exist. Abort!'.format(rep_filename), file=sys.stderr)
sys.exit(-1)
pbids1 = set([r.id for r in SeqIO.parse(open(rep_filename), 'fastq')])
pbids2 = set([r.seqid for r in GFF.collapseGFFReader(gff_filename)])
pbids3 = set(read_count_file(count_filename)[0].keys())
if len(pbids1) != len(pbids2) or len(pbids2) != len(pbids3) or len(pbids1) != len(pbids3):
print('The number of PBID records in the files disagree! Sanity check failed.', file=sys.stderr)
print('# of PBIDs in {0}: {1}'.format(rep_filename, len(pbids1)), file=sys.stderr)
print('# of PBIDs in {0}: {1}'.format(gff_filename, len(pbids2)), file=sys.stderr)
print('# of PBIDs in {0}: {1}'.format(count_filename, len(pbids3)), file=sys.stderr)
sys.exit(-1)
(count_filename, gff_filename, rep_filename) = (count_filename, gff_filename, rep_filename)
good = []
f = open(output_prefix + '.gff', 'w')
reader = GFF.collapseGFFReader(gff_filename)
for r in reader:
assert r.seqid.startswith('PB.')
if len(r.ref_exons) > 1:
good.append(r.seqid)
GFF.write_collapseGFF_format(f, r)
f = open(count_filename)
count_header = ''
while True:
cur_pos = f.tell()
line = f.readline()
if not line.startswith('#'):
f.seek(cur_pos)
break
else:
count_header += line
d = dict(((r['pbid'], r) for r in DictReader(f, delimiter='\t')))
f.close()
(d, count_header) = (d, count_header)
f = open(output_prefix + '.rep.fq', 'w')
for r in SeqIO.parse(open(rep_filename), 'fastq'):
if r.name.split('|')[0] in good:
SeqIO.write(r, f, 'fastq')
f.close()
f = open(output_prefix + '.abundance.txt', 'w')
f.write(count_header)
writer = DictWriter(f, fieldnames=['pbid', 'count_fl', 'count_nfl', 'count_nfl_amb', 'norm_fl', 'norm_nfl', 'norm_nfl_amb'], delimiter='\t', lineterminator='\n')
writer.writeheader()
for k in good:
r = d[k]
writer.writerow(r)
f.close()
print('Output written to:', output_prefix + '.gff', file=sys.stderr)
print('Output written to:', output_prefix + '.rep.fq', file=sys.stderr)
print('Output written to:', output_prefix + '.gff', file=sys.stderr)
|
cDNA_Cupcake
|
positive
|
def test_subscribe_notifications(test):
test.only_admin_implementation()
def init_data(handler):
device_id = test.generate_id('n-s-n', test.DEVICE_ENTITY)
network_name = test.generate_id('n-s-n', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
notification_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
device = handler.api.put_device(device_id, network_id=network.id)
return (device, network, notification_names, [])
def send_data(handler, device, notification_names):
for notification_name in notification_names:
notification = device.send_notification(notification_name)
handler.data['notification_ids'].append(notification.id)
def set_handler_data(handler, device, network, notification_names, notification_ids):
handler.data['device'] = device
handler.data['network'] = network
handler.data['notification_names'] = notification_names
handler.data['notification_ids'] = notification_ids
def handle_connect(handler):
<DeepExtract>
device_id = test.generate_id('n-s-i-c', test.DEVICE_ENTITY)
network_name = test.generate_id('n-s-i-c', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
command_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
device = handler.api.put_device(device_id, network_id=network.id)
(device, network, notification_names, notification_ids) = (device, network, command_names, [])
</DeepExtract>
<DeepExtract>
handler.data['device'] = device
handler.data['network'] = network
handler.data['command_names'] = notification_names
handler.data['command_ids'] = notification_ids
</DeepExtract>
<DeepExtract>
for command_name in notification_names:
command = device.send_command(command_name)
handler.data['command_ids'].append(command.id)
</DeepExtract>
handler.data['subscription'] = network.subscribe_notifications()
def handle_notification(handler, notification):
assert notification.id in handler.data['notification_ids']
handler.data['notification_ids'].remove(notification.id)
if handler.data['notification_ids']:
return
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_notification=handle_notification)
def handle_connect(handler):
<DeepExtract>
device_id = test.generate_id('n-s-i-c', test.DEVICE_ENTITY)
network_name = test.generate_id('n-s-i-c', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
command_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
device = handler.api.put_device(device_id, network_id=network.id)
(device, network, notification_names, notification_ids) = (device, network, command_names, [])
</DeepExtract>
notification_name = notification_names[:1]
<DeepExtract>
handler.data['device'] = device
handler.data['network'] = network
handler.data['command_names'] = notification_names
handler.data['command_ids'] = notification_ids
</DeepExtract>
<DeepExtract>
for command_name in notification_name:
command = device.send_command(command_name)
handler.data['command_ids'].append(command.id)
</DeepExtract>
handler.data['subscription'] = network.subscribe_notifications(names=notification_name)
def handle_notification(handler, notification):
assert notification.id == handler.data['notification_ids'][0]
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_notification=handle_notification)
def handle_connect(handler):
network_name = test.generate_id('n-s-n', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
device_id = test.generate_id('n-s-n', test.DEVICE_ENTITY)
device = handler.api.put_device(device_id, network_id=network.id)
notification_name = '%s-name-1' % device_id
notification = device.send_notification(notification_name)
<DeepExtract>
handler.data['device'] = device
handler.data['network'] = network
handler.data['command_names'] = [notification_name]
handler.data['command_ids'] = [notification.id]
</DeepExtract>
handler.data['subscription'] = network.subscribe_notifications()
def handle_notification(handler, notification):
assert notification.id == handler.data['notification_ids'][0]
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_notification=handle_notification)
def handle_connect(handler):
network_name = test.generate_id('n-s-n', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
network_1 = handler.api.get_network(network.id)
network.remove()
try:
network_1.subscribe_notifications()
assert False
except ApiResponseError as api_response_error:
assert api_response_error.code == 404
test.run(handle_connect)
|
def test_subscribe_notifications(test):
test.only_admin_implementation()
def init_data(handler):
device_id = test.generate_id('n-s-n', test.DEVICE_ENTITY)
network_name = test.generate_id('n-s-n', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
notification_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
device = handler.api.put_device(device_id, network_id=network.id)
return (device, network, notification_names, [])
def send_data(handler, device, notification_names):
for notification_name in notification_names:
notification = device.send_notification(notification_name)
handler.data['notification_ids'].append(notification.id)
def set_handler_data(handler, device, network, notification_names, notification_ids):
handler.data['device'] = device
handler.data['network'] = network
handler.data['notification_names'] = notification_names
handler.data['notification_ids'] = notification_ids
def handle_connect(handler):
device_id = test.generate_id('n-s-i-c', test.DEVICE_ENTITY)
network_name = test.generate_id('n-s-i-c', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
command_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
device = handler.api.put_device(device_id, network_id=network.id)
(device, network, notification_names, notification_ids) = (device, network, command_names, [])
handler.data['device'] = device
handler.data['network'] = network
handler.data['command_names'] = notification_names
handler.data['command_ids'] = notification_ids
for command_name in notification_names:
command = device.send_command(command_name)
handler.data['command_ids'].append(command.id)
handler.data['subscription'] = network.subscribe_notifications()
def handle_notification(handler, notification):
assert notification.id in handler.data['notification_ids']
handler.data['notification_ids'].remove(notification.id)
if handler.data['notification_ids']:
return
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_notification=handle_notification)
def handle_connect(handler):
device_id = test.generate_id('n-s-i-c', test.DEVICE_ENTITY)
network_name = test.generate_id('n-s-i-c', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
command_names = ['%s-name-%s' % (device_id, i) for i in range(2)]
device = handler.api.put_device(device_id, network_id=network.id)
(device, network, notification_names, notification_ids) = (device, network, command_names, [])
notification_name = notification_names[:1]
handler.data['device'] = device
handler.data['network'] = network
handler.data['command_names'] = notification_names
handler.data['command_ids'] = notification_ids
for command_name in notification_name:
command = device.send_command(command_name)
handler.data['command_ids'].append(command.id)
handler.data['subscription'] = network.subscribe_notifications(names=notification_name)
def handle_notification(handler, notification):
assert notification.id == handler.data['notification_ids'][0]
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_notification=handle_notification)
def handle_connect(handler):
network_name = test.generate_id('n-s-n', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
device_id = test.generate_id('n-s-n', test.DEVICE_ENTITY)
device = handler.api.put_device(device_id, network_id=network.id)
notification_name = '%s-name-1' % device_id
notification = device.send_notification(notification_name)
handler.data['device'] = device
handler.data['network'] = network
handler.data['command_names'] = [notification_name]
handler.data['command_ids'] = [notification.id]
handler.data['subscription'] = network.subscribe_notifications()
def handle_notification(handler, notification):
assert notification.id == handler.data['notification_ids'][0]
handler.data['subscription'].remove()
handler.data['device'].remove()
handler.data['network'].remove()
handler.disconnect()
test.run(handle_connect, handle_notification=handle_notification)
def handle_connect(handler):
network_name = test.generate_id('n-s-n', test.NETWORK_ENTITY)
description = '%s-description' % network_name
network = handler.api.create_network(network_name, description)
network_1 = handler.api.get_network(network.id)
network.remove()
try:
network_1.subscribe_notifications()
assert False
except ApiResponseError as api_response_error:
assert api_response_error.code == 404
test.run(handle_connect)
|
devicehive-python
|
positive
|
def test_message_completeness(self):
"""Test we're fully expecting all of the values for a message."""
self.err.error(('id',), 'error', 'description', 'file', 123, 456)
<DeepExtract>
results = json.loads(self.err.render_json())
</DeepExtract>
eq_(len(results['messages']), 1, 'Unexpected number of messages.')
message = results['messages'][0]
eq_(message['id'], ['id'])
eq_(message['message'], 'error')
eq_(message['description'], 'description')
eq_(message['file'], 'file')
eq_(message['line'], 123)
eq_(message['column'], 456)
|
def test_message_completeness(self):
"""Test we're fully expecting all of the values for a message."""
self.err.error(('id',), 'error', 'description', 'file', 123, 456)
results = json.loads(self.err.render_json())
eq_(len(results['messages']), 1, 'Unexpected number of messages.')
message = results['messages'][0]
eq_(message['id'], ['id'])
eq_(message['message'], 'error')
eq_(message['description'], 'description')
eq_(message['file'], 'file')
eq_(message['line'], 123)
eq_(message['column'], 456)
|
app-validator
|
positive
|
def getYesorNoResponse(handler_input, textType):
table = boto3.resource('dynamodb').Table('AdvgStoryDetails')
speak_output = GAME_END
try:
question_record = table.query(KeyConditionExpression=Key('CountryId').eq(get_country_id(handler_input.attributes_manager.session_attributes['country'])) & Key('QuestionNumber').eq(handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['QuestionNumber'] + 1))
if question_record['Count'] == 1:
speak_output = question_record['Items'][0][textType]
speak_output = '<voice name="' + get_polly_voice(handler_input.attributes_manager.session_attributes['country']) + '">' + speak_output + ' </voice>'
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['CurrentTurns'] += 1
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['QuestionNumber'] += 1
if textType == 'YesResponseText':
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['MoneyLevel'] += question_record['Items'][0]['YesWealthImpact']
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['EnergyLevel'] += question_record['Items'][0]['YesEnergyImpact']
elif textType == 'NoResponseText':
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['MoneyLevel'] += question_record['Items'][0]['NoWealthImpact']
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['EnergyLevel'] += question_record['Items'][0]['NoEnergyImpact']
current_wealth = handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['MoneyLevel']
current_energy = handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['EnergyLevel']
if is_game_over(handler_input.attributes_manager.session_attributes['stats_record']):
speak_output = '<voice name="' + get_polly_voice(handler_input.attributes_manager.session_attributes['country']) + '">Oh no adventurer, you don\'t have enough wealth or energy to continue on your adventure! This means your adventure is over. </voice> '
<DeepExtract>
if is_user_on_session(handler_input) and has_active_adventure(handler_input):
table = boto3.resource('dynamodb').Table('AdvgGameStats')
table.update_item(Key={'PlayerNumber': handler_input.attributes_manager.session_attributes['user']['Items'][0]['PlayerNumber'], 'CountryId': get_country_id(handler_input.attributes_manager.session_attributes['country'])}, UpdateExpression='set ActiveFlag=:n', ConditionExpression='ActiveFlag=:a', ExpressionAttributeValues={':n': 'N', ':a': 'Y'})
</DeepExtract>
elif is_warning_needed(current_wealth, current_energy):
speak_output = '<voice name="' + get_polly_voice(handler_input.attributes_manager.session_attributes['country']) + '">Be careful adventurer, you are running low on wealth or energy. If you need a travel tip, say speak to the guide.</voice> '
speak_output = speak_output + ' ' + get_next_question(handler_input.attributes_manager.session_attributes['country'], handler_input.attributes_manager.session_attributes['stats_record'], handler_input)
else:
speak_output = speak_output + ' ' + get_next_question(handler_input.attributes_manager.session_attributes['country'], handler_input.attributes_manager.session_attributes['stats_record'], handler_input)
else:
logger.error("That question number doesn't exist: {}".format(handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['QuestionNumber']))
except:
logger.error('An error in getYesorNoResponse for text type {} -- {}'.format(textType, handler_input))
speak_output = "Sorry, adventurer! I don't understand what you want to do. {}".format(VISIT_COUNTRY_REPROMPT)
return speak_output
|
def getYesorNoResponse(handler_input, textType):
table = boto3.resource('dynamodb').Table('AdvgStoryDetails')
speak_output = GAME_END
try:
question_record = table.query(KeyConditionExpression=Key('CountryId').eq(get_country_id(handler_input.attributes_manager.session_attributes['country'])) & Key('QuestionNumber').eq(handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['QuestionNumber'] + 1))
if question_record['Count'] == 1:
speak_output = question_record['Items'][0][textType]
speak_output = '<voice name="' + get_polly_voice(handler_input.attributes_manager.session_attributes['country']) + '">' + speak_output + ' </voice>'
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['CurrentTurns'] += 1
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['QuestionNumber'] += 1
if textType == 'YesResponseText':
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['MoneyLevel'] += question_record['Items'][0]['YesWealthImpact']
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['EnergyLevel'] += question_record['Items'][0]['YesEnergyImpact']
elif textType == 'NoResponseText':
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['MoneyLevel'] += question_record['Items'][0]['NoWealthImpact']
handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['EnergyLevel'] += question_record['Items'][0]['NoEnergyImpact']
current_wealth = handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['MoneyLevel']
current_energy = handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['EnergyLevel']
if is_game_over(handler_input.attributes_manager.session_attributes['stats_record']):
speak_output = '<voice name="' + get_polly_voice(handler_input.attributes_manager.session_attributes['country']) + '">Oh no adventurer, you don\'t have enough wealth or energy to continue on your adventure! This means your adventure is over. </voice> '
if is_user_on_session(handler_input) and has_active_adventure(handler_input):
table = boto3.resource('dynamodb').Table('AdvgGameStats')
table.update_item(Key={'PlayerNumber': handler_input.attributes_manager.session_attributes['user']['Items'][0]['PlayerNumber'], 'CountryId': get_country_id(handler_input.attributes_manager.session_attributes['country'])}, UpdateExpression='set ActiveFlag=:n', ConditionExpression='ActiveFlag=:a', ExpressionAttributeValues={':n': 'N', ':a': 'Y'})
elif is_warning_needed(current_wealth, current_energy):
speak_output = '<voice name="' + get_polly_voice(handler_input.attributes_manager.session_attributes['country']) + '">Be careful adventurer, you are running low on wealth or energy. If you need a travel tip, say speak to the guide.</voice> '
speak_output = speak_output + ' ' + get_next_question(handler_input.attributes_manager.session_attributes['country'], handler_input.attributes_manager.session_attributes['stats_record'], handler_input)
else:
speak_output = speak_output + ' ' + get_next_question(handler_input.attributes_manager.session_attributes['country'], handler_input.attributes_manager.session_attributes['stats_record'], handler_input)
else:
logger.error("That question number doesn't exist: {}".format(handler_input.attributes_manager.session_attributes['stats_record']['Items'][0]['QuestionNumber']))
except:
logger.error('An error in getYesorNoResponse for text type {} -- {}'.format(textType, handler_input))
speak_output = "Sorry, adventurer! I don't understand what you want to do. {}".format(VISIT_COUNTRY_REPROMPT)
return speak_output
|
Course_Alexa_Skill_Builder
|
positive
|
def __init__(self, root_dir, split, data_transform=None, forward_context=0, back_context=0, strides=(1,), depth_type=None, **kwargs):
super().__init__()
assert depth_type is None or depth_type == '', 'VideoDataset currently does not support depth types'
self.depth_type = depth_type
self.with_depth = depth_type is not '' and depth_type is not None
self.root_dir = root_dir
self.split = split
self.backward_context = back_context
self.forward_context = forward_context
self.max_len = 11
self.has_context = self.backward_context + self.forward_context > 0
self.strides = strides[0]
self.files = []
<DeepExtract>
files = defaultdict(list)
for entry in os.scandir(root_dir):
relpath = os.path.relpath(entry.path, root_dir)
if entry.is_dir():
d_files = read_files(entry.path, ext=('.jpg', '.png', '.bmp', '.jpeg'), skip_empty=skip_empty)
if skip_empty and (not len(d_files)):
continue
files[relpath] = d_files[entry.path]
elif entry.is_file():
if ('.jpg', '.png', '.bmp', '.jpeg') is None or entry.path.lower().endswith(tuple(('.jpg', '.png', '.bmp', '.jpeg'))):
files[root_dir].append(relpath)
self.file_tree = files
</DeepExtract>
for (k, v) in self.file_tree.items():
file_list = sorted(v)
files = [fname for fname in file_list if self._has_context(k, fname, file_list)]
self.files.extend([[k, fname] for fname in files])
self.data_transform = data_transform
|
def __init__(self, root_dir, split, data_transform=None, forward_context=0, back_context=0, strides=(1,), depth_type=None, **kwargs):
super().__init__()
assert depth_type is None or depth_type == '', 'VideoDataset currently does not support depth types'
self.depth_type = depth_type
self.with_depth = depth_type is not '' and depth_type is not None
self.root_dir = root_dir
self.split = split
self.backward_context = back_context
self.forward_context = forward_context
self.max_len = 11
self.has_context = self.backward_context + self.forward_context > 0
self.strides = strides[0]
self.files = []
files = defaultdict(list)
for entry in os.scandir(root_dir):
relpath = os.path.relpath(entry.path, root_dir)
if entry.is_dir():
d_files = read_files(entry.path, ext=('.jpg', '.png', '.bmp', '.jpeg'), skip_empty=skip_empty)
if skip_empty and (not len(d_files)):
continue
files[relpath] = d_files[entry.path]
elif entry.is_file():
if ('.jpg', '.png', '.bmp', '.jpeg') is None or entry.path.lower().endswith(tuple(('.jpg', '.png', '.bmp', '.jpeg'))):
files[root_dir].append(relpath)
self.file_tree = files
for (k, v) in self.file_tree.items():
file_list = sorted(v)
files = [fname for fname in file_list if self._has_context(k, fname, file_list)]
self.files.extend([[k, fname] for fname in files])
self.data_transform = data_transform
|
dro-sfm
|
positive
|
def get_updates(self, params, constraints, loss):
print('Using the SGLD Optimizer')
<DeepExtract>
grads = K.gradients(loss, params)
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
if hasattr(self, 'clipvalue') and self.clipvalue > 0:
grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
grads = grads
</DeepExtract>
lr = self.lr * (1.0 / (1.0 + self.decay * self.iterations))
self.updates = [(self.iterations, self.iterations + 1.0)]
for (p, g, c) in zip(params, grads, constraints):
m = K.variable(np.zeros(K.get_value(p).shape))
v = self.momentum * m - lr * g
self.updates.append((m, v))
if self.nesterov:
new_p = p + self.momentum * v - lr * g
else:
new_p = p + v
self.updates.append((p, c(new_p)))
return self.updates
|
def get_updates(self, params, constraints, loss):
print('Using the SGLD Optimizer')
grads = K.gradients(loss, params)
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
if hasattr(self, 'clipvalue') and self.clipvalue > 0:
grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
grads = grads
lr = self.lr * (1.0 / (1.0 + self.decay * self.iterations))
self.updates = [(self.iterations, self.iterations + 1.0)]
for (p, g, c) in zip(params, grads, constraints):
m = K.variable(np.zeros(K.get_value(p).shape))
v = self.momentum * m - lr * g
self.updates.append((m, v))
if self.nesterov:
new_p = p + self.momentum * v - lr * g
else:
new_p = p + v
self.updates.append((p, c(new_p)))
return self.updates
|
Deep-Bayesian-Active-Learning
|
positive
|
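The `get_updates` entry above builds the classic momentum update (v = momentum * m - lr * g, optionally with a Nesterov lookahead) out of Keras backend ops. As a rough standalone illustration of that update rule only (not the repo's optimizer, and ignoring the learning-rate decay and gradient clipping), a NumPy sketch:

import numpy as np

def momentum_step(p, g, m, lr=0.01, momentum=0.9, nesterov=False):
    """One parameter update; returns (new_p, new_velocity)."""
    v = momentum * m - lr * g                    # velocity, as in the entry
    new_p = p + momentum * v - lr * g if nesterov else p + v
    return new_p, v

p = np.array([1.0, -2.0])
m = np.zeros_like(p)
g = np.array([0.5, -0.5])                        # pretend gradient
p, m = momentum_step(p, g, m)
print(p, m)                                      # [ 0.995 -1.995] [-0.005  0.005]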
def __init__(self, name, context, template=None, templateuri=None, callables=None, inherits=None, populate_self=True, calling_uri=None):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
if templateuri is not None:
<DeepExtract>
lookup = context._with_template.lookup
if lookup is None:
raise exceptions.TemplateLookupException("Template '%s' has no TemplateLookup associated" % context._with_template.uri)
templateuri = lookup.adjust_uri(templateuri, calling_uri)
try:
self.template = lookup.get_template(templateuri)
except exceptions.TopLevelLookupException:
raise exceptions.TemplateLookupException(str(compat.exception_as()))
</DeepExtract>
self._templateuri = self.template.module._template_uri
elif template is not None:
self.template = template
self._templateuri = template.module._template_uri
else:
raise TypeError("'template' argument is required.")
if populate_self:
<DeepExtract>
if self is None:
self = TemplateNamespace('self:%s' % self.template.uri, context, template=self.template, populate_self=False)
context._data['self'] = context._data['local'] = self
if hasattr(self.template.module, '_mako_inherit'):
ret = self.template.module._mako_inherit(self.template, context)
if ret:
(lclcallable, lclcontext) = ret
(lclcallable, lclcontext) = (self.template.callable_, context)
</DeepExtract>
|
def __init__(self, name, context, template=None, templateuri=None, callables=None, inherits=None, populate_self=True, calling_uri=None):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
if templateuri is not None:
lookup = context._with_template.lookup
if lookup is None:
raise exceptions.TemplateLookupException("Template '%s' has no TemplateLookup associated" % context._with_template.uri)
templateuri = lookup.adjust_uri(templateuri, calling_uri)
try:
self.template = lookup.get_template(templateuri)
except exceptions.TopLevelLookupException:
raise exceptions.TemplateLookupException(str(compat.exception_as()))
self._templateuri = self.template.module._template_uri
elif template is not None:
self.template = template
self._templateuri = template.module._template_uri
else:
raise TypeError("'template' argument is required.")
if populate_self:
if self is None:
self = TemplateNamespace('self:%s' % self.template.uri, context, template=self.template, populate_self=False)
context._data['self'] = context._data['local'] = self
if hasattr(self.template.module, '_mako_inherit'):
ret = self.template.module._mako_inherit(self.template, context)
if ret:
(lclcallable, lclcontext) = ret
(lclcallable, lclcontext) = (self.template.callable_, context)
|
atsf4g-co
|
positive
|
def count_rnn_cell(m: nn.RNNCell, x: torch.Tensor, y: torch.Tensor):
<DeepExtract>
total_ops = m.hidden_size * (m.input_size + m.hidden_size) + m.hidden_size
if m.bias:
total_ops += m.hidden_size * 2
total_ops = total_ops
</DeepExtract>
batch_size = x[0].size(0)
total_ops *= batch_size
m.total_ops += torch.DoubleTensor([int(total_ops)])
|
def count_rnn_cell(m: nn.RNNCell, x: torch.Tensor, y: torch.Tensor):
total_ops = m.hidden_size * (m.input_size + m.hidden_size) + m.hidden_size
if m.bias:
total_ops += m.hidden_size * 2
total_ops = total_ops
batch_size = x[0].size(0)
total_ops *= batch_size
m.total_ops += torch.DoubleTensor([int(total_ops)])
|
Divide-and-Co-training
|
positive
|
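The `count_rnn_cell` hook above charges an RNN cell hidden_size * (input_size + hidden_size) + hidden_size multiply-adds per timestep, plus 2 * hidden_size when a bias is present, then scales by the batch size. A plain-Python restatement of that arithmetic (a sketch of the formula only, no torch involved):

def rnn_cell_ops(input_size, hidden_size, bias=True, batch_size=1):
    """Per-timestep operation count for a vanilla RNN cell, as in the hook above."""
    ops = hidden_size * (input_size + hidden_size) + hidden_size
    if bias:
        ops += hidden_size * 2
    return ops * batch_size

# e.g. a cell with 128 inputs, 256 hidden units, batch of 32
print(rnn_cell_ops(128, 256, bias=True, batch_size=32))  # 3170304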
def aug_test(self, imgs, img_metas, rescale=False):
feats = self.extract_feats(imgs)
aug_bboxes = []
aug_scores = []
for (x, img_meta) in zip(feats, img_metas):
outs = self.bbox_head(x)
bbox_inputs = outs + (img_meta, self.test_cfg, False, False)
(det_bboxes, det_scores) = self.bbox_head.get_bboxes(*bbox_inputs)[0]
aug_bboxes.append(det_bboxes)
aug_scores.append(det_scores)
<DeepExtract>
recovered_bboxes = []
for (bboxes, img_info) in zip(aug_bboxes, img_metas):
img_shape = img_info[0]['img_shape']
scale_factor = img_info[0]['scale_factor']
flip = img_info[0]['flip']
bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)
recovered_bboxes.append(bboxes)
bboxes = torch.cat(recovered_bboxes, dim=0)
if aug_scores is None:
(merged_bboxes, merged_scores) = bboxes
else:
scores = torch.cat(aug_scores, dim=0)
(merged_bboxes, merged_scores) = (bboxes, scores)
</DeepExtract>
(det_bboxes, det_labels) = multiclass_nms(merged_bboxes, merged_scores, self.test_cfg.score_thr, self.test_cfg.nms, self.test_cfg.max_per_img)
if rescale:
_det_bboxes = det_bboxes
else:
_det_bboxes = det_bboxes.clone()
_det_bboxes[:, :4] *= img_metas[0][0]['scale_factor']
bbox_results = bbox2result(_det_bboxes, det_labels, self.bbox_head.num_classes)
return bbox_results
|
def aug_test(self, imgs, img_metas, rescale=False):
feats = self.extract_feats(imgs)
aug_bboxes = []
aug_scores = []
for (x, img_meta) in zip(feats, img_metas):
outs = self.bbox_head(x)
bbox_inputs = outs + (img_meta, self.test_cfg, False, False)
(det_bboxes, det_scores) = self.bbox_head.get_bboxes(*bbox_inputs)[0]
aug_bboxes.append(det_bboxes)
aug_scores.append(det_scores)
recovered_bboxes = []
for (bboxes, img_info) in zip(aug_bboxes, img_metas):
img_shape = img_info[0]['img_shape']
scale_factor = img_info[0]['scale_factor']
flip = img_info[0]['flip']
bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)
recovered_bboxes.append(bboxes)
bboxes = torch.cat(recovered_bboxes, dim=0)
if aug_scores is None:
(merged_bboxes, merged_scores) = bboxes
else:
scores = torch.cat(aug_scores, dim=0)
(merged_bboxes, merged_scores) = (bboxes, scores)
(det_bboxes, det_labels) = multiclass_nms(merged_bboxes, merged_scores, self.test_cfg.score_thr, self.test_cfg.nms, self.test_cfg.max_per_img)
if rescale:
_det_bboxes = det_bboxes
else:
_det_bboxes = det_bboxes.clone()
_det_bboxes[:, :4] *= img_metas[0][0]['scale_factor']
bbox_results = bbox2result(_det_bboxes, det_labels, self.bbox_head.num_classes)
return bbox_results
|
Cascade-RPN
|
positive
|
def set_session_config(enable_eager=False, enable_xla=False):
"""Sets the session config."""
if is_v2_0():
<DeepExtract>
if enable_xla:
tf.config.optimizer.set_jit(True)
tf.config.optimizer.set_experimental_options({'pin_to_host_optimization': False})
</DeepExtract>
else:
<DeepExtract>
config = None
if enable_xla:
config = tf.compat.v1.ConfigProto()
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_2
config.graph_options.rewrite_options.pin_to_host_optimization = rewriter_config_pb2.RewriterConfig.OFF
config = config
</DeepExtract>
if enable_eager:
tf.compat.v1.enable_eager_execution(config=config)
else:
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
|
def set_session_config(enable_eager=False, enable_xla=False):
"""Sets the session config."""
if is_v2_0():
if enable_xla:
tf.config.optimizer.set_jit(True)
tf.config.optimizer.set_experimental_options({'pin_to_host_optimization': False})
else:
config = None
if enable_xla:
config = tf.compat.v1.ConfigProto()
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_2
config.graph_options.rewrite_options.pin_to_host_optimization = rewriter_config_pb2.RewriterConfig.OFF
config = config
if enable_eager:
tf.compat.v1.enable_eager_execution(config=config)
else:
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
|
autodist
|
positive
|
def fom(self, params):
"""Run a forward simulation and calculate the figure of merit.
Notes
-----
    The simulation is performed in parallel with the help of all of the
    MPI nodes; however, the calculation of the figure of merit itself is
currently only performed on the master node (RANK == 0)
Parameters
----------
params : numpy.ndarray
List of design parameters of the system
Returns
-------
float
**(Master node only)** The figure of merit :math:`F(\\mathbf{E},\\mathbf{H} ; \\mathbf{p})`
"""
<DeepExtract>
pass
</DeepExtract>
self.prev_params = params
self.sim.update()
self.sim.solve_forward()
return self.calc_fom(self.sim, params)
|
def fom(self, params):
"""Run a forward simulation and calculate the figure of merit.
Notes
-----
    The simulation is performed in parallel with the help of all of the
    MPI nodes; however, the calculation of the figure of merit itself is
currently only performed on the master node (RANK == 0)
Parameters
----------
params : numpy.ndarray
List of design parameters of the system
Returns
-------
float
**(Master node only)** The figure of merit :math:`F(\\mathbf{E},\\mathbf{H} ; \\mathbf{p})`
"""
pass
self.prev_params = params
self.sim.update()
self.sim.solve_forward()
return self.calc_fom(self.sim, params)
|
emopt
|
positive
|
def _remove_items(*objs, **kwargs):
if objs:
chunks = [objs[x:x + 100] for x in range(0, len(objs), 100)]
for chunk in chunks:
kwargs = {self.column: multi_array_remove(self.column, *chunk)}
<DeepExtract>
qs = self.related_model.objects.filter(pk=self.instance.pk)
qs.update(**kwargs)
</DeepExtract>
if self.symmetrical:
kwargs = {self.column: ArrayRemove(self.column, self.instance.pk, output_field=self.field)}
self.model.objects.filter(pk__in=list(objs)).update(**kwargs)
|
def _remove_items(*objs, **kwargs):
if objs:
chunks = [objs[x:x + 100] for x in range(0, len(objs), 100)]
for chunk in chunks:
kwargs = {self.column: multi_array_remove(self.column, *chunk)}
qs = self.related_model.objects.filter(pk=self.instance.pk)
qs.update(**kwargs)
if self.symmetrical:
kwargs = {self.column: ArrayRemove(self.column, self.instance.pk, output_field=self.field)}
self.model.objects.filter(pk__in=list(objs)).update(**kwargs)
|
django_postgres_extensions
|
positive
|
def test_retry_select_events_after_deallocating_prepared_statement(self):
<DeepExtract>
if self.datastore.schema:
table_name = f'{self.datastore.schema}.{table_name}'
recorder = PostgresAggregateRecorder(datastore=self.datastore, events_table_name=table_name)
recorder.create_table()
recorder = recorder
</DeepExtract>
self.datastore.pool.pool_size = 1
originator_id = uuid4()
stored_event1 = StoredEvent(originator_id=originator_id, originator_version=0, topic='topic1', state=b'state1')
recorder.insert_events([stored_event1])
recorder.select_events(originator_id)
with self.datastore.get_connection() as conn:
if self.schema:
statement_name = f'select_{self.schema}_{EVENTS_TABLE_NAME}'
else:
statement_name = f'select_{EVENTS_TABLE_NAME}'
self.assertIn(statement_name, conn.is_prepared)
conn.cursor().execute(f'DEALLOCATE {recorder.statement_name_aliases[statement_name]}')
recorder.select_events(originator_id)
|
def test_retry_select_events_after_deallocating_prepared_statement(self):
if self.datastore.schema:
table_name = f'{self.datastore.schema}.{table_name}'
recorder = PostgresAggregateRecorder(datastore=self.datastore, events_table_name=table_name)
recorder.create_table()
recorder = recorder
self.datastore.pool.pool_size = 1
originator_id = uuid4()
stored_event1 = StoredEvent(originator_id=originator_id, originator_version=0, topic='topic1', state=b'state1')
recorder.insert_events([stored_event1])
recorder.select_events(originator_id)
with self.datastore.get_connection() as conn:
if self.schema:
statement_name = f'select_{self.schema}_{EVENTS_TABLE_NAME}'
else:
statement_name = f'select_{EVENTS_TABLE_NAME}'
self.assertIn(statement_name, conn.is_prepared)
conn.cursor().execute(f'DEALLOCATE {recorder.statement_name_aliases[statement_name]}')
recorder.select_events(originator_id)
|
eventsourcing
|
positive
|
def test_split_file_odd(create_dummy_file):
<DeepExtract>
patched_tempfile = mocker.patch('aizynthfinder.utils.files.tempfile.mktemp')
split_files = ['\n'.join(list('abcdefg')) / 'split1', '\n'.join(list('abcdefg')) / 'split2', '\n'.join(list('abcdefg')) / 'split3']
patched_tempfile.side_effect = split_files
filename = '\n'.join(list('abcdefg')) / 'input'
def wrapper(content):
with open(filename, 'w') as fileobj:
fileobj.write(content)
(filename, split_files) = (filename, split_files)
(filename, split_files) = wrapper
</DeepExtract>
split_file(filename, 3)
read_lines = []
for filename in split_files:
assert os.path.exists(filename)
with open(filename, 'r') as fileobj:
read_lines.append(fileobj.read())
assert read_lines[0] == 'a\nb\nc'
assert read_lines[1] == 'd\ne'
assert read_lines[2] == 'f\ng'
|
def test_split_file_odd(create_dummy_file):
patched_tempfile = mocker.patch('aizynthfinder.utils.files.tempfile.mktemp')
split_files = ['\n'.join(list('abcdefg')) / 'split1', '\n'.join(list('abcdefg')) / 'split2', '\n'.join(list('abcdefg')) / 'split3']
patched_tempfile.side_effect = split_files
filename = '\n'.join(list('abcdefg')) / 'input'
def wrapper(content):
with open(filename, 'w') as fileobj:
fileobj.write(content)
(filename, split_files) = (filename, split_files)
(filename, split_files) = wrapper
split_file(filename, 3)
read_lines = []
for filename in split_files:
assert os.path.exists(filename)
with open(filename, 'r') as fileobj:
read_lines.append(fileobj.read())
assert read_lines[0] == 'a\nb\nc'
assert read_lines[1] == 'd\ne'
assert read_lines[2] == 'f\ng'
|
aizynthfinder
|
positive
|
def test_unvectorize_multi_output(rngs, x_batch, x_single):
def f_batch(X):
return (hk.Linear(11)(X), hk.Linear(13)(X))
(init, f_batch) = hk.transform(f_batch)
params = init(next(rngs), x_batch)
<DeepExtract>
y_batch = hk.Linear(11)(params)
</DeepExtract>
assert y_batch[0].shape == (7, 11)
assert y_batch[1].shape == (7, 13)
f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=0)
y_single = f_single(params, next(rngs), x_single)
assert y_single[0].shape == (11,)
assert y_single[1].shape == (13,)
f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=(0, None))
y_single = f_single(params, next(rngs), x_single)
assert y_single[0].shape == (11,)
assert y_single[1].shape == (1, 13)
f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=None)
y_single = f_single(params, next(rngs), x_single)
assert y_single[0].shape == (1, 11)
assert y_single[1].shape == (1, 13)
f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=(0,))
msg = 'number of out_axes must match the number of function outputs'
with pytest.raises(ValueError, match=msg):
f_single(params, next(rngs), x_single)
|
def test_unvectorize_multi_output(rngs, x_batch, x_single):
def f_batch(X):
return (hk.Linear(11)(X), hk.Linear(13)(X))
(init, f_batch) = hk.transform(f_batch)
params = init(next(rngs), x_batch)
y_batch = hk.Linear(11)(params)
assert y_batch[0].shape == (7, 11)
assert y_batch[1].shape == (7, 13)
f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=0)
y_single = f_single(params, next(rngs), x_single)
assert y_single[0].shape == (11,)
assert y_single[1].shape == (13,)
f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=(0, None))
y_single = f_single(params, next(rngs), x_single)
assert y_single[0].shape == (11,)
assert y_single[1].shape == (1, 13)
f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=None)
y_single = f_single(params, next(rngs), x_single)
assert y_single[0].shape == (1, 11)
assert y_single[1].shape == (1, 13)
f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=(0,))
msg = 'number of out_axes must match the number of function outputs'
with pytest.raises(ValueError, match=msg):
f_single(params, next(rngs), x_single)
|
coax
|
positive
|
def test_spl_item(self):
<DeepExtract>
'dotconfig-04.txt' = realpath(join(dirname(__file__), '..', '..', 'resources', 'dotconfig-04.txt'))
</DeepExtract>
checker = UBootConfigChecker('2019.04')
config = checker.load(filename)
self.assertTrue('CONFIG_SPL_FS_EXT4' in config)
self.assertTrue(config['CONFIG_SPL_FS_EXT4'][0])
report = checker.audit()
self.assertTrue('CVE-2019-11059' in report)
|
def test_spl_item(self):
'dotconfig-04.txt' = realpath(join(dirname(__file__), '..', '..', 'resources', 'dotconfig-04.txt'))
checker = UBootConfigChecker('2019.04')
config = checker.load(filename)
self.assertTrue('CONFIG_SPL_FS_EXT4' in config)
self.assertTrue(config['CONFIG_SPL_FS_EXT4'][0])
report = checker.audit()
self.assertTrue('CVE-2019-11059' in report)
|
depthcharge
|
positive
|
def forward(self, tbl, tbl_len, tbl_split):
"""
Encode table headers.
:param tbl: header token list
:param tbl_len: length of token list (num_table_header, batch)
:param tbl_split: table header boundary list
"""
<DeepExtract>
(idx_sorted, tbl_len_sorted, idx_map_back) = sort_for_pack(tbl_len)
tbl_sorted = tbl.index_select(1, Variable(torch.LongTensor(idx_sorted).cuda(), requires_grad=False))
(__, tbl_context) = self.encoder(tbl_sorted, tbl_len_sorted)
v_idx_map_back = Variable(torch.LongTensor(idx_map_back).cuda(), requires_grad=False)
tbl_context = tbl_context.index_select(1, v_idx_map_back)
tbl_context = tbl_context
</DeepExtract>
if self.split_type == 'outcell':
batch_index = torch.LongTensor(range(tbl_split.data.size(1))).unsqueeze_(0).cuda().expand_as(tbl_split.data)
enc_split = tbl_context[tbl_split.data, batch_index, :]
(enc_left, enc_right) = (enc_split[:-1], enc_split[1:])
elif self.split_type == 'incell':
batch_index = torch.LongTensor(range(tbl_split.data.size(1))).unsqueeze_(0).cuda().expand(tbl_split.data.size(0) - 1, tbl_split.data.size(1))
split_left = (tbl_split.data[:-1] + 1).clamp(0, tbl_context.size(0) - 1)
enc_left = tbl_context[split_left, batch_index, :]
split_right = (tbl_split.data[1:] - 1).clamp(0, tbl_context.size(0) - 1)
enc_right = tbl_context[split_right, batch_index, :]
if self.merge_type == 'sub':
return enc_right - enc_left
elif self.merge_type == 'cat':
half_hidden_size = self.hidden_size // 2
return torch.cat([enc_right[:, :, :half_hidden_size], enc_left[:, :, half_hidden_size:]], 2)
elif self.merge_type == 'mlp':
return self.merge(torch.cat([enc_right, enc_left], 2))
|
def forward(self, tbl, tbl_len, tbl_split):
"""
Encode table headers.
:param tbl: header token list
:param tbl_len: length of token list (num_table_header, batch)
:param tbl_split: table header boundary list
"""
(idx_sorted, tbl_len_sorted, idx_map_back) = sort_for_pack(tbl_len)
tbl_sorted = tbl.index_select(1, Variable(torch.LongTensor(idx_sorted).cuda(), requires_grad=False))
(__, tbl_context) = self.encoder(tbl_sorted, tbl_len_sorted)
v_idx_map_back = Variable(torch.LongTensor(idx_map_back).cuda(), requires_grad=False)
tbl_context = tbl_context.index_select(1, v_idx_map_back)
tbl_context = tbl_context
if self.split_type == 'outcell':
batch_index = torch.LongTensor(range(tbl_split.data.size(1))).unsqueeze_(0).cuda().expand_as(tbl_split.data)
enc_split = tbl_context[tbl_split.data, batch_index, :]
(enc_left, enc_right) = (enc_split[:-1], enc_split[1:])
elif self.split_type == 'incell':
batch_index = torch.LongTensor(range(tbl_split.data.size(1))).unsqueeze_(0).cuda().expand(tbl_split.data.size(0) - 1, tbl_split.data.size(1))
split_left = (tbl_split.data[:-1] + 1).clamp(0, tbl_context.size(0) - 1)
enc_left = tbl_context[split_left, batch_index, :]
split_right = (tbl_split.data[1:] - 1).clamp(0, tbl_context.size(0) - 1)
enc_right = tbl_context[split_right, batch_index, :]
if self.merge_type == 'sub':
return enc_right - enc_left
elif self.merge_type == 'cat':
half_hidden_size = self.hidden_size // 2
return torch.cat([enc_right[:, :, :half_hidden_size], enc_left[:, :, half_hidden_size:]], 2)
elif self.merge_type == 'mlp':
return self.merge(torch.cat([enc_right, enc_left], 2))
|
coarse2fine
|
positive
|
def _mimetype(self, path):
"""
Attempt to read the file's mimetype.
"""
'\n The function below is implemented to handle linux system sys dev and proc directory\n bugs\n '
file_name = str(path.split('/')[-1]).strip()
if re.search('^\\./proc/', path) or re.search('^\\./sys/', path):
if file_name in self._files:
try:
<DeepExtract>
fp = self._options['storage'].open(path, mode)
</DeepExtract>
mime = magic.Magic(mime=True).from_buffer(fp.read(10))
fp.close()
return mime
except:
return 'application/empty'
if re.search('^\\./dev/', path) and self._files[file_name] in 'l':
return 'application/empty'
if file_name in self._files:
if self._files[file_name] not in '-l':
return 'application/empty'
<DeepExtract>
fp = self._options['storage'].open(path, mode)
</DeepExtract>
mime = magic.Magic(mime=True).from_buffer(fp.read(10))
fp.close()
return mime
|
def _mimetype(self, path):
"""
Attempt to read the file's mimetype.
"""
'\n The function below is implemented to handle linux system sys dev and proc directory\n bugs\n '
file_name = str(path.split('/')[-1]).strip()
if re.search('^\\./proc/', path) or re.search('^\\./sys/', path):
if file_name in self._files:
try:
fp = self._options['storage'].open(path, mode)
mime = magic.Magic(mime=True).from_buffer(fp.read(10))
fp.close()
return mime
except:
return 'application/empty'
if re.search('^\\./dev/', path) and self._files[file_name] in 'l':
return 'application/empty'
if file_name in self._files:
if self._files[file_name] not in '-l':
return 'application/empty'
fp = self._options['storage'].open(path, mode)
mime = magic.Magic(mime=True).from_buffer(fp.read(10))
fp.close()
return mime
|
adminset
|
positive
|
def _pl(self, image, context):
tmp = image.copy()
<DeepExtract>
assert tmp.dtype == np.float32
img = cl_builder.new_image_from_ndarray(tmp)
out = cl_builder.new_image(img.width, img.height)
gaussian_repeat_cl(img, out, self.gA)
gcr = img.to_numpy()
</DeepExtract>
error = (tmp - gcr) ** 2
mask = -gaussian_repeat(error, self.gB)
mask -= np.min(mask)
mask /= np.max(mask)
mask = (mask - 0.5) * self.strength + 1.0
res = gcr + mask * (tmp - gcr)
res[..., 3] = tmp[..., 3]
return res
|
def _pl(self, image, context):
tmp = image.copy()
assert tmp.dtype == np.float32
img = cl_builder.new_image_from_ndarray(tmp)
out = cl_builder.new_image(img.width, img.height)
gaussian_repeat_cl(img, out, self.gA)
gcr = img.to_numpy()
error = (tmp - gcr) ** 2
mask = -gaussian_repeat(error, self.gB)
mask -= np.min(mask)
mask /= np.max(mask)
mask = (mask - 0.5) * self.strength + 1.0
res = gcr + mask * (tmp - gcr)
res[..., 3] = tmp[..., 3]
return res
|
blender-texture-tools
|
positive
|
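_pl above is essentially an edge-weighted unsharp mask: blur the image, turn the blurred squared difference between image and blur into a mask rescaled around 1.0 by strength, and add mask * (image - blur) back onto the blur. A minimal grayscale NumPy sketch of the same idea, with a crude box blur standing in for the OpenCL Gaussian (illustrative only, not the add-on's code, and without the alpha-channel handling):

import numpy as np

def box_blur(img, k=3):
    """Crude box blur, standing in for gaussian_repeat/gaussian_repeat_cl."""
    pad = k // 2
    padded = np.pad(img, pad, mode="edge")
    out = np.zeros_like(img, dtype=np.float32)
    for dy in range(k):
        for dx in range(k):
            out += padded[dy:dy + img.shape[0], dx:dx + img.shape[1]]
    return out / (k * k)

def sharpen(img, strength=1.0):
    blur = box_blur(img)
    error = (img - blur) ** 2
    mask = -box_blur(error)            # strong edges get a smaller weight
    mask -= mask.min()
    if mask.max() > 0:
        mask /= mask.max()
    mask = (mask - 0.5) * strength + 1.0
    return blur + mask * (img - blur)

img = np.random.rand(16, 16).astype(np.float32)
print(sharpen(img, strength=2.0).shape)   # (16, 16)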
def mlp_actor_critic(x, a, hidden_sizes=(400, 300), activation=tf.nn.relu, output_activation=None, policy=mlp_gaussian_policy, action_space=None):
with tf.variable_scope('pi'):
(mu, pi, logp_pi) = policy(x, a, hidden_sizes, activation, output_activation)
<DeepExtract>
mu = tf.tanh(mu)
pi = tf.tanh(pi)
logp_pi -= tf.reduce_sum(tf.log(clip_but_pass_gradient(1 - pi ** 2, l=0, u=1) + 1e-06), axis=1)
(mu, pi, logp_pi) = (mu, pi, logp_pi)
</DeepExtract>
action_scale = action_space
mu *= action_scale
pi *= action_scale
vf_mlp = lambda x: tf.squeeze(mlp(x, list(hidden_sizes) + [1], activation, None), axis=1)
with tf.variable_scope('q1'):
q1 = vf_mlp(tf.concat([x, a], axis=-1))
with tf.variable_scope('q1', reuse=True):
q1_pi = vf_mlp(tf.concat([x, pi], axis=-1))
with tf.variable_scope('q2'):
q2 = vf_mlp(tf.concat([x, a], axis=-1))
with tf.variable_scope('q2', reuse=True):
q2_pi = vf_mlp(tf.concat([x, pi], axis=-1))
with tf.variable_scope('v'):
v = vf_mlp(x)
return (mu, pi, logp_pi, q1, q2, q1_pi, q2_pi, v)
|
def mlp_actor_critic(x, a, hidden_sizes=(400, 300), activation=tf.nn.relu, output_activation=None, policy=mlp_gaussian_policy, action_space=None):
with tf.variable_scope('pi'):
(mu, pi, logp_pi) = policy(x, a, hidden_sizes, activation, output_activation)
mu = tf.tanh(mu)
pi = tf.tanh(pi)
logp_pi -= tf.reduce_sum(tf.log(clip_but_pass_gradient(1 - pi ** 2, l=0, u=1) + 1e-06), axis=1)
(mu, pi, logp_pi) = (mu, pi, logp_pi)
action_scale = action_space
mu *= action_scale
pi *= action_scale
vf_mlp = lambda x: tf.squeeze(mlp(x, list(hidden_sizes) + [1], activation, None), axis=1)
with tf.variable_scope('q1'):
q1 = vf_mlp(tf.concat([x, a], axis=-1))
with tf.variable_scope('q1', reuse=True):
q1_pi = vf_mlp(tf.concat([x, pi], axis=-1))
with tf.variable_scope('q2'):
q2 = vf_mlp(tf.concat([x, a], axis=-1))
with tf.variable_scope('q2', reuse=True):
q2_pi = vf_mlp(tf.concat([x, pi], axis=-1))
with tf.variable_scope('v'):
v = vf_mlp(x)
return (mu, pi, logp_pi, q1, q2, q1_pi, q2_pi, v)
|
DRLib
|
positive
|
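mlp_actor_critic above squashes the Gaussian sample through tanh and corrects its log-density with the change-of-variables term sum(log(1 - tanh(u)^2 + eps)) before rescaling by the action bound. A small NumPy sketch of that squashed-Gaussian sampling step, assuming a diagonal Gaussian policy (an illustration of the correction, not the repo's TensorFlow graph):

import numpy as np

def squashed_gaussian_sample(mu, log_std, action_scale=1.0, rng=np.random):
    """Sample a tanh-squashed Gaussian action and its corrected log-prob."""
    std = np.exp(log_std)
    u = mu + std * rng.standard_normal(mu.shape)                 # pre-squash sample
    logp = np.sum(-0.5 * (((u - mu) / std) ** 2 + 2 * log_std + np.log(2 * np.pi)))
    a = np.tanh(u)
    # change-of-variables correction, mirroring the clip_but_pass_gradient term
    logp -= np.sum(np.log(np.clip(1.0 - a ** 2, 0.0, 1.0) + 1e-6))
    return action_scale * a, logp

action, logp = squashed_gaussian_sample(np.zeros(3), np.full(3, -1.0), action_scale=2.0)
print(action, logp)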
def run_in_background(name, args, **kwargs):
"""Pickle arguments to cache file, then call this script again via
:func:`subprocess.call`.
:param name: name of task
:type name: ``unicode``
:param args: arguments passed as first argument to :func:`subprocess.call`
:param \\**kwargs: keyword arguments to :func:`subprocess.call`
:returns: exit code of sub-process
:rtype: ``int``
When you call this function, it caches its arguments and then calls
``background.py`` in a subprocess. The Python subprocess will load the
cached arguments, fork into the background, and then run the command you
specified.
This function will return as soon as the ``background.py`` subprocess has
forked, returning the exit code of *that* process (i.e. not of the command
you're trying to run).
If that process fails, an error will be written to the log file.
If a process is already running under the same name, this function will
return immediately and will not run the specified command.
"""
if is_running(name):
log.info('Task `{}` is already running'.format(name))
return
<DeepExtract>
argcache = wf.cachefile('{}.argcache'.format(name))
</DeepExtract>
with open(argcache, 'wb') as file_obj:
pickle.dump({'args': args, 'kwargs': kwargs}, file_obj)
log.debug('Command arguments cached to `{}`'.format(argcache))
cmd = ['/usr/bin/python', __file__, name]
log.debug('Calling {!r} ...'.format(cmd))
retcode = subprocess.call(cmd)
if retcode:
log.error('Failed to call task in background')
else:
log.debug('Executing task `{}` in background...'.format(name))
return retcode
|
def run_in_background(name, args, **kwargs):
"""Pickle arguments to cache file, then call this script again via
:func:`subprocess.call`.
:param name: name of task
:type name: ``unicode``
:param args: arguments passed as first argument to :func:`subprocess.call`
:param \\**kwargs: keyword arguments to :func:`subprocess.call`
:returns: exit code of sub-process
:rtype: ``int``
When you call this function, it caches its arguments and then calls
``background.py`` in a subprocess. The Python subprocess will load the
cached arguments, fork into the background, and then run the command you
specified.
This function will return as soon as the ``background.py`` subprocess has
forked, returning the exit code of *that* process (i.e. not of the command
you're trying to run).
If that process fails, an error will be written to the log file.
If a process is already running under the same name, this function will
return immediately and will not run the specified command.
"""
if is_running(name):
log.info('Task `{}` is already running'.format(name))
return
argcache = wf.cachefile('{}.argcache'.format(name))
with open(argcache, 'wb') as file_obj:
pickle.dump({'args': args, 'kwargs': kwargs}, file_obj)
log.debug('Command arguments cached to `{}`'.format(argcache))
cmd = ['/usr/bin/python', __file__, name]
log.debug('Calling {!r} ...'.format(cmd))
retcode = subprocess.call(cmd)
if retcode:
log.error('Failed to call task in background')
else:
log.debug('Executing task `{}` in background...'.format(name))
return retcode
|
alfred_zotquery
|
positive
|
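run_in_background above hands work to a detached process by pickling the call spec to a per-task cache file and re-invoking the script, so that background.py can load the spec and run the real command. A minimal, self-contained sketch of that pickle hand-off (hypothetical file name, plain tempfile instead of the workflow's cache dir, and no actual forking):

import os, pickle, subprocess, sys, tempfile

# 1. Caller side: cache the call spec for a named task.
argcache = os.path.join(tempfile.gettempdir(), "mytask.argcache")   # hypothetical path
with open(argcache, "wb") as fh:
    pickle.dump({"args": [sys.executable, "-c", "print('hello from the task')"],
                 "kwargs": {}}, fh)

# 2. "Background" side: reload the spec and run the real command.
with open(argcache, "rb") as fh:
    spec = pickle.load(fh)
retcode = subprocess.call(spec["args"], **spec["kwargs"])
print("exit code:", retcode)
os.remove(argcache)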
def run_advi(log_joint_fn, all_args, num_iterations, run_map=False):
"""Train model with automatic differentiation variational inference.
Args:
run_map: If True, runs ADVI with `E_q [ log p(data, params) ]` as loss
function.
"""
(alpha, beta, epsilon, w, tau, x) = all_args
log_posterior = lambda epsilon, w, tau: log_joint_fn(alpha, beta, epsilon, w, tau, x)
def unpack_params(params):
"""Unpacks `np.ndarray` into list of variational parameters."""
param_shapes = [epsilon.shape, epsilon.shape, w.shape, w.shape, tau.shape, tau.shape]
begin = 0
end = 0
unpacked_params = []
for param_shape in param_shapes:
end += int(np.prod(param_shape))
param = params[begin:end].reshape(param_shape)
begin = end
unpacked_params.append(param)
return unpacked_params
def loss(params, t, return_marginal=False):
"""Reparameterization-based Monte Carlo estimate of negative ELBO."""
del t
<DeepExtract>
param_shapes = [epsilon.shape, epsilon.shape, w.shape, w.shape, tau.shape, tau.shape]
begin = 0
end = 0
unpacked_params = []
for param_shape in param_shapes:
end += int(np.prod(param_shape))
param = params[begin:end].reshape(param_shape)
begin = end
unpacked_params.append(param)
unpacked_params = unpacked_params
</DeepExtract>
zs = []
log_q = 0.0
for t in range(2):
loc = unpacked_params[2 * t]
log_scale = unpacked_params[2 * t + 1]
z = loc + np.exp(log_scale) * np.random.normal(0, 1, size=log_scale.shape)
zs.append(z)
log_q += log_probs.norm_gen_log_prob(z, loc, np.exp(log_scale))
zs.append(tau)
log_p = log_posterior(*zs)
if return_marginal:
return log_p
elif run_map:
return -log_p
return log_q - log_p
def callback(params, t, g):
"""Callback for use in Autograd's optimizer routine."""
del g
if t % FLAGS.num_print == 0 or t + 1 == num_iterations:
<DeepExtract>
del t
unpacked_params = unpack_params(params)
zs = []
log_q = 0.0
for t in range(2):
loc = unpacked_params[2 * t]
log_scale = unpacked_params[2 * t + 1]
z = loc + np.exp(log_scale) * np.random.normal(0, 1, size=log_scale.shape)
zs.append(z)
log_q += log_probs.norm_gen_log_prob(z, loc, np.exp(log_scale))
zs.append(tau)
log_p = log_posterior(*zs)
if True:
log_joint = log_p
elif run_map:
log_joint = -log_p
log_joint = log_q - log_p
</DeepExtract>
elbo = -loss(params, t)
runtime = time.time() - start
print('Iteration: {:>3d} Log Joint: {:.3f} ELBO: {:.3f} Runtime (s): {:.3f}'.format(t, log_joint, elbo, runtime))
log_joints.append(log_joint)
elbos.append(elbo)
runtimes.append(runtime)
return
grad_loss = grad(loss)
if run_map:
print('Running MAP...')
else:
print('Running ADVI...')
num_params = int(2 * np.prod(epsilon.shape) + 2 * np.prod(w.shape) + 2 * np.prod(tau.shape))
print('Number of parameters: ', num_params)
params = np.concatenate([np.random.normal(0, 1, size=int(np.prod(epsilon.shape))), np.random.normal(-3, 0.001, size=int(np.prod(epsilon.shape))), np.random.normal(0, 1, size=int(np.prod(w.shape))), np.random.normal(-3, 0.001, size=int(np.prod(w.shape))), np.random.normal(0, 1, size=int(np.prod(tau.shape))), np.random.normal(-3, 0.001, size=int(np.prod(tau.shape)))], 0)
log_joints = []
elbos = []
runtimes = []
start = time.time()
params = adam(grad_loss, params, callback=callback, num_iters=num_iterations, step_size=0.01)
return (log_joints, runtimes, elbos)
|
def run_advi(log_joint_fn, all_args, num_iterations, run_map=False):
"""Train model with automatic differentiation variational inference.
Args:
run_map: If True, runs ADVI with `E_q [ log p(data, params) ]` as loss
function.
"""
(alpha, beta, epsilon, w, tau, x) = all_args
log_posterior = lambda epsilon, w, tau: log_joint_fn(alpha, beta, epsilon, w, tau, x)
def unpack_params(params):
"""Unpacks `np.ndarray` into list of variational parameters."""
param_shapes = [epsilon.shape, epsilon.shape, w.shape, w.shape, tau.shape, tau.shape]
begin = 0
end = 0
unpacked_params = []
for param_shape in param_shapes:
end += int(np.prod(param_shape))
param = params[begin:end].reshape(param_shape)
begin = end
unpacked_params.append(param)
return unpacked_params
def loss(params, t, return_marginal=False):
"""Reparameterization-based Monte Carlo estimate of negative ELBO."""
del t
param_shapes = [epsilon.shape, epsilon.shape, w.shape, w.shape, tau.shape, tau.shape]
begin = 0
end = 0
unpacked_params = []
for param_shape in param_shapes:
end += int(np.prod(param_shape))
param = params[begin:end].reshape(param_shape)
begin = end
unpacked_params.append(param)
unpacked_params = unpacked_params
zs = []
log_q = 0.0
for t in range(2):
loc = unpacked_params[2 * t]
log_scale = unpacked_params[2 * t + 1]
z = loc + np.exp(log_scale) * np.random.normal(0, 1, size=log_scale.shape)
zs.append(z)
log_q += log_probs.norm_gen_log_prob(z, loc, np.exp(log_scale))
zs.append(tau)
log_p = log_posterior(*zs)
if return_marginal:
return log_p
elif run_map:
return -log_p
return log_q - log_p
def callback(params, t, g):
"""Callback for use in Autograd's optimizer routine."""
del g
if t % FLAGS.num_print == 0 or t + 1 == num_iterations:
del t
unpacked_params = unpack_params(params)
zs = []
log_q = 0.0
for t in range(2):
loc = unpacked_params[2 * t]
log_scale = unpacked_params[2 * t + 1]
z = loc + np.exp(log_scale) * np.random.normal(0, 1, size=log_scale.shape)
zs.append(z)
log_q += log_probs.norm_gen_log_prob(z, loc, np.exp(log_scale))
zs.append(tau)
log_p = log_posterior(*zs)
if True:
log_joint = log_p
elif run_map:
log_joint = -log_p
log_joint = log_q - log_p
elbo = -loss(params, t)
runtime = time.time() - start
print('Iteration: {:>3d} Log Joint: {:.3f} ELBO: {:.3f} Runtime (s): {:.3f}'.format(t, log_joint, elbo, runtime))
log_joints.append(log_joint)
elbos.append(elbo)
runtimes.append(runtime)
return
grad_loss = grad(loss)
if run_map:
print('Running MAP...')
else:
print('Running ADVI...')
num_params = int(2 * np.prod(epsilon.shape) + 2 * np.prod(w.shape) + 2 * np.prod(tau.shape))
print('Number of parameters: ', num_params)
params = np.concatenate([np.random.normal(0, 1, size=int(np.prod(epsilon.shape))), np.random.normal(-3, 0.001, size=int(np.prod(epsilon.shape))), np.random.normal(0, 1, size=int(np.prod(w.shape))), np.random.normal(-3, 0.001, size=int(np.prod(w.shape))), np.random.normal(0, 1, size=int(np.prod(tau.shape))), np.random.normal(-3, 0.001, size=int(np.prod(tau.shape)))], 0)
log_joints = []
elbos = []
runtimes = []
start = time.time()
params = adam(grad_loss, params, callback=callback, num_iters=num_iterations, step_size=0.01)
return (log_joints, runtimes, elbos)
|
autoconj
|
positive
|
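run_advi above keeps every variational parameter (a loc and a log-scale per latent array) in one flat vector for Autograd's adam, and unpack_params slices it back into shaped arrays. A NumPy round-trip showing just that packing scheme in isolation:

import numpy as np

def pack(params):
    """Flatten a list of arrays into one vector."""
    return np.concatenate([p.ravel() for p in params])

def unpack(flat, shapes):
    """Slice a flat vector back into arrays of the given shapes."""
    out, begin = [], 0
    for shape in shapes:
        end = begin + int(np.prod(shape))
        out.append(flat[begin:end].reshape(shape))
        begin = end
    return out

shapes = [(2, 3), (2, 3), (4,), (4,)]            # e.g. loc/log-scale pairs
params = [np.random.randn(*s) for s in shapes]
flat = pack(params)
assert all(np.allclose(a, b) for a, b in zip(params, unpack(flat, shapes)))
print(flat.shape)                                # (20,)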
def test_mergeConstraintSets(self):
"""
Create a few disjoint sets, specify some constraints, and verify that
they are merged correctly.
"""
def _verify(_coll_sets, _correct_answer):
"""
Assert that the ``_coll_sets`` are reduced to ``_correct_answer``
by the `mergeConstraintSets` algorithm.
"""
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, _coll_sets)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in _correct_answer]
assert sorted(computed) == sorted(correct)
igor = self.igor
mergeConstraintSets = azrael.leonard.mergeConstraintSets
self.igor.reset()
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
assert mergeConstraintSets(ret.data, []) == (True, None, [])
<DeepExtract>
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, [])
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in []]
assert sorted(computed) == sorted(correct)
</DeepExtract>
self.igor.reset()
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
assert mergeConstraintSets(ret.data, [[1]]) == (True, None, [[1]])
tmp = [[1, 2, 3]]
assert mergeConstraintSets(ret.data, tmp) == (True, None, tmp)
del tmp
self.igor.reset()
s = [['1'], ['2']]
<DeepExtract>
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in s]
assert sorted(computed) == sorted(correct)
</DeepExtract>
assert igor.addConstraints([getP2P()]).ok
<DeepExtract>
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in [['1', '2']]]
assert sorted(computed) == sorted(correct)
</DeepExtract>
self.igor.reset()
<DeepExtract>
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in s]
assert sorted(computed) == sorted(correct)
</DeepExtract>
self.igor.reset()
s = [['1'], ['2']]
<DeepExtract>
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in s]
assert sorted(computed) == sorted(correct)
</DeepExtract>
assert igor.addConstraints([getP2P(rb_a='1', rb_b='3')]).ok
<DeepExtract>
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in s]
assert sorted(computed) == sorted(correct)
</DeepExtract>
self.igor.reset()
s = [['1', '2', '3'], ['4', '5'], ['6']]
<DeepExtract>
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in s]
assert sorted(computed) == sorted(correct)
</DeepExtract>
assert igor.addConstraints([getP2P(rb_a='1', rb_b='6')]).ok
<DeepExtract>
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in [['1', '2', '3', '6'], ['4', '5']]]
assert sorted(computed) == sorted(correct)
</DeepExtract>
self.igor.reset()
s = [['1', '2', '3'], ['4', '5'], ['6']]
<DeepExtract>
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in s]
assert sorted(computed) == sorted(correct)
</DeepExtract>
assert igor.addConstraints([getP2P(rb_a='1', rb_b='6')]).ok
assert igor.addConstraints([getP2P(rb_a='3', rb_b='4')]).ok
<DeepExtract>
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in [['1', '2', '3', '6', '4', '5']]]
assert sorted(computed) == sorted(correct)
</DeepExtract>
|
def test_mergeConstraintSets(self):
"""
Create a few disjoint sets, specify some constraints, and verify that
they are merged correctly.
"""
def _verify(_coll_sets, _correct_answer):
"""
Assert that the ``_coll_sets`` are reduced to ``_correct_answer``
by the `mergeConstraintSets` algorithm.
"""
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, _coll_sets)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in _correct_answer]
assert sorted(computed) == sorted(correct)
igor = self.igor
mergeConstraintSets = azrael.leonard.mergeConstraintSets
self.igor.reset()
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
assert mergeConstraintSets(ret.data, []) == (True, None, [])
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, [])
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in []]
assert sorted(computed) == sorted(correct)
self.igor.reset()
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
assert mergeConstraintSets(ret.data, [[1]]) == (True, None, [[1]])
tmp = [[1, 2, 3]]
assert mergeConstraintSets(ret.data, tmp) == (True, None, tmp)
del tmp
self.igor.reset()
s = [['1'], ['2']]
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in s]
assert sorted(computed) == sorted(correct)
assert igor.addConstraints([getP2P()]).ok
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in [['1', '2']]]
assert sorted(computed) == sorted(correct)
self.igor.reset()
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in s]
assert sorted(computed) == sorted(correct)
self.igor.reset()
s = [['1'], ['2']]
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in s]
assert sorted(computed) == sorted(correct)
assert igor.addConstraints([getP2P(rb_a='1', rb_b='3')]).ok
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in s]
assert sorted(computed) == sorted(correct)
self.igor.reset()
s = [['1', '2', '3'], ['4', '5'], ['6']]
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in s]
assert sorted(computed) == sorted(correct)
assert igor.addConstraints([getP2P(rb_a='1', rb_b='6')]).ok
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in [['1', '2', '3', '6'], ['4', '5']]]
assert sorted(computed) == sorted(correct)
self.igor.reset()
s = [['1', '2', '3'], ['4', '5'], ['6']]
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in s]
assert sorted(computed) == sorted(correct)
assert igor.addConstraints([getP2P(rb_a='1', rb_b='6')]).ok
assert igor.addConstraints([getP2P(rb_a='3', rb_b='4')]).ok
assert self.igor.updateLocalCache().ok
ret = self.igor.uniquePairs()
assert ret.ok
ret = azrael.leonard.mergeConstraintSets(ret.data, s)
assert ret.ok
computed = ret.data
computed = [sorted(tuple(_)) for _ in computed]
correct = [sorted(tuple(_)) for _ in [['1', '2', '3', '6', '4', '5']]]
assert sorted(computed) == sorted(correct)
|
azrael
|
positive
|
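Conceptually, mergeConstraintSets in the test above unions any broad-phase collision sets that are linked by a constrained pair of bodies. The sketch below reproduces only that merging idea with a tiny union-find over string IDs (not azrael's implementation):

def merge_sets(sets, constraint_pairs):
    """Union any input sets that are linked by a constraint pair."""
    parent = {}

    def find(x):
        parent.setdefault(x, x)
        while parent[x] != x:
            parent[x] = parent[parent[x]]        # path halving
            x = parent[x]
        return x

    def union(a, b):
        parent[find(a)] = find(b)

    for s in sets:                               # members of one set stay together
        for member in s[1:]:
            union(s[0], member)
    for a, b in constraint_pairs:                # constraints glue sets together
        union(a, b)

    merged = {}
    for s in sets:
        for member in s:
            merged.setdefault(find(member), set()).add(member)
    return [sorted(group) for group in merged.values()]

print(merge_sets([['1', '2', '3'], ['4', '5'], ['6']], [('1', '6'), ('3', '4')]))
# [['1', '2', '3', '4', '5', '6']]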
def compute(self, iteration_result, reference_matrix=None):
"""general compute method,
iteration_result is a dictionary that contains the
results generated by the scoring functions in the
current computation.
the reference_matrix is actually a hack that allows the scoring
function to normalize its scores to the range of a reference
score matrix. In the normal case, those would be the gene expression
row scores"""
iteration = iteration_result['iteration']
if self.run_in_iteration(iteration):
logging.debug("running '%s' in iteration %d with scaling: %f", self.id, iteration, self.scaling(iteration))
<DeepExtract>
raise Exception('implement me')
</DeepExtract>
<DeepExtract>
if self.cache_result:
self.cached_result = computed_result
else:
logging.debug('pickle result to %s', self.pickle_path())
with open(self.pickle_path(), 'wb') as outfile:
pickle.dump(computed_result, outfile)
</DeepExtract>
else:
<DeepExtract>
if self.cache_result and 'cached_result' in dir(self):
computed_result = self.cached_result
elif os.path.exists(self.pickle_path()):
with open(self.pickle_path(), 'rb') as infile:
computed_result = pickle.load(infile)
else:
computed_result = None
</DeepExtract>
self.run_log.log(iteration, self.run_in_iteration(iteration), self.scaling(iteration_result['iteration']))
<DeepExtract>
if computed_result is None:
iteration_result['score_means'][self.id] = 0.0
else:
iteration_result['score_means'][self.id] = computed_result.mean()
</DeepExtract>
return computed_result
|
def compute(self, iteration_result, reference_matrix=None):
"""general compute method,
iteration_result is a dictionary that contains the
results generated by the scoring functions in the
current computation.
the reference_matrix is actually a hack that allows the scoring
function to normalize its scores to the range of a reference
score matrix. In the normal case, those would be the gene expression
row scores"""
iteration = iteration_result['iteration']
if self.run_in_iteration(iteration):
logging.debug("running '%s' in iteration %d with scaling: %f", self.id, iteration, self.scaling(iteration))
raise Exception('implement me')
if self.cache_result:
self.cached_result = computed_result
else:
logging.debug('pickle result to %s', self.pickle_path())
with open(self.pickle_path(), 'wb') as outfile:
pickle.dump(computed_result, outfile)
else:
if self.cache_result and 'cached_result' in dir(self):
computed_result = self.cached_result
elif os.path.exists(self.pickle_path()):
with open(self.pickle_path(), 'rb') as infile:
computed_result = pickle.load(infile)
else:
computed_result = None
self.run_log.log(iteration, self.run_in_iteration(iteration), self.scaling(iteration_result['iteration']))
if computed_result is None:
iteration_result['score_means'][self.id] = 0.0
else:
iteration_result['score_means'][self.id] = computed_result.mean()
return computed_result
|
cmonkey2
|
positive
|
def __init__(self, model_params, dataset_class, vocab_size, vocab):
super().__init__(layers=None, name='Language Modeling Flow')
self.model_params = model_params
self.dataset_class = dataset_class
self.max_seq_len = self.model_params['max_seq_len']
self.vocab_size = vocab_size
self.vocab = vocab
<DeepExtract>
self.latent_dim = self.model_params['categ_encoding']['num_dimensions']
model_func = lambda c_out: AutoregressiveLSTMModel(c_in=self.latent_dim, c_out=c_out, max_seq_len=self.max_seq_len, num_layers=self.model_params['coupling_hidden_layers'], hidden_size=self.model_params['coupling_hidden_size'], dp_rate=self.model_params['coupling_dropout'], input_dp_rate=self.model_params['coupling_input_dropout'])
self.model_params['categ_encoding']['flow_config']['model_func'] = model_func
self.encoding_layer = create_encoding(self.model_params['categ_encoding'], dataset_class=self.dataset_class, vocab_size=self.vocab_size, vocab=self.vocab)
num_flows = self.model_params['coupling_num_flows']
layers = []
for flow_index in range(num_flows):
layers += [ActNormFlow(self.latent_dim)]
if flow_index > 0:
layers += [InvertibleConv(self.latent_dim)]
layers += [AutoregressiveMixtureCDFCoupling(c_in=self.latent_dim, model_func=model_func, block_type='LSTM model', num_mixtures=self.model_params['coupling_num_mixtures'])]
self.flow_layers = nn.ModuleList([self.encoding_layer] + layers)
</DeepExtract>
self.print_overview()
|
def __init__(self, model_params, dataset_class, vocab_size, vocab):
super().__init__(layers=None, name='Language Modeling Flow')
self.model_params = model_params
self.dataset_class = dataset_class
self.max_seq_len = self.model_params['max_seq_len']
self.vocab_size = vocab_size
self.vocab = vocab
self.latent_dim = self.model_params['categ_encoding']['num_dimensions']
model_func = lambda c_out: AutoregressiveLSTMModel(c_in=self.latent_dim, c_out=c_out, max_seq_len=self.max_seq_len, num_layers=self.model_params['coupling_hidden_layers'], hidden_size=self.model_params['coupling_hidden_size'], dp_rate=self.model_params['coupling_dropout'], input_dp_rate=self.model_params['coupling_input_dropout'])
self.model_params['categ_encoding']['flow_config']['model_func'] = model_func
self.encoding_layer = create_encoding(self.model_params['categ_encoding'], dataset_class=self.dataset_class, vocab_size=self.vocab_size, vocab=self.vocab)
num_flows = self.model_params['coupling_num_flows']
layers = []
for flow_index in range(num_flows):
layers += [ActNormFlow(self.latent_dim)]
if flow_index > 0:
layers += [InvertibleConv(self.latent_dim)]
layers += [AutoregressiveMixtureCDFCoupling(c_in=self.latent_dim, model_func=model_func, block_type='LSTM model', num_mixtures=self.model_params['coupling_num_mixtures'])]
self.flow_layers = nn.ModuleList([self.encoding_layer] + layers)
self.print_overview()
|
CategoricalNF
|
positive
|
def get_hint_pep484585_callable_params(hint: object, exception_cls: TypeException=BeartypeDecorHintPep484585Exception, exception_prefix: str='') -> _HINT_PEP484585_CALLABLE_PARAMS:
"""
Object describing all **parameter type hints** (i.e., PEP-compliant child
type hints typing the parameters accepted by a passed or returned callable)
of the passed **callable type hint** (i.e., :pep:`484`-compliant
``typing.Callable[...]`` or :pep:`585`-compliant
``collections.abc.Callable[...]`` type hint).
This getter returns one of several different types of objects, conditionally
depending on the type of the first argument originally subscripting this
hint. Specifically, if this hint was of the form:
* ``Callable[[{arg_hints}], {return_hint}]``, this getter returns a tuple of
the zero or more parameter type hints subscripting (indexing) this hint.
* ``Callable[..., {return_hint}]``, the :data:`Ellipsis` singleton.
* ``Callable[typing.ParamSpec[...], {return_hint}]``, the
``typing.ParamSpec[...]`` subscripting (indexing) this hint.
This getter is intentionally *not* memoized (e.g., by the
:func:`callable_cached` decorator), as the implementation requires no
iteration and thus exhibits guaranteed constant-time behaviour.
Parameters
----------
hint : object
Callable type hint to be inspected.
exception_cls : TypeException, optional
Type of exception to be raised. Defaults to
:exc:`BeartypeDecorHintPep484585Exception`.
exception_prefix : str, optional
Human-readable substring prefixing the representation of this object in
the exception message. Defaults to the empty string.
Returns
----------
_HINT_PEP484585_CALLABLE_PARAMS
First argument originally subscripting this hint.
Raises
----------
:exc:`exception_cls`
If this hint is *not* a callable type hint.
"""
from beartype._util.hint.pep.utilpepget import get_hint_pep_args, get_hint_pep_sign_or_none
<DeepExtract>
from beartype._util.hint.pep.utilpepget import get_hint_pep_sign
hint_sign = get_hint_pep_sign(hint=hint, exception_cls=exception_cls, exception_prefix=exception_prefix)
if hint_sign is not HintSignCallable:
assert issubclass(exception_cls, Exception), f'{repr(exception_cls)} not exception class.'
assert isinstance(exception_prefix, str), f'{repr(exception_prefix)} not string.'
raise exception_cls(f'{exception_prefix}type hint {repr(hint)} not PEP 484 or 585 callable type hint (i.e., "typing.Callable[...]" or "collections.abc.Callable[...]").')
</DeepExtract>
hint_args = get_hint_pep_args(hint)
hint_params_len = len(hint_args) - 1
if hint_params_len == 0:
return ()
elif hint_params_len >= 2:
return hint_args[:-1]
_TUPLE_EMPTY = ()
hint_param = hint_args[0]
if hint_param is ... or hint_param is _TUPLE_EMPTY:
return hint_param
hint_param_sign = get_hint_pep_sign_or_none(hint_param)
if hint_param_sign in HINT_SIGNS_CALLABLE_PARAMS:
return hint_param
return (hint_param,)
|
def get_hint_pep484585_callable_params(hint: object, exception_cls: TypeException=BeartypeDecorHintPep484585Exception, exception_prefix: str='') -> _HINT_PEP484585_CALLABLE_PARAMS:
"""
Object describing all **parameter type hints** (i.e., PEP-compliant child
type hints typing the parameters accepted by a passed or returned callable)
of the passed **callable type hint** (i.e., :pep:`484`-compliant
``typing.Callable[...]`` or :pep:`585`-compliant
``collections.abc.Callable[...]`` type hint).
This getter returns one of several different types of objects, conditionally
depending on the type of the first argument originally subscripting this
hint. Specifically, if this hint was of the form:
* ``Callable[[{arg_hints}], {return_hint}]``, this getter returns a tuple of
the zero or more parameter type hints subscripting (indexing) this hint.
* ``Callable[..., {return_hint}]``, the :data:`Ellipsis` singleton.
* ``Callable[typing.ParamSpec[...], {return_hint}]``, the
``typing.ParamSpec[...]`` subscripting (indexing) this hint.
This getter is intentionally *not* memoized (e.g., by the
:func:`callable_cached` decorator), as the implementation requires no
iteration and thus exhibits guaranteed constant-time behaviour.
Parameters
----------
hint : object
Callable type hint to be inspected.
exception_cls : TypeException, optional
Type of exception to be raised. Defaults to
:exc:`BeartypeDecorHintPep484585Exception`.
exception_prefix : str, optional
Human-readable substring prefixing the representation of this object in
the exception message. Defaults to the empty string.
Returns
----------
_HINT_PEP484585_CALLABLE_PARAMS
First argument originally subscripting this hint.
Raises
----------
:exc:`exception_cls`
If this hint is *not* a callable type hint.
"""
from beartype._util.hint.pep.utilpepget import get_hint_pep_args, get_hint_pep_sign_or_none
from beartype._util.hint.pep.utilpepget import get_hint_pep_sign
hint_sign = get_hint_pep_sign(hint=hint, exception_cls=exception_cls, exception_prefix=exception_prefix)
if hint_sign is not HintSignCallable:
assert issubclass(exception_cls, Exception), f'{repr(exception_cls)} not exception class.'
assert isinstance(exception_prefix, str), f'{repr(exception_prefix)} not string.'
raise exception_cls(f'{exception_prefix}type hint {repr(hint)} not PEP 484 or 585 callable type hint (i.e., "typing.Callable[...]" or "collections.abc.Callable[...]").')
hint_args = get_hint_pep_args(hint)
hint_params_len = len(hint_args) - 1
if hint_params_len == 0:
return ()
elif hint_params_len >= 2:
return hint_args[:-1]
_TUPLE_EMPTY = ()
hint_param = hint_args[0]
if hint_param is ... or hint_param is _TUPLE_EMPTY:
return hint_param
hint_param_sign = get_hint_pep_sign_or_none(hint_param)
if hint_param_sign in HINT_SIGNS_CALLABLE_PARAMS:
return hint_param
return (hint_param,)
|
beartype
|
positive
|
def test_when_resource_updated(is_created=False):
def test_when_policy_request_succeeds():
nonlocal policy_communicator_request_call_num
simple_dummy_resource.nsx_resource_params = {'dummy': 'dummy'}
simple_dummy_resource.resource_params = {}
exec_logs = []
mock_policy_communicator.request.side_effect = [(200, 'OK'), (200, {'_revision': 1})]
simple_dummy_resource._achieve_present_state(exec_logs)
self.assertEqual(mock_policy_communicator.request.call_count, policy_communicator_request_call_num)
policy_communicator_request_call_num += 1
if is_created:
expected_message = '%s with id %s created.' % (simple_dummy_resource.__class__.__name__, simple_dummy_resource.id)
else:
expected_message = '%s with id %s updated.' % (simple_dummy_resource.__class__.__name__, simple_dummy_resource.id)
expected_exec_logs = [{'changed': True, 'id': simple_dummy_resource.id, 'body': 'OK', 'message': expected_message, 'resource_type': simple_dummy_resource.__class__.__name__}]
self.assertEqual(exec_logs, expected_exec_logs)
def test_when_policy_request_fails():
nonlocal policy_communicator_request_call_num
simple_dummy_resource.nsx_resource_params = {'dummy': 'dummy'}
exec_logs = []
mock_policy_communicator.request.return_value = Mock(side_effect=Exception)
simple_dummy_resource._achieve_present_state(exec_logs)
self.assertEqual(mock_policy_communicator.request.call_count, policy_communicator_request_call_num)
policy_communicator_request_call_num += 1
<DeepExtract>
nonlocal policy_communicator_request_call_num
simple_dummy_resource.nsx_resource_params = {'dummy': 'dummy'}
simple_dummy_resource.resource_params = {}
exec_logs = []
mock_policy_communicator.request.side_effect = [(200, 'OK'), (200, {'_revision': 1})]
simple_dummy_resource._achieve_present_state(exec_logs)
self.assertEqual(mock_policy_communicator.request.call_count, policy_communicator_request_call_num)
policy_communicator_request_call_num += 1
if is_created:
expected_message = '%s with id %s created.' % (simple_dummy_resource.__class__.__name__, simple_dummy_resource.id)
else:
expected_message = '%s with id %s updated.' % (simple_dummy_resource.__class__.__name__, simple_dummy_resource.id)
expected_exec_logs = [{'changed': True, 'id': simple_dummy_resource.id, 'body': 'OK', 'message': expected_message, 'resource_type': simple_dummy_resource.__class__.__name__}]
self.assertEqual(exec_logs, expected_exec_logs)
</DeepExtract>
<DeepExtract>
nonlocal policy_communicator_request_call_num
simple_dummy_resource.nsx_resource_params = {'dummy': 'dummy'}
exec_logs = []
mock_policy_communicator.request.return_value = Mock(side_effect=Exception)
simple_dummy_resource._achieve_present_state(exec_logs)
self.assertEqual(mock_policy_communicator.request.call_count, policy_communicator_request_call_num)
policy_communicator_request_call_num += 1
</DeepExtract>
nonlocal policy_communicator_request_call_num
|
def test_when_resource_updated(is_created=False):
def test_when_policy_request_succeeds():
nonlocal policy_communicator_request_call_num
simple_dummy_resource.nsx_resource_params = {'dummy': 'dummy'}
simple_dummy_resource.resource_params = {}
exec_logs = []
mock_policy_communicator.request.side_effect = [(200, 'OK'), (200, {'_revision': 1})]
simple_dummy_resource._achieve_present_state(exec_logs)
self.assertEqual(mock_policy_communicator.request.call_count, policy_communicator_request_call_num)
policy_communicator_request_call_num += 1
if is_created:
expected_message = '%s with id %s created.' % (simple_dummy_resource.__class__.__name__, simple_dummy_resource.id)
else:
expected_message = '%s with id %s updated.' % (simple_dummy_resource.__class__.__name__, simple_dummy_resource.id)
expected_exec_logs = [{'changed': True, 'id': simple_dummy_resource.id, 'body': 'OK', 'message': expected_message, 'resource_type': simple_dummy_resource.__class__.__name__}]
self.assertEqual(exec_logs, expected_exec_logs)
def test_when_policy_request_fails():
nonlocal policy_communicator_request_call_num
simple_dummy_resource.nsx_resource_params = {'dummy': 'dummy'}
exec_logs = []
mock_policy_communicator.request.return_value = Mock(side_effect=Exception)
simple_dummy_resource._achieve_present_state(exec_logs)
self.assertEqual(mock_policy_communicator.request.call_count, policy_communicator_request_call_num)
policy_communicator_request_call_num += 1
nonlocal policy_communicator_request_call_num
simple_dummy_resource.nsx_resource_params = {'dummy': 'dummy'}
simple_dummy_resource.resource_params = {}
exec_logs = []
mock_policy_communicator.request.side_effect = [(200, 'OK'), (200, {'_revision': 1})]
simple_dummy_resource._achieve_present_state(exec_logs)
self.assertEqual(mock_policy_communicator.request.call_count, policy_communicator_request_call_num)
policy_communicator_request_call_num += 1
if is_created:
expected_message = '%s with id %s created.' % (simple_dummy_resource.__class__.__name__, simple_dummy_resource.id)
else:
expected_message = '%s with id %s updated.' % (simple_dummy_resource.__class__.__name__, simple_dummy_resource.id)
expected_exec_logs = [{'changed': True, 'id': simple_dummy_resource.id, 'body': 'OK', 'message': expected_message, 'resource_type': simple_dummy_resource.__class__.__name__}]
self.assertEqual(exec_logs, expected_exec_logs)
nonlocal policy_communicator_request_call_num
simple_dummy_resource.nsx_resource_params = {'dummy': 'dummy'}
exec_logs = []
mock_policy_communicator.request.return_value = Mock(side_effect=Exception)
simple_dummy_resource._achieve_present_state(exec_logs)
self.assertEqual(mock_policy_communicator.request.call_count, policy_communicator_request_call_num)
policy_communicator_request_call_num += 1
nonlocal policy_communicator_request_call_num
|
ansible-for-nsxt
|
positive
|
def get_reconstructed_adj(self, t, X=None, node_l=None):
if X is not None:
node_num = X.shape[0]
else:
node_num = self._node_num
adj_mtx_r = np.zeros((node_num, node_num))
for v_i in range(node_num):
for v_j in range(node_num):
if v_i == v_j:
continue
<DeepExtract>
try:
feat = np.fabs(self._X[t][v_i, :] - self._X[t][v_j, :])
adj_mtx_r[v_i, v_j] = self._model.predict(np.reshape(feat, [1, -1]))[0]
except:
pdb.set_trace()
</DeepExtract>
return adj_mtx_r
|
def get_reconstructed_adj(self, t, X=None, node_l=None):
if X is not None:
node_num = X.shape[0]
else:
node_num = self._node_num
adj_mtx_r = np.zeros((node_num, node_num))
for v_i in range(node_num):
for v_j in range(node_num):
if v_i == v_j:
continue
try:
feat = np.fabs(self._X[t][v_i, :] - self._X[t][v_j, :])
adj_mtx_r[v_i, v_j] = self._model.predict(np.reshape(feat, [1, -1]))[0]
except:
pdb.set_trace()
return adj_mtx_r
|
DynamicGEM
|
positive
|
def init_rule(self, new_rule, new=True):
""" Copies some necessary non-config state from an exiting rule to a new rule. """
if not new:
self.scheduler.remove_job(job_id=new_rule['name'])
try:
<DeepExtract>
rule_es = elasticsearch_client(new_rule)
if rule_es.is_atleastfive():
new_rule['five'] = True
else:
new_rule['five'] = False
return
new_filters = []
for es_filter in new_rule.get('filter', []):
if es_filter.get('query'):
new_filters.append(es_filter['query'])
else:
new_filters.append(es_filter)
new_rule['filter'] = new_filters
</DeepExtract>
except TransportError as e:
elastalert_logger.warning('Error connecting to Elasticsearch for rule {}. The rule has been disabled.'.format(new_rule['name']))
<DeepExtract>
email_body = text
rule_name = None
if new_rule:
rule_name = new_rule['name']
elif rule_file:
rule_name = rule_file
if e and rule_name:
if not subject:
subject = 'Uncaught exception in ElastAlert - %s' % rule_name
email_body += '\n\n'
email_body += 'The rule %s has raised an uncaught exception.\n\n' % rule_name
if self.disable_rules_on_error:
modified = ' or if the rule config file has been modified' if not self.args.pin_rules else ''
email_body += 'It has been disabled and will be re-enabled when ElastAlert restarts%s.\n\n' % modified
tb = traceback.format_exc()
email_body += tb
if isinstance(self.notify_email, str):
self.notify_email = [self.notify_email]
email = MIMEText(email_body)
email['Subject'] = subject if subject else 'ElastAlert notification'
recipients = self.notify_email
if new_rule and new_rule.get('notify_email'):
if isinstance(new_rule['notify_email'], str):
new_rule['notify_email'] = [new_rule['notify_email']]
recipients = recipients + new_rule['notify_email']
recipients = list(set(recipients))
email['To'] = ', '.join(recipients)
email['From'] = self.from_addr
email['Reply-To'] = self.conf.get('email_reply_to', email['To'])
try:
smtp = SMTP(self.smtp_host)
smtp.sendmail(self.from_addr, recipients, email.as_string())
except (SMTPException, error) as e:
self.handle_error('Error connecting to SMTP host: %s' % e, {'email_body': email_body})
</DeepExtract>
return False
<DeepExtract>
if not new_rule.get('filter_by_list', True):
return
if 'blacklist' in new_rule:
listname = 'blacklist'
elif 'whitelist' in new_rule:
listname = 'whitelist'
else:
return
filters = new_rule['filter']
additional_terms = []
for term in new_rule[listname]:
if not term.startswith('/') or not term.endswith('/'):
additional_terms.append(new_rule['compare_key'] + ':"' + term + '"')
else:
additional_terms.append(new_rule['compare_key'] + ':' + term)
if listname == 'whitelist':
query = 'NOT ' + ' AND NOT '.join(additional_terms)
else:
query = ' OR '.join(additional_terms)
query_str_filter = {'query_string': {'query': query}}
if self.writeback_es.is_atleastfive():
filters.append(query_str_filter)
else:
filters.append({'query': query_str_filter})
logging.debug('Enhanced filter with {} terms: {}'.format(listname, str(query_str_filter)))
</DeepExtract>
if 'top_count_keys' in new_rule and new_rule.get('raw_count_keys', True):
if self.string_multi_field_name:
string_multi_field_name = self.string_multi_field_name
elif self.writeback_es.is_atleastfive():
string_multi_field_name = '.keyword'
else:
string_multi_field_name = '.raw'
for (i, key) in enumerate(new_rule['top_count_keys']):
if not key.endswith(string_multi_field_name):
new_rule['top_count_keys'][i] += string_multi_field_name
if 'download_dashboard' in new_rule['filter']:
<DeepExtract>
try:
db = new_rule.get('dashboard_schema')
if not db:
db = self.get_dashboard(new_rule, new_rule['filter']['download_dashboard'])
filters = kibana.filters_from_dashboard(db)
except EAException:
db_filters = None
db_filters = filters
</DeepExtract>
if db_filters is not None:
new_rule['filter'] = db_filters
else:
raise EAException('Could not download filters from %s' % new_rule['filter']['download_dashboard'])
blank_rule = {'agg_matches': [], 'aggregate_alert_time': {}, 'current_aggregate_id': {}, 'processed_hits': {}, 'run_every': self.run_every, 'has_run_once': False}
rule = blank_rule
if not new:
for rule in self.rules:
if rule['name'] == new_rule['name']:
break
else:
rule = blank_rule
copy_properties = ['agg_matches', 'current_aggregate_id', 'aggregate_alert_time', 'processed_hits', 'starttime', 'minimum_starttime', 'has_run_once']
for prop in copy_properties:
if prop not in rule:
continue
new_rule[prop] = rule[prop]
job = self.scheduler.add_job(self.handle_rule_execution, 'interval', args=[new_rule], seconds=new_rule['run_every'].total_seconds(), id=new_rule['name'], max_instances=1, jitter=5)
job.modify(next_run_time=datetime.datetime.now() + datetime.timedelta(seconds=random.randint(0, 15)))
return new_rule
|
def init_rule(self, new_rule, new=True):
""" Copies some necessary non-config state from an exiting rule to a new rule. """
if not new:
self.scheduler.remove_job(job_id=new_rule['name'])
try:
rule_es = elasticsearch_client(new_rule)
if rule_es.is_atleastfive():
new_rule['five'] = True
else:
new_rule['five'] = False
return
new_filters = []
for es_filter in new_rule.get('filter', []):
if es_filter.get('query'):
new_filters.append(es_filter['query'])
else:
new_filters.append(es_filter)
new_rule['filter'] = new_filters
except TransportError as e:
elastalert_logger.warning('Error connecting to Elasticsearch for rule {}. The rule has been disabled.'.format(new_rule['name']))
email_body = text
rule_name = None
if new_rule:
rule_name = new_rule['name']
elif rule_file:
rule_name = rule_file
if e and rule_name:
if not subject:
subject = 'Uncaught exception in ElastAlert - %s' % rule_name
email_body += '\n\n'
email_body += 'The rule %s has raised an uncaught exception.\n\n' % rule_name
if self.disable_rules_on_error:
modified = ' or if the rule config file has been modified' if not self.args.pin_rules else ''
email_body += 'It has been disabled and will be re-enabled when ElastAlert restarts%s.\n\n' % modified
tb = traceback.format_exc()
email_body += tb
if isinstance(self.notify_email, str):
self.notify_email = [self.notify_email]
email = MIMEText(email_body)
email['Subject'] = subject if subject else 'ElastAlert notification'
recipients = self.notify_email
if new_rule and new_rule.get('notify_email'):
if isinstance(new_rule['notify_email'], str):
new_rule['notify_email'] = [new_rule['notify_email']]
recipients = recipients + new_rule['notify_email']
recipients = list(set(recipients))
email['To'] = ', '.join(recipients)
email['From'] = self.from_addr
email['Reply-To'] = self.conf.get('email_reply_to', email['To'])
try:
smtp = SMTP(self.smtp_host)
smtp.sendmail(self.from_addr, recipients, email.as_string())
except (SMTPException, error) as e:
self.handle_error('Error connecting to SMTP host: %s' % e, {'email_body': email_body})
return False
if not new_rule.get('filter_by_list', True):
return
if 'blacklist' in new_rule:
listname = 'blacklist'
elif 'whitelist' in new_rule:
listname = 'whitelist'
else:
return
filters = new_rule['filter']
additional_terms = []
for term in new_rule[listname]:
if not term.startswith('/') or not term.endswith('/'):
additional_terms.append(new_rule['compare_key'] + ':"' + term + '"')
else:
additional_terms.append(new_rule['compare_key'] + ':' + term)
if listname == 'whitelist':
query = 'NOT ' + ' AND NOT '.join(additional_terms)
else:
query = ' OR '.join(additional_terms)
query_str_filter = {'query_string': {'query': query}}
if self.writeback_es.is_atleastfive():
filters.append(query_str_filter)
else:
filters.append({'query': query_str_filter})
logging.debug('Enhanced filter with {} terms: {}'.format(listname, str(query_str_filter)))
if 'top_count_keys' in new_rule and new_rule.get('raw_count_keys', True):
if self.string_multi_field_name:
string_multi_field_name = self.string_multi_field_name
elif self.writeback_es.is_atleastfive():
string_multi_field_name = '.keyword'
else:
string_multi_field_name = '.raw'
for (i, key) in enumerate(new_rule['top_count_keys']):
if not key.endswith(string_multi_field_name):
new_rule['top_count_keys'][i] += string_multi_field_name
if 'download_dashboard' in new_rule['filter']:
try:
db = new_rule.get('dashboard_schema')
if not db:
db = self.get_dashboard(new_rule, new_rule['filter']['download_dashboard'])
filters = kibana.filters_from_dashboard(db)
except EAException:
db_filters = None
db_filters = filters
if db_filters is not None:
new_rule['filter'] = db_filters
else:
raise EAException('Could not download filters from %s' % new_rule['filter']['download_dashboard'])
blank_rule = {'agg_matches': [], 'aggregate_alert_time': {}, 'current_aggregate_id': {}, 'processed_hits': {}, 'run_every': self.run_every, 'has_run_once': False}
rule = blank_rule
if not new:
for rule in self.rules:
if rule['name'] == new_rule['name']:
break
else:
rule = blank_rule
copy_properties = ['agg_matches', 'current_aggregate_id', 'aggregate_alert_time', 'processed_hits', 'starttime', 'minimum_starttime', 'has_run_once']
for prop in copy_properties:
if prop not in rule:
continue
new_rule[prop] = rule[prop]
job = self.scheduler.add_job(self.handle_rule_execution, 'interval', args=[new_rule], seconds=new_rule['run_every'].total_seconds(), id=new_rule['name'], max_instances=1, jitter=5)
job.modify(next_run_time=datetime.datetime.now() + datetime.timedelta(seconds=random.randint(0, 15)))
return new_rule
|
elastalert
|
positive
|
def get_entry_with_id(self, id, args):
<DeepExtract>
columns = '*'
if with_rowid:
columns = 'rowid, ' + columns
query = 'SELECT {columns} FROM {table_name} WHERE rowid = ?;'.format(table_name=DataAccess.TABLE_PAIRS, columns=columns)
pair = self.db.execute(query, (id,)).fetchone()
</DeepExtract>
patient_id = pair[0]
result = {'pair': pair}
return (patient_id, result)
|
def get_entry_with_id(self, id, args):
columns = '*'
if with_rowid:
columns = 'rowid, ' + columns
query = 'SELECT {columns} FROM {table_name} WHERE rowid = ?;'.format(table_name=DataAccess.TABLE_PAIRS, columns=columns)
pair = self.db.execute(query, (id,)).fetchone()
patient_id = pair[0]
result = {'pair': pair}
return (patient_id, result)
|
drnet
|
positive
|
def setUp(self):
<DeepExtract>
config = configparser.ConfigParser(interpolation=None)
config['storage'] = {'host_file_separator': ','}
config['cassandra'] = {'config_file': os.path.join(os.path.dirname(__file__), 'resources/yaml/work/cassandra_with_tokens_and_autobootstrap.yaml'), 'start_cmd': '/etc/init.d/cassandra start', 'stop_cmd': '/etc/init.d/cassandra stop', 'is_ccm': '1', 'resolve_ip_addresses': 'True', 'use_sudo': 'True'}
config['grpc'] = {'enabled': '0'}
config['kubernetes'] = {'enabled': '0', 'cassandra_url': 'None', 'use_mgmt_api': 'False'}
self.config = config
</DeepExtract>
self.medusa_config = MedusaConfig(file_path=None, storage=_namedtuple_from_dict(StorageConfig, self.config['storage']), monitoring={}, cassandra=_namedtuple_from_dict(CassandraConfig, self.config['cassandra']), ssh=None, checks=None, logging=None, grpc=_namedtuple_from_dict(GrpcConfig, self.config['grpc']), kubernetes=_namedtuple_from_dict(KubernetesConfig, self.config['kubernetes']))
self.tmp_dir = pathlib.Path(tempfile.gettempdir())
self.default_restore_job = RestoreJob(cluster_backup=Mock(), config=self.medusa_config, temp_dir=self.tmp_dir, host_list=None, seed_target=None, keep_auth=False, verify=False, parallel_restores=None)
|
def setUp(self):
config = configparser.ConfigParser(interpolation=None)
config['storage'] = {'host_file_separator': ','}
config['cassandra'] = {'config_file': os.path.join(os.path.dirname(__file__), 'resources/yaml/work/cassandra_with_tokens_and_autobootstrap.yaml'), 'start_cmd': '/etc/init.d/cassandra start', 'stop_cmd': '/etc/init.d/cassandra stop', 'is_ccm': '1', 'resolve_ip_addresses': 'True', 'use_sudo': 'True'}
config['grpc'] = {'enabled': '0'}
config['kubernetes'] = {'enabled': '0', 'cassandra_url': 'None', 'use_mgmt_api': 'False'}
self.config = config
self.medusa_config = MedusaConfig(file_path=None, storage=_namedtuple_from_dict(StorageConfig, self.config['storage']), monitoring={}, cassandra=_namedtuple_from_dict(CassandraConfig, self.config['cassandra']), ssh=None, checks=None, logging=None, grpc=_namedtuple_from_dict(GrpcConfig, self.config['grpc']), kubernetes=_namedtuple_from_dict(KubernetesConfig, self.config['kubernetes']))
self.tmp_dir = pathlib.Path(tempfile.gettempdir())
self.default_restore_job = RestoreJob(cluster_backup=Mock(), config=self.medusa_config, temp_dir=self.tmp_dir, host_list=None, seed_target=None, keep_auth=False, verify=False, parallel_restores=None)
|
cassandra-medusa
|
positive
|
def validate(self, value):
value = super().validate(value)
if not self.indexed or value is None:
return value
if not self.repeated:
<DeepExtract>
if len(value) > _max_indexed_length and len(value.encode(self.encoding)) > _max_indexed_length:
raise ValueError(f'String value is longer than the maximum allowed length ({_max_indexed_length}) for indexed properties. Set indexed to False if the value should not be indexed.')
</DeepExtract>
return value
|
def validate(self, value):
value = super().validate(value)
if not self.indexed or value is None:
return value
if not self.repeated:
if len(value) > _max_indexed_length and len(value.encode(self.encoding)) > _max_indexed_length:
raise ValueError(f'String value is longer than the maximum allowed length ({_max_indexed_length}) for indexed properties. Set indexed to False if the value should not be indexed.')
return value
|
anom-py
|
positive
|
def fit(self, X, y=None, sample_weight=None):
"""Applies the chainladder technique to triangle **X**
Parameters
----------
X : Triangle
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Ignored
sample_weight : Triangle
For exposure-based methods, the exposure to be used for fitting
Returns
-------
self : object
Returns the instance itself.
"""
<DeepExtract>
obj = X.copy()
if 'ldf_' not in obj:
obj = Development().fit_transform(obj)
if len(obj.ddims) - len(obj.ldf_.ddims) == 1:
obj = TailConstant().fit_transform(obj)
self.X_ = obj.val_to_dev()
</DeepExtract>
<DeepExtract>
if sample_weight and X.shape[:-1] != sample_weight.shape[:-1] and (sample_weight.shape[2] != 1) and (sample_weight.shape[0] > 1):
warnings.warn('X and sample_weight are not aligned. Broadcasting may occur.\n')
</DeepExtract>
if sample_weight:
self.sample_weight_ = sample_weight.set_backend(self.X_.array_backend)
else:
self.sample_weight_ = sample_weight
return self
|
def fit(self, X, y=None, sample_weight=None):
"""Applies the chainladder technique to triangle **X**
Parameters
----------
X : Triangle
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Ignored
sample_weight : Triangle
For exposure-based methods, the exposure to be used for fitting
Returns
-------
self : object
Returns the instance itself.
"""
obj = X.copy()
if 'ldf_' not in obj:
obj = Development().fit_transform(obj)
if len(obj.ddims) - len(obj.ldf_.ddims) == 1:
obj = TailConstant().fit_transform(obj)
self.X_ = obj.val_to_dev()
if sample_weight and X.shape[:-1] != sample_weight.shape[:-1] and (sample_weight.shape[2] != 1) and (sample_weight.shape[0] > 1):
warnings.warn('X and sample_weight are not aligned. Broadcasting may occur.\n')
if sample_weight:
self.sample_weight_ = sample_weight.set_backend(self.X_.array_backend)
else:
self.sample_weight_ = sample_weight
return self
|
chainladder-python
|
positive
|
def run_model_cfg(args, im, check_blobs):
workspace.ResetWorkspace()
<DeepExtract>
model = test_engine.initialize_model_from_cfg(cfg.TEST.WEIGHTS)
blobs = mutils.get_ws_blobs()
(model, _) = (model, blobs)
</DeepExtract>
with c2_utils.NamedCudaScope(0):
(cls_boxes, cls_segms, cls_keyps) = test_engine.im_detect_all(model, im, None, None)
(boxes, segms, keypoints, classes) = vis_utils.convert_from_cls_format(cls_boxes, cls_segms, cls_keyps)
<DeepExtract>
indices = np.argsort(boxes[:, -1])[::-1]
if boxes is not None:
boxes = boxes[indices, :]
if segms is not None:
segms = [segms[x] for x in indices]
if keypoints is not None:
keypoints = [keypoints[x] for x in indices]
if classes is not None:
if isinstance(classes, list):
classes = [classes[x] for x in indices]
else:
classes = classes[indices]
(boxes, segms, keypoints, classes) = (boxes, segms, keypoints, classes)
</DeepExtract>
def _ornone(res):
return np.array(res) if res is not None else np.array([], dtype=np.float32)
with c2_utils.NamedCudaScope(0):
workspace.FeedBlob(core.ScopedName('result_boxes'), _ornone(boxes))
workspace.FeedBlob(core.ScopedName('result_segms'), _ornone(segms))
workspace.FeedBlob(core.ScopedName('result_keypoints'), _ornone(keypoints))
workspace.FeedBlob(core.ScopedName('result_classids'), _ornone(classes))
with c2_utils.NamedCudaScope(0):
<DeepExtract>
ret = {}
for x in check_blobs:
sn = core.ScopedName(x)
if workspace.HasBlob(sn):
ret[x] = workspace.FetchBlob(sn)
else:
ret[x] = None
ret = ret
</DeepExtract>
return ret
|
def run_model_cfg(args, im, check_blobs):
workspace.ResetWorkspace()
model = test_engine.initialize_model_from_cfg(cfg.TEST.WEIGHTS)
blobs = mutils.get_ws_blobs()
(model, _) = (model, blobs)
with c2_utils.NamedCudaScope(0):
(cls_boxes, cls_segms, cls_keyps) = test_engine.im_detect_all(model, im, None, None)
(boxes, segms, keypoints, classes) = vis_utils.convert_from_cls_format(cls_boxes, cls_segms, cls_keyps)
indices = np.argsort(boxes[:, -1])[::-1]
if boxes is not None:
boxes = boxes[indices, :]
if segms is not None:
segms = [segms[x] for x in indices]
if keypoints is not None:
keypoints = [keypoints[x] for x in indices]
if classes is not None:
if isinstance(classes, list):
classes = [classes[x] for x in indices]
else:
classes = classes[indices]
(boxes, segms, keypoints, classes) = (boxes, segms, keypoints, classes)
def _ornone(res):
return np.array(res) if res is not None else np.array([], dtype=np.float32)
with c2_utils.NamedCudaScope(0):
workspace.FeedBlob(core.ScopedName('result_boxes'), _ornone(boxes))
workspace.FeedBlob(core.ScopedName('result_segms'), _ornone(segms))
workspace.FeedBlob(core.ScopedName('result_keypoints'), _ornone(keypoints))
workspace.FeedBlob(core.ScopedName('result_classids'), _ornone(classes))
with c2_utils.NamedCudaScope(0):
ret = {}
for x in check_blobs:
sn = core.ScopedName(x)
if workspace.HasBlob(sn):
ret[x] = workspace.FetchBlob(sn)
else:
ret[x] = None
ret = ret
return ret
|
AIC2018_iamai
|
positive
|
def convert(self, mode):
if mode not in ('xyxy', 'xywh'):
raise ValueError("mode should be 'xyxy' or 'xywh'")
if mode == self.mode:
return self
<DeepExtract>
if self.mode == 'xyxy':
(xmin, ymin, xmax, ymax) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmax, ymax)
elif self.mode == 'xywh':
TO_REMOVE = 1
(xmin, ymin, w, h) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmin + (w - TO_REMOVE).clamp(min=0), ymin + (h - TO_REMOVE).clamp(min=0))
else:
raise RuntimeError('Should not be here')
</DeepExtract>
if mode == 'xyxy':
bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode)
else:
TO_REMOVE = 1
bbox = torch.cat((xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode)
bbox._copy_extra_fields(self)
return bbox
|
def convert(self, mode):
if mode not in ('xyxy', 'xywh'):
raise ValueError("mode should be 'xyxy' or 'xywh'")
if mode == self.mode:
return self
if self.mode == 'xyxy':
(xmin, ymin, xmax, ymax) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmax, ymax)
elif self.mode == 'xywh':
TO_REMOVE = 1
(xmin, ymin, w, h) = self.bbox.split(1, dim=-1)
(xmin, ymin, xmax, ymax) = (xmin, ymin, xmin + (w - TO_REMOVE).clamp(min=0), ymin + (h - TO_REMOVE).clamp(min=0))
else:
raise RuntimeError('Should not be here')
if mode == 'xyxy':
bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode)
else:
TO_REMOVE = 1
bbox = torch.cat((xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1)
bbox = BoxList(bbox, self.size, mode=mode)
bbox._copy_extra_fields(self)
return bbox
|
DetNAS
|
positive
|
def _generate_file_set(self, var=None, start_date=None, end_date=None, domain=None, intvl_in=None, dtype_in_vert=None, dtype_in_time=None, intvl_out=None):
attempted_file_sets = []
for name in var.names:
<DeepExtract>
dtype_lbl = dtype_in_time
if intvl_in == 'daily':
domain += '_daily'
if dtype_in_vert == ETA_STR and name != 'ps':
domain += '_level'
if dtype_in_time == 'inst':
domain += '_inst'
dtype_lbl = 'ts'
if 'monthly_from_' in dtype_in_time:
dtype = dtype_in_time.replace('monthly_from_', '')
dtype_lbl = dtype
else:
dtype = dtype_in_time
dur_str = str(self.data_dur) + 'yr'
if dtype_in_time == 'av':
subdir = intvl_in + '_' + dur_str
else:
subdir = os.path.join(intvl_in, dur_str)
direc = os.path.join(self.data_direc, domain, dtype_lbl, subdir)
data_start_year = times.infer_year(self.data_start_date)
start_year = times.infer_year(start_date)
end_year = times.infer_year(end_date)
files = [os.path.join(direc, io.data_name_gfdl(name, domain, dtype, intvl_in, year, intvl_out, data_start_year, self.data_dur)) for year in range(start_year, end_year + 1)]
files = list(set(files))
files.sort()
file_set = files
</DeepExtract>
attempted_file_sets.append(file_set)
if all([os.path.isfile(filename) for filename in file_set]):
return file_set
raise IOError('Files for the var {0} cannot be located using GFDL post-processing conventions. Attempted using the following sets of paths:\n\n{1}'.format(var, pprint.pformat(attempted_file_sets)))
|
def _generate_file_set(self, var=None, start_date=None, end_date=None, domain=None, intvl_in=None, dtype_in_vert=None, dtype_in_time=None, intvl_out=None):
attempted_file_sets = []
for name in var.names:
dtype_lbl = dtype_in_time
if intvl_in == 'daily':
domain += '_daily'
if dtype_in_vert == ETA_STR and name != 'ps':
domain += '_level'
if dtype_in_time == 'inst':
domain += '_inst'
dtype_lbl = 'ts'
if 'monthly_from_' in dtype_in_time:
dtype = dtype_in_time.replace('monthly_from_', '')
dtype_lbl = dtype
else:
dtype = dtype_in_time
dur_str = str(self.data_dur) + 'yr'
if dtype_in_time == 'av':
subdir = intvl_in + '_' + dur_str
else:
subdir = os.path.join(intvl_in, dur_str)
direc = os.path.join(self.data_direc, domain, dtype_lbl, subdir)
data_start_year = times.infer_year(self.data_start_date)
start_year = times.infer_year(start_date)
end_year = times.infer_year(end_date)
files = [os.path.join(direc, io.data_name_gfdl(name, domain, dtype, intvl_in, year, intvl_out, data_start_year, self.data_dur)) for year in range(start_year, end_year + 1)]
files = list(set(files))
files.sort()
file_set = files
attempted_file_sets.append(file_set)
if all([os.path.isfile(filename) for filename in file_set]):
return file_set
raise IOError('Files for the var {0} cannot be located using GFDL post-processing conventions. Attempted using the following sets of paths:\n\n{1}'.format(var, pprint.pformat(attempted_file_sets)))
|
aospy
|
positive
|
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' % src_dir)
fhindex.write('\n\n\n%s\n\n\n' % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
<DeepExtract>
new_list = [x for x in os.listdir(src_dir) if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for (count, exmpl) in enumerate(new_list):
(docstr_lines, total_lines) = extract_line_count(exmpl, src_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str), unsorted[:, 1].astype(np.float)))
if not len(unsorted):
sorted_listdir = []
sorted_listdir = np.array(unsorted[index][:, 0]).tolist()
</DeepExtract>
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
<DeepExtract>
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir, 'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir, 'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
my_stdout = my_stdout.replace(my_globals['__doc__'], '')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % '\n '.join(my_stdout.split('\n'))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(' - time elapsed : %.2g sec' % time_elapsed)
else:
figure_list = [f[len(image_dir):] for f in glob.glob(image_path.replace('%03d', '[0-9][0-9][0-9]'))]
figure_list.sort()
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail(image_path % carousel_thumbs[first_img][0], carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
make_thumbnail('source/_static/no_image.png', thumb_file, 200, 140)
(docstring, short_desc, end_row) = extract_docstring(example_file)
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
(time_m, time_s) = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set(('{module_short}.{name}'.format(**entry) for entry in example_code_obj.values() if entry['module'].startswith('sklearn')))
backrefs = backrefs
</DeepExtract>
new_fname = os.path.join(src_dir, fname)
<DeepExtract>
if six.PY2:
lines = open(new_fname).readlines()
else:
lines = open(new_fname, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda : next(line_iterator))
for (tok_type, tok_content, _, (erow, _), _) in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
paragraphs = '\n'.join((line.rstrip() for line in docstring.split('\n'))).split('\n\n')
if paragraphs:
if True:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = first_par[:95] + '...' if len(first_par) > 95 else first_par
else:
raise ValueError("Docstring not found by gallery.\nPlease check the layout of your example file:\n {}\n and make sure it's correct".format(new_fname))
else:
first_par = paragraphs[0]
break
(_, snippet, _) = (docstring, first_par, erow + 1 + start_row)
</DeepExtract>
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write('\n\n.. toctree::\n :hidden:\n\n %s/%s\n\n' % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)), file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write('\n.. raw:: html\n\n <div class="clearer"></div>\n ')
|
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' % src_dir)
fhindex.write('\n\n\n%s\n\n\n' % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
new_list = [x for x in os.listdir(src_dir) if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for (count, exmpl) in enumerate(new_list):
(docstr_lines, total_lines) = extract_line_count(exmpl, src_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str), unsorted[:, 1].astype(np.float)))
if not len(unsorted):
sorted_listdir = []
sorted_listdir = np.array(unsorted[index][:, 0]).tolist()
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir, 'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir, 'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
my_stdout = my_stdout.replace(my_globals['__doc__'], '')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % '\n '.join(my_stdout.split('\n'))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(' - time elapsed : %.2g sec' % time_elapsed)
else:
figure_list = [f[len(image_dir):] for f in glob.glob(image_path.replace('%03d', '[0-9][0-9][0-9]'))]
figure_list.sort()
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail(image_path % carousel_thumbs[first_img][0], carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
make_thumbnail('source/_static/no_image.png', thumb_file, 200, 140)
(docstring, short_desc, end_row) = extract_docstring(example_file)
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
(time_m, time_s) = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set(('{module_short}.{name}'.format(**entry) for entry in example_code_obj.values() if entry['module'].startswith('sklearn')))
backrefs = backrefs
new_fname = os.path.join(src_dir, fname)
if six.PY2:
lines = open(new_fname).readlines()
else:
lines = open(new_fname, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda : next(line_iterator))
for (tok_type, tok_content, _, (erow, _), _) in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
paragraphs = '\n'.join((line.rstrip() for line in docstring.split('\n'))).split('\n\n')
if paragraphs:
if True:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = first_par[:95] + '...' if len(first_par) > 95 else first_par
else:
raise ValueError("Docstring not found by gallery.\nPlease check the layout of your example file:\n {}\n and make sure it's correct".format(new_fname))
else:
first_par = paragraphs[0]
break
(_, snippet, _) = (docstring, first_par, erow + 1 + start_row)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write('\n\n.. toctree::\n :hidden:\n\n %s/%s\n\n' % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)), file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write('\n.. raw:: html\n\n <div class="clearer"></div>\n ')
|
acoular
|
positive
|
def __init__(self, objectListView, rowIndex, subItemIndex, rowModel, cellValue, cellBounds, editor):
CellEditEvent.__init__(self, olv_EVT_CELL_EDIT_STARTED)
<DeepExtract>
self.objectListView = objectListView
self.rowIndex = rowIndex
self.subItemIndex = subItemIndex
self.rowModel = rowModel
self.cellValue = cellValue
self.editor = editor
</DeepExtract>
self.cellBounds = cellBounds
|
def __init__(self, objectListView, rowIndex, subItemIndex, rowModel, cellValue, cellBounds, editor):
CellEditEvent.__init__(self, olv_EVT_CELL_EDIT_STARTED)
self.objectListView = objectListView
self.rowIndex = rowIndex
self.subItemIndex = subItemIndex
self.rowModel = rowModel
self.cellValue = cellValue
self.editor = editor
self.cellBounds = cellBounds
|
bookhub
|
positive
|
def create_doc_uniq_target_variable_at_least_specific_coef_variation(self, nbr_val, coef_var_min, max_difference, default_value):
<DeepExtract>
list_nbr_documents = np.random.randint(default_value - max_difference, default_value + max_difference + 1, size=nbr_val)
while list_nbr_documents.std() / list_nbr_documents.mean() < coef_var_min:
index = np.argmax(list_nbr_documents)
if list_nbr_documents[index] < list_nbr_documents.mean():
list_nbr_documents[index] -= 1
else:
list_nbr_documents[index] += 1
nbr_doc_to_generate = list_nbr_documents
</DeepExtract>
return self.generate_doc_time_variable_sensitivity(nbr_doc_to_generate)
|
def create_doc_uniq_target_variable_at_least_specific_coef_variation(self, nbr_val, coef_var_min, max_difference, default_value):
list_nbr_documents = np.random.randint(default_value - max_difference, default_value + max_difference + 1, size=nbr_val)
while list_nbr_documents.std() / list_nbr_documents.mean() < coef_var_min:
index = np.argmax(list_nbr_documents)
if list_nbr_documents[index] < list_nbr_documents.mean():
list_nbr_documents[index] -= 1
else:
list_nbr_documents[index] += 1
nbr_doc_to_generate = list_nbr_documents
return self.generate_doc_time_variable_sensitivity(nbr_doc_to_generate)
|
ee-outliers
|
positive
|
def parse_document_content(self):
if self.check_token(DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken):
<DeepExtract>
event = ScalarEvent(None, None, (True, False), u'', self.peek_token().start_mark, self.peek_token().start_mark)
</DeepExtract>
self.state = self.states.pop()
return event
else:
return self.parse_block_node()
|
def parse_document_content(self):
if self.check_token(DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken):
event = ScalarEvent(None, None, (True, False), u'', self.peek_token().start_mark, self.peek_token().start_mark)
self.state = self.states.pop()
return event
else:
return self.parse_block_node()
|
aws-serverless-workshop-greater-china-region
|
positive
|
@checks(ABINARY2, uintpair)
def r_opening(image, size, origin=0):
"""Opening with rectangular structuring element using maximum/minimum filter"""
<DeepExtract>
assert image.dtype == 'B' or image.dtype == 'i' or image.dtype == dtype('bool'), 'array should be binary, is %s %s' % (image.dtype, image.shape)
assert amin(image) >= 0 and amax(image) <= 1, 'array should be binary, has values %g to %g' % (amin(image), amax(image))
</DeepExtract>
<DeepExtract>
image = filters.minimum_filter(image, size, origin=origin)
</DeepExtract>
return r_dilation(image, size, origin=origin)
|
@checks(ABINARY2, uintpair)
def r_opening(image, size, origin=0):
"""Opening with rectangular structuring element using maximum/minimum filter"""
assert image.dtype == 'B' or image.dtype == 'i' or image.dtype == dtype('bool'), 'array should be binary, is %s %s' % (image.dtype, image.shape)
assert amin(image) >= 0 and amax(image) <= 1, 'array should be binary, has values %g to %g' % (amin(image), amax(image))
image = filters.minimum_filter(image, size, origin=origin)
return r_dilation(image, size, origin=origin)
|
deep_ocr
|
positive
|
def _process_next(node):
children = []
_node = node
while True:
next_node = _node.find('next')
if next_node is None:
break
_children = next_node.getchildren()
if len(_children) != 1:
raise BlocklyXmlParserException('Incorrect number of children ({}) for BlocklyXmlParser._process_next()'.format(len(_children)))
_node = _children[0]
children.append(_node)
data = {'children': [self._call_method(node)], 'data': {}}
if children:
<DeepExtract>
if 'children' not in data:
data['children'] = []
for child in None.getchildren() if children is None else children:
child_data = self.visit(child)
if child_data is not None:
data['children'].append(child_data)
</DeepExtract>
return data
|
def _process_next(node):
children = []
_node = node
while True:
next_node = _node.find('next')
if next_node is None:
break
_children = next_node.getchildren()
if len(_children) != 1:
raise BlocklyXmlParserException('Incorrect number of children ({}) for BlocklyXmlParser._process_next()'.format(len(_children)))
_node = _children[0]
children.append(_node)
data = {'children': [self._call_method(node)], 'data': {}}
if children:
if 'children' not in data:
data['children'] = []
for child in None.getchildren() if children is None else children:
child_data = self.visit(child)
if child_data is not None:
data['children'].append(child_data)
return data
|
django-business-logic
|
positive
|
def test_serializationErrorTraceback(self):
"""
If serialization fails in L{Logger.write}, a traceback is logged,
along with a C{eliot:serialization_failure} message for debugging
purposes.
"""
<DeepExtract>
logger = Logger()
logger._destinations = Destinations()
written = []
logger._destinations.add(written.append)
(logger, written) = (logger, written)
</DeepExtract>
def raiser(i):
raise RuntimeError('oops')
serializer = _MessageSerializer([Field.forValue('message_type', 'mymessage', 'The type'), Field('fail', raiser, 'Serialization fail')])
message = {'message_type': 'mymessage', 'fail': 'will'}
logger.write(message, serializer)
self.assertEqual(len(written), 2)
tracebackMessage = written[0]
assertContainsFields(self, tracebackMessage, {'exception': '%s.RuntimeError' % (RuntimeError.__module__,), 'message_type': 'eliot:traceback'})
self.assertIn('RuntimeError: oops', tracebackMessage['traceback'])
assertContainsFields(self, written[1], {'message_type': 'eliot:serialization_failure'})
self.assertEqual(eval(written[1]['message']), dict(((repr(key), repr(value)) for (key, value) in message.items())))
|
def test_serializationErrorTraceback(self):
"""
If serialization fails in L{Logger.write}, a traceback is logged,
along with a C{eliot:serialization_failure} message for debugging
purposes.
"""
logger = Logger()
logger._destinations = Destinations()
written = []
logger._destinations.add(written.append)
(logger, written) = (logger, written)
def raiser(i):
raise RuntimeError('oops')
serializer = _MessageSerializer([Field.forValue('message_type', 'mymessage', 'The type'), Field('fail', raiser, 'Serialization fail')])
message = {'message_type': 'mymessage', 'fail': 'will'}
logger.write(message, serializer)
self.assertEqual(len(written), 2)
tracebackMessage = written[0]
assertContainsFields(self, tracebackMessage, {'exception': '%s.RuntimeError' % (RuntimeError.__module__,), 'message_type': 'eliot:traceback'})
self.assertIn('RuntimeError: oops', tracebackMessage['traceback'])
assertContainsFields(self, written[1], {'message_type': 'eliot:serialization_failure'})
self.assertEqual(eval(written[1]['message']), dict(((repr(key), repr(value)) for (key, value) in message.items())))
|
eliot
|
positive
|
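Editor's note: the inlined setup in the eliot test replaces the logger's destinations with a plain list so the test can inspect what was written. Below is a library-free sketch of that capture pattern; MiniLogger is a hypothetical stand-in and does not mimic eliot's real classes.

# Stand-in sketch of capturing structured log messages in a list for assertions;
# MiniLogger is hypothetical and only mirrors the write-to-destinations idea.
class MiniLogger:
    def __init__(self):
        self._destinations = []

    def add_destination(self, dest):
        self._destinations.append(dest)

    def write(self, message):
        for dest in self._destinations:
            dest(message)

written = []
logger = MiniLogger()
logger.add_destination(written.append)
logger.write({'message_type': 'mymessage', 'fail': 'will'})
assert written[0]['message_type'] == 'mymessage'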
def read_line(self):
"""
Reads the next line of `self._current_file` and
        returns it without the trailing new line (`\n`)
(and any trailing whitespaces).
If the file was entirely read, closes it and
continues to read the file that was previously being read (if any).
Returns `None` if there is no file left to read.
"""
line = self._current_file.readline()
while line == '':
<DeepExtract>
if self._current_file is not None:
self._current_file.close()
if len(self._opened_files) > 0:
self._current_file = self._opened_files.pop()
else:
self._current_file = None
</DeepExtract>
if self._current_file is None:
return None
line = self._current_file.readline()
line = line.rstrip()
self._last_read_line = line
return line
|
def read_line(self):
"""
Reads the next line of `self._current_file` and
        returns it without the trailing new line (`\n`)
(and any trailing whitespaces).
If the file was entirely read, closes it and
continues to read the file that was previously being read (if any).
Returns `None` if there is no file left to read.
"""
line = self._current_file.readline()
while line == '':
if self._current_file is not None:
self._current_file.close()
if len(self._opened_files) > 0:
self._current_file = self._opened_files.pop()
else:
self._current_file = None
if self._current_file is None:
return None
line = self._current_file.readline()
line = line.rstrip()
self._last_read_line = line
return line
|
Chatette
|
positive
|
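Editor's note: read_line above pops back to the previously opened file whenever the current one is exhausted, the classic include-stack pattern. A minimal standalone sketch of just that mechanism follows; StackedReader is hypothetical and unrelated to Chatette's own classes.

# Minimal include-stack reader sketch; StackedReader only illustrates the
# "finish the current file, resume the previous one" idea.
class StackedReader:
    def __init__(self):
        self._opened_files = []
        self._current_file = None

    def open_file(self, path):
        """Start reading `path`, remembering the file that was being read."""
        if self._current_file is not None:
            self._opened_files.append(self._current_file)
        self._current_file = open(path, 'r')

    def read_line(self):
        """Return the next stripped line, or None when every file is exhausted."""
        while self._current_file is not None:
            line = self._current_file.readline()
            if line != '':
                return line.rstrip()
            self._current_file.close()
            self._current_file = self._opened_files.pop() if self._opened_files else None
        return None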
def validate_bucket_path(path):
"""Validate a Google Cloud Storage bucket path.
Args:
path: a Google Storage bucket path. It should have form '/bucket'.
Raises:
ValueError: if path is invalid.
"""
<DeepExtract>
if not path:
raise ValueError('Path is empty')
if not isinstance(path, basestring):
raise TypeError('Path should be a string but is %s (%s).' % (path.__class__, path))
</DeepExtract>
if not _GCS_BUCKET_PATH_REGEX.match(path):
raise ValueError('Bucket should have format /bucket but got %s' % path)
|
def validate_bucket_path(path):
"""Validate a Google Cloud Storage bucket path.
Args:
path: a Google Storage bucket path. It should have form '/bucket'.
Raises:
ValueError: if path is invalid.
"""
if not path:
raise ValueError('Path is empty')
if not isinstance(path, basestring):
raise TypeError('Path should be a string but is %s (%s).' % (path.__class__, path))
if not _GCS_BUCKET_PATH_REGEX.match(path):
raise ValueError('Bucket should have format /bucket but got %s' % path)
|
appengine-gcs-blobstore-python
|
positive
|
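Editor's note: validate_bucket_path above layers a generic non-empty/string check under a bucket-specific regex. A hedged Python 3 sketch of the same two-step validation follows; the regex below is an assumption about the '/bucket' format, not the pattern defined in the repo.

# Hedged sketch of two-step bucket path validation; the regex is a guess at
# the '/bucket' format and is not copied from appengine-gcs-blobstore-python.
import re

_BUCKET_PATH_RE = re.compile(r'^/[a-z0-9][a-z0-9._-]{1,100}$')  # assumed pattern

def validate_bucket_path(path):
    """Raise ValueError/TypeError if `path` is not a plausible '/bucket' path."""
    if not path:
        raise ValueError('Path is empty')
    if not isinstance(path, str):
        raise TypeError('Path should be a string but is %s (%s).' % (path.__class__, path))
    if not _BUCKET_PATH_RE.match(path):
        raise ValueError('Bucket should have format /bucket but got %s' % path)

validate_bucket_path('/my-bucket')  # passes silently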