| before (string, 0–955k chars) | after (string, 0–877k chars) | repo (string, 1–74 chars) | type (string, 1 class) |
|---|---|---|---|
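Each row below pairs a function whose inlined regions are wrapped in `<DeepExtract>…</DeepExtract>` markers (`before`) with the same function after the markers are stripped (`after`), plus the source `repo` and a `type` label. As a rough illustration only, the sketch below shows how records with this schema might be consumed; the dataset identifier `user/deepextract-pairs` and the use of the `datasets` library are assumptions, not part of this page.

```python
# Hypothetical usage sketch: iterate (before, after, repo, type) records.
# "user/deepextract-pairs" is a placeholder id, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/deepextract-pairs", split="train")

for record in ds.select(range(3)):
    marked = record["before"]   # code containing <DeepExtract> ... </DeepExtract> markers
    inlined = record["after"]   # same code with the markers removed
    assert "<DeepExtract>" not in inlined
    print(record["repo"], record["type"], len(marked), len(inlined))
```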
def test_getitem():
<DeepExtract>
global _current_test
_current_test = 'a = b[c]'
if ast_text is None:
ast_text = 'a = b[c]'
ast = parse(ast_text)
code = compile('a = b[c]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
</DeepExtract>
<DeepExtract>
global _current_test
_current_test = 'a = b[c:]'
if ast_text is None:
ast_text = 'a = b[c:]'
ast = parse(ast_text)
code = compile('a = b[c:]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
</DeepExtract>
<DeepExtract>
global _current_test
_current_test = 'a = b[:c]'
if ast_text is None:
ast_text = 'a = b[:c]'
ast = parse(ast_text)
code = compile('a = b[:c]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
</DeepExtract>
<DeepExtract>
global _current_test
_current_test = 'a = b[c::]'
if ast_text is None:
ast_text = 'a = b[c::]'
ast = parse(ast_text)
code = compile('a = b[c::]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
</DeepExtract>
<DeepExtract>
global _current_test
_current_test = 'a = b[c:d]'
if ast_text is None:
ast_text = 'a = b[c:d]'
ast = parse(ast_text)
code = compile('a = b[c:d]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
</DeepExtract>
<DeepExtract>
global _current_test
_current_test = 'a = b[c:d:e]'
if ast_text is None:
ast_text = 'a = b[c:d:e]'
ast = parse(ast_text)
code = compile('a = b[c:d:e]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
</DeepExtract>
<DeepExtract>
global _current_test
_current_test = 'a = b[c, d]'
if ast_text is None:
ast_text = 'a = b[c, d]'
ast = parse(ast_text)
code = compile('a = b[c, d]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
</DeepExtract>
<DeepExtract>
global _current_test
_current_test = 'a = b[c:, d]'
if ast_text is None:
ast_text = 'a = b[c:, d]'
ast = parse(ast_text)
code = compile('a = b[c:, d]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
</DeepExtract>
<DeepExtract>
global _current_test
_current_test = 'a = b[c:d:e, f:g:h, i:j:k]'
if ast_text is None:
ast_text = 'a = b[c:d:e, f:g:h, i:j:k]'
ast = parse(ast_text)
code = compile('a = b[c:d:e, f:g:h, i:j:k]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
</DeepExtract>
<DeepExtract>
global _current_test
_current_test = 'a = b[c + d][e]'
if ast_text is None:
ast_text = 'a = b[c + d][e]'
ast = parse(ast_text)
code = compile('a = b[c + d][e]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
</DeepExtract>
|
def test_getitem():
global _current_test
_current_test = 'a = b[c]'
if ast_text is None:
ast_text = 'a = b[c]'
ast = parse(ast_text)
code = compile('a = b[c]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
global _current_test
_current_test = 'a = b[c:]'
if ast_text is None:
ast_text = 'a = b[c:]'
ast = parse(ast_text)
code = compile('a = b[c:]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
global _current_test
_current_test = 'a = b[:c]'
if ast_text is None:
ast_text = 'a = b[:c]'
ast = parse(ast_text)
code = compile('a = b[:c]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
global _current_test
_current_test = 'a = b[c::]'
if ast_text is None:
ast_text = 'a = b[c::]'
ast = parse(ast_text)
code = compile('a = b[c::]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
global _current_test
_current_test = 'a = b[c:d]'
if ast_text is None:
ast_text = 'a = b[c:d]'
ast = parse(ast_text)
code = compile('a = b[c:d]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
global _current_test
_current_test = 'a = b[c:d:e]'
if ast_text is None:
ast_text = 'a = b[c:d:e]'
ast = parse(ast_text)
code = compile('a = b[c:d:e]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
global _current_test
_current_test = 'a = b[c, d]'
if ast_text is None:
ast_text = 'a = b[c, d]'
ast = parse(ast_text)
code = compile('a = b[c, d]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
global _current_test
_current_test = 'a = b[c:, d]'
if ast_text is None:
ast_text = 'a = b[c:, d]'
ast = parse(ast_text)
code = compile('a = b[c:, d]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
global _current_test
_current_test = 'a = b[c:d:e, f:g:h, i:j:k]'
if ast_text is None:
ast_text = 'a = b[c:d:e, f:g:h, i:j:k]'
ast = parse(ast_text)
code = compile('a = b[c:d:e, f:g:h, i:j:k]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
global _current_test
_current_test = 'a = b[c + d][e]'
if ast_text is None:
ast_text = 'a = b[c + d][e]'
ast = parse(ast_text)
code = compile('a = b[c + d][e]', '<test>', 'exec')
decompiled_ast = Module(body=pycode_to_body(code, DecompilationContext()))
compare(decompiled_ast, ast)
|
codetransformer
|
positive
|
def preprocess_image_ten_crop(image_buffer, output_height, output_width, num_channels):
image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
<DeepExtract>
shape = tf.shape(image)
(height, width) = (shape[0], shape[1])
(new_height, new_width) = _smallest_size_at_least(height, width, _RESIZE_MIN)
image = _resize_image(image, new_height, new_width)
</DeepExtract>
<DeepExtract>
def _crop(img, center_offset):
img = tf.image.extract_glimpse([img], [output_width, output_height], offsets=tf.to_float([center_offset]), centered=False, normalized=False)
images = tf.squeeze(img, 0)
def _crop5(img):
im_shape = tf.shape(image)
(height, width) = (im_shape[0], im_shape[1])
(ch, cw) = (tf.to_int32(height / 2), tf.to_int32(width / 2))
(hh, hw) = (tf.to_int32(output_height / 2), tf.to_int32(output_width / 2))
ct = _crop(img, [ch, cw])
lu = _crop(img, [hh, hw])
ld = _crop(img, [height - hh, hw])
ru = _crop(img, [hh, width - hw])
rd = _crop(img, [height - hh, width - hw])
images = tf.stack([lu, ru, ld, rd, ct])
lhs = _crop5(image)
rhs = tf.image.flip_left_right(lhs)
images = tf.concat([lhs, rhs], axis=0)
</DeepExtract>
images.set_shape([10, output_height, output_width, num_channels])
images = tf.map_fn(lambda x: _mean_image_subtraction(x, _CHANNEL_MEANS, num_channels), images)
return images
|
def preprocess_image_ten_crop(image_buffer, output_height, output_width, num_channels):
image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
shape = tf.shape(image)
(height, width) = (shape[0], shape[1])
(new_height, new_width) = _smallest_size_at_least(height, width, _RESIZE_MIN)
image = _resize_image(image, new_height, new_width)
def _crop(img, center_offset):
img = tf.image.extract_glimpse([img], [output_width, output_height], offsets=tf.to_float([center_offset]), centered=False, normalized=False)
images = tf.squeeze(img, 0)
def _crop5(img):
im_shape = tf.shape(image)
(height, width) = (im_shape[0], im_shape[1])
(ch, cw) = (tf.to_int32(height / 2), tf.to_int32(width / 2))
(hh, hw) = (tf.to_int32(output_height / 2), tf.to_int32(output_width / 2))
ct = _crop(img, [ch, cw])
lu = _crop(img, [hh, hw])
ld = _crop(img, [height - hh, hw])
ru = _crop(img, [hh, width - hw])
rd = _crop(img, [height - hh, width - hw])
images = tf.stack([lu, ru, ld, rd, ct])
lhs = _crop5(image)
rhs = tf.image.flip_left_right(lhs)
images = tf.concat([lhs, rhs], axis=0)
images.set_shape([10, output_height, output_width, num_channels])
images = tf.map_fn(lambda x: _mean_image_subtraction(x, _CHANNEL_MEANS, num_channels), images)
return images
|
assembled-cnn
|
positive
|
def colicTest():
frTrain = open('horseColicTraining.txt')
frTest = open('horseColicTest.txt')
trainingSet = []
trainingLabels = []
for line in frTrain.readlines():
currLine = line.strip().split('\t')
lineArr = []
for i in range(21):
lineArr.append(float(currLine[i]))
trainingSet.append(lineArr)
trainingLabels.append(float(currLine[21]))
<DeepExtract>
(m, n) = shape(array(trainingSet))
weights = ones(n)
for i in range(500):
dataIndex = len(list(range(m)))
for j in range(m):
alpha = 4 / (1.0 + i + j) + 0.01
randIndex = int(random.uniform(0, dataIndex))
h = sigmoid(sum(array(trainingSet)[randIndex] * weights))
error = trainingLabels[randIndex] - h
weights = weights + alpha * error * array(trainingSet)[randIndex]
dataIndex -= 1
trainWeights = weights
</DeepExtract>
errorCount = 0
numTestVec = 0.0
for line in frTest.readlines():
numTestVec += 1.0
currLine = line.strip().split('\t')
lineArr = []
for i in range(21):
lineArr.append(float(currLine[i]))
if int(classifyVector(array(lineArr), trainWeights)) != int(currLine[21]):
errorCount += 1
errorRate = float(errorCount) / numTestVec
print('the error rate of this test is: %f' % errorRate)
return errorRate
|
def colicTest():
frTrain = open('horseColicTraining.txt')
frTest = open('horseColicTest.txt')
trainingSet = []
trainingLabels = []
for line in frTrain.readlines():
currLine = line.strip().split('\t')
lineArr = []
for i in range(21):
lineArr.append(float(currLine[i]))
trainingSet.append(lineArr)
trainingLabels.append(float(currLine[21]))
(m, n) = shape(array(trainingSet))
weights = ones(n)
for i in range(500):
dataIndex = len(list(range(m)))
for j in range(m):
alpha = 4 / (1.0 + i + j) + 0.01
randIndex = int(random.uniform(0, dataIndex))
h = sigmoid(sum(array(trainingSet)[randIndex] * weights))
error = trainingLabels[randIndex] - h
weights = weights + alpha * error * array(trainingSet)[randIndex]
dataIndex -= 1
trainWeights = weights
errorCount = 0
numTestVec = 0.0
for line in frTest.readlines():
numTestVec += 1.0
currLine = line.strip().split('\t')
lineArr = []
for i in range(21):
lineArr.append(float(currLine[i]))
if int(classifyVector(array(lineArr), trainWeights)) != int(currLine[21]):
errorCount += 1
errorRate = float(errorCount) / numTestVec
print('the error rate of this test is: %f' % errorRate)
return errorRate
|
AlgorithmsByPython
|
positive
|
def save_conceptcat_graph(filename, conceptcatlist):
<DeepExtract>
count = defaultdict(int)
catcount = defaultdict(int)
for c in conceptcatlist:
count[c] += 1
for c in count.keys():
catcount[c[1]] += 1
cats = ['object', 'part', 'material', 'texture', 'color']
catorder = dict(((c, i) for (i, c) in enumerate(cats)))
sorted_labels = sorted(count.keys(), key=lambda x: (catorder[x[1]], -count[x]))
sorted_labels
svg = bargraph.make_svg_bargraph([label for (label, cat) in sorted_labels], [count[k] for k in sorted_labels], [(c, catcount[c]) for c in cats], **kwargs)
</DeepExtract>
with open(filename, 'w') as f:
f.write(svg)
|
def save_conceptcat_graph(filename, conceptcatlist):
count = defaultdict(int)
catcount = defaultdict(int)
for c in conceptcatlist:
count[c] += 1
for c in count.keys():
catcount[c[1]] += 1
cats = ['object', 'part', 'material', 'texture', 'color']
catorder = dict(((c, i) for (i, c) in enumerate(cats)))
sorted_labels = sorted(count.keys(), key=lambda x: (catorder[x[1]], -count[x]))
sorted_labels
svg = bargraph.make_svg_bargraph([label for (label, cat) in sorted_labels], [count[k] for k in sorted_labels], [(c, catcount[c]) for c in cats], **kwargs)
with open(filename, 'w') as f:
f.write(svg)
|
dissect
|
positive
|
def assert_and_infer_cfg(cache_urls=True, make_immutable=True):
"""Call this function in your script after you have finished setting all cfg
values that are necessary (e.g., merging a config from a file, merging
command line config options, etc.). By default, this function will also
mark the global cfg as immutable to prevent changing the global cfg settings
during script execution (which can lead to hard to debug errors or code
that's harder to understand than is necessary).
"""
if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:
__C.RPN.RPN_ON = True
if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:
__C.TEST.PRECOMPUTED_PROPOSALS = False
if cache_urls:
<DeepExtract>
__C.TRAIN.WEIGHTS = cache_url(__C.TRAIN.WEIGHTS, __C.DOWNLOAD_CACHE)
__C.TEST.WEIGHTS = cache_url(__C.TEST.WEIGHTS, __C.DOWNLOAD_CACHE)
__C.TRAIN.PROPOSAL_FILES = tuple((cache_url(f, __C.DOWNLOAD_CACHE) for f in __C.TRAIN.PROPOSAL_FILES))
__C.TEST.PROPOSAL_FILES = tuple((cache_url(f, __C.DOWNLOAD_CACHE) for f in __C.TEST.PROPOSAL_FILES))
</DeepExtract>
if make_immutable:
cfg.immutable(True)
|
def assert_and_infer_cfg(cache_urls=True, make_immutable=True):
"""Call this function in your script after you have finished setting all cfg
values that are necessary (e.g., merging a config from a file, merging
command line config options, etc.). By default, this function will also
mark the global cfg as immutable to prevent changing the global cfg settings
during script execution (which can lead to hard to debug errors or code
that's harder to understand than is necessary).
"""
if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:
__C.RPN.RPN_ON = True
if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:
__C.TEST.PRECOMPUTED_PROPOSALS = False
if cache_urls:
__C.TRAIN.WEIGHTS = cache_url(__C.TRAIN.WEIGHTS, __C.DOWNLOAD_CACHE)
__C.TEST.WEIGHTS = cache_url(__C.TEST.WEIGHTS, __C.DOWNLOAD_CACHE)
__C.TRAIN.PROPOSAL_FILES = tuple((cache_url(f, __C.DOWNLOAD_CACHE) for f in __C.TRAIN.PROPOSAL_FILES))
__C.TEST.PROPOSAL_FILES = tuple((cache_url(f, __C.DOWNLOAD_CACHE) for f in __C.TEST.PROPOSAL_FILES))
if make_immutable:
cfg.immutable(True)
|
AC-FPN
|
positive
|
def rgb_to_lch(r, g, b):
<DeepExtract>
rgb = lcms.COLORB()
rgb[0] = r
rgb[1] = g
rgb[2] = b
lab = lcms.cmsCIELab(0, 0, 0)
lcms.cmsDoTransform(rgb2lab, rgb, lab, 1)
(l, a, b) = (lab.L, lab.a, lab.b)
</DeepExtract>
lab = lcms.cmsCIELab(l, a, b)
lch = lcms.cmsCIELCh(0, 0, 0)
lcms.cmsLab2LCh(lch, lab)
return (lch.L, lch.C, lch.h)
|
def rgb_to_lch(r, g, b):
rgb = lcms.COLORB()
rgb[0] = r
rgb[1] = g
rgb[2] = b
lab = lcms.cmsCIELab(0, 0, 0)
lcms.cmsDoTransform(rgb2lab, rgb, lab, 1)
(l, a, b) = (lab.L, lab.a, lab.b)
lab = lcms.cmsCIELab(l, a, b)
lch = lcms.cmsCIELCh(0, 0, 0)
lcms.cmsLab2LCh(lch, lab)
return (lch.L, lch.C, lch.h)
|
color-palette
|
positive
|
@deprecate_param('v2.0.0', 'api_key')
def read_civis(table, database, columns=None, use_pandas=False, encoding=None, job_name=None, api_key=None, client=None, credential_id=None, polling_interval=None, archive=False, hidden=True, **kwargs):
"""Read data from a Civis table.
Parameters
----------
table : str
Name of table, including schema, in the database. E.g.
``'my_schema.my_table'``. Schemas or tablenames with periods must
be double quoted, e.g. ``'my_schema."my.table"'``.
database : str or int
Read data from this database. Can be the database name or ID.
columns : list, optional
A list of column names. Column SQL transformations are possible.
If omitted, all columns are exported.
use_pandas : bool, optional
If ``True``, return a :class:`pandas:pandas.DataFrame`. Otherwise,
return a list of results from :func:`python:csv.reader`.
encoding : str, optional
If ``use_pandas`` is ``True``, this parameter is passed to
the ``encoding`` kwarg of :func:`pandas:pandas.read_csv`.
If ``use_pandas`` is ``False``, and if this parameter isn't provided,
then the UTF-8 encoding is assumed. In case you encounter
a ``UnicodeDecodeError``, consider choosing an encoding suitable
for your data; see the `list of standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_.
job_name : str, optional
A name to give the job. If omitted, a random job name will be
used.
api_key : DEPRECATED str, optional
Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
environment variable will be used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
credential_id : str or int, optional
The database credential ID. If ``None``, the default credential
will be used.
polling_interval : int or float, optional
Number of seconds to wait between checks for query completion.
archive : bool, optional (deprecated)
If ``True``, archive the import job as soon as it completes.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
**kwargs : kwargs
Extra keyword arguments are passed into
:func:`pandas:pandas.read_csv` if ``use_pandas`` is ``True`` or
passed into :func:`python:csv.reader` if ``use_pandas`` is
``False``.
Returns
-------
data : :class:`pandas:pandas.DataFrame` or list
A list of rows (with header as first row) if ``use_pandas`` is
``False``, otherwise a :class:`pandas:pandas.DataFrame`. Note that if
``use_pandas`` is ``False``, no parsing of types is performed and
each row will be a list of strings.
Raises
------
ImportError
If ``use_pandas`` is ``True`` and pandas is not installed.
EmptyResultError
If the table is empty.
Examples
--------
>>> table = "schema.table"
>>> database = "my_data"
>>> columns = ["column_a", "ROW_NUMBER() OVER(ORDER BY date) AS order"]
>>> data = read_civis(table, database, columns=columns)
>>> columns = data.pop(0)
>>> col_a_index = columns.index("column_a")
>>> col_a = [row[col_a_index] for row in data]
>>> df = read_civis("schema.table", "my_data", use_pandas=True)
>>> col_a = df["column_a"]
See Also
--------
civis.io.read_civis_sql : Read directly into memory using SQL.
civis.io.civis_to_csv : Write directly to csv.
civis.io.export_to_civis_file : Store a SQL query's results in a Civis file
"""
if use_pandas and NO_PANDAS:
raise ImportError('use_pandas is True but pandas is not installed.')
if archive:
warnings.warn('`archive` is deprecated and will be removed in v2.0.0. Use `hidden` instead.', FutureWarning)
if client is None:
client = APIClient(api_key=api_key)
<DeepExtract>
if columns and (not isinstance(columns, (list, tuple))):
raise TypeError('columns must be a list, tuple or None')
select = ', '.join(columns) if columns is not None else '*'
sql = 'select {} from {}'.format(select, table)
sql = sql
</DeepExtract>
<DeepExtract>
if client is None:
client = APIClient(api_key=api_key)
if use_pandas and NO_PANDAS:
raise ImportError('use_pandas is True but pandas is not installed.')
if archive:
warnings.warn('`archive` is deprecated and will be removed in v2.0.0. Use `hidden` instead.', FutureWarning)
db_id = client.get_database_id(database)
credential_id = credential_id or client.default_credential
(script_id, run_id) = _sql_script(client, sql, db_id, job_name, credential_id, csv_settings={'compression': 'gzip'}, hidden=hidden)
fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False)
if archive:
def f(x):
data = client.scripts.put_sql_archive(script_id, True)
fut.add_done_callback(f)
fut.result()
outputs = client.scripts.get_sql_runs(script_id, run_id)['output']
if not outputs:
raise EmptyResultError('Query {} returned no output.'.format(script_id))
url = outputs[0]['path']
file_id = outputs[0]['file_id']
log.debug('Exported results to Civis file %s (%s)', outputs[0]['output_name'], file_id)
if use_pandas:
kwargs['compression'] = 'gzip'
kwargs['encoding'] = encoding
data = pd.read_csv(url, **kwargs)
else:
response = requests.get(url, stream=True)
response.raise_for_status()
with io.StringIO() as buf:
_decompress_stream(response, buf, write_bytes=False, encoding=encoding or 'utf-8')
buf.seek(0)
data = list(csv.reader(buf, **kwargs))
data = data
</DeepExtract>
return data
|
@deprecate_param('v2.0.0', 'api_key')
def read_civis(table, database, columns=None, use_pandas=False, encoding=None, job_name=None, api_key=None, client=None, credential_id=None, polling_interval=None, archive=False, hidden=True, **kwargs):
"""Read data from a Civis table.
Parameters
----------
table : str
Name of table, including schema, in the database. E.g.
``'my_schema.my_table'``. Schemas or tablenames with periods must
be double quoted, e.g. ``'my_schema."my.table"'``.
database : str or int
Read data from this database. Can be the database name or ID.
columns : list, optional
A list of column names. Column SQL transformations are possible.
If omitted, all columns are exported.
use_pandas : bool, optional
If ``True``, return a :class:`pandas:pandas.DataFrame`. Otherwise,
return a list of results from :func:`python:csv.reader`.
encoding : str, optional
If ``use_pandas`` is ``True``, this parameter is passed to
the ``encoding`` kwarg of :func:`pandas:pandas.read_csv`.
If ``use_pandas`` is ``False``, and if this parameter isn't provided,
then the UTF-8 encoding is assumed. In case you encounter
a ``UnicodeDecodeError``, consider choosing an encoding suitable
for your data; see the `list of standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_.
job_name : str, optional
A name to give the job. If omitted, a random job name will be
used.
api_key : DEPRECATED str, optional
Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
environment variable will be used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
credential_id : str or int, optional
The database credential ID. If ``None``, the default credential
will be used.
polling_interval : int or float, optional
Number of seconds to wait between checks for query completion.
archive : bool, optional (deprecated)
If ``True``, archive the import job as soon as it completes.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
**kwargs : kwargs
Extra keyword arguments are passed into
:func:`pandas:pandas.read_csv` if ``use_pandas`` is ``True`` or
passed into :func:`python:csv.reader` if ``use_pandas`` is
``False``.
Returns
-------
data : :class:`pandas:pandas.DataFrame` or list
A list of rows (with header as first row) if ``use_pandas`` is
``False``, otherwise a :class:`pandas:pandas.DataFrame`. Note that if
``use_pandas`` is ``False``, no parsing of types is performed and
each row will be a list of strings.
Raises
------
ImportError
If ``use_pandas`` is ``True`` and pandas is not installed.
EmptyResultError
If the table is empty.
Examples
--------
>>> table = "schema.table"
>>> database = "my_data"
>>> columns = ["column_a", "ROW_NUMBER() OVER(ORDER BY date) AS order"]
>>> data = read_civis(table, database, columns=columns)
>>> columns = data.pop(0)
>>> col_a_index = columns.index("column_a")
>>> col_a = [row[col_a_index] for row in data]
>>> df = read_civis("schema.table", "my_data", use_pandas=True)
>>> col_a = df["column_a"]
See Also
--------
civis.io.read_civis_sql : Read directly into memory using SQL.
civis.io.civis_to_csv : Write directly to csv.
civis.io.export_to_civis_file : Store a SQL query's results in a Civis file
"""
if use_pandas and NO_PANDAS:
raise ImportError('use_pandas is True but pandas is not installed.')
if archive:
warnings.warn('`archive` is deprecated and will be removed in v2.0.0. Use `hidden` instead.', FutureWarning)
if client is None:
client = APIClient(api_key=api_key)
if columns and (not isinstance(columns, (list, tuple))):
raise TypeError('columns must be a list, tuple or None')
select = ', '.join(columns) if columns is not None else '*'
sql = 'select {} from {}'.format(select, table)
sql = sql
if client is None:
client = APIClient(api_key=api_key)
if use_pandas and NO_PANDAS:
raise ImportError('use_pandas is True but pandas is not installed.')
if archive:
warnings.warn('`archive` is deprecated and will be removed in v2.0.0. Use `hidden` instead.', FutureWarning)
db_id = client.get_database_id(database)
credential_id = credential_id or client.default_credential
(script_id, run_id) = _sql_script(client, sql, db_id, job_name, credential_id, csv_settings={'compression': 'gzip'}, hidden=hidden)
fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False)
if archive:
def f(x):
data = client.scripts.put_sql_archive(script_id, True)
fut.add_done_callback(f)
fut.result()
outputs = client.scripts.get_sql_runs(script_id, run_id)['output']
if not outputs:
raise EmptyResultError('Query {} returned no output.'.format(script_id))
url = outputs[0]['path']
file_id = outputs[0]['file_id']
log.debug('Exported results to Civis file %s (%s)', outputs[0]['output_name'], file_id)
if use_pandas:
kwargs['compression'] = 'gzip'
kwargs['encoding'] = encoding
data = pd.read_csv(url, **kwargs)
else:
response = requests.get(url, stream=True)
response.raise_for_status()
with io.StringIO() as buf:
_decompress_stream(response, buf, write_bytes=False, encoding=encoding or 'utf-8')
buf.seek(0)
data = list(csv.reader(buf, **kwargs))
data = data
return data
|
civis-python
|
positive
|
def iteration(vecs):
<DeepExtract>
new_vecs = hessian_vector_product(self.loss, self.params, vecs, grad_params=self.grad_params)
</DeepExtract>
new_eigval = sum(((v * new_v).sum() for (v, new_v) in zip(vecs, new_vecs)))
<DeepExtract>
norm = sum(((v ** 2).sum() for v in new_vecs)).sqrt()
for v in new_vecs:
v /= norm
</DeepExtract>
return (new_vecs, new_eigval)
|
def iteration(vecs):
new_vecs = hessian_vector_product(self.loss, self.params, vecs, grad_params=self.grad_params)
new_eigval = sum(((v * new_v).sum() for (v, new_v) in zip(vecs, new_vecs)))
norm = sum(((v ** 2).sum() for v in new_vecs)).sqrt()
for v in new_vecs:
v /= norm
return (new_vecs, new_eigval)
|
cockpit
|
positive
|
def rocket_blast_damage(self):
"""The damage a rocket deals to adjacent units upon landing.
* InappropriateUnitType - the unit is not a rocket.
:type self: Unit
:rtype: int
"""
result = _lib.bc_Unit_rocket_blast_damage(self._ptr)
<DeepExtract>
if _lib.bc_has_err():
_lasterror = _ffi.new('char**')
err = _lib.bc_get_last_err(_lasterror)
errtext = _ffi.string(_lasterror[0])
_lib.bc_free_string(_lasterror[0])
raise Exception(errtext)
</DeepExtract>
return result
|
def rocket_blast_damage(self):
"""The damage a rocket deals to adjacent units upon landing.
* InappropriateUnitType - the unit is not a rocket.
:type self: Unit
:rtype: int
"""
result = _lib.bc_Unit_rocket_blast_damage(self._ptr)
if _lib.bc_has_err():
_lasterror = _ffi.new('char**')
err = _lib.bc_get_last_err(_lasterror)
errtext = _ffi.string(_lasterror[0])
_lib.bc_free_string(_lasterror[0])
raise Exception(errtext)
return result
|
bc18-scaffold
|
positive
|
def __add__(self, other: 'Reaction') -> 'Reaction':
"""Add two reactions and return a new one.
The stoichiometry will be the combined stoichiometry of the two
reactions, and the gene reaction rule will be both rules combined by an
and. All other attributes (i.e. reaction bounds) will match those of
the first reaction.
Does not modify in place.
Parameters
----------
other: cobra.Reaction
Another reaction to add to the current one.
Returns
-------
Reaction - new reaction with the added properties.
"""
<DeepExtract>
model = self._model
self._model = None
for i in self._metabolites:
i._model = None
for i in self._genes:
i._model = None
new_reaction = deepcopy(self)
self._model = model
for i in self._metabolites:
i._model = model
for i in self._genes:
i._model = model
new_reaction = new_reaction
</DeepExtract>
if other == 0:
return new_reaction
else:
new_reaction += other
return new_reaction
|
def __add__(self, other: 'Reaction') -> 'Reaction':
"""Add two reactions and return a new one.
The stoichiometry will be the combined stoichiometry of the two
reactions, and the gene reaction rule will be both rules combined by an
and. All other attributes (i.e. reaction bounds) will match those of
the first reaction.
Does not modify in place.
Parameters
----------
other: cobra.Reaction
Another reaction to add to the current one.
Returns
-------
Reaction - new reaction with the added properties.
"""
model = self._model
self._model = None
for i in self._metabolites:
i._model = None
for i in self._genes:
i._model = None
new_reaction = deepcopy(self)
self._model = model
for i in self._metabolites:
i._model = model
for i in self._genes:
i._model = model
new_reaction = new_reaction
if other == 0:
return new_reaction
else:
new_reaction += other
return new_reaction
|
cobrapy
|
positive
|
def configure_optimizers(self):
self._vprint('Configuring optimizers')
<DeepExtract>
if self.language_model is None:
assert not self._training_started, 'Attempting to reset language model after training started'
self.language_model = GPT2LMHeadModel.from_pretrained(self.hparams.baseline_model)
</DeepExtract>
return Lamb(self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay)
|
def configure_optimizers(self):
self._vprint('Configuring optimizers')
if self.language_model is None:
assert not self._training_started, 'Attempting to reset language model after training started'
self.language_model = GPT2LMHeadModel.from_pretrained(self.hparams.baseline_model)
return Lamb(self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay)
|
agatha
|
positive
|
def _update_connection(connect_info: dict) -> None:
"""
Updates the connection info member variables of the kernel manager. Also pulls the PID and PGID
info, if present, in case we need to use it for lifecycle management.
Note: Do NOT update connect_info with IP and other such artifacts in this method/function.
"""
self.kernel_manager.stdin_port = self.kernel_manager.iopub_port = self.kernel_manager.shell_port = self.kernel_manager.hb_port = self.kernel_manager.control_port = 0
if connect_info:
<DeepExtract>
pid = connect_info.pop('pid', None)
if pid:
try:
self.pid = int(pid)
except ValueError:
self.log.warning('pid returned from kernel launcher is not an integer: {} - ignoring.'.format(pid))
pid = None
pgid = connect_info.pop('pgid', None)
if pgid:
try:
self.pgid = int(pgid)
except ValueError:
self.log.warning('pgid returned from kernel launcher is not an integer: {} - ignoring.'.format(pgid))
pgid = None
if pid or pgid:
self.ip = self.assigned_ip
if not BaseProcessProxyABC.ip_is_local(self.ip):
self.local_proc = None
</DeepExtract>
self.kernel_manager.load_connection_info(info=connect_info)
self.log.debug("Received connection info for KernelID '{}' from host '{}': {}...".format(self.kernel_id, self.assigned_host, connect_info))
else:
error_message = "Unexpected runtime encountered for Kernel ID '{}' - connection information is null!".format(self.kernel_id)
<DeepExtract>
if error_message is None:
error_message = 'Internal server issue!'
self.log.error(error_message)
if 500:
raise web.HTTPError(status_code=500, reason=error_message)
else:
raise RuntimeError(error_message)
</DeepExtract>
<DeepExtract>
if self.response_socket:
try:
self.log.debug('response socket still open, close it')
self.response_socket.shutdown(SHUT_RDWR)
self.response_socket.close()
except OSError:
pass
self.response_socket = None
</DeepExtract>
self.kernel_manager._connection_file_written = True
|
def _update_connection(connect_info: dict) -> None:
"""
Updates the connection info member variables of the kernel manager. Also pulls the PID and PGID
info, if present, in case we need to use it for lifecycle management.
Note: Do NOT update connect_info with IP and other such artifacts in this method/function.
"""
self.kernel_manager.stdin_port = self.kernel_manager.iopub_port = self.kernel_manager.shell_port = self.kernel_manager.hb_port = self.kernel_manager.control_port = 0
if connect_info:
pid = connect_info.pop('pid', None)
if pid:
try:
self.pid = int(pid)
except ValueError:
self.log.warning('pid returned from kernel launcher is not an integer: {} - ignoring.'.format(pid))
pid = None
pgid = connect_info.pop('pgid', None)
if pgid:
try:
self.pgid = int(pgid)
except ValueError:
self.log.warning('pgid returned from kernel launcher is not an integer: {} - ignoring.'.format(pgid))
pgid = None
if pid or pgid:
self.ip = self.assigned_ip
if not BaseProcessProxyABC.ip_is_local(self.ip):
self.local_proc = None
self.kernel_manager.load_connection_info(info=connect_info)
self.log.debug("Received connection info for KernelID '{}' from host '{}': {}...".format(self.kernel_id, self.assigned_host, connect_info))
else:
error_message = "Unexpected runtime encountered for Kernel ID '{}' - connection information is null!".format(self.kernel_id)
if error_message is None:
error_message = 'Internal server issue!'
self.log.error(error_message)
if 500:
raise web.HTTPError(status_code=500, reason=error_message)
else:
raise RuntimeError(error_message)
if self.response_socket:
try:
self.log.debug('response socket still open, close it')
self.response_socket.shutdown(SHUT_RDWR)
self.response_socket.close()
except OSError:
pass
self.response_socket = None
self.kernel_manager._connection_file_written = True
|
enterprise_gateway
|
positive
|
def union(self, p, q):
<DeepExtract>
j = p
while j != self._id[j]:
self._id[j] = self._id[self._id[j]]
j = self._id[j]
p = j
</DeepExtract>
<DeepExtract>
j = q
while j != self._id[j]:
self._id[j] = self._id[self._id[j]]
j = self._id[j]
j = j
</DeepExtract>
if self._sz[i] < self._sz[j]:
self._id[i] = j
self._sz[j] += self._sz[i]
else:
self._id[j] = i
self._sz[i] += self._sz[j]
|
def union(self, p, q):
j = p
while j != self._id[j]:
self._id[j] = self._id[self._id[j]]
j = self._id[j]
p = j
j = q
while j != self._id[j]:
self._id[j] = self._id[self._id[j]]
j = self._id[j]
j = j
if self._sz[i] < self._sz[j]:
self._id[i] = j
self._sz[j] += self._sz[i]
else:
self._id[j] = i
self._sz[i] += self._sz[j]
|
code-catalog-python
|
positive
|
def GetMenuBlocks(ctrls):
allMenuBlocks = []
for ctrl in ctrls:
if 'MenuItems' in ctrl:
<DeepExtract>
blocks = []
curBlock = []
for item in ctrl.MenuItems():
itemAsCtrl = MenuItemAsControl(item)
if parentage:
itemPath = '%s->%s' % ('->'.join(parentage), item['Text'])
else:
itemPath = item['Text']
curBlock.append(itemAsCtrl)
if 'MenuItems' in item:
parentage.append(item['Text'])
blocks.extend(MenuBlockAsControls(item['MenuItems']['MenuItems'], parentage))
del parentage[-1]
blocks.append(curBlock)
menuBlocks = blocks
</DeepExtract>
allMenuBlocks.extend(menuBlocks)
return allMenuBlocks
|
def GetMenuBlocks(ctrls):
allMenuBlocks = []
for ctrl in ctrls:
if 'MenuItems' in ctrl:
blocks = []
curBlock = []
for item in ctrl.MenuItems():
itemAsCtrl = MenuItemAsControl(item)
if parentage:
itemPath = '%s->%s' % ('->'.join(parentage), item['Text'])
else:
itemPath = item['Text']
curBlock.append(itemAsCtrl)
if 'MenuItems' in item:
parentage.append(item['Text'])
blocks.extend(MenuBlockAsControls(item['MenuItems']['MenuItems'], parentage))
del parentage[-1]
blocks.append(curBlock)
menuBlocks = blocks
allMenuBlocks.extend(menuBlocks)
return allMenuBlocks
|
BrowserRefresh-Sublime
|
positive
|
def test_optimise_custom_function_linear_ineq_con1(self):
n = 2
N = 20
X = np.random.uniform(-1.0, 1.0, (N, n))
<DeepExtract>
if X.ndim == 1:
f = sp.optimize.rosen(X)
else:
f = np.zeros(X.shape[0])
for i in range(X.shape[0]):
f[i] = sp.optimize.rosen(X[i, :])
f = f
</DeepExtract>
fparam = eq.Parameter(distribution='uniform', lower=-1.0, upper=1.0, order=self.degf)
fParameters = [fparam for i in range(n)]
myBasis = eq.Basis('total-order')
fpoly = eq.Poly(fParameters, myBasis, method='least-squares', sampling_args={'mesh': 'user-defined', 'sample-points': X, 'sample-outputs': f})
fpoly.set_model()
for method in ['COBYLA', 'SLSQP', 'trust-constr']:
Opt = eq.Optimisation(method=method)
Opt.add_objective(poly=fpoly)
Opt.add_linear_ineq_con(np.eye(n), -np.inf * np.ones(n), np.ones(n))
x0 = np.random.uniform(-1.0, 1.0, n)
sol = Opt.optimise(x0)
if sol['status'] == 0:
np.testing.assert_almost_equal(sol['x'].flatten(), np.array([1.0, 1.0]), decimal=3)
|
def test_optimise_custom_function_linear_ineq_con1(self):
n = 2
N = 20
X = np.random.uniform(-1.0, 1.0, (N, n))
if X.ndim == 1:
f = sp.optimize.rosen(X)
else:
f = np.zeros(X.shape[0])
for i in range(X.shape[0]):
f[i] = sp.optimize.rosen(X[i, :])
f = f
fparam = eq.Parameter(distribution='uniform', lower=-1.0, upper=1.0, order=self.degf)
fParameters = [fparam for i in range(n)]
myBasis = eq.Basis('total-order')
fpoly = eq.Poly(fParameters, myBasis, method='least-squares', sampling_args={'mesh': 'user-defined', 'sample-points': X, 'sample-outputs': f})
fpoly.set_model()
for method in ['COBYLA', 'SLSQP', 'trust-constr']:
Opt = eq.Optimisation(method=method)
Opt.add_objective(poly=fpoly)
Opt.add_linear_ineq_con(np.eye(n), -np.inf * np.ones(n), np.ones(n))
x0 = np.random.uniform(-1.0, 1.0, n)
sol = Opt.optimise(x0)
if sol['status'] == 0:
np.testing.assert_almost_equal(sol['x'].flatten(), np.array([1.0, 1.0]), decimal=3)
|
equadratures
|
positive
|
def read_dataset_page(_id, _format):
if not _format:
<DeepExtract>
_format = None
accept_header = toolkit.request.headers.get('Accept', '')
if accept_header:
_format = parse_accept_header(accept_header)
_format = _format
</DeepExtract>
if not _format:
if toolkit.check_ckan_version(max_version='2.8.99'):
return read_endpoint(_id)
else:
return read_endpoint(_get_package_type(_id), _id)
_profiles = toolkit.request.params.get('profiles')
if _profiles:
_profiles = _profiles.split(',')
try:
response = toolkit.get_action('dcat_dataset_show')({}, {'id': _id, 'format': _format, 'profiles': _profiles})
except toolkit.ObjectNotFound:
toolkit.abort(404)
except (toolkit.ValidationError, RDFProfileException) as e:
toolkit.abort(409, str(e))
if toolkit.check_ckan_version(max_version='2.8.99'):
toolkit.response.headers.update({'Content-type': CONTENT_TYPES[_format]})
else:
from flask import make_response
response = make_response(response)
response.headers['Content-type'] = CONTENT_TYPES[_format]
return response
|
def read_dataset_page(_id, _format):
if not _format:
_format = None
accept_header = toolkit.request.headers.get('Accept', '')
if accept_header:
_format = parse_accept_header(accept_header)
_format = _format
if not _format:
if toolkit.check_ckan_version(max_version='2.8.99'):
return read_endpoint(_id)
else:
return read_endpoint(_get_package_type(_id), _id)
_profiles = toolkit.request.params.get('profiles')
if _profiles:
_profiles = _profiles.split(',')
try:
response = toolkit.get_action('dcat_dataset_show')({}, {'id': _id, 'format': _format, 'profiles': _profiles})
except toolkit.ObjectNotFound:
toolkit.abort(404)
except (toolkit.ValidationError, RDFProfileException) as e:
toolkit.abort(409, str(e))
if toolkit.check_ckan_version(max_version='2.8.99'):
toolkit.response.headers.update({'Content-type': CONTENT_TYPES[_format]})
else:
from flask import make_response
response = make_response(response)
response.headers['Content-type'] = CONTENT_TYPES[_format]
return response
|
ckanext-dcat
|
positive
|
def test_collect_firewalls() -> None:
do_client = ClientMock({'list_regions': regions, 'list_firewalls': firewalls, 'list_droplets': droplets, 'list_tags': tags})
<DeepExtract>
cloud = Cloud(id='do')
team = DigitalOceanTeam(id='test_team', urn='do:team:test_team')
plugin_instance = DigitalOceanTeamCollector(team, do_client)
plugin_instance.collect()
cloud_graph = Graph(root=cloud)
graph = Graph(root=GraphRoot(id='root', tags={}))
cloud_graph.merge(plugin_instance.graph)
graph.merge(cloud_graph)
sanitize(graph)
graph = graph
</DeepExtract>
<DeepExtract>
for (node_from, node_to, edge) in graph.edges:
if hasattr(node_from, 'urn') and hasattr(node_to, 'urn') and (node_from.urn == 'do:tag:firewall_tag') and (node_to.urn == 'do:firewall:fe2e76df-3e15-4895-800f-2d5b3b807711') and (edge.edge_type == (EdgeType.delete if delete else EdgeType.default)):
return
assert False, f"Edge {'do:tag:firewall_tag'} -> {'do:firewall:fe2e76df-3e15-4895-800f-2d5b3b807711'} not found"
</DeepExtract>
<DeepExtract>
for (node_from, node_to, edge) in graph.edges:
if hasattr(node_from, 'urn') and hasattr(node_to, 'urn') and (node_from.urn == 'do:firewall:fe2e76df-3e15-4895-800f-2d5b3b807711') and (node_to.urn == 'do:droplet:289110074') and (edge.edge_type == (EdgeType.delete if delete else EdgeType.default)):
return
assert False, f"Edge {'do:firewall:fe2e76df-3e15-4895-800f-2d5b3b807711'} -> {'do:droplet:289110074'} not found"
</DeepExtract>
firewall = graph.search_first('urn', 'do:firewall:fe2e76df-3e15-4895-800f-2d5b3b807711')
assert firewall.firewall_status == 'succeeded'
assert firewall.ctime == datetime.datetime(2022, 3, 10, 13, 10, 50, 0, datetime.timezone.utc)
|
def test_collect_firewalls() -> None:
do_client = ClientMock({'list_regions': regions, 'list_firewalls': firewalls, 'list_droplets': droplets, 'list_tags': tags})
cloud = Cloud(id='do')
team = DigitalOceanTeam(id='test_team', urn='do:team:test_team')
plugin_instance = DigitalOceanTeamCollector(team, do_client)
plugin_instance.collect()
cloud_graph = Graph(root=cloud)
graph = Graph(root=GraphRoot(id='root', tags={}))
cloud_graph.merge(plugin_instance.graph)
graph.merge(cloud_graph)
sanitize(graph)
graph = graph
for (node_from, node_to, edge) in graph.edges:
if hasattr(node_from, 'urn') and hasattr(node_to, 'urn') and (node_from.urn == 'do:tag:firewall_tag') and (node_to.urn == 'do:firewall:fe2e76df-3e15-4895-800f-2d5b3b807711') and (edge.edge_type == (EdgeType.delete if delete else EdgeType.default)):
return
assert False, f"Edge {'do:tag:firewall_tag'} -> {'do:firewall:fe2e76df-3e15-4895-800f-2d5b3b807711'} not found"
for (node_from, node_to, edge) in graph.edges:
if hasattr(node_from, 'urn') and hasattr(node_to, 'urn') and (node_from.urn == 'do:firewall:fe2e76df-3e15-4895-800f-2d5b3b807711') and (node_to.urn == 'do:droplet:289110074') and (edge.edge_type == (EdgeType.delete if delete else EdgeType.default)):
return
assert False, f"Edge {'do:firewall:fe2e76df-3e15-4895-800f-2d5b3b807711'} -> {'do:droplet:289110074'} not found"
firewall = graph.search_first('urn', 'do:firewall:fe2e76df-3e15-4895-800f-2d5b3b807711')
assert firewall.firewall_status == 'succeeded'
assert firewall.ctime == datetime.datetime(2022, 3, 10, 13, 10, 50, 0, datetime.timezone.utc)
|
cloudkeeper
|
positive
|
def do_eval(gt_annos, dt_annos, current_classes, min_overlaps, compute_aos=False, PR_detail_dict=None):
difficultys = [0, 1, 2]
<DeepExtract>
assert len(gt_annos) == len(dt_annos)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
rets = calculate_iou_partly(dt_annos, gt_annos, 0, num_parts)
(overlaps, parted_overlaps, total_dt_num, total_gt_num) = rets
N_SAMPLE_PTS = 41
num_minoverlap = len(min_overlaps)
num_class = len(current_classes)
num_difficulty = len(difficultys)
precision = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
recall = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
for (m, current_class) in enumerate(current_classes):
for (l, difficulty) in enumerate(difficultys):
rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
(gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares, total_dc_num, total_num_valid_gt) = rets
for (k, min_overlap) in enumerate(min_overlaps[:, 0, m]):
thresholdss = []
for i in range(len(gt_annos)):
rets = compute_statistics_jit(overlaps[i], gt_datas_list[i], dt_datas_list[i], ignored_gts[i], ignored_dets[i], dontcares[i], 0, min_overlap=min_overlap, thresh=0.0, compute_fp=False)
(tp, fp, fn, similarity, thresholds) = rets
thresholdss += thresholds.tolist()
thresholdss = np.array(thresholdss)
thresholds = get_thresholds(thresholdss, total_num_valid_gt)
thresholds = np.array(thresholds)
pr = np.zeros([len(thresholds), 4])
idx = 0
for (j, num_part) in enumerate(split_parts):
gt_datas_part = np.concatenate(gt_datas_list[idx:idx + num_part], 0)
dt_datas_part = np.concatenate(dt_datas_list[idx:idx + num_part], 0)
dc_datas_part = np.concatenate(dontcares[idx:idx + num_part], 0)
ignored_dets_part = np.concatenate(ignored_dets[idx:idx + num_part], 0)
ignored_gts_part = np.concatenate(ignored_gts[idx:idx + num_part], 0)
fused_compute_statistics(parted_overlaps[j], pr, total_gt_num[idx:idx + num_part], total_dt_num[idx:idx + num_part], total_dc_num[idx:idx + num_part], gt_datas_part, dt_datas_part, dc_datas_part, ignored_gts_part, ignored_dets_part, 0, min_overlap=min_overlap, thresholds=thresholds, compute_aos=compute_aos)
idx += num_part
for i in range(len(thresholds)):
recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
if compute_aos:
aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
for i in range(len(thresholds)):
precision[m, l, k, i] = np.max(precision[m, l, k, i:], axis=-1)
recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)
if compute_aos:
aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
ret_dict = {'recall': recall, 'precision': precision, 'orientation': aos}
ret = ret_dict
</DeepExtract>
<DeepExtract>
sums = 0
for i in range(0, ret['precision'].shape[-1], 4):
sums = sums + ret['precision'][..., i]
mAP_bbox = sums / 11 * 100
</DeepExtract>
<DeepExtract>
sums = 0
for i in range(1, ret['precision'].shape[-1]):
sums = sums + ret['precision'][..., i]
mAP_bbox_R40 = sums / 40 * 100
</DeepExtract>
if PR_detail_dict is not None:
PR_detail_dict['bbox'] = ret['precision']
mAP_aos = mAP_aos_R40 = None
if compute_aos:
<DeepExtract>
sums = 0
for i in range(0, ret['orientation'].shape[-1], 4):
sums = sums + ret['orientation'][..., i]
mAP_aos = sums / 11 * 100
</DeepExtract>
<DeepExtract>
sums = 0
for i in range(1, ret['orientation'].shape[-1]):
sums = sums + ret['orientation'][..., i]
mAP_aos_R40 = sums / 40 * 100
</DeepExtract>
if PR_detail_dict is not None:
PR_detail_dict['aos'] = ret['orientation']
<DeepExtract>
assert len(gt_annos) == len(dt_annos)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
rets = calculate_iou_partly(dt_annos, gt_annos, 1, num_parts)
(overlaps, parted_overlaps, total_dt_num, total_gt_num) = rets
N_SAMPLE_PTS = 41
num_minoverlap = len(min_overlaps)
num_class = len(current_classes)
num_difficulty = len(difficultys)
precision = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
recall = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
for (m, current_class) in enumerate(current_classes):
for (l, difficulty) in enumerate(difficultys):
rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
(gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares, total_dc_num, total_num_valid_gt) = rets
for (k, min_overlap) in enumerate(min_overlaps[:, 1, m]):
thresholdss = []
for i in range(len(gt_annos)):
rets = compute_statistics_jit(overlaps[i], gt_datas_list[i], dt_datas_list[i], ignored_gts[i], ignored_dets[i], dontcares[i], 1, min_overlap=min_overlap, thresh=0.0, compute_fp=False)
(tp, fp, fn, similarity, thresholds) = rets
thresholdss += thresholds.tolist()
thresholdss = np.array(thresholdss)
thresholds = get_thresholds(thresholdss, total_num_valid_gt)
thresholds = np.array(thresholds)
pr = np.zeros([len(thresholds), 4])
idx = 0
for (j, num_part) in enumerate(split_parts):
gt_datas_part = np.concatenate(gt_datas_list[idx:idx + num_part], 0)
dt_datas_part = np.concatenate(dt_datas_list[idx:idx + num_part], 0)
dc_datas_part = np.concatenate(dontcares[idx:idx + num_part], 0)
ignored_dets_part = np.concatenate(ignored_dets[idx:idx + num_part], 0)
ignored_gts_part = np.concatenate(ignored_gts[idx:idx + num_part], 0)
fused_compute_statistics(parted_overlaps[j], pr, total_gt_num[idx:idx + num_part], total_dt_num[idx:idx + num_part], total_dc_num[idx:idx + num_part], gt_datas_part, dt_datas_part, dc_datas_part, ignored_gts_part, ignored_dets_part, 1, min_overlap=min_overlap, thresholds=thresholds, compute_aos=compute_aos)
idx += num_part
for i in range(len(thresholds)):
recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
if compute_aos:
aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
for i in range(len(thresholds)):
precision[m, l, k, i] = np.max(precision[m, l, k, i:], axis=-1)
recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)
if compute_aos:
aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
ret_dict = {'recall': recall, 'precision': precision, 'orientation': aos}
ret = ret_dict
</DeepExtract>
<DeepExtract>
sums = 0
for i in range(0, ret['precision'].shape[-1], 4):
sums = sums + ret['precision'][..., i]
mAP_bev = sums / 11 * 100
</DeepExtract>
<DeepExtract>
sums = 0
for i in range(1, ret['precision'].shape[-1]):
sums = sums + ret['precision'][..., i]
mAP_bev_R40 = sums / 40 * 100
</DeepExtract>
if PR_detail_dict is not None:
PR_detail_dict['bev'] = ret['precision']
<DeepExtract>
assert len(gt_annos) == len(dt_annos)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
rets = calculate_iou_partly(dt_annos, gt_annos, 2, num_parts)
(overlaps, parted_overlaps, total_dt_num, total_gt_num) = rets
N_SAMPLE_PTS = 41
num_minoverlap = len(min_overlaps)
num_class = len(current_classes)
num_difficulty = len(difficultys)
precision = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
recall = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
for (m, current_class) in enumerate(current_classes):
for (l, difficulty) in enumerate(difficultys):
rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
(gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares, total_dc_num, total_num_valid_gt) = rets
for (k, min_overlap) in enumerate(min_overlaps[:, 2, m]):
thresholdss = []
for i in range(len(gt_annos)):
rets = compute_statistics_jit(overlaps[i], gt_datas_list[i], dt_datas_list[i], ignored_gts[i], ignored_dets[i], dontcares[i], 2, min_overlap=min_overlap, thresh=0.0, compute_fp=False)
(tp, fp, fn, similarity, thresholds) = rets
thresholdss += thresholds.tolist()
thresholdss = np.array(thresholdss)
thresholds = get_thresholds(thresholdss, total_num_valid_gt)
thresholds = np.array(thresholds)
pr = np.zeros([len(thresholds), 4])
idx = 0
for (j, num_part) in enumerate(split_parts):
gt_datas_part = np.concatenate(gt_datas_list[idx:idx + num_part], 0)
dt_datas_part = np.concatenate(dt_datas_list[idx:idx + num_part], 0)
dc_datas_part = np.concatenate(dontcares[idx:idx + num_part], 0)
ignored_dets_part = np.concatenate(ignored_dets[idx:idx + num_part], 0)
ignored_gts_part = np.concatenate(ignored_gts[idx:idx + num_part], 0)
fused_compute_statistics(parted_overlaps[j], pr, total_gt_num[idx:idx + num_part], total_dt_num[idx:idx + num_part], total_dc_num[idx:idx + num_part], gt_datas_part, dt_datas_part, dc_datas_part, ignored_gts_part, ignored_dets_part, 2, min_overlap=min_overlap, thresholds=thresholds, compute_aos=compute_aos)
idx += num_part
for i in range(len(thresholds)):
recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
if compute_aos:
aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
for i in range(len(thresholds)):
precision[m, l, k, i] = np.max(precision[m, l, k, i:], axis=-1)
recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)
if compute_aos:
aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
ret_dict = {'recall': recall, 'precision': precision, 'orientation': aos}
ret = ret_dict
</DeepExtract>
<DeepExtract>
sums = 0
for i in range(0, ret['precision'].shape[-1], 4):
sums = sums + ret['precision'][..., i]
mAP_3d = sums / 11 * 100
</DeepExtract>
<DeepExtract>
sums = 0
for i in range(1, ret['precision'].shape[-1]):
sums = sums + ret['precision'][..., i]
mAP_3d_R40 = sums / 40 * 100
</DeepExtract>
if PR_detail_dict is not None:
PR_detail_dict['3d'] = ret['precision']
return (mAP_bbox, mAP_bev, mAP_3d, mAP_aos, mAP_bbox_R40, mAP_bev_R40, mAP_3d_R40, mAP_aos_R40)
|
def do_eval(gt_annos, dt_annos, current_classes, min_overlaps, compute_aos=False, PR_detail_dict=None):
difficultys = [0, 1, 2]
assert len(gt_annos) == len(dt_annos)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
rets = calculate_iou_partly(dt_annos, gt_annos, 0, num_parts)
(overlaps, parted_overlaps, total_dt_num, total_gt_num) = rets
N_SAMPLE_PTS = 41
num_minoverlap = len(min_overlaps)
num_class = len(current_classes)
num_difficulty = len(difficultys)
precision = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
recall = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
for (m, current_class) in enumerate(current_classes):
for (l, difficulty) in enumerate(difficultys):
rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
(gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares, total_dc_num, total_num_valid_gt) = rets
for (k, min_overlap) in enumerate(min_overlaps[:, 0, m]):
thresholdss = []
for i in range(len(gt_annos)):
rets = compute_statistics_jit(overlaps[i], gt_datas_list[i], dt_datas_list[i], ignored_gts[i], ignored_dets[i], dontcares[i], 0, min_overlap=min_overlap, thresh=0.0, compute_fp=False)
(tp, fp, fn, similarity, thresholds) = rets
thresholdss += thresholds.tolist()
thresholdss = np.array(thresholdss)
thresholds = get_thresholds(thresholdss, total_num_valid_gt)
thresholds = np.array(thresholds)
pr = np.zeros([len(thresholds), 4])
idx = 0
for (j, num_part) in enumerate(split_parts):
gt_datas_part = np.concatenate(gt_datas_list[idx:idx + num_part], 0)
dt_datas_part = np.concatenate(dt_datas_list[idx:idx + num_part], 0)
dc_datas_part = np.concatenate(dontcares[idx:idx + num_part], 0)
ignored_dets_part = np.concatenate(ignored_dets[idx:idx + num_part], 0)
ignored_gts_part = np.concatenate(ignored_gts[idx:idx + num_part], 0)
fused_compute_statistics(parted_overlaps[j], pr, total_gt_num[idx:idx + num_part], total_dt_num[idx:idx + num_part], total_dc_num[idx:idx + num_part], gt_datas_part, dt_datas_part, dc_datas_part, ignored_gts_part, ignored_dets_part, 0, min_overlap=min_overlap, thresholds=thresholds, compute_aos=compute_aos)
idx += num_part
for i in range(len(thresholds)):
recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
if compute_aos:
aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
for i in range(len(thresholds)):
precision[m, l, k, i] = np.max(precision[m, l, k, i:], axis=-1)
recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)
if compute_aos:
aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
ret_dict = {'recall': recall, 'precision': precision, 'orientation': aos}
ret = ret_dict
sums = 0
for i in range(0, ret['precision'].shape[-1], 4):
sums = sums + ret['precision'][..., i]
mAP_bbox = sums / 11 * 100
sums = 0
for i in range(1, ret['precision'].shape[-1]):
sums = sums + ret['precision'][..., i]
mAP_bbox_R40 = sums / 40 * 100
if PR_detail_dict is not None:
PR_detail_dict['bbox'] = ret['precision']
mAP_aos = mAP_aos_R40 = None
if compute_aos:
sums = 0
for i in range(0, ret['orientation'].shape[-1], 4):
sums = sums + ret['orientation'][..., i]
mAP_aos = sums / 11 * 100
sums = 0
for i in range(1, ret['orientation'].shape[-1]):
sums = sums + ret['orientation'][..., i]
mAP_aos_R40 = sums / 40 * 100
if PR_detail_dict is not None:
PR_detail_dict['aos'] = ret['orientation']
assert len(gt_annos) == len(dt_annos)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
rets = calculate_iou_partly(dt_annos, gt_annos, 1, num_parts)
(overlaps, parted_overlaps, total_dt_num, total_gt_num) = rets
N_SAMPLE_PTS = 41
num_minoverlap = len(min_overlaps)
num_class = len(current_classes)
num_difficulty = len(difficultys)
precision = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
recall = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
for (m, current_class) in enumerate(current_classes):
for (l, difficulty) in enumerate(difficultys):
rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
(gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares, total_dc_num, total_num_valid_gt) = rets
for (k, min_overlap) in enumerate(min_overlaps[:, 1, m]):
thresholdss = []
for i in range(len(gt_annos)):
rets = compute_statistics_jit(overlaps[i], gt_datas_list[i], dt_datas_list[i], ignored_gts[i], ignored_dets[i], dontcares[i], 1, min_overlap=min_overlap, thresh=0.0, compute_fp=False)
(tp, fp, fn, similarity, thresholds) = rets
thresholdss += thresholds.tolist()
thresholdss = np.array(thresholdss)
thresholds = get_thresholds(thresholdss, total_num_valid_gt)
thresholds = np.array(thresholds)
pr = np.zeros([len(thresholds), 4])
idx = 0
for (j, num_part) in enumerate(split_parts):
gt_datas_part = np.concatenate(gt_datas_list[idx:idx + num_part], 0)
dt_datas_part = np.concatenate(dt_datas_list[idx:idx + num_part], 0)
dc_datas_part = np.concatenate(dontcares[idx:idx + num_part], 0)
ignored_dets_part = np.concatenate(ignored_dets[idx:idx + num_part], 0)
ignored_gts_part = np.concatenate(ignored_gts[idx:idx + num_part], 0)
fused_compute_statistics(parted_overlaps[j], pr, total_gt_num[idx:idx + num_part], total_dt_num[idx:idx + num_part], total_dc_num[idx:idx + num_part], gt_datas_part, dt_datas_part, dc_datas_part, ignored_gts_part, ignored_dets_part, 1, min_overlap=min_overlap, thresholds=thresholds, compute_aos=compute_aos)
idx += num_part
for i in range(len(thresholds)):
recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
if compute_aos:
aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
for i in range(len(thresholds)):
precision[m, l, k, i] = np.max(precision[m, l, k, i:], axis=-1)
recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)
if compute_aos:
aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
ret_dict = {'recall': recall, 'precision': precision, 'orientation': aos}
ret = ret_dict
sums = 0
for i in range(0, ret['precision'].shape[-1], 4):
sums = sums + ret['precision'][..., i]
mAP_bev = sums / 11 * 100
sums = 0
for i in range(1, ret['precision'].shape[-1]):
sums = sums + ret['precision'][..., i]
mAP_bev_R40 = sums / 40 * 100
if PR_detail_dict is not None:
PR_detail_dict['bev'] = ret['precision']
assert len(gt_annos) == len(dt_annos)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
rets = calculate_iou_partly(dt_annos, gt_annos, 2, num_parts)
(overlaps, parted_overlaps, total_dt_num, total_gt_num) = rets
N_SAMPLE_PTS = 41
num_minoverlap = len(min_overlaps)
num_class = len(current_classes)
num_difficulty = len(difficultys)
precision = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
recall = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
for (m, current_class) in enumerate(current_classes):
for (l, difficulty) in enumerate(difficultys):
rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
(gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares, total_dc_num, total_num_valid_gt) = rets
for (k, min_overlap) in enumerate(min_overlaps[:, 2, m]):
thresholdss = []
for i in range(len(gt_annos)):
rets = compute_statistics_jit(overlaps[i], gt_datas_list[i], dt_datas_list[i], ignored_gts[i], ignored_dets[i], dontcares[i], 2, min_overlap=min_overlap, thresh=0.0, compute_fp=False)
(tp, fp, fn, similarity, thresholds) = rets
thresholdss += thresholds.tolist()
thresholdss = np.array(thresholdss)
thresholds = get_thresholds(thresholdss, total_num_valid_gt)
thresholds = np.array(thresholds)
pr = np.zeros([len(thresholds), 4])
idx = 0
for (j, num_part) in enumerate(split_parts):
gt_datas_part = np.concatenate(gt_datas_list[idx:idx + num_part], 0)
dt_datas_part = np.concatenate(dt_datas_list[idx:idx + num_part], 0)
dc_datas_part = np.concatenate(dontcares[idx:idx + num_part], 0)
ignored_dets_part = np.concatenate(ignored_dets[idx:idx + num_part], 0)
ignored_gts_part = np.concatenate(ignored_gts[idx:idx + num_part], 0)
fused_compute_statistics(parted_overlaps[j], pr, total_gt_num[idx:idx + num_part], total_dt_num[idx:idx + num_part], total_dc_num[idx:idx + num_part], gt_datas_part, dt_datas_part, dc_datas_part, ignored_gts_part, ignored_dets_part, 2, min_overlap=min_overlap, thresholds=thresholds, compute_aos=compute_aos)
idx += num_part
for i in range(len(thresholds)):
recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
if compute_aos:
aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
for i in range(len(thresholds)):
precision[m, l, k, i] = np.max(precision[m, l, k, i:], axis=-1)
recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)
if compute_aos:
aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
ret_dict = {'recall': recall, 'precision': precision, 'orientation': aos}
ret = ret_dict
sums = 0
for i in range(0, ret['precision'].shape[-1], 4):
sums = sums + ret['precision'][..., i]
mAP_3d = sums / 11 * 100
sums = 0
for i in range(1, ret['precision'].shape[-1]):
sums = sums + ret['precision'][..., i]
mAP_3d_R40 = sums / 40 * 100
if PR_detail_dict is not None:
PR_detail_dict['3d'] = ret['precision']
return (mAP_bbox, mAP_bev, mAP_3d, mAP_aos, mAP_bbox_R40, mAP_bev_R40, mAP_3d_R40, mAP_aos_R40)
|
CenterPoint-KITTI
|
positive
|
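The evaluation code above derives KITTI-style mAP by averaging precision over 11 of the 41 recall sample points (every fourth index) and mAP_R40 by averaging over sample points 1..40. A minimal standalone sketch of just that interpolation step, using a made-up precision array rather than the dataset's actual outputs:

import numpy as np

precision = np.linspace(1.0, 0.5, 41)      # hypothetical precision at 41 recall samples
mAP_11 = precision[0::4].sum() / 11 * 100  # 11-point metric: samples 0, 4, ..., 40
mAP_R40 = precision[1:].sum() / 40 * 100   # R40 metric: samples 1..40
print(mAP_11, mAP_R40)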
def test_add_tokens_tokenizer(self):
<DeepExtract>
raise NotImplementedError
</DeepExtract>
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
self.assertEqual(vocab_size, all_size)
new_toks = ['aaaaabbbbbb', 'cccccccccdddddddd']
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
tokens = tokenizer.encode('aaaaabbbbbb low cccccccccdddddddd l')
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
new_toks_2 = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
tokens = tokenizer.encode('>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l')
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokens[-3])
self.assertEqual(tokens[0], tokenizer.convert_tokens_to_ids(tokenizer.eos_token))
self.assertEqual(tokens[-2], tokenizer.convert_tokens_to_ids(tokenizer.pad_token))
|
def test_add_tokens_tokenizer(self):
raise NotImplementedError
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
self.assertEqual(vocab_size, all_size)
new_toks = ['aaaaabbbbbb', 'cccccccccdddddddd']
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
tokens = tokenizer.encode('aaaaabbbbbb low cccccccccdddddddd l')
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
new_toks_2 = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
tokens = tokenizer.encode('>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l')
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokens[-3])
self.assertEqual(tokens[0], tokenizer.convert_tokens_to_ids(tokenizer.eos_token))
self.assertEqual(tokens[-2], tokenizer.convert_tokens_to_ids(tokenizer.pad_token))
|
CCF-BDCI-Sentiment-Analysis-Baseline
|
positive
|
def resolve_replace(spec, src_repo, dest_repo):
<DeepExtract>
src = find_vfs_item(spec.src, src_repo)
spec_args = spec_msg(spec)
if not src:
misuse('cannot find source for %s' % spec_args)
if src.type == 'root':
misuse('cannot fetch entire repository for %s' % spec_args)
if src.type == 'tags':
misuse('cannot fetch entire /.tag directory for %s' % spec_args)
debug1('src: %s\n' % loc_desc(src))
src = src
</DeepExtract>
<DeepExtract>
if not spec.dest:
spec_args = '--%s %s' % (spec.method, path_msg(spec.src))
spec_args = '--%s: %s %s' % (spec.method, path_msg(spec.src), path_msg(spec.dest))
</DeepExtract>
if not spec.dest:
if src.path.startswith(b'/.tag/') or src.type == 'branch':
spec = spec._replace(dest=spec.src)
if not spec.dest:
<DeepExtract>
sys.stderr.write(usage(argspec))
if 'no destination provided for %s':
sys.stderr.write('\nerror: ')
sys.stderr.write('no destination provided for %s')
sys.stderr.write('\n')
sys.exit(1)
</DeepExtract>
<DeepExtract>
res = dest_repo.resolve(spec.dest, follow=False, want_meta=False)
(leaf_name, leaf_item) = res[-1]
if not leaf_item:
dest = None
kind = type(leaf_item)
if kind == vfs.Root:
kind = 'root'
elif kind == vfs.Tags:
kind = 'tags'
elif kind == vfs.RevList:
kind = 'branch'
elif kind == vfs.Commit:
if len(res) > 1 and isinstance(res[-2][1], vfs.RevList):
kind = 'save'
else:
kind = 'commit'
elif kind == vfs.Item:
if S_ISDIR(vfs.item_mode(leaf_item)):
kind = 'tree'
else:
kind = 'blob'
elif kind == vfs.Chunky:
kind = 'tree'
elif kind == vfs.FakeLink:
assert leaf_name == b'latest'
res = dest_repo.resolve(leaf_item.target, parent=res[:-1], follow=False, want_meta=False)
(leaf_name, leaf_item) = res[-1]
assert leaf_item
assert isinstance(leaf_item, vfs.Commit)
spec.dest = b'/'.join((x[0] for x in res))
kind = 'save'
else:
raise Exception('unexpected resolution for %s: %r' % (path_msg(spec.dest), res))
path = b'/'.join((spec.dest for (spec.dest, item) in res))
if hasattr(leaf_item, 'coid'):
result = Loc(type=kind, hash=leaf_item.coid, path=path)
elif hasattr(leaf_item, 'oid'):
result = Loc(type=kind, hash=leaf_item.oid, path=path)
else:
result = Loc(type=kind, hash=None, path=path)
dest = result
</DeepExtract>
if dest:
if not dest.type == 'branch' and (not dest.path.startswith(b'/.tag/')):
<DeepExtract>
sys.stderr.write(usage(argspec))
if '%s impossible; can only overwrite branch or tag' % spec_args:
sys.stderr.write('\nerror: ')
sys.stderr.write('%s impossible; can only overwrite branch or tag' % spec_args)
sys.stderr.write('\n')
sys.exit(1)
</DeepExtract>
else:
<DeepExtract>
if cleanup_vfs_path(spec.dest).startswith(b'/.') and (not cleanup_vfs_path(spec.dest).startswith(b'/.tag/')):
misuse('unsupported destination path %s in %s' % (path_msg(cleanup_vfs_path(spec.dest)), spec_msg(spec)))
cp = cleanup_vfs_path(spec.dest)
</DeepExtract>
dest = default_loc._replace(path=cp)
if not dest.path.startswith(b'/.tag/') and (not src.type in ('branch', 'save', 'commit')):
<DeepExtract>
sys.stderr.write(usage(argspec))
if 'cannot overwrite branch with %s for %s' % (src.type, spec_args):
sys.stderr.write('\nerror: ')
sys.stderr.write('cannot overwrite branch with %s for %s' % (src.type, spec_args))
sys.stderr.write('\n')
sys.exit(1)
</DeepExtract>
return Target(spec=spec, src=src, dest=dest)
|
def resolve_replace(spec, src_repo, dest_repo):
src = find_vfs_item(spec.src, src_repo)
spec_args = spec_msg(spec)
if not src:
misuse('cannot find source for %s' % spec_args)
if src.type == 'root':
misuse('cannot fetch entire repository for %s' % spec_args)
if src.type == 'tags':
misuse('cannot fetch entire /.tag directory for %s' % spec_args)
debug1('src: %s\n' % loc_desc(src))
src = src
if not spec.dest:
spec_args = '--%s %s' % (spec.method, path_msg(spec.src))
spec_args = '--%s: %s %s' % (spec.method, path_msg(spec.src), path_msg(spec.dest))
if not spec.dest:
if src.path.startswith(b'/.tag/') or src.type == 'branch':
spec = spec._replace(dest=spec.src)
if not spec.dest:
sys.stderr.write(usage(argspec))
if 'no destination provided for %s':
sys.stderr.write('\nerror: ')
sys.stderr.write('no destination provided for %s')
sys.stderr.write('\n')
sys.exit(1)
res = dest_repo.resolve(spec.dest, follow=False, want_meta=False)
(leaf_name, leaf_item) = res[-1]
if not leaf_item:
dest = None
kind = type(leaf_item)
if kind == vfs.Root:
kind = 'root'
elif kind == vfs.Tags:
kind = 'tags'
elif kind == vfs.RevList:
kind = 'branch'
elif kind == vfs.Commit:
if len(res) > 1 and isinstance(res[-2][1], vfs.RevList):
kind = 'save'
else:
kind = 'commit'
elif kind == vfs.Item:
if S_ISDIR(vfs.item_mode(leaf_item)):
kind = 'tree'
else:
kind = 'blob'
elif kind == vfs.Chunky:
kind = 'tree'
elif kind == vfs.FakeLink:
assert leaf_name == b'latest'
res = dest_repo.resolve(leaf_item.target, parent=res[:-1], follow=False, want_meta=False)
(leaf_name, leaf_item) = res[-1]
assert leaf_item
assert isinstance(leaf_item, vfs.Commit)
spec.dest = b'/'.join((x[0] for x in res))
kind = 'save'
else:
raise Exception('unexpected resolution for %s: %r' % (path_msg(spec.dest), res))
path = b'/'.join((spec.dest for (spec.dest, item) in res))
if hasattr(leaf_item, 'coid'):
result = Loc(type=kind, hash=leaf_item.coid, path=path)
elif hasattr(leaf_item, 'oid'):
result = Loc(type=kind, hash=leaf_item.oid, path=path)
else:
result = Loc(type=kind, hash=None, path=path)
dest = result
if dest:
if not dest.type == 'branch' and (not dest.path.startswith(b'/.tag/')):
sys.stderr.write(usage(argspec))
if '%s impossible; can only overwrite branch or tag' % spec_args:
sys.stderr.write('\nerror: ')
sys.stderr.write('%s impossible; can only overwrite branch or tag' % spec_args)
sys.stderr.write('\n')
sys.exit(1)
else:
if cleanup_vfs_path(spec.dest).startswith(b'/.') and (not cleanup_vfs_path(spec.dest).startswith(b'/.tag/')):
misuse('unsupported destination path %s in %s' % (path_msg(cleanup_vfs_path(spec.dest)), spec_msg(spec)))
cp = cleanup_vfs_path(spec.dest)
dest = default_loc._replace(path=cp)
if not dest.path.startswith(b'/.tag/') and (not src.type in ('branch', 'save', 'commit')):
sys.stderr.write(usage(argspec))
if 'cannot overwrite branch with %s for %s' % (src.type, spec_args):
sys.stderr.write('\nerror: ')
sys.stderr.write('cannot overwrite branch with %s for %s' % (src.type, spec_args))
sys.stderr.write('\n')
sys.exit(1)
return Target(spec=spec, src=src, dest=dest)
|
bup
|
positive
|
def main():
model_file = sys.argv[1]
base_output_dir = sys.argv[2]
rep_dir = sys.argv[3]
prefix = sys.argv[4]
gpu_mem = sys.argv[5]
main_mem = sys.argv[6]
model = util.ReadModel(model_file)
data_pb = deepnet_pb2.Dataset()
data_pb.name = model.name
data_pb.gpu_memory = gpu_mem
data_pb.main_memory = main_mem
output_dir = os.path.join(base_output_dir, 'validation')
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
output_proto_file = os.path.join(base_output_dir, 'data.pbtxt')
img_input_pbtxt = os.path.join(prefix, 'flickr.pbtxt')
img_hidden1_pbtxt = os.path.join(rep_dir, 'image_rbm1_LAST', 'data.pbtxt')
img_hidden2_pbtxt = os.path.join(rep_dir, 'image_rbm2_LAST', 'data.pbtxt')
text_input_pbtxt = os.path.join(prefix, 'flickr_nnz.pbtxt')
text_hidden1_pbtxt = os.path.join(rep_dir, 'text_rbm1_LAST', 'data.pbtxt')
text_hidden2_pbtxt = os.path.join(rep_dir, 'text_rbm2_LAST', 'data.pbtxt')
text_pbtxt_z = os.path.join(rep_dir, 'generated_text', 'data.pbtxt')
joint_pbtxt = os.path.join(rep_dir, 'joint_rbm_LAST', 'data.pbtxt')
img_input_pb = util.ReadData(img_input_pbtxt)
data = next((d for d in img_input_pb.data if d.name == 'image_labelled'))
data.file_pattern = os.path.join(img_input_pb.prefix, data.file_pattern)
data.stats_file = os.path.join(img_input_pb.prefix, data.stats_file)
data.name = 'image_input'
data_pb.data.extend([data])
img_hidden1_pb = util.ReadData(img_hidden1_pbtxt)
data = next((d for d in img_hidden1_pb.data if d.name == 'image_hidden1_validation'))
data.file_pattern = os.path.join(img_hidden1_pb.prefix, data.file_pattern)
data.name = 'image_hidden1'
data_pb.data.extend([data])
img_hidden2_pb = util.ReadData(img_hidden2_pbtxt)
data = next((d for d in img_hidden2_pb.data if d.name == 'image_hidden2_validation'))
data.file_pattern = os.path.join(img_hidden2_pb.prefix, data.file_pattern)
data.name = 'image_hidden2'
data_pb.data.extend([data])
indices_file = os.path.join(prefix, 'text', 'indices_labelled.npz')
indices = np.load(indices_file)
nnz_indices = indices['nnz_indices']
z_indices = indices['z_indices']
text_pb_z = util.ReadData(text_pbtxt_z)
text_input_pb = util.ReadData(text_input_pbtxt)
data_nnz = next((d for d in text_input_pb.data if d.name == 'text_labelled'))
data_z = next((d for d in text_pb_z.data if d.name == 'text_input_layer_validation'))
output_file = os.path.join(output_dir, 'text_input-00001-of-00001.npy')
<DeepExtract>
data_nnz = Load(os.path.join(text_input_pb.prefix, data_nnz.file_pattern))
data_z = Load(os.path.join(text_pb_z.prefix, data_z.file_pattern))
assert data_nnz.shape[1] == data_z.shape[1], 'Dimension mismatch.'
size = data_nnz.shape[0] + data_z.shape[0]
numdims = data_nnz.shape[1]
data = np.zeros((size, numdims), dtype=np.float32)
data[nnz_indices] = data_nnz
data[z_indices] = data_z
np.save(output_file, data)
data = deepnet_pb2.Dataset.Data()
data.name = 'text_input'
data.size = size
data.dimensions.extend([numdims])
data.file_pattern = output_file
data = data
</DeepExtract>
data_pb.data.extend([data])
text_hidden1_pb = util.ReadData(text_hidden1_pbtxt)
data_nnz = next((d for d in text_hidden1_pb.data if d.name == 'text_hidden1_validation'))
data_z = next((d for d in text_pb_z.data if d.name == 'text_hidden1_validation'))
output_file = os.path.join(output_dir, 'text_hidden1-00001-of-00001.npy')
<DeepExtract>
data_nnz = Load(os.path.join(text_hidden1_pb.prefix, data_nnz.file_pattern))
data_z = Load(os.path.join(text_pb_z.prefix, data_z.file_pattern))
assert data_nnz.shape[1] == data_z.shape[1], 'Dimension mismatch.'
size = data_nnz.shape[0] + data_z.shape[0]
numdims = data_nnz.shape[1]
data = np.zeros((size, numdims), dtype=np.float32)
data[nnz_indices] = data_nnz
data[z_indices] = data_z
np.save(output_file, data)
data = deepnet_pb2.Dataset.Data()
data.name = 'text_hidden1'
data.size = size
data.dimensions.extend([numdims])
data.file_pattern = output_file
data = data
</DeepExtract>
data_pb.data.extend([data])
text_hidden2_pb = util.ReadData(text_hidden2_pbtxt)
data_nnz = next((d for d in text_hidden2_pb.data if d.name == 'text_hidden2_validation'))
data_z = next((d for d in text_pb_z.data if d.name == 'text_hidden2_validation'))
output_file = os.path.join(output_dir, 'text_hidden2-00001-of-00001.npy')
<DeepExtract>
data_nnz = Load(os.path.join(text_hidden2_pb.prefix, data_nnz.file_pattern))
data_z = Load(os.path.join(text_pb_z.prefix, data_z.file_pattern))
assert data_nnz.shape[1] == data_z.shape[1], 'Dimension mismatch.'
size = data_nnz.shape[0] + data_z.shape[0]
numdims = data_nnz.shape[1]
data = np.zeros((size, numdims), dtype=np.float32)
data[nnz_indices] = data_nnz
data[z_indices] = data_z
np.save(output_file, data)
data = deepnet_pb2.Dataset.Data()
data.name = 'text_hidden2'
data.size = size
data.dimensions.extend([numdims])
data.file_pattern = output_file
data = data
</DeepExtract>
data_pb.data.extend([data])
joint_pb = util.ReadData(joint_pbtxt)
data_nnz = next((d for d in joint_pb.data if d.name == 'joint_hidden_validation'))
data_z = next((d for d in text_pb_z.data if d.name == 'joint_hidden_validation'))
output_file = os.path.join(output_dir, 'joint_hidden-00001-of-00001.npy')
<DeepExtract>
data_nnz = Load(os.path.join(joint_pb.prefix, data_nnz.file_pattern))
data_z = Load(os.path.join(text_pb_z.prefix, data_z.file_pattern))
assert data_nnz.shape[1] == data_z.shape[1], 'Dimension mismatch.'
size = data_nnz.shape[0] + data_z.shape[0]
numdims = data_nnz.shape[1]
data = np.zeros((size, numdims), dtype=np.float32)
data[nnz_indices] = data_nnz
data[z_indices] = data_z
np.save(output_file, data)
data = deepnet_pb2.Dataset.Data()
data.name = 'joint_hidden'
data.size = size
data.dimensions.extend([numdims])
data.file_pattern = output_file
data = data
</DeepExtract>
data_pb.data.extend([data])
with open(output_proto_file, 'w') as f:
text_format.PrintMessage(data_pb, f)
|
def main():
model_file = sys.argv[1]
base_output_dir = sys.argv[2]
rep_dir = sys.argv[3]
prefix = sys.argv[4]
gpu_mem = sys.argv[5]
main_mem = sys.argv[6]
model = util.ReadModel(model_file)
data_pb = deepnet_pb2.Dataset()
data_pb.name = model.name
data_pb.gpu_memory = gpu_mem
data_pb.main_memory = main_mem
output_dir = os.path.join(base_output_dir, 'validation')
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
output_proto_file = os.path.join(base_output_dir, 'data.pbtxt')
img_input_pbtxt = os.path.join(prefix, 'flickr.pbtxt')
img_hidden1_pbtxt = os.path.join(rep_dir, 'image_rbm1_LAST', 'data.pbtxt')
img_hidden2_pbtxt = os.path.join(rep_dir, 'image_rbm2_LAST', 'data.pbtxt')
text_input_pbtxt = os.path.join(prefix, 'flickr_nnz.pbtxt')
text_hidden1_pbtxt = os.path.join(rep_dir, 'text_rbm1_LAST', 'data.pbtxt')
text_hidden2_pbtxt = os.path.join(rep_dir, 'text_rbm2_LAST', 'data.pbtxt')
text_pbtxt_z = os.path.join(rep_dir, 'generated_text', 'data.pbtxt')
joint_pbtxt = os.path.join(rep_dir, 'joint_rbm_LAST', 'data.pbtxt')
img_input_pb = util.ReadData(img_input_pbtxt)
data = next((d for d in img_input_pb.data if d.name == 'image_labelled'))
data.file_pattern = os.path.join(img_input_pb.prefix, data.file_pattern)
data.stats_file = os.path.join(img_input_pb.prefix, data.stats_file)
data.name = 'image_input'
data_pb.data.extend([data])
img_hidden1_pb = util.ReadData(img_hidden1_pbtxt)
data = next((d for d in img_hidden1_pb.data if d.name == 'image_hidden1_validation'))
data.file_pattern = os.path.join(img_hidden1_pb.prefix, data.file_pattern)
data.name = 'image_hidden1'
data_pb.data.extend([data])
img_hidden2_pb = util.ReadData(img_hidden2_pbtxt)
data = next((d for d in img_hidden2_pb.data if d.name == 'image_hidden2_validation'))
data.file_pattern = os.path.join(img_hidden2_pb.prefix, data.file_pattern)
data.name = 'image_hidden2'
data_pb.data.extend([data])
indices_file = os.path.join(prefix, 'text', 'indices_labelled.npz')
indices = np.load(indices_file)
nnz_indices = indices['nnz_indices']
z_indices = indices['z_indices']
text_pb_z = util.ReadData(text_pbtxt_z)
text_input_pb = util.ReadData(text_input_pbtxt)
data_nnz = next((d for d in text_input_pb.data if d.name == 'text_labelled'))
data_z = next((d for d in text_pb_z.data if d.name == 'text_input_layer_validation'))
output_file = os.path.join(output_dir, 'text_input-00001-of-00001.npy')
data_nnz = Load(os.path.join(text_input_pb.prefix, data_nnz.file_pattern))
data_z = Load(os.path.join(text_pb_z.prefix, data_z.file_pattern))
assert data_nnz.shape[1] == data_z.shape[1], 'Dimension mismatch.'
size = data_nnz.shape[0] + data_z.shape[0]
numdims = data_nnz.shape[1]
data = np.zeros((size, numdims), dtype=np.float32)
data[nnz_indices] = data_nnz
data[z_indices] = data_z
np.save(output_file, data)
data = deepnet_pb2.Dataset.Data()
data.name = 'text_input'
data.size = size
data.dimensions.extend([numdims])
data.file_pattern = output_file
data = data
data_pb.data.extend([data])
text_hidden1_pb = util.ReadData(text_hidden1_pbtxt)
data_nnz = next((d for d in text_hidden1_pb.data if d.name == 'text_hidden1_validation'))
data_z = next((d for d in text_pb_z.data if d.name == 'text_hidden1_validation'))
output_file = os.path.join(output_dir, 'text_hidden1-00001-of-00001.npy')
data_nnz = Load(os.path.join(text_hidden1_pb.prefix, data_nnz.file_pattern))
data_z = Load(os.path.join(text_pb_z.prefix, data_z.file_pattern))
assert data_nnz.shape[1] == data_z.shape[1], 'Dimension mismatch.'
size = data_nnz.shape[0] + data_z.shape[0]
numdims = data_nnz.shape[1]
data = np.zeros((size, numdims), dtype=np.float32)
data[nnz_indices] = data_nnz
data[z_indices] = data_z
np.save(output_file, data)
data = deepnet_pb2.Dataset.Data()
data.name = 'text_hidden1'
data.size = size
data.dimensions.extend([numdims])
data.file_pattern = output_file
data = data
data_pb.data.extend([data])
text_hidden2_pb = util.ReadData(text_hidden2_pbtxt)
data_nnz = next((d for d in text_hidden2_pb.data if d.name == 'text_hidden2_validation'))
data_z = next((d for d in text_pb_z.data if d.name == 'text_hidden2_validation'))
output_file = os.path.join(output_dir, 'text_hidden2-00001-of-00001.npy')
data_nnz = Load(os.path.join(text_hidden2_pb.prefix, data_nnz.file_pattern))
data_z = Load(os.path.join(text_pb_z.prefix, data_z.file_pattern))
assert data_nnz.shape[1] == data_z.shape[1], 'Dimension mismatch.'
size = data_nnz.shape[0] + data_z.shape[0]
numdims = data_nnz.shape[1]
data = np.zeros((size, numdims), dtype=np.float32)
data[nnz_indices] = data_nnz
data[z_indices] = data_z
np.save(output_file, data)
data = deepnet_pb2.Dataset.Data()
data.name = 'text_hidden2'
data.size = size
data.dimensions.extend([numdims])
data.file_pattern = output_file
data = data
data_pb.data.extend([data])
joint_pb = util.ReadData(joint_pbtxt)
data_nnz = next((d for d in joint_pb.data if d.name == 'joint_hidden_validation'))
data_z = next((d for d in text_pb_z.data if d.name == 'joint_hidden_validation'))
output_file = os.path.join(output_dir, 'joint_hidden-00001-of-00001.npy')
data_nnz = Load(os.path.join(joint_pb.prefix, data_nnz.file_pattern))
data_z = Load(os.path.join(text_pb_z.prefix, data_z.file_pattern))
assert data_nnz.shape[1] == data_z.shape[1], 'Dimension mismatch.'
size = data_nnz.shape[0] + data_z.shape[0]
numdims = data_nnz.shape[1]
data = np.zeros((size, numdims), dtype=np.float32)
data[nnz_indices] = data_nnz
data[z_indices] = data_z
np.save(output_file, data)
data = deepnet_pb2.Dataset.Data()
data.name = 'joint_hidden'
data.size = size
data.dimensions.extend([numdims])
data.file_pattern = output_file
data = data
data_pb.data.extend([data])
with open(output_proto_file, 'w') as f:
text_format.PrintMessage(data_pb, f)
|
deepnet
|
positive
|
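The script above reassembles full feature matrices by scattering the "nnz" and "z" row subsets back to their saved index positions before writing each .npy file. A toy NumPy sketch of that merge, with hypothetical shapes and indices in place of the real Flickr data:

import numpy as np

nnz_indices = np.array([0, 2, 4])             # hypothetical rows of the "nnz" subset
z_indices = np.array([1, 3])                  # hypothetical rows of the "z" subset
data_nnz = np.ones((3, 4), dtype=np.float32)
data_z = np.zeros((2, 4), dtype=np.float32)
assert data_nnz.shape[1] == data_z.shape[1], 'Dimension mismatch.'
merged = np.zeros((len(nnz_indices) + len(z_indices), data_nnz.shape[1]), dtype=np.float32)
merged[nnz_indices] = data_nnz                # scatter each subset back to its original rows
merged[z_indices] = data_z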
def plot_sample(samples, nodes=None, n=-1, displays=None, **options):
"""Plot a scatterplot of samples.
Experimental, only dims 1-2 supported.
Parameters
----------
samples : Sample
nodes : str or list[str], optional
n : int, optional
Number of plotted samples [0, n).
displays : IPython.display.HTML
"""
<DeepExtract>
axes = get_axes(**options)
ion = options.get('interactive')
if ion:
axes.clear()
if options.get('xlim'):
axes.set_xlim(options.get('xlim'))
if options.get('ylim'):
axes.set_ylim(options.get('ylim'))
axes = axes
</DeepExtract>
nodes = nodes or sorted(samples.keys())[:2]
if isinstance(nodes, str):
nodes = [nodes]
if len(nodes) == 1:
axes.set_xlabel(nodes[0])
axes.hist(samples[nodes[0]][:n])
else:
if len(nodes) > 2:
logger.warning('Over 2-dimensional plots not supported. Falling back to 2dprojection.')
axes.set_xlabel(nodes[0])
axes.set_ylabel(nodes[1])
axes.scatter(samples[nodes[0]][:n], samples[nodes[1]][:n])
<DeepExtract>
displays = displays or []
if options.get('interactive'):
from IPython import display
display.clear_output(wait=True)
displays.insert(0, plt.gcf())
display.display(*displays)
</DeepExtract>
if options.get('close'):
plt.close()
|
def plot_sample(samples, nodes=None, n=-1, displays=None, **options):
"""Plot a scatterplot of samples.
Experimental, only dims 1-2 supported.
Parameters
----------
samples : Sample
nodes : str or list[str], optional
n : int, optional
Number of plotted samples [0, n).
displays : IPython.display.HTML
"""
axes = get_axes(**options)
ion = options.get('interactive')
if ion:
axes.clear()
if options.get('xlim'):
axes.set_xlim(options.get('xlim'))
if options.get('ylim'):
axes.set_ylim(options.get('ylim'))
axes = axes
nodes = nodes or sorted(samples.keys())[:2]
if isinstance(nodes, str):
nodes = [nodes]
if len(nodes) == 1:
axes.set_xlabel(nodes[0])
axes.hist(samples[nodes[0]][:n])
else:
if len(nodes) > 2:
logger.warning('Over 2-dimensional plots not supported. Falling back to 2dprojection.')
axes.set_xlabel(nodes[0])
axes.set_ylabel(nodes[1])
axes.scatter(samples[nodes[0]][:n], samples[nodes[1]][:n])
displays = displays or []
if options.get('interactive'):
from IPython import display
display.clear_output(wait=True)
displays.insert(0, plt.gcf())
display.display(*displays)
if options.get('close'):
plt.close()
|
elfi
|
positive
|
def call(self, query, key, value, attention_mask=None, head_mask=None, training=False):
batch_size = get_shape(query)[0]
mixed_query_layer = self.query(query)
mixed_key_layer = self.key(key)
mixed_value_layer = self.value(value)
<DeepExtract>
mixed_query_layer = tf.reshape(mixed_query_layer, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
query_layer = tf.transpose(mixed_query_layer, perm=[0, 2, 1, 3])
</DeepExtract>
<DeepExtract>
mixed_key_layer = tf.reshape(mixed_key_layer, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
key_layer = tf.transpose(mixed_key_layer, perm=[0, 2, 1, 3])
</DeepExtract>
<DeepExtract>
mixed_value_layer = tf.reshape(mixed_value_layer, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
value_layer = tf.transpose(mixed_value_layer, perm=[0, 2, 1, 3])
</DeepExtract>
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_shape = get_shape(attention_scores)
from_seq_length = attention_shape[2]
to_seq_length = attention_shape[3]
if self.use_relative_position:
max_relative_position = 64
relations_keys = generate_relative_positions_embeddings(to_seq_length, self.attention_head_size, max_relative_position, 'relative_positions_keys', cache=False)
query_layer_t = tf.transpose(query_layer, [2, 0, 1, 3])
query_layer_r = tf.reshape(query_layer_t, [from_seq_length, batch_size * self.num_attention_heads, self.attention_head_size])
key_position_scores = tf.matmul(query_layer_r, relations_keys, transpose_b=True)
key_position_scores_r = tf.reshape(key_position_scores, [from_seq_length, batch_size, self.num_attention_heads, from_seq_length])
key_position_scores_r_t = tf.transpose(key_position_scores_r, [1, 2, 0, 3])
attention_scores += key_position_scores_r_t
dk = tf.cast(get_shape(key_layer)[-1], tf.float32)
attention_scores = attention_scores / tf.math.sqrt(dk)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = tf.nn.softmax(attention_scores, axis=-1)
attention_probs = self.dropout(attention_probs, training=training)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
context_layer = tf.reshape(context_layer, (batch_size, -1, self.all_head_size))
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
|
def call(self, query, key, value, attention_mask=None, head_mask=None, training=False):
batch_size = get_shape(query)[0]
mixed_query_layer = self.query(query)
mixed_key_layer = self.key(key)
mixed_value_layer = self.value(value)
mixed_query_layer = tf.reshape(mixed_query_layer, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
query_layer = tf.transpose(mixed_query_layer, perm=[0, 2, 1, 3])
mixed_key_layer = tf.reshape(mixed_key_layer, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
key_layer = tf.transpose(mixed_key_layer, perm=[0, 2, 1, 3])
mixed_value_layer = tf.reshape(mixed_value_layer, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
value_layer = tf.transpose(mixed_value_layer, perm=[0, 2, 1, 3])
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_shape = get_shape(attention_scores)
from_seq_length = attention_shape[2]
to_seq_length = attention_shape[3]
if self.use_relative_position:
max_relative_position = 64
relations_keys = generate_relative_positions_embeddings(to_seq_length, self.attention_head_size, max_relative_position, 'relative_positions_keys', cache=False)
query_layer_t = tf.transpose(query_layer, [2, 0, 1, 3])
query_layer_r = tf.reshape(query_layer_t, [from_seq_length, batch_size * self.num_attention_heads, self.attention_head_size])
key_position_scores = tf.matmul(query_layer_r, relations_keys, transpose_b=True)
key_position_scores_r = tf.reshape(key_position_scores, [from_seq_length, batch_size, self.num_attention_heads, from_seq_length])
key_position_scores_r_t = tf.transpose(key_position_scores_r, [1, 2, 0, 3])
attention_scores += key_position_scores_r_t
dk = tf.cast(get_shape(key_layer)[-1], tf.float32)
attention_scores = attention_scores / tf.math.sqrt(dk)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = tf.nn.softmax(attention_scores, axis=-1)
attention_probs = self.dropout(attention_probs, training=training)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
context_layer = tf.reshape(context_layer, (batch_size, -1, self.all_head_size))
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
|
AiSpace
|
positive
|
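The attention layer above splits the hidden dimension into heads via reshape/transpose before taking scaled dot products. A framework-agnostic NumPy sketch of that reshaping with hypothetical sizes (the repo itself uses TensorFlow ops):

import numpy as np

batch, seq, num_heads, head_size = 2, 5, 4, 8   # hypothetical sizes
x = np.random.randn(batch, seq, num_heads * head_size)
# (batch, seq, hidden) -> (batch, seq, heads, head_size) -> (batch, heads, seq, head_size)
heads = x.reshape(batch, -1, num_heads, head_size).transpose(0, 2, 1, 3)
# scaled dot-product scores per head: shape (batch, heads, seq, seq)
scores = heads @ heads.transpose(0, 1, 3, 2) / np.sqrt(head_size)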
def encode(orig, bpe_codes, bpe_codes_reverse, vocab, separator, version, cache, glossaries=None):
"""Encode word based on list of BPE merge operations, which are applied consecutively
"""
if orig in cache:
return cache[orig]
if orig in glossaries:
cache[orig] = (orig,)
return (orig,)
if version == (0, 1):
word = tuple(orig) + ('</w>',)
elif version == (0, 2):
word = tuple(orig[:-1]) + (orig[-1] + '</w>',)
else:
raise NotImplementedError
<DeepExtract>
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
pairs = pairs
</DeepExtract>
if not pairs:
return orig
while True:
bigram = min(pairs, key=lambda pair: bpe_codes.get(pair, float('inf')))
if bigram not in bpe_codes:
break
(first, second) = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
<DeepExtract>
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
pairs = pairs
</DeepExtract>
if word[-1] == '</w>':
word = word[:-1]
elif word[-1].endswith('</w>'):
word = word[:-1] + (word[-1].replace('</w>', ''),)
if vocab:
<DeepExtract>
out = []
for segment in word[:-1]:
if segment + separator in vocab:
out.append(segment)
else:
for item in recursive_split(segment, bpe_codes_reverse, vocab, separator, False):
out.append(item)
segment = word[-1]
if segment in vocab:
out.append(segment)
else:
for item in recursive_split(segment, bpe_codes_reverse, vocab, separator, True):
out.append(item)
word = out
</DeepExtract>
cache[orig] = word
return word
|
def encode(orig, bpe_codes, bpe_codes_reverse, vocab, separator, version, cache, glossaries=None):
"""Encode word based on list of BPE merge operations, which are applied consecutively
"""
if orig in cache:
return cache[orig]
if orig in glossaries:
cache[orig] = (orig,)
return (orig,)
if version == (0, 1):
word = tuple(orig) + ('</w>',)
elif version == (0, 2):
word = tuple(orig[:-1]) + (orig[-1] + '</w>',)
else:
raise NotImplementedError
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
pairs = pairs
if not pairs:
return orig
while True:
bigram = min(pairs, key=lambda pair: bpe_codes.get(pair, float('inf')))
if bigram not in bpe_codes:
break
(first, second) = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
pairs = pairs
if word[-1] == '</w>':
word = word[:-1]
elif word[-1].endswith('</w>'):
word = word[:-1] + (word[-1].replace('</w>', ''),)
if vocab:
out = []
for segment in word[:-1]:
if segment + separator in vocab:
out.append(segment)
else:
for item in recursive_split(segment, bpe_codes_reverse, vocab, separator, False):
out.append(item)
segment = word[-1]
if segment in vocab:
out.append(segment)
else:
for item in recursive_split(segment, bpe_codes_reverse, vocab, separator, True):
out.append(item)
word = out
cache[orig] = word
return word
|
BiSET
|
positive
|
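The encode routine above applies BPE by repeatedly fusing the adjacent symbol pair with the best (lowest) merge rank until no known pair remains. A toy sketch of that merge loop with a hypothetical two-entry merge table:

bpe_codes = {('l', 'o'): 0, ('lo', 'w'): 1}   # hypothetical merge table: lower rank merges first
word = ('l', 'o', 'w', '</w>')
while len(word) > 1:
    pairs = set(zip(word, word[1:]))
    bigram = min(pairs, key=lambda pair: bpe_codes.get(pair, float('inf')))
    if bigram not in bpe_codes:
        break
    first, second = bigram
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            merged.append(first + second)     # fuse the best-ranked adjacent pair
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)
# word is now ('low', '</w>')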
|
|
eeweather
|
positive
|
def efuse_read(self):
<DeepExtract>
deadline = time.time() + self.REGS.EFUSE_BURN_TIMEOUT
while time.time() < deadline:
if self.read_reg(self.REGS.EFUSE_STATUS_REG) & 7 == 1:
return
raise esptool.FatalError('Timed out waiting for Efuse controller command to complete')
</DeepExtract>
self.write_reg(self.REGS.EFUSE_CONF_REG, self.REGS.EFUSE_READ_OP_CODE)
try:
self.write_reg(self.REGS.EFUSE_CMD_REG, self.REGS.EFUSE_READ_CMD, delay_after_us=1000)
<DeepExtract>
deadline = time.time() + self.REGS.EFUSE_BURN_TIMEOUT
while time.time() < deadline:
if self.read_reg(self.REGS.EFUSE_STATUS_REG) & 7 == 1:
return
raise esptool.FatalError('Timed out waiting for Efuse controller command to complete')
</DeepExtract>
except esptool.FatalError:
secure_download_mode_before = self._esp.secure_download_mode
try:
self._esp = self.reconnect_chip(self._esp)
except esptool.FatalError:
print('Can not re-connect to the chip')
if not self['DIS_DOWNLOAD_MODE'].get() and self['DIS_DOWNLOAD_MODE'].get(from_read=False):
print('This is the correct behavior as we are actually burning DIS_DOWNLOAD_MODE which disables the connection to the chip')
print('DIS_DOWNLOAD_MODE is enabled')
print('Successful')
exit(0)
raise
print('Established a connection with the chip')
if self._esp.secure_download_mode and (not secure_download_mode_before):
print('Secure download mode is enabled')
if not self['ENABLE_SECURITY_DOWNLOAD'].get() and self['ENABLE_SECURITY_DOWNLOAD'].get(from_read=False):
print('espefuse tool can not continue to work in Secure download mode')
print('ENABLE_SECURITY_DOWNLOAD is enabled')
print('Successful')
exit(0)
raise
|
def efuse_read(self):
deadline = time.time() + self.REGS.EFUSE_BURN_TIMEOUT
while time.time() < deadline:
if self.read_reg(self.REGS.EFUSE_STATUS_REG) & 7 == 1:
return
raise esptool.FatalError('Timed out waiting for Efuse controller command to complete')
self.write_reg(self.REGS.EFUSE_CONF_REG, self.REGS.EFUSE_READ_OP_CODE)
try:
self.write_reg(self.REGS.EFUSE_CMD_REG, self.REGS.EFUSE_READ_CMD, delay_after_us=1000)
deadline = time.time() + self.REGS.EFUSE_BURN_TIMEOUT
while time.time() < deadline:
if self.read_reg(self.REGS.EFUSE_STATUS_REG) & 7 == 1:
return
raise esptool.FatalError('Timed out waiting for Efuse controller command to complete')
except esptool.FatalError:
secure_download_mode_before = self._esp.secure_download_mode
try:
self._esp = self.reconnect_chip(self._esp)
except esptool.FatalError:
print('Can not re-connect to the chip')
if not self['DIS_DOWNLOAD_MODE'].get() and self['DIS_DOWNLOAD_MODE'].get(from_read=False):
print('This is the correct behavior as we are actually burning DIS_DOWNLOAD_MODE which disables the connection to the chip')
print('DIS_DOWNLOAD_MODE is enabled')
print('Successful')
exit(0)
raise
print('Established a connection with the chip')
if self._esp.secure_download_mode and (not secure_download_mode_before):
print('Secure download mode is enabled')
if not self['ENABLE_SECURITY_DOWNLOAD'].get() and self['ENABLE_SECURITY_DOWNLOAD'].get(from_read=False):
print('espefuse tool can not continue to work in Secure download mode')
print('ENABLE_SECURITY_DOWNLOAD is enabled')
print('Successful')
exit(0)
raise
|
esptool
|
positive
|
def update_broker_stat(self):
"""
Updates all sliding broker statistics with latest-step values such as:
- normalized broker value
- normalized broker cash
- normalized exposure (position size)
- exp. scaled episode duration in steps, normalized wrt. max possible episode steps
- normalized realized profit/loss for last closed trade (is zero if no pos. closures within last env. step)
- normalized profit/loss for current opened trade (unrealized p/l);
"""
current_value = self.env.broker.get_value()
<DeepExtract>
stat_data = np.asarray(self.stat_asset.get(size=1))
(mean, var) = self.norm_stat_tracker.update(stat_data[None, :])
var = np.clip(var, 1e-08, None)
intervals = stats.norm.interval(0.99, mean, var ** 0.5)
self.normalisation_state = NormalisationState(mean=float(mean), variance=float(var), low_interval=intervals[0][0], up_interval=intervals[1][0])
norm_state = self.normalisation_state
</DeepExtract>
positions = [self.env.broker.getposition(data) for data in self.datas]
exposure = sum([abs(pos.size) for pos in positions])
self.normalizer = 1 / np.clip(norm_state.up_interval - norm_state.low_interval, 1e-08, None)
for (key, method) in self.collection_get_broker_stat_methods.items():
update = method(current_value=current_value, positions=positions, exposure=exposure, lower_bound=norm_state.low_interval, upper_bound=norm_state.up_interval, normalizer=self.normalizer)
self.broker_stat[key] = np.concatenate([self.broker_stat[key][1:], np.asarray([float(update)])])
self.trade_just_closed = False
self.trade_result = 0
|
def update_broker_stat(self):
"""
Updates all sliding broker statistics with latest-step values such as:
- normalized broker value
- normalized broker cash
- normalized exposure (position size)
- exp. scaled episode duration in steps, normalized wrt. max possible episode steps
- normalized realized profit/loss for last closed trade (is zero if no pos. closures within last env. step)
- normalized profit/loss for current opened trade (unrealized p/l);
"""
current_value = self.env.broker.get_value()
stat_data = np.asarray(self.stat_asset.get(size=1))
(mean, var) = self.norm_stat_tracker.update(stat_data[None, :])
var = np.clip(var, 1e-08, None)
intervals = stats.norm.interval(0.99, mean, var ** 0.5)
self.normalisation_state = NormalisationState(mean=float(mean), variance=float(var), low_interval=intervals[0][0], up_interval=intervals[1][0])
norm_state = self.normalisation_state
positions = [self.env.broker.getposition(data) for data in self.datas]
exposure = sum([abs(pos.size) for pos in positions])
self.normalizer = 1 / np.clip(norm_state.up_interval - norm_state.low_interval, 1e-08, None)
for (key, method) in self.collection_get_broker_stat_methods.items():
update = method(current_value=current_value, positions=positions, exposure=exposure, lower_bound=norm_state.low_interval, upper_bound=norm_state.up_interval, normalizer=self.normalizer)
self.broker_stat[key] = np.concatenate([self.broker_stat[key][1:], np.asarray([float(update)])])
self.trade_just_closed = False
self.trade_result = 0
|
btgym
|
positive
|
def run_asyncgen_fixture(self, fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]], kwargs: dict[str, Any]) -> Iterable[T_Retval]:
asyncgen = fixture_func(**kwargs)
fixturevalue: T_Retval = self._call_in_runner_task(asyncgen.asend, None)
yield fixturevalue
try:
<DeepExtract>
if self._send_stream is None:
trio.lowlevel.start_guest_run(self._run_tests_and_fixtures, run_sync_soon_threadsafe=self._call_queue.put, done_callback=self._main_task_finished, **self._options)
while self._send_stream is None:
self._call_queue.get()()
outcome_holder: list[Outcome] = []
self._send_stream.send_nowait((asyncgen.asend(*args, **kwargs), outcome_holder))
while not outcome_holder:
self._call_queue.get()()
return outcome_holder[0].unwrap()
</DeepExtract>
except StopAsyncIteration:
pass
else:
<DeepExtract>
if self._send_stream is None:
trio.lowlevel.start_guest_run(self._run_tests_and_fixtures, run_sync_soon_threadsafe=self._call_queue.put, done_callback=self._main_task_finished, **self._options)
while self._send_stream is None:
self._call_queue.get()()
outcome_holder: list[Outcome] = []
self._send_stream.send_nowait((asyncgen.aclose(*args, **kwargs), outcome_holder))
while not outcome_holder:
self._call_queue.get()()
return outcome_holder[0].unwrap()
</DeepExtract>
raise RuntimeError('Async generator fixture did not stop')
|
def run_asyncgen_fixture(self, fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]], kwargs: dict[str, Any]) -> Iterable[T_Retval]:
asyncgen = fixture_func(**kwargs)
fixturevalue: T_Retval = self._call_in_runner_task(asyncgen.asend, None)
yield fixturevalue
try:
if self._send_stream is None:
trio.lowlevel.start_guest_run(self._run_tests_and_fixtures, run_sync_soon_threadsafe=self._call_queue.put, done_callback=self._main_task_finished, **self._options)
while self._send_stream is None:
self._call_queue.get()()
outcome_holder: list[Outcome] = []
self._send_stream.send_nowait((asyncgen.asend(*args, **kwargs), outcome_holder))
while not outcome_holder:
self._call_queue.get()()
return outcome_holder[0].unwrap()
except StopAsyncIteration:
pass
else:
if self._send_stream is None:
trio.lowlevel.start_guest_run(self._run_tests_and_fixtures, run_sync_soon_threadsafe=self._call_queue.put, done_callback=self._main_task_finished, **self._options)
while self._send_stream is None:
self._call_queue.get()()
outcome_holder: list[Outcome] = []
self._send_stream.send_nowait((asyncgen.aclose(*args, **kwargs), outcome_holder))
while not outcome_holder:
self._call_queue.get()()
return outcome_holder[0].unwrap()
raise RuntimeError('Async generator fixture did not stop')
|
anyio
|
positive
|
def load_weights(self, path):
npz = np.load(path)
params_dict = {}
<DeepExtract>
for name in self._params:
obj = self.__dict__[name]
key = parent_key + '/' + name if parent_key else name
if isinstance(obj, Layer):
obj._flatten_params(params_dict, key)
else:
params_dict[key] = obj
</DeepExtract>
for (key, param) in params_dict.items():
param.data = npz[key]
|
def load_weights(self, path):
npz = np.load(path)
params_dict = {}
for name in self._params:
obj = self.__dict__[name]
key = parent_key + '/' + name if parent_key else name
if isinstance(obj, Layer):
obj._flatten_params(params_dict, key)
else:
params_dict[key] = obj
for (key, param) in params_dict.items():
param.data = npz[key]
|
deep-learning-from-scratch-3
|
positive
|
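load_weights above depends on recursively flattening nested layer parameters into a dict keyed by '/'-joined names before matching them against the .npz archive. A minimal sketch of that flattening idea, using plain dicts as stand-ins for the framework's Layer objects:

def flatten_params(layer, params_dict, parent_key=''):
    # Collect leaf parameters of a nested structure under '/'-joined keys.
    for name, obj in layer.items():
        key = parent_key + '/' + name if parent_key else name
        if isinstance(obj, dict):             # toy stand-in for a nested Layer
            flatten_params(obj, params_dict, key)
        else:
            params_dict[key] = obj

flat = {}
flatten_params({'l1': {'W': 1, 'b': 2}, 'l2': {'W': 3}}, flat)
# flat == {'l1/W': 1, 'l1/b': 2, 'l2/W': 3}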
def read32(state):
<DeepExtract>
val = read8(state)
val |= read8(state) << 8
val = val
</DeepExtract>
val |= read16(state) << 16
return val
|
def read32(state):
val = read8(state)
val |= read8(state) << 8
val = val
val |= read16(state) << 16
return val
|
deprecated-binaryninja-python
|
positive
|
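read32 above builds a 32-bit little-endian value from two 16-bit reads, which in turn come from single-byte reads. An equivalent standalone sketch over a bytes buffer, with the reader state passed explicitly (hypothetical, not the repo's actual state object):

def read8(buf, pos):
    return buf[pos], pos + 1

def read16(buf, pos):
    lo, pos = read8(buf, pos)
    hi, pos = read8(buf, pos)
    return lo | (hi << 8), pos

def read32(buf, pos):
    lo, pos = read16(buf, pos)
    hi, pos = read16(buf, pos)
    return lo | (hi << 16), pos

value, _ = read32(b'\x78\x56\x34\x12', 0)     # value == 0x12345678 (little-endian)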
def _process_new_objects(self, obj_cls=NumericPoint, obj_type='analog', objList=None):
_newpoints = []
for each in retrieve_type(objList, obj_type):
point_type = str(each[0])
point_address = str(each[1])
if obj_type == 'analog':
<DeepExtract>
if discover_request[0]:
units_state = self.rp_discovered_values(discover_request, points_per_request=points_per_request)
else:
big_request = self._rpm_request_by_name('{} {} units '.format(point_type, point_address))
i = 0
for request in batch_requests(big_request[0], points_per_request):
try:
request = '{} {}'.format(self.properties.address, ''.join(request))
val = self.properties.network.read(request, vendor_id=self.properties.vendor_id)
points_values = zip(big_request[1][i:i + len(val)], val)
i += len(val)
for each in points_values:
each[0]._trend(each[1])
except KeyError as error:
raise Exception('Unknown point name : {}'.format(error))
</DeepExtract>
elif obj_type == 'multi':
<DeepExtract>
if discover_request[0]:
units_state = self.rp_discovered_values(discover_request, points_per_request=points_per_request)
else:
big_request = self._rpm_request_by_name('{} {} stateText '.format(point_type, point_address))
i = 0
for request in batch_requests(big_request[0], points_per_request):
try:
request = '{} {}'.format(self.properties.address, ''.join(request))
val = self.properties.network.read(request, vendor_id=self.properties.vendor_id)
points_values = zip(big_request[1][i:i + len(val)], val)
i += len(val)
for each in points_values:
each[0]._trend(each[1])
except KeyError as error:
raise Exception('Unknown point name : {}'.format(error))
</DeepExtract>
elif obj_type == 'loop':
<DeepExtract>
if discover_request[0]:
units_state = self.rp_discovered_values(discover_request, points_per_request=points_per_request)
else:
big_request = self._rpm_request_by_name('{} {} units '.format(point_type, point_address))
i = 0
for request in batch_requests(big_request[0], points_per_request):
try:
request = '{} {}'.format(self.properties.address, ''.join(request))
val = self.properties.network.read(request, vendor_id=self.properties.vendor_id)
points_values = zip(big_request[1][i:i + len(val)], val)
i += len(val)
for each in points_values:
each[0]._trend(each[1])
except KeyError as error:
raise Exception('Unknown point name : {}'.format(error))
</DeepExtract>
elif obj_type == 'binary':
units_state = (self.read_single('{} {} inactiveText '.format(point_type, point_address)), self.read_single('{} {} activeText '.format(point_type, point_address)))
else:
units_state = None
<DeepExtract>
if discover_request[0]:
presentValue = self.rp_discovered_values(discover_request, points_per_request=points_per_request)
else:
big_request = self._rpm_request_by_name('{} {} presentValue '.format(point_type, point_address))
i = 0
for request in batch_requests(big_request[0], points_per_request):
try:
request = '{} {}'.format(self.properties.address, ''.join(request))
val = self.properties.network.read(request, vendor_id=self.properties.vendor_id)
points_values = zip(big_request[1][i:i + len(val)], val)
i += len(val)
for each in points_values:
each[0]._trend(each[1])
except KeyError as error:
raise Exception('Unknown point name : {}'.format(error))
</DeepExtract>
if (obj_type == 'analog' or obj_type == 'loop') and presentValue:
presentValue = float(presentValue)
_newpoints.append(obj_cls(pointType=point_type, pointAddress=point_address, pointName=self.read_single('{} {} objectName '.format(point_type, point_address)), description=self.read_single('{} {} description '.format(point_type, point_address)), presentValue=presentValue, units_state=units_state, device=self))
return _newpoints
|
def _process_new_objects(self, obj_cls=NumericPoint, obj_type='analog', objList=None):
_newpoints = []
for each in retrieve_type(objList, obj_type):
point_type = str(each[0])
point_address = str(each[1])
if obj_type == 'analog':
if discover_request[0]:
units_state = self.rp_discovered_values(discover_request, points_per_request=points_per_request)
else:
big_request = self._rpm_request_by_name('{} {} units '.format(point_type, point_address))
i = 0
for request in batch_requests(big_request[0], points_per_request):
try:
request = '{} {}'.format(self.properties.address, ''.join(request))
val = self.properties.network.read(request, vendor_id=self.properties.vendor_id)
points_values = zip(big_request[1][i:i + len(val)], val)
i += len(val)
for each in points_values:
each[0]._trend(each[1])
except KeyError as error:
raise Exception('Unknown point name : {}'.format(error))
elif obj_type == 'multi':
if discover_request[0]:
units_state = self.rp_discovered_values(discover_request, points_per_request=points_per_request)
else:
big_request = self._rpm_request_by_name('{} {} stateText '.format(point_type, point_address))
i = 0
for request in batch_requests(big_request[0], points_per_request):
try:
request = '{} {}'.format(self.properties.address, ''.join(request))
val = self.properties.network.read(request, vendor_id=self.properties.vendor_id)
points_values = zip(big_request[1][i:i + len(val)], val)
i += len(val)
for each in points_values:
each[0]._trend(each[1])
except KeyError as error:
raise Exception('Unknown point name : {}'.format(error))
elif obj_type == 'loop':
if discover_request[0]:
units_state = self.rp_discovered_values(discover_request, points_per_request=points_per_request)
else:
big_request = self._rpm_request_by_name('{} {} units '.format(point_type, point_address))
i = 0
for request in batch_requests(big_request[0], points_per_request):
try:
request = '{} {}'.format(self.properties.address, ''.join(request))
val = self.properties.network.read(request, vendor_id=self.properties.vendor_id)
points_values = zip(big_request[1][i:i + len(val)], val)
i += len(val)
for each in points_values:
each[0]._trend(each[1])
except KeyError as error:
raise Exception('Unknown point name : {}'.format(error))
elif obj_type == 'binary':
units_state = (self.read_single('{} {} inactiveText '.format(point_type, point_address)), self.read_single('{} {} activeText '.format(point_type, point_address)))
else:
units_state = None
if discover_request[0]:
presentValue = self.rp_discovered_values(discover_request, points_per_request=points_per_request)
else:
big_request = self._rpm_request_by_name('{} {} presentValue '.format(point_type, point_address))
i = 0
for request in batch_requests(big_request[0], points_per_request):
try:
request = '{} {}'.format(self.properties.address, ''.join(request))
val = self.properties.network.read(request, vendor_id=self.properties.vendor_id)
points_values = zip(big_request[1][i:i + len(val)], val)
i += len(val)
for each in points_values:
each[0]._trend(each[1])
except KeyError as error:
raise Exception('Unknown point name : {}'.format(error))
if (obj_type == 'analog' or obj_type == 'loop') and presentValue:
presentValue = float(presentValue)
_newpoints.append(obj_cls(pointType=point_type, pointAddress=point_address, pointName=self.read_single('{} {} objectName '.format(point_type, point_address)), description=self.read_single('{} {} description '.format(point_type, point_address)), presentValue=presentValue, units_state=units_state, device=self))
return _newpoints
|
BAC0
|
positive
|
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, use SparseAdam instead')
<DeepExtract>
if len(grad.size()) > 2:
(is_matrix, is_need_reshape) = (True, True)
elif len(grad.size()) == 2:
(is_matrix, is_need_reshape) = (True, False)
elif len(grad.size()) == 2 and (grad.size()[0] == 1 or grad.size()[1] == 1):
(is_matrix, is_need_reshape) = (False, False)
else:
(is_matrix, is_need_reshape) = (False, False)
</DeepExtract>
new_shape = p.data.size()
if is_need_reshape and group['enable_factorization']:
<DeepExtract>
temp_shape = p.data.size()[2:]
if len(temp_shape) == 1:
new_shape = (p.data.size()[0], p.data.size()[1] * p.data.size()[2])
else:
tmp_div = len(temp_shape) // 2 + len(temp_shape) % 2
new_shape = (p.data.size()[0] * functools.reduce(operator.mul, temp_shape[tmp_div:], 1), p.data.size()[1] * functools.reduce(operator.mul, temp_shape[:tmp_div], 1))
(new_shape, old_shape) = (new_shape, copy(p.data.size()))
</DeepExtract>
grad = grad.view(new_shape)
state = self.state[p]
if len(state) == 0:
state['step'] = 0
if group['enable_momentum']:
state['exp_avg'] = torch.zeros(new_shape, dtype=torch.float32, device=p.grad.device)
if is_matrix and group['enable_factorization']:
state['exp_avg_sq_R'] = torch.zeros((1, new_shape[1]), dtype=torch.float32, device=p.grad.device)
state['exp_avg_sq_C'] = torch.zeros((new_shape[0], 1), dtype=torch.float32, device=p.grad.device)
else:
state['exp_avg_sq'] = torch.zeros(new_shape, dtype=torch.float32, device=p.grad.device)
if group['ams_grad']:
state['exp_avg_sq_hat'] = torch.zeros(new_shape, dtype=torch.float32, device=p.grad.device)
if group['enable_momentum']:
exp_avg = state['exp_avg']
if is_matrix and group['enable_factorization']:
exp_avg_sq_r = state['exp_avg_sq_R']
exp_avg_sq_c = state['exp_avg_sq_C']
else:
exp_avg_sq = state['exp_avg_sq']
if group['ams_grad']:
exp_avg_sq_hat = state['exp_avg_sq_hat']
state['step'] += 1
if group['lr'] is None:
lr_t = min(0.01, 1 / sqrt(state['step']))
lr_t *= max(group['eps2'], self._rms(p.data))
else:
lr_t = group['lr']
if group['enable_momentum']:
if group['non_constant_decay']:
beta1_t = group['beta1'] * (1 - group['beta1'] ** (state['step'] - 1)) / (1 - group['beta1'] ** state['step'])
else:
beta1_t = group['beta1']
exp_avg.mul_(beta1_t).add_(1 - beta1_t, grad)
if group['non_constant_decay']:
beta2_t = group['beta2'] * (1 - group['beta2'] ** (state['step'] - 1)) / (1 - group['beta2'] ** state['step'])
else:
beta2_t = group['beta2']
if is_matrix and group['enable_factorization']:
exp_avg_sq_r.mul_(beta2_t).add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).add_(group['eps1']), dim=0, keepdim=True))
exp_avg_sq_c.mul_(beta2_t).add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).add_(group['eps1']), dim=1, keepdim=True))
v = torch.mul(exp_avg_sq_c, exp_avg_sq_r).div_(torch.sum(exp_avg_sq_r))
else:
exp_avg_sq.mul_(beta2_t).addcmul_(1 - beta2_t, grad, grad).add_((1 - beta2_t) * group['eps1'])
v = exp_avg_sq
g = grad
if group['enable_momentum']:
g = torch.div(exp_avg, 1 - beta1_t ** state['step'])
if group['ams_grad']:
torch.max(exp_avg_sq_hat, v, out=exp_avg_sq_hat)
v = exp_avg_sq_hat
u = torch.div(g, torch.div(v, 1 - beta2_t ** state['step']).sqrt().add_(group['eps1']))
else:
u = torch.div(g, v.sqrt())
u.div_(max(1, self._rms(u) / group['cliping_threshold']))
p.data.add_(-lr_t * (u.view(old_shape) if is_need_reshape and group['enable_factorization'] else u))
if group['weight_decay'] != 0:
p.data.add_(-group['weight_decay'] * lr_t, p.data)
return loss
|
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, use SparseAdam instead')
if len(grad.size()) > 2:
(is_matrix, is_need_reshape) = (True, True)
elif len(grad.size()) == 2:
(is_matrix, is_need_reshape) = (True, False)
elif len(grad.size()) == 2 and (grad.size()[0] == 1 or grad.size()[1] == 1):
(is_matrix, is_need_reshape) = (False, False)
else:
(is_matrix, is_need_reshape) = (False, False)
new_shape = p.data.size()
if is_need_reshape and group['enable_factorization']:
temp_shape = p.data.size()[2:]
if len(temp_shape) == 1:
new_shape = (p.data.size()[0], p.data.size()[1] * p.data.size()[2])
else:
tmp_div = len(temp_shape) // 2 + len(temp_shape) % 2
new_shape = (p.data.size()[0] * functools.reduce(operator.mul, temp_shape[tmp_div:], 1), p.data.size()[1] * functools.reduce(operator.mul, temp_shape[:tmp_div], 1))
(new_shape, old_shape) = (new_shape, copy(p.data.size()))
grad = grad.view(new_shape)
state = self.state[p]
if len(state) == 0:
state['step'] = 0
if group['enable_momentum']:
state['exp_avg'] = torch.zeros(new_shape, dtype=torch.float32, device=p.grad.device)
if is_matrix and group['enable_factorization']:
state['exp_avg_sq_R'] = torch.zeros((1, new_shape[1]), dtype=torch.float32, device=p.grad.device)
state['exp_avg_sq_C'] = torch.zeros((new_shape[0], 1), dtype=torch.float32, device=p.grad.device)
else:
state['exp_avg_sq'] = torch.zeros(new_shape, dtype=torch.float32, device=p.grad.device)
if group['ams_grad']:
state['exp_avg_sq_hat'] = torch.zeros(new_shape, dtype=torch.float32, device=p.grad.device)
if group['enable_momentum']:
exp_avg = state['exp_avg']
if is_matrix and group['enable_factorization']:
exp_avg_sq_r = state['exp_avg_sq_R']
exp_avg_sq_c = state['exp_avg_sq_C']
else:
exp_avg_sq = state['exp_avg_sq']
if group['ams_grad']:
exp_avg_sq_hat = state['exp_avg_sq_hat']
state['step'] += 1
if group['lr'] is None:
lr_t = min(0.01, 1 / sqrt(state['step']))
lr_t *= max(group['eps2'], self._rms(p.data))
else:
lr_t = group['lr']
if group['enable_momentum']:
if group['non_constant_decay']:
beta1_t = group['beta1'] * (1 - group['beta1'] ** (state['step'] - 1)) / (1 - group['beta1'] ** state['step'])
else:
beta1_t = group['beta1']
exp_avg.mul_(beta1_t).add_(1 - beta1_t, grad)
if group['non_constant_decay']:
beta2_t = group['beta2'] * (1 - group['beta2'] ** (state['step'] - 1)) / (1 - group['beta2'] ** state['step'])
else:
beta2_t = group['beta2']
if is_matrix and group['enable_factorization']:
exp_avg_sq_r.mul_(beta2_t).add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).add_(group['eps1']), dim=0, keepdim=True))
exp_avg_sq_c.mul_(beta2_t).add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).add_(group['eps1']), dim=1, keepdim=True))
v = torch.mul(exp_avg_sq_c, exp_avg_sq_r).div_(torch.sum(exp_avg_sq_r))
else:
exp_avg_sq.mul_(beta2_t).addcmul_(1 - beta2_t, grad, grad).add_((1 - beta2_t) * group['eps1'])
v = exp_avg_sq
g = grad
if group['enable_momentum']:
g = torch.div(exp_avg, 1 - beta1_t ** state['step'])
if group['ams_grad']:
torch.max(exp_avg_sq_hat, v, out=exp_avg_sq_hat)
v = exp_avg_sq_hat
u = torch.div(g, torch.div(v, 1 - beta2_t ** state['step']).sqrt().add_(group['eps1']))
else:
u = torch.div(g, v.sqrt())
u.div_(max(1, self._rms(u) / group['cliping_threshold']))
p.data.add_(-lr_t * (u.view(old_shape) if is_need_reshape and group['enable_factorization'] else u))
if group['weight_decay'] != 0:
p.data.add_(-group['weight_decay'] * lr_t, p.data)
return loss
|
ExHiRD-DKG
|
positive
|
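The optimizer record above keeps the second-moment estimate for matrices in factored form: a (1, n) running average of per-column sums and an (m, 1) running average of per-row sums, recombined as a rank-1 product (similar in spirit to Adafactor). A standalone sketch of just that factorization, with made-up shapes and decay rate:

import torch

def factored_second_moment(grad, row_stats, col_stats, beta2=0.999, eps1=1e-30):
    sq = grad * grad + eps1
    # row_stats, shape (1, n): running average of per-column sums of squared gradients.
    row_stats.mul_(beta2).add_((1 - beta2) * sq.sum(dim=0, keepdim=True))
    # col_stats, shape (m, 1): running average of per-row sums of squared gradients.
    col_stats.mul_(beta2).add_((1 - beta2) * sq.sum(dim=1, keepdim=True))
    # Rank-1 reconstruction of the full (m, n) second-moment estimate.
    return (col_stats @ row_stats) / row_stats.sum()

grad = torch.randn(4, 3)
v = factored_second_moment(grad, torch.zeros(1, 3), torch.zeros(4, 1))
print(v.shape)  # torch.Size([4, 3])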
def optimize(self, verbose=True):
"""
recreate Faceset with optimized structure.
"""
if verbose:
print(f'Optimizing {self._path.name}...')
tmp_path = self._path.parent / (self._path.stem + '_optimizing' + self._path.suffix)
tmp_fs = Faceset(tmp_path, write_access=True, recreate=True)
<DeepExtract>
for (key, value) in lib_con.progress_bar_iterator(self._UFaceMark_grp.items(), desc=f'Copying {self._UFaceMark_grp.name} -> {tmp_fs._UFaceMark_grp.name}', suppress_print=not verbose):
d = tmp_fs._UFaceMark_grp.create_dataset(key, shape=value.shape, dtype=value.dtype)
d[:] = value[:]
for (a_key, a_value) in value.attrs.items():
d.attrs[a_key] = a_value
</DeepExtract>
<DeepExtract>
for (key, value) in lib_con.progress_bar_iterator(self._UPerson_grp.items(), desc=f'Copying {self._UPerson_grp.name} -> {tmp_fs._UPerson_grp.name}', suppress_print=not verbose):
d = tmp_fs._UPerson_grp.create_dataset(key, shape=value.shape, dtype=value.dtype)
d[:] = value[:]
for (a_key, a_value) in value.attrs.items():
d.attrs[a_key] = a_value
</DeepExtract>
<DeepExtract>
for (key, value) in lib_con.progress_bar_iterator(self._UImage_grp.items(), desc=f'Copying {self._UImage_grp.name} -> {tmp_fs._UImage_grp.name}', suppress_print=not verbose):
d = tmp_fs._UImage_grp.create_dataset(key, shape=value.shape, dtype=value.dtype)
d[:] = value[:]
for (a_key, a_value) in value.attrs.items():
d.attrs[a_key] = a_value
</DeepExtract>
<DeepExtract>
for (key, value) in lib_con.progress_bar_iterator(self._UImage_image_data_grp.items(), desc=f'Copying {self._UImage_image_data_grp.name} -> {tmp_fs._UImage_image_data_grp.name}', suppress_print=not verbose):
d = tmp_fs._UImage_image_data_grp.create_dataset(key, shape=value.shape, dtype=value.dtype)
d[:] = value[:]
for (a_key, a_value) in value.attrs.items():
d.attrs[a_key] = a_value
</DeepExtract>
tmp_fs.close()
<DeepExtract>
if self._f is not None:
self._f.close()
self._f = None
</DeepExtract>
self._path.unlink()
tmp_path.rename(self._path)
<DeepExtract>
if self._f is None:
self._f = f = h5py.File(self._path, mode=self._mode)
self._UFaceMark_grp = f.require_group('UFaceMark')
self._UImage_grp = f.require_group('UImage')
self._UImage_image_data_grp = f.require_group('UImage_image_data')
self._UPerson_grp = f.require_group('UPerson')
</DeepExtract>
|
def optimize(self, verbose=True):
"""
recreate Faceset with optimized structure.
"""
if verbose:
print(f'Optimizing {self._path.name}...')
tmp_path = self._path.parent / (self._path.stem + '_optimizing' + self._path.suffix)
tmp_fs = Faceset(tmp_path, write_access=True, recreate=True)
for (key, value) in lib_con.progress_bar_iterator(self._UFaceMark_grp.items(), desc=f'Copying {self._UFaceMark_grp.name} -> {tmp_fs._UFaceMark_grp.name}', suppress_print=not verbose):
d = tmp_fs._UFaceMark_grp.create_dataset(key, shape=value.shape, dtype=value.dtype)
d[:] = value[:]
for (a_key, a_value) in value.attrs.items():
d.attrs[a_key] = a_value
for (key, value) in lib_con.progress_bar_iterator(self._UPerson_grp.items(), desc=f'Copying {self._UPerson_grp.name} -> {tmp_fs._UPerson_grp.name}', suppress_print=not verbose):
d = tmp_fs._UPerson_grp.create_dataset(key, shape=value.shape, dtype=value.dtype)
d[:] = value[:]
for (a_key, a_value) in value.attrs.items():
d.attrs[a_key] = a_value
for (key, value) in lib_con.progress_bar_iterator(self._UImage_grp.items(), desc=f'Copying {self._UImage_grp.name} -> {tmp_fs._UImage_grp.name}', suppress_print=not verbose):
d = tmp_fs._UImage_grp.create_dataset(key, shape=value.shape, dtype=value.dtype)
d[:] = value[:]
for (a_key, a_value) in value.attrs.items():
d.attrs[a_key] = a_value
for (key, value) in lib_con.progress_bar_iterator(self._UImage_image_data_grp.items(), desc=f'Copying {self._UImage_image_data_grp.name} -> {tmp_fs._UImage_image_data_grp.name}', suppress_print=not verbose):
d = tmp_fs._UImage_image_data_grp.create_dataset(key, shape=value.shape, dtype=value.dtype)
d[:] = value[:]
for (a_key, a_value) in value.attrs.items():
d.attrs[a_key] = a_value
tmp_fs.close()
if self._f is not None:
self._f.close()
self._f = None
self._path.unlink()
tmp_path.rename(self._path)
if self._f is None:
self._f = f = h5py.File(self._path, mode=self._mode)
self._UFaceMark_grp = f.require_group('UFaceMark')
self._UImage_grp = f.require_group('UImage')
self._UImage_image_data_grp = f.require_group('UImage_image_data')
self._UPerson_grp = f.require_group('UPerson')
|
DeepFaceLive
|
positive
|
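The Faceset optimize pass above rebuilds the HDF5 file by copying every dataset, element data and attributes alike, into a freshly created group. The copy pattern in isolation, with made-up file and group names:

import h5py
import numpy as np

def copy_group(src_grp, dst_grp):
    # Recreate each dataset in the destination group and mirror its attributes.
    for key, value in src_grp.items():
        d = dst_grp.create_dataset(key, shape=value.shape, dtype=value.dtype)
        d[:] = value[:]
        for a_key, a_value in value.attrs.items():
            d.attrs[a_key] = a_value

with h5py.File('src.h5', 'w') as f:
    grp = f.create_group('UImage')
    ds = grp.create_dataset('img0', data=np.arange(4, dtype=np.uint8))
    ds.attrs['name'] = 'img0'

with h5py.File('src.h5', 'r') as src, h5py.File('dst.h5', 'w') as dst:
    copy_group(src['UImage'], dst.require_group('UImage'))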
def parse(self, pipeline_json: Dict) -> Pipeline:
"""
The pipeline definitions allow for defining multiple pipelines in one json file.
When super_nodes are used, their node actually references another pipeline in the
set of pipeline definitions - which is "flattened" into the overall pipeline object's
list of operations.
"""
try:
pipeline_definition = PipelineDefinition(pipeline_definition=pipeline_json)
primary_pipeline = pipeline_definition.primary_pipeline
except Exception as e:
raise ValueError(f'Invalid Pipeline: {e}')
runtime = primary_pipeline.runtime
if not runtime:
raise ValueError('Invalid pipeline: Missing runtime.')
runtime_config = primary_pipeline.runtime_config
source = primary_pipeline.source
description = primary_pipeline.get_property('description')
pipeline_object = Pipeline(id=primary_pipeline.id, name=primary_pipeline.name, runtime=runtime, runtime_config=runtime_config, source=source, description=description, pipeline_properties=primary_pipeline.pipeline_default_properties, pipeline_parameters=primary_pipeline.pipeline_parameters)
nodes = primary_pipeline.nodes
for pipeline in pipeline_definition.pipelines:
if pipeline.id == primary_pipeline.id:
nodes = pipeline.nodes
<DeepExtract>
for node in nodes:
if node.type == 'super_node':
self._super_node_to_operations(pipeline_definition, node, pipeline_object, node)
continue
elif node.type == 'binding':
continue
elif node.type == 'model_node':
raise NotImplementedError(f"Node type '{node.type}' is currently not supported!")
elif node.type != 'execution_node':
raise ValueError(f"Node type '{node.type}' is invalid!")
operation = self._create_pipeline_operation(node, super_node)
comment = pipeline_definition.get_node_comments(node.id)
if comment:
operation.doc = comment
self.log.debug(f"Adding operation for '{operation.name}' to pipeline: {pipeline_object.name}")
pipeline_object.operations[operation.id] = operation
</DeepExtract>
return pipeline_object
|
def parse(self, pipeline_json: Dict) -> Pipeline:
"""
The pipeline definitions allow for defining multiple pipelines in one json file.
When super_nodes are used, their node actually references another pipeline in the
set of pipeline definitions - which is "flattened" into the overall pipeline object's
list of operations.
"""
try:
pipeline_definition = PipelineDefinition(pipeline_definition=pipeline_json)
primary_pipeline = pipeline_definition.primary_pipeline
except Exception as e:
raise ValueError(f'Invalid Pipeline: {e}')
runtime = primary_pipeline.runtime
if not runtime:
raise ValueError('Invalid pipeline: Missing runtime.')
runtime_config = primary_pipeline.runtime_config
source = primary_pipeline.source
description = primary_pipeline.get_property('description')
pipeline_object = Pipeline(id=primary_pipeline.id, name=primary_pipeline.name, runtime=runtime, runtime_config=runtime_config, source=source, description=description, pipeline_properties=primary_pipeline.pipeline_default_properties, pipeline_parameters=primary_pipeline.pipeline_parameters)
nodes = primary_pipeline.nodes
for pipeline in pipeline_definition.pipelines:
if pipeline.id == primary_pipeline.id:
nodes = pipeline.nodes
for node in nodes:
if node.type == 'super_node':
self._super_node_to_operations(pipeline_definition, node, pipeline_object, node)
continue
elif node.type == 'binding':
continue
elif node.type == 'model_node':
raise NotImplementedError(f"Node type '{node.type}' is currently not supported!")
elif node.type != 'execution_node':
raise ValueError(f"Node type '{node.type}' is invalid!")
operation = self._create_pipeline_operation(node, super_node)
comment = pipeline_definition.get_node_comments(node.id)
if comment:
operation.doc = comment
self.log.debug(f"Adding operation for '{operation.name}' to pipeline: {pipeline_object.name}")
pipeline_object.operations[operation.id] = operation
return pipeline_object
|
elyra
|
positive
|
def _run_navigation_episode(agent, client, time_out, target, episode_name, metrics_parameters, collision_as_failure, traffic_light_as_failure):
"""
Run one episode of the benchmark (Pose) for a certain agent.
Args:
agent: the agent object
client: an object of the carla client to communicate
with the CARLA simulator
time_out: the time limit to complete this episode
target: the target to reach
episode_name: The name for saving images of this episode
metrics_object: The metrics object to check for collisions
"""
(measurements, sensor_data) = client.read_data()
client.send_control(VehicleControl())
initial_timestamp = measurements.game_timestamp
current_timestamp = initial_timestamp
measurement_vec = []
control_vec = []
frame = 0
distance = 10000
(col_ped, col_veh, col_oth) = (0, 0, 0)
(traffic_light_state, number_red_lights, number_green_lights) = (None, 0, 0)
fail = False
success = False
not_count = 0
while not fail and (not success):
(measurements, sensor_data) = client.read_data()
<DeepExtract>
directions = self._planner.get_next_command((measurements.player_measurements.transform.location.x, measurements.player_measurements.transform.location.y, 0.22), (measurements.player_measurements.transform.orientation.x, measurements.player_measurements.transform.orientation.y, measurements.player_measurements.transform.orientation.z), (target.location.x, target.location.y, 0.22), (target.orientation.x, target.orientation.y, target.orientation.z))
directions = directions
</DeepExtract>
control = agent.run_step(measurements, sensor_data, directions, target)
client.send_control(control)
self._recording.save_images(sensor_data, episode_name, frame)
current_x = measurements.player_measurements.transform.location.x
current_y = measurements.player_measurements.transform.location.y
logging.info('Controller is Inputting:')
logging.info('Steer = %f Throttle = %f Brake = %f ', control.steer, control.throttle, control.brake)
current_timestamp = measurements.game_timestamp
logging.info('Timestamp %f', current_timestamp)
<DeepExtract>
distance = math.sqrt(([target.location.x, target.location.y][0] - [current_x, current_y][0]) ** 2 + ([target.location.x, target.location.y][1] - [current_x, current_y][1]) ** 2)
</DeepExtract>
logging.info('Status:')
logging.info('[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f', float(distance), current_x, current_y, target.location.x, target.location.y)
<DeepExtract>
collided_veh = 0
collided_ped = 0
collided_oth = 0
if measurements.player_measurements.collision_vehicles - self._previous_vehicle_collision > metrics_parameters['collision_vehicles']['threshold'] / 2.0:
collided_veh = 1
if measurements.player_measurements.collision_pedestrians - self._previous_pedestrian_collision > metrics_parameters['collision_pedestrians']['threshold'] / 2.0:
collided_ped = 1
if measurements.player_measurements.collision_other - self._previous_other_collision > metrics_parameters['collision_other']['threshold'] / 2.0:
collided_oth = 1
self._previous_pedestrian_collision = measurements.player_measurements.collision_pedestrians
self._previous_vehicle_collision = measurements.player_measurements.collision_vehicles
self._previous_other_collision = measurements.player_measurements.collision_other
(col_ped, col_veh, col_oth) = (collided_ped, collided_veh, collided_oth)
</DeepExtract>
<DeepExtract>
def is_on_burning_point(_map, location):
(ori_x, ori_y) = _map.get_lane_orientation([location.x, location.y, 38])
future_location_x = location.x
future_location_y = location.y
for i in range(3):
future_location_x += ori_x
future_location_y += ori_y
location_on_intersection_x = future_location_x + 2 * ori_x
location_on_intersection_y = future_location_y + 2 * ori_y
if not _map.is_point_on_intersection([future_location_x, future_location_y, 38]) and _map.is_point_on_intersection([location_on_intersection_x, location_on_intersection_y, 38]):
traffic_light_state = True
traffic_light_state = False
player_x = measurements.player_measurements.transform.location.x
player_y = measurements.player_measurements.transform.location.y
for agent in measurements.non_player_agents:
if agent.HasField('traffic_light'):
if not self._map.is_point_on_intersection([player_x, player_y, 38]):
x_agent = agent.traffic_light.transform.location.x
y_agent = agent.traffic_light.transform.location.y
(tl_vector, tl_dist) = get_vec_dist(x_agent, y_agent, player_x, player_y)
if self._is_traffic_light_active(agent, measurements.player_measurements.transform.orientation):
if is_on_burning_point(self._map, measurements.player_measurements.transform.location) and tl_dist < 6.0:
if agent.traffic_light.state != 0:
traffic_light_state = 'red'
else:
traffic_light_state = 'green'
traffic_light_state = None
</DeepExtract>
if traffic_light_state == 'red' and not_count == 0:
number_red_lights += 1
not_count = 20
elif traffic_light_state == 'green' and not_count == 0:
number_green_lights += 1
not_count = 20
else:
not_count -= 1
not_count = max(0, not_count)
if distance < self._distance_for_success:
success = True
elif current_timestamp - initial_timestamp > time_out * 1000:
fail = True
elif collision_as_failure and (col_ped or col_veh or col_oth):
fail = True
elif traffic_light_as_failure and traffic_light_state == 'red':
fail = True
logging.info('Traffic Lights:')
logging.info('red %f green %f, total %f', number_red_lights, number_green_lights, number_red_lights + number_green_lights)
frame += 1
measurement_vec.append(measurements.player_measurements)
control_vec.append(control)
if success:
return (1, measurement_vec, control_vec, float(current_timestamp - initial_timestamp) / 1000.0, distance, col_ped, col_veh, col_oth, number_red_lights, number_green_lights)
return (0, measurement_vec, control_vec, time_out, distance, col_ped, col_veh, col_oth, number_red_lights, number_green_lights)
|
def _run_navigation_episode(agent, client, time_out, target, episode_name, metrics_parameters, collision_as_failure, traffic_light_as_failure):
"""
Run one episode of the benchmark (Pose) for a certain agent.
Args:
agent: the agent object
client: an object of the carla client to communicate
with the CARLA simulator
time_out: the time limit to complete this episode
target: the target to reach
episode_name: The name for saving images of this episode
metrics_object: The metrics object to check for collisions
"""
(measurements, sensor_data) = client.read_data()
client.send_control(VehicleControl())
initial_timestamp = measurements.game_timestamp
current_timestamp = initial_timestamp
measurement_vec = []
control_vec = []
frame = 0
distance = 10000
(col_ped, col_veh, col_oth) = (0, 0, 0)
(traffic_light_state, number_red_lights, number_green_lights) = (None, 0, 0)
fail = False
success = False
not_count = 0
while not fail and (not success):
(measurements, sensor_data) = client.read_data()
directions = self._planner.get_next_command((measurements.player_measurements.transform.location.x, measurements.player_measurements.transform.location.y, 0.22), (measurements.player_measurements.transform.orientation.x, measurements.player_measurements.transform.orientation.y, measurements.player_measurements.transform.orientation.z), (target.location.x, target.location.y, 0.22), (target.orientation.x, target.orientation.y, target.orientation.z))
directions = directions
control = agent.run_step(measurements, sensor_data, directions, target)
client.send_control(control)
self._recording.save_images(sensor_data, episode_name, frame)
current_x = measurements.player_measurements.transform.location.x
current_y = measurements.player_measurements.transform.location.y
logging.info('Controller is Inputting:')
logging.info('Steer = %f Throttle = %f Brake = %f ', control.steer, control.throttle, control.brake)
current_timestamp = measurements.game_timestamp
logging.info('Timestamp %f', current_timestamp)
distance = math.sqrt(([target.location.x, target.location.y][0] - [current_x, current_y][0]) ** 2 + ([target.location.x, target.location.y][1] - [current_x, current_y][1]) ** 2)
logging.info('Status:')
logging.info('[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f', float(distance), current_x, current_y, target.location.x, target.location.y)
collided_veh = 0
collided_ped = 0
collided_oth = 0
if measurements.player_measurements.collision_vehicles - self._previous_vehicle_collision > metrics_parameters['collision_vehicles']['threshold'] / 2.0:
collided_veh = 1
if measurements.player_measurements.collision_pedestrians - self._previous_pedestrian_collision > metrics_parameters['collision_pedestrians']['threshold'] / 2.0:
collided_ped = 1
if measurements.player_measurements.collision_other - self._previous_other_collision > metrics_parameters['collision_other']['threshold'] / 2.0:
collided_oth = 1
self._previous_pedestrian_collision = measurements.player_measurements.collision_pedestrians
self._previous_vehicle_collision = measurements.player_measurements.collision_vehicles
self._previous_other_collision = measurements.player_measurements.collision_other
(col_ped, col_veh, col_oth) = (collided_ped, collided_veh, collided_oth)
def is_on_burning_point(_map, location):
(ori_x, ori_y) = _map.get_lane_orientation([location.x, location.y, 38])
future_location_x = location.x
future_location_y = location.y
for i in range(3):
future_location_x += ori_x
future_location_y += ori_y
location_on_intersection_x = future_location_x + 2 * ori_x
location_on_intersection_y = future_location_y + 2 * ori_y
if not _map.is_point_on_intersection([future_location_x, future_location_y, 38]) and _map.is_point_on_intersection([location_on_intersection_x, location_on_intersection_y, 38]):
traffic_light_state = True
traffic_light_state = False
player_x = measurements.player_measurements.transform.location.x
player_y = measurements.player_measurements.transform.location.y
for agent in measurements.non_player_agents:
if agent.HasField('traffic_light'):
if not self._map.is_point_on_intersection([player_x, player_y, 38]):
x_agent = agent.traffic_light.transform.location.x
y_agent = agent.traffic_light.transform.location.y
(tl_vector, tl_dist) = get_vec_dist(x_agent, y_agent, player_x, player_y)
if self._is_traffic_light_active(agent, measurements.player_measurements.transform.orientation):
if is_on_burning_point(self._map, measurements.player_measurements.transform.location) and tl_dist < 6.0:
if agent.traffic_light.state != 0:
traffic_light_state = 'red'
else:
traffic_light_state = 'green'
traffic_light_state = None
if traffic_light_state == 'red' and not_count == 0:
number_red_lights += 1
not_count = 20
elif traffic_light_state == 'green' and not_count == 0:
number_green_lights += 1
not_count = 20
else:
not_count -= 1
not_count = max(0, not_count)
if distance < self._distance_for_success:
success = True
elif current_timestamp - initial_timestamp > time_out * 1000:
fail = True
elif collision_as_failure and (col_ped or col_veh or col_oth):
fail = True
elif traffic_light_as_failure and traffic_light_state == 'red':
fail = True
logging.info('Traffic Lights:')
logging.info('red %f green %f, total %f', number_red_lights, number_green_lights, number_red_lights + number_green_lights)
frame += 1
measurement_vec.append(measurements.player_measurements)
control_vec.append(control)
if success:
return (1, measurement_vec, control_vec, float(current_timestamp - initial_timestamp) / 1000.0, distance, col_ped, col_veh, col_oth, number_red_lights, number_green_lights)
return (0, measurement_vec, control_vec, time_out, distance, col_ped, col_veh, col_oth, number_red_lights, number_green_lights)
|
coiltraine
|
positive
|
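The navigation episode above ends when the Euclidean distance to the target drops below a success threshold, when the game clock (in milliseconds) exceeds the timeout, or when a collision or red-light failure fires. A reduced sketch of the distance and timeout checks, with example numbers:

import math

def episode_status(current_xy, target_xy, elapsed_ms, time_out_s, distance_for_success=2.0):
    # Planar Euclidean distance between the player and the target.
    distance = math.sqrt((target_xy[0] - current_xy[0]) ** 2 + (target_xy[1] - current_xy[1]) ** 2)
    if distance < distance_for_success:
        return ('success', distance)
    if elapsed_ms > time_out_s * 1000:
        return ('timeout', distance)
    return ('running', distance)

print(episode_status((0.0, 0.0), (3.0, 4.0), elapsed_ms=500, time_out_s=10))  # ('running', 5.0)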
def __init__(self, path: str):
data_path: str = os.path.join(path, 'raw', 'ACM.mat')
_url: str = 'https://data.dgl.ai/dataset/ACM.mat'
if os.path.exists(data_path) and os.path.isfile(data_path):
print(f'Using cached file {data_path}')
else:
dgl.data.utils.download(_url, data_path)
data = scipy.io.loadmat(data_path)
p_vs_l = data['PvsL']
p_vs_a = data['PvsA']
p_vs_t = data['PvsT']
p_vs_c = data['PvsC']
conf_ids = [0, 1, 9, 10, 13]
label_ids = [0, 1, 2, 2, 1]
p_vs_c_filter = p_vs_c[:, conf_ids]
p_selected = (p_vs_c_filter.sum(1) != 0).A1.nonzero()[0]
p_vs_l = p_vs_l[p_selected]
p_vs_a = p_vs_a[p_selected]
p_vs_t = p_vs_t[p_selected]
p_vs_c = p_vs_c[p_selected]
hg = dgl.heterograph({('paper', 'pa', 'author'): p_vs_a.nonzero(), ('author', 'ap', 'paper'): p_vs_a.transpose().nonzero(), ('paper', 'pf', 'field'): p_vs_l.nonzero(), ('field', 'fp', 'paper'): p_vs_l.transpose().nonzero()})
hg.nodes['paper'].data['feat'] = torch.tensor(p_vs_t.toarray(), dtype=torch.float)
(pc_p, pc_c) = p_vs_c.nonzero()
labels = np.zeros(len(p_selected), dtype=np.int64)
for (conf_id, label_id) in zip(conf_ids, label_ids):
labels[pc_p[pc_c == conf_id]] = label_id
hg.nodes['paper'].data['label'] = torch.LongTensor(labels)
float_mask = np.zeros(len(pc_p))
for conf_id in conf_ids:
pc_c_mask = pc_c == conf_id
float_mask[pc_c_mask] = np.random.permutation(np.linspace(0, 1, pc_c_mask.sum()))
train_idx = np.where(float_mask <= 0.2)[0]
val_idx = np.where((float_mask > 0.2) & (float_mask <= 0.3))[0]
test_idx = np.where(float_mask > 0.3)[0]
num_nodes = hg.number_of_nodes('paper')
<DeepExtract>
mask = torch.zeros(num_nodes)
mask[train_idx] = 1
hg.nodes['paper'].data['train_mask'] = mask.bool()
</DeepExtract>
<DeepExtract>
mask = torch.zeros(num_nodes)
mask[val_idx] = 1
hg.nodes['paper'].data['val_mask'] = mask.bool()
</DeepExtract>
<DeepExtract>
mask = torch.zeros(num_nodes)
mask[test_idx] = 1
hg.nodes['paper'].data['test_mask'] = mask.bool()
</DeepExtract>
super(ACMHANDataset, self).__init__([hg])
self.schema.meta_paths = (('pa', 'ap'), ('pf', 'fp'))
self.schema['target_node_type'] = 'paper'
|
def __init__(self, path: str):
data_path: str = os.path.join(path, 'raw', 'ACM.mat')
_url: str = 'https://data.dgl.ai/dataset/ACM.mat'
if os.path.exists(data_path) and os.path.isfile(data_path):
print(f'Using cached file {data_path}')
else:
dgl.data.utils.download(_url, data_path)
data = scipy.io.loadmat(data_path)
p_vs_l = data['PvsL']
p_vs_a = data['PvsA']
p_vs_t = data['PvsT']
p_vs_c = data['PvsC']
conf_ids = [0, 1, 9, 10, 13]
label_ids = [0, 1, 2, 2, 1]
p_vs_c_filter = p_vs_c[:, conf_ids]
p_selected = (p_vs_c_filter.sum(1) != 0).A1.nonzero()[0]
p_vs_l = p_vs_l[p_selected]
p_vs_a = p_vs_a[p_selected]
p_vs_t = p_vs_t[p_selected]
p_vs_c = p_vs_c[p_selected]
hg = dgl.heterograph({('paper', 'pa', 'author'): p_vs_a.nonzero(), ('author', 'ap', 'paper'): p_vs_a.transpose().nonzero(), ('paper', 'pf', 'field'): p_vs_l.nonzero(), ('field', 'fp', 'paper'): p_vs_l.transpose().nonzero()})
hg.nodes['paper'].data['feat'] = torch.tensor(p_vs_t.toarray(), dtype=torch.float)
(pc_p, pc_c) = p_vs_c.nonzero()
labels = np.zeros(len(p_selected), dtype=np.int64)
for (conf_id, label_id) in zip(conf_ids, label_ids):
labels[pc_p[pc_c == conf_id]] = label_id
hg.nodes['paper'].data['label'] = torch.LongTensor(labels)
float_mask = np.zeros(len(pc_p))
for conf_id in conf_ids:
pc_c_mask = pc_c == conf_id
float_mask[pc_c_mask] = np.random.permutation(np.linspace(0, 1, pc_c_mask.sum()))
train_idx = np.where(float_mask <= 0.2)[0]
val_idx = np.where((float_mask > 0.2) & (float_mask <= 0.3))[0]
test_idx = np.where(float_mask > 0.3)[0]
num_nodes = hg.number_of_nodes('paper')
mask = torch.zeros(num_nodes)
mask[train_idx] = 1
hg.nodes['paper'].data['train_mask'] = mask.bool()
mask = torch.zeros(num_nodes)
mask[val_idx] = 1
hg.nodes['paper'].data['val_mask'] = mask.bool()
mask = torch.zeros(num_nodes)
mask[test_idx] = 1
hg.nodes['paper'].data['test_mask'] = mask.bool()
super(ACMHANDataset, self).__init__([hg])
self.schema.meta_paths = (('pa', 'ap'), ('pf', 'fp'))
self.schema['target_node_type'] = 'paper'
|
AutoGL
|
positive
|
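The ACM loader above turns index arrays into boolean node masks before attaching them to the heterograph. That conversion on its own, with a toy node count and split threshold:

import numpy as np
import torch

def index_to_mask(idx, num_nodes):
    # Boolean mask with True at the selected node indices.
    mask = torch.zeros(num_nodes)
    mask[idx] = 1
    return mask.bool()

num_nodes = 10
float_mask = np.random.permutation(np.linspace(0, 1, num_nodes))
train_idx = np.where(float_mask <= 0.2)[0]
print(index_to_mask(train_idx, num_nodes))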
def network(network_name):
x_image = tf.placeholder(tf.float32, shape=[None, self.img_size, self.img_size, self.Num_colorChannel])
x_normalize = (x_image - 255.0 / 2) / (255.0 / 2)
with tf.variable_scope(network_name):
<DeepExtract>
w_conv1 = tf.get_variable('_w_conv1', shape=self.first_conv, initializer=tf.contrib.layers.xavier_initializer_conv2d())
</DeepExtract>
<DeepExtract>
b_conv1 = tf.get_variable('_b_conv1', shape=[self.first_conv[3]], initializer=tf.contrib.layers.xavier_initializer())
</DeepExtract>
<DeepExtract>
w_conv2 = tf.get_variable('_w_conv2', shape=self.second_conv, initializer=tf.contrib.layers.xavier_initializer_conv2d())
</DeepExtract>
<DeepExtract>
b_conv2 = tf.get_variable('_b_conv2', shape=[self.second_conv[3]], initializer=tf.contrib.layers.xavier_initializer())
</DeepExtract>
<DeepExtract>
w_conv3 = tf.get_variable('_w_conv3', shape=self.third_conv, initializer=tf.contrib.layers.xavier_initializer_conv2d())
</DeepExtract>
<DeepExtract>
b_conv3 = tf.get_variable('_b_conv3', shape=[self.third_conv[3]], initializer=tf.contrib.layers.xavier_initializer())
</DeepExtract>
<DeepExtract>
w_fc1 = tf.get_variable('_w_fc1', shape=self.first_dense, initializer=tf.contrib.layers.xavier_initializer())
</DeepExtract>
<DeepExtract>
b_fc1 = tf.get_variable('_b_fc1', shape=[self.first_dense[1]], initializer=tf.contrib.layers.xavier_initializer())
</DeepExtract>
cell = tf.contrib.rnn.BasicLSTMCell(num_units=self.lstm_size)
h_conv1 = tf.nn.relu(self.conv2d(x_normalize, w_conv1, 4) + b_conv1)
h_conv2 = tf.nn.relu(self.conv2d(h_conv1, w_conv2, 2) + b_conv2)
h_conv3 = tf.nn.relu(self.conv2d(h_conv2, w_conv3, 1) + b_conv3)
rnn_batch_size = tf.placeholder(dtype=tf.int32)
rnn_step_size = tf.placeholder(dtype=tf.int32)
h_flat = tf.reshape(h_conv3, [rnn_batch_size, rnn_step_size, self.flatten_size])
(rnn_out, rnn_state) = tf.nn.dynamic_rnn(inputs=h_flat, cell=cell, dtype=tf.float32)
rnn_out = rnn_out[:, -1, :]
rnn_out = tf.reshape(rnn_out, shape=[rnn_batch_size, -1])
output = tf.matmul(rnn_out, w_fc1) + b_fc1
return (x_image, output, rnn_batch_size, rnn_step_size)
|
def network(network_name):
x_image = tf.placeholder(tf.float32, shape=[None, self.img_size, self.img_size, self.Num_colorChannel])
x_normalize = (x_image - 255.0 / 2) / (255.0 / 2)
with tf.variable_scope(network_name):
w_conv1 = tf.get_variable('_w_conv1', shape=self.first_conv, initializer=tf.contrib.layers.xavier_initializer_conv2d())
b_conv1 = tf.get_variable('_b_conv1', shape=[self.first_conv[3]], initializer=tf.contrib.layers.xavier_initializer())
w_conv2 = tf.get_variable('_w_conv2', shape=self.second_conv, initializer=tf.contrib.layers.xavier_initializer_conv2d())
b_conv2 = tf.get_variable('_b_conv2', shape=[self.second_conv[3]], initializer=tf.contrib.layers.xavier_initializer())
w_conv3 = tf.get_variable('_w_conv3', shape=self.third_conv, initializer=tf.contrib.layers.xavier_initializer_conv2d())
b_conv3 = tf.get_variable('_b_conv3', shape=[self.third_conv[3]], initializer=tf.contrib.layers.xavier_initializer())
w_fc1 = tf.get_variable('_w_fc1', shape=self.first_dense, initializer=tf.contrib.layers.xavier_initializer())
b_fc1 = tf.get_variable('_b_fc1', shape=[self.first_dense[1]], initializer=tf.contrib.layers.xavier_initializer())
cell = tf.contrib.rnn.BasicLSTMCell(num_units=self.lstm_size)
h_conv1 = tf.nn.relu(self.conv2d(x_normalize, w_conv1, 4) + b_conv1)
h_conv2 = tf.nn.relu(self.conv2d(h_conv1, w_conv2, 2) + b_conv2)
h_conv3 = tf.nn.relu(self.conv2d(h_conv2, w_conv3, 1) + b_conv3)
rnn_batch_size = tf.placeholder(dtype=tf.int32)
rnn_step_size = tf.placeholder(dtype=tf.int32)
h_flat = tf.reshape(h_conv3, [rnn_batch_size, rnn_step_size, self.flatten_size])
(rnn_out, rnn_state) = tf.nn.dynamic_rnn(inputs=h_flat, cell=cell, dtype=tf.float32)
rnn_out = rnn_out[:, -1, :]
rnn_out = tf.reshape(rnn_out, shape=[rnn_batch_size, -1])
output = tf.matmul(rnn_out, w_fc1) + b_fc1
return (x_image, output, rnn_batch_size, rnn_step_size)
|
DRL
|
positive
|
def fct_moveServos(self, time, nbre, pin, pos, orientation):
<DeepExtract>
self.serial.write(struct.pack('>B', self.SERVO_MOVESERVOS))
</DeepExtract>
if time < 10:
time = 1
else:
time = int(time / 10)
<DeepExtract>
(c1, c2) = (time & 255, time >> 8)
self.serial.write(struct.pack('>BB', c1, c2))
</DeepExtract>
<DeepExtract>
self.serial.write(struct.pack('>B', nbre))
</DeepExtract>
i = 0
while i < nbre:
if orientation[i] == 'indirect':
pos[i] = self.MAX_POS - pos[i]
<DeepExtract>
self.serial.write(struct.pack('>B', pin[i]))
</DeepExtract>
<DeepExtract>
self.serial.write(struct.pack('>B', pos[i]))
</DeepExtract>
i += 1
|
def fct_moveServos(self, time, nbre, pin, pos, orientation):
self.serial.write(struct.pack('>B', self.SERVO_MOVESERVOS))
if time < 10:
time = 1
else:
time = int(time / 10)
(c1, c2) = (time & 255, time >> 8)
self.serial.write(struct.pack('>BB', c1, c2))
self.serial.write(struct.pack('>B', nbre))
i = 0
while i < nbre:
if orientation[i] == 'indirect':
pos[i] = self.MAX_POS - pos[i]
self.serial.write(struct.pack('>B', pin[i]))
self.serial.write(struct.pack('>B', pos[i]))
i += 1
|
choreograph-git
|
positive
|
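The servo command above sends the move duration as two bytes, low byte first, after dividing the requested time by ten. The packing step in isolation, with an example duration:

import struct

time_ms = 1250                                 # example request
time_units = 1 if time_ms < 10 else int(time_ms / 10)
c1, c2 = time_units & 255, time_units >> 8     # low byte, high byte
payload = struct.pack('>BB', c1, c2)
assert (c2 << 8) | c1 == time_units
print(payload)  # b'}\x00'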
def main(args):
logger = logging.getLogger(__name__)
dummy_coco_dataset = dummy_datasets.get_coco_dataset()
cfg_orig = yaml.load(yaml.dump(cfg))
im = cv2.imread(args.im_file)
if args.rpn_pkl is not None:
<DeepExtract>
cfg.immutable(False)
merge_cfg_from_file(args.rpn_cfg)
cfg.NUM_GPUS = 1
cfg.MODEL.RPN_ONLY = True
cfg.TEST.RPN_PRE_NMS_TOP_N = 10000
cfg.TEST.RPN_POST_NMS_TOP_N = 2000
assert_and_infer_cfg(cache_urls=False)
model = model_engine.initialize_model_from_cfg(args.rpn_pkl)
with c2_utils.NamedCudaScope(0):
(boxes, scores) = rpn_engine.im_proposals(model, im)
(proposal_boxes, _proposal_scores) = (boxes, scores)
</DeepExtract>
workspace.ResetWorkspace()
else:
proposal_boxes = None
(cls_boxes, cls_segms, cls_keyps) = (None, None, None)
for i in range(0, len(args.models_to_run), 2):
pkl = args.models_to_run[i]
yml = args.models_to_run[i + 1]
cfg.immutable(False)
merge_cfg_from_cfg(cfg_orig)
merge_cfg_from_file(yml)
if len(pkl) > 0:
weights_file = pkl
else:
weights_file = cfg.TEST.WEIGHTS
cfg.NUM_GPUS = 1
assert_and_infer_cfg(cache_urls=False)
model = model_engine.initialize_model_from_cfg(weights_file)
with c2_utils.NamedCudaScope(0):
(cls_boxes_, cls_segms_, cls_keyps_) = model_engine.im_detect_all(model, im, proposal_boxes)
cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
workspace.ResetWorkspace()
out_name = os.path.join(args.output_dir, '{}'.format(os.path.basename(args.im_file) + '.pdf'))
logger.info('Processing {} -> {}'.format(args.im_file, out_name))
vis_utils.vis_one_image(im[:, :, ::-1], args.im_file, args.output_dir, cls_boxes, cls_segms, cls_keyps, dataset=dummy_coco_dataset, box_alpha=0.3, show_class=True, thresh=0.7, kp_thresh=2)
|
def main(args):
logger = logging.getLogger(__name__)
dummy_coco_dataset = dummy_datasets.get_coco_dataset()
cfg_orig = yaml.load(yaml.dump(cfg))
im = cv2.imread(args.im_file)
if args.rpn_pkl is not None:
cfg.immutable(False)
merge_cfg_from_file(args.rpn_cfg)
cfg.NUM_GPUS = 1
cfg.MODEL.RPN_ONLY = True
cfg.TEST.RPN_PRE_NMS_TOP_N = 10000
cfg.TEST.RPN_POST_NMS_TOP_N = 2000
assert_and_infer_cfg(cache_urls=False)
model = model_engine.initialize_model_from_cfg(args.rpn_pkl)
with c2_utils.NamedCudaScope(0):
(boxes, scores) = rpn_engine.im_proposals(model, im)
(proposal_boxes, _proposal_scores) = (boxes, scores)
workspace.ResetWorkspace()
else:
proposal_boxes = None
(cls_boxes, cls_segms, cls_keyps) = (None, None, None)
for i in range(0, len(args.models_to_run), 2):
pkl = args.models_to_run[i]
yml = args.models_to_run[i + 1]
cfg.immutable(False)
merge_cfg_from_cfg(cfg_orig)
merge_cfg_from_file(yml)
if len(pkl) > 0:
weights_file = pkl
else:
weights_file = cfg.TEST.WEIGHTS
cfg.NUM_GPUS = 1
assert_and_infer_cfg(cache_urls=False)
model = model_engine.initialize_model_from_cfg(weights_file)
with c2_utils.NamedCudaScope(0):
(cls_boxes_, cls_segms_, cls_keyps_) = model_engine.im_detect_all(model, im, proposal_boxes)
cls_boxes = cls_boxes_ if cls_boxes_ is not None else cls_boxes
cls_segms = cls_segms_ if cls_segms_ is not None else cls_segms
cls_keyps = cls_keyps_ if cls_keyps_ is not None else cls_keyps
workspace.ResetWorkspace()
out_name = os.path.join(args.output_dir, '{}'.format(os.path.basename(args.im_file) + '.pdf'))
logger.info('Processing {} -> {}'.format(args.im_file, out_name))
vis_utils.vis_one_image(im[:, :, ::-1], args.im_file, args.output_dir, cls_boxes, cls_segms, cls_keyps, dataset=dummy_coco_dataset, box_alpha=0.3, show_class=True, thresh=0.7, kp_thresh=2)
|
AIC2018_iamai
|
positive
|
def prepare_rnn_seq(rnn_input, lengths, hx=None, masks=None, batch_first=False):
"""
Args:
rnn_input: [seq_len, batch, input_size]: tensor containing the features of the input sequence.
        lengths: [batch]: tensor containing the lengths of the input sequence
hx: [num_layers * num_directions, batch, hidden_size]: tensor containing the initial hidden state for each element in the batch.
masks: [seq_len, batch]: tensor containing the mask for each element in the batch.
batch_first: If True, then the input and output tensors are provided as [batch, seq_len, feature].
Returns:
"""
def check_decreasing(lengths):
(lens, order) = torch.sort(lengths, dim=0, descending=True)
if torch.ne(lens, lengths).sum() == 0:
return None
else:
(_, rev_order) = torch.sort(order)
return (lens, Variable(order), Variable(rev_order))
<DeepExtract>
(lens, order) = torch.sort(lengths, dim=0, descending=True)
if torch.ne(lens, lengths).sum() == 0:
check_res = None
else:
(_, rev_order) = torch.sort(order)
check_res = (lens, Variable(order), Variable(rev_order))
</DeepExtract>
if check_res is None:
lens = lengths
rev_order = None
else:
(lens, order, rev_order) = check_res
batch_dim = 0 if batch_first else 1
rnn_input = rnn_input.index_select(batch_dim, order)
if hx is not None:
if isinstance(hx, tuple):
(hx, cx) = hx
hx = hx.index_select(1, order)
cx = cx.index_select(1, order)
hx = (hx, cx)
else:
hx = hx.index_select(1, order)
lens = lens.tolist()
seq = rnn_utils.pack_padded_sequence(rnn_input, lens, batch_first=batch_first)
if masks is not None:
if batch_first:
masks = masks[:, :lens[0]]
else:
masks = masks[:lens[0]]
return (seq, hx, rev_order, masks)
|
def prepare_rnn_seq(rnn_input, lengths, hx=None, masks=None, batch_first=False):
"""
Args:
rnn_input: [seq_len, batch, input_size]: tensor containing the features of the input sequence.
        lengths: [batch]: tensor containing the lengths of the input sequence
hx: [num_layers * num_directions, batch, hidden_size]: tensor containing the initial hidden state for each element in the batch.
masks: [seq_len, batch]: tensor containing the mask for each element in the batch.
batch_first: If True, then the input and output tensors are provided as [batch, seq_len, feature].
Returns:
"""
def check_decreasing(lengths):
(lens, order) = torch.sort(lengths, dim=0, descending=True)
if torch.ne(lens, lengths).sum() == 0:
return None
else:
(_, rev_order) = torch.sort(order)
return (lens, Variable(order), Variable(rev_order))
(lens, order) = torch.sort(lengths, dim=0, descending=True)
if torch.ne(lens, lengths).sum() == 0:
check_res = None
else:
(_, rev_order) = torch.sort(order)
check_res = (lens, Variable(order), Variable(rev_order))
if check_res is None:
lens = lengths
rev_order = None
else:
(lens, order, rev_order) = check_res
batch_dim = 0 if batch_first else 1
rnn_input = rnn_input.index_select(batch_dim, order)
if hx is not None:
if isinstance(hx, tuple):
(hx, cx) = hx
hx = hx.index_select(1, order)
cx = cx.index_select(1, order)
hx = (hx, cx)
else:
hx = hx.index_select(1, order)
lens = lens.tolist()
seq = rnn_utils.pack_padded_sequence(rnn_input, lens, batch_first=batch_first)
if masks is not None:
if batch_first:
masks = masks[:, :lens[0]]
else:
masks = masks[:lens[0]]
return (seq, hx, rev_order, masks)
|
DrRepair
|
positive
|
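prepare_rnn_seq above sorts the batch by decreasing length before packing and keeps the inverse permutation so outputs can later be restored to the original order. A minimal version of that sort-and-pack step, with example sizes:

import torch
import torch.nn.utils.rnn as rnn_utils

lengths = torch.tensor([3, 5, 4])
rnn_input = torch.randn(5, 3, 8)          # [seq_len, batch, input_size]

lens, order = torch.sort(lengths, descending=True)
_, rev_order = torch.sort(order)          # index_select with this undoes the sort later
sorted_input = rnn_input.index_select(1, order)
packed = rnn_utils.pack_padded_sequence(sorted_input, lens.tolist(), batch_first=False)
print(packed.batch_sizes)                 # tensor([3, 3, 3, 2, 1])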
def test_delete_exam_with_submissions_and_correct_user(self):
kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
<DeepExtract>
client = Client()
client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
client = client
</DeepExtract>
response = client.post('/course/1/exam/1/submit_exam', {}, **kwargs)
self.assertEqual(response.status_code, 200)
response = client.post('/course/1/delete_exam', {'exam_id': 1}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['status'], 'success')
self.assertEqual(array['message'], 'exam was deleted')
|
def test_delete_exam_with_submissions_and_correct_user(self):
kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
client = Client()
client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
client = client
response = client.post('/course/1/exam/1/submit_exam', {}, **kwargs)
self.assertEqual(response.status_code, 200)
response = client.post('/course/1/delete_exam', {'exam_id': 1}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['status'], 'success')
self.assertEqual(array['message'], 'exam was deleted')
|
academicstoday-django
|
positive
|
def _get_combination_and_multiply(combination: str, tensors: List[torch.Tensor], weight: torch.nn.Parameter) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return torch.matmul(tensors[index], weight)
else:
if len(combination) != 3:
raise ConfigurationError('Invalid combination: ' + combination)
<DeepExtract>
if combination[0].isdigit():
index = int(combination[0]) - 1
first_tensor = tensors[index]
else:
if len(combination[0]) != 3:
raise ConfigurationError('Invalid combination: ' + combination[0])
first_tensor = _get_combination(combination[0][0], tensors)
second_tensor = _get_combination(combination[0][2], tensors)
operation = combination[0][1]
if operation == '*':
first_tensor = first_tensor * second_tensor
elif operation == '/':
first_tensor = first_tensor / second_tensor
elif operation == '+':
first_tensor = first_tensor + second_tensor
elif operation == '-':
first_tensor = first_tensor - second_tensor
else:
raise ConfigurationError('Invalid operation: ' + operation)
</DeepExtract>
<DeepExtract>
if combination[2].isdigit():
index = int(combination[2]) - 1
second_tensor = tensors[index]
else:
if len(combination[2]) != 3:
raise ConfigurationError('Invalid combination: ' + combination[2])
first_tensor = _get_combination(combination[2][0], tensors)
second_tensor = _get_combination(combination[2][2], tensors)
operation = combination[2][1]
if operation == '*':
second_tensor = first_tensor * second_tensor
elif operation == '/':
second_tensor = first_tensor / second_tensor
elif operation == '+':
second_tensor = first_tensor + second_tensor
elif operation == '-':
second_tensor = first_tensor - second_tensor
else:
raise ConfigurationError('Invalid operation: ' + operation)
</DeepExtract>
operation = combination[1]
if operation == '*':
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError('Tensors with dim > 4 not currently supported')
if first_tensor.dim() == 4:
expanded_dim = first_tensor.size().index(1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = second_tensor.size().index(1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
return torch.matmul(intermediate, second_tensor.transpose(-1, -2)).squeeze(-1)
elif operation == '/':
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError('Tensors with dim > 4 not currently supported')
if first_tensor.dim() == 4:
expanded_dim = first_tensor.size().index(1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = second_tensor.size().index(1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
return torch.matmul(intermediate, second_tensor.pow(-1).transpose(-1, -2)).squeeze(-1)
elif operation == '+':
return torch.matmul(first_tensor, weight) + torch.matmul(second_tensor, weight)
elif operation == '-':
return torch.matmul(first_tensor, weight) - torch.matmul(second_tensor, weight)
else:
raise ConfigurationError('Invalid operation: ' + operation)
|
def _get_combination_and_multiply(combination: str, tensors: List[torch.Tensor], weight: torch.nn.Parameter) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return torch.matmul(tensors[index], weight)
else:
if len(combination) != 3:
raise ConfigurationError('Invalid combination: ' + combination)
if combination[0].isdigit():
index = int(combination[0]) - 1
first_tensor = tensors[index]
else:
if len(combination[0]) != 3:
raise ConfigurationError('Invalid combination: ' + combination[0])
first_tensor = _get_combination(combination[0][0], tensors)
second_tensor = _get_combination(combination[0][2], tensors)
operation = combination[0][1]
if operation == '*':
first_tensor = first_tensor * second_tensor
elif operation == '/':
first_tensor = first_tensor / second_tensor
elif operation == '+':
first_tensor = first_tensor + second_tensor
elif operation == '-':
first_tensor = first_tensor - second_tensor
else:
raise ConfigurationError('Invalid operation: ' + operation)
if combination[2].isdigit():
index = int(combination[2]) - 1
second_tensor = tensors[index]
else:
if len(combination[2]) != 3:
raise ConfigurationError('Invalid combination: ' + combination[2])
first_tensor = _get_combination(combination[2][0], tensors)
second_tensor = _get_combination(combination[2][2], tensors)
operation = combination[2][1]
if operation == '*':
second_tensor = first_tensor * second_tensor
elif operation == '/':
second_tensor = first_tensor / second_tensor
elif operation == '+':
second_tensor = first_tensor + second_tensor
elif operation == '-':
second_tensor = first_tensor - second_tensor
else:
raise ConfigurationError('Invalid operation: ' + operation)
operation = combination[1]
if operation == '*':
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError('Tensors with dim > 4 not currently supported')
if first_tensor.dim() == 4:
expanded_dim = first_tensor.size().index(1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = second_tensor.size().index(1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
return torch.matmul(intermediate, second_tensor.transpose(-1, -2)).squeeze(-1)
elif operation == '/':
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError('Tensors with dim > 4 not currently supported')
if first_tensor.dim() == 4:
expanded_dim = first_tensor.size().index(1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = second_tensor.size().index(1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
return torch.matmul(intermediate, second_tensor.pow(-1).transpose(-1, -2)).squeeze(-1)
elif operation == '+':
return torch.matmul(first_tensor, weight) + torch.matmul(second_tensor, weight)
elif operation == '-':
return torch.matmul(first_tensor, weight) - torch.matmul(second_tensor, weight)
else:
raise ConfigurationError('Invalid operation: ' + operation)
|
ACE
|
positive
|
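The combination strings handled above use digits as 1-based indices into the tensor list and the middle character as an elementwise operator. A toy version of that parsing, without the learned weight, using example tensors:

import torch

def combine(combination, tensors):
    # '1*2' means: elementwise-multiply the first and second tensors in the list.
    first = tensors[int(combination[0]) - 1]
    second = tensors[int(combination[2]) - 1]
    op = combination[1]
    if op == '*':
        return first * second
    if op == '/':
        return first / second
    if op == '+':
        return first + second
    if op == '-':
        return first - second
    raise ValueError('Invalid operation: ' + op)

a, b = torch.ones(2, 3), torch.full((2, 3), 2.0)
print(combine('1+2', [a, b]))   # every entry equals 3.0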
def env_get_task_definition():
<DeepExtract>
if ENV_TASK_DEFINITION not in os.environ:
raise ValueError(f'Missing required environment variable {ENV_TASK_DEFINITION}')
value = os.environ[ENV_TASK_DEFINITION]
if os.getenv(ENV_GZIP_ENABLED, '0') == '1':
taskdef = env_unpack(value)
taskdef = json.loads(value)
</DeepExtract>
return TaskDefinition.deserialize(taskdef)
|
def env_get_task_definition():
if ENV_TASK_DEFINITION not in os.environ:
raise ValueError(f'Missing required environment variable {ENV_TASK_DEFINITION}')
value = os.environ[ENV_TASK_DEFINITION]
if os.getenv(ENV_GZIP_ENABLED, '0') == '1':
taskdef = env_unpack(value)
taskdef = json.loads(value)
return TaskDefinition.deserialize(taskdef)
|
cowait
|
positive
|
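env_get_task_definition above reads a JSON task definition from an environment variable, optionally unpacking it first when a gzip flag is set. The sketch below assumes a base64-encoded gzip payload for the packed case and uses placeholder variable names; cowait's actual env_unpack encoding and the ENV_* constants are not shown in the record and may differ:

import base64
import gzip
import json
import os

def read_taskdef(var='COWAIT_TASK', gzip_flag='COWAIT_GZIP'):
    # var / gzip_flag stand in for ENV_TASK_DEFINITION / ENV_GZIP_ENABLED.
    if var not in os.environ:
        raise ValueError(f'Missing required environment variable {var}')
    value = os.environ[var]
    if os.getenv(gzip_flag, '0') == '1':
        value = gzip.decompress(base64.b64decode(value)).decode('utf-8')
    return json.loads(value)

os.environ['COWAIT_TASK'] = json.dumps({'name': 'demo'})
print(read_taskdef())   # {'name': 'demo'}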
def __init__(self, num_convs=0, num_fcs=0, conv_out_channels=1024, fc_out_channels=1024, conv_cfg=None, norm_cfg=dict(type='BN'), **kwargs):
kwargs.setdefault('with_avg_pool', True)
super(DoubleConvFCBBoxHead, self).__init__(**kwargs)
assert self.with_avg_pool
assert num_convs > 0
assert num_fcs > 0
self.num_convs = num_convs
self.num_fcs = num_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.res_block = BasicResBlock(self.in_channels, self.conv_out_channels)
<DeepExtract>
branch_convs = nn.ModuleList()
for i in range(self.num_convs):
branch_convs.append(Bottleneck(inplanes=self.conv_out_channels, planes=self.conv_out_channels // 4, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
self.conv_branch = branch_convs
</DeepExtract>
<DeepExtract>
branch_fcs = nn.ModuleList()
for i in range(self.num_fcs):
fc_in_channels = self.in_channels * self.roi_feat_area if i == 0 else self.fc_out_channels
branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
self.fc_branch = branch_fcs
</DeepExtract>
out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes
self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes)
self.relu = nn.ReLU(inplace=True)
|
def __init__(self, num_convs=0, num_fcs=0, conv_out_channels=1024, fc_out_channels=1024, conv_cfg=None, norm_cfg=dict(type='BN'), **kwargs):
kwargs.setdefault('with_avg_pool', True)
super(DoubleConvFCBBoxHead, self).__init__(**kwargs)
assert self.with_avg_pool
assert num_convs > 0
assert num_fcs > 0
self.num_convs = num_convs
self.num_fcs = num_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.res_block = BasicResBlock(self.in_channels, self.conv_out_channels)
branch_convs = nn.ModuleList()
for i in range(self.num_convs):
branch_convs.append(Bottleneck(inplanes=self.conv_out_channels, planes=self.conv_out_channels // 4, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
self.conv_branch = branch_convs
branch_fcs = nn.ModuleList()
for i in range(self.num_fcs):
fc_in_channels = self.in_channels * self.roi_feat_area if i == 0 else self.fc_out_channels
branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
self.fc_branch = branch_fcs
out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes
self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes)
self.relu = nn.ReLU(inplace=True)
|
ACSL
|
positive
|
def overlay_instances(self, *, boxes=None, labels=None, masks=None, ext_points=None, path=None, keypoints=None, assigned_colors=None, alpha=0.5):
num_instances = None
if boxes is not None:
boxes = self._convert_boxes(boxes)
num_instances = len(boxes)
if masks is not None:
masks = self._convert_masks(masks)
if num_instances:
assert len(masks) == num_instances
else:
num_instances = len(masks)
if keypoints is not None:
if num_instances:
assert len(keypoints) == num_instances
else:
num_instances = len(keypoints)
keypoints = self._convert_keypoints(keypoints)
if ext_points is not None:
<DeepExtract>
if isinstance(ext_points, ExtremePoints):
ext_points = ext_points.tensor.numpy()
else:
ext_points = np.asarray(ext_points)
</DeepExtract>
if num_instances:
assert len(ext_points) == num_instances
else:
num_instances = len(ext_points)
if labels is not None:
assert len(labels) == num_instances
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
if boxes is not None and boxes.shape[1] == 5:
return self.overlay_rotated_instances(boxes=boxes, labels=labels, assigned_colors=assigned_colors)
areas = None
if boxes is not None:
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
elif masks is not None:
areas = np.asarray([x.area() for x in masks])
if areas is not None:
sorted_idxs = np.argsort(-areas).tolist()
boxes = boxes[sorted_idxs] if boxes is not None else None
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
keypoints = keypoints[sorted_idxs] if keypoints is not None else None
for i in range(num_instances):
color = assigned_colors[i]
if boxes is not None:
self.draw_box(boxes[i], edge_color=color)
if masks is not None:
for segment in masks[i].polygons:
self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
if ext_points is not None:
<DeepExtract>
for pt in ext_points[i]:
(x, y) = pt
self.draw_circle([x, y], color=color, radius=3)
return self.output
</DeepExtract>
if path is not None:
<DeepExtract>
for (i, poly) in enumerate(path[i]):
if i > 0:
prev_poly = path[i][i - 1]
offsets = poly - prev_poly
for j in range(len(offsets)):
self.output.ax.arrow(prev_poly[j, 0], prev_poly[j, 1], offsets[j, 0], offsets[j, 1], linestyle='-', linewidth=1, alpha=alpha)
self.output.ax.plot(poly[0:, 0], poly[0:, 1], color=color, marker='1', alpha=alpha)
return self.output
</DeepExtract>
if labels is not None:
if boxes is not None:
(x0, y0, x1, y1) = boxes[i]
text_pos = (x0, y0)
horiz_align = 'left'
elif masks is not None:
(x0, y0, x1, y1) = masks[i].bbox()
text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
horiz_align = 'center'
else:
continue
text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
horiz_align = 'center'
instance_area = (y1 - y0) * (x1 - x0)
if instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale or y1 - y0 < 40 * self.output.scale:
if y1 >= self.output.height - 5:
text_pos = (x1, y0)
else:
text_pos = (x0, y1)
height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
font_size = np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
self.draw_text(labels[i], text_pos, color=lighter_color, horizontal_alignment=horiz_align, font_size=font_size)
if keypoints is not None:
for keypoints_per_instance in keypoints:
self.draw_and_connect_keypoints(keypoints_per_instance)
return self.output
|
def overlay_instances(self, *, boxes=None, labels=None, masks=None, ext_points=None, path=None, keypoints=None, assigned_colors=None, alpha=0.5):
num_instances = None
if boxes is not None:
boxes = self._convert_boxes(boxes)
num_instances = len(boxes)
if masks is not None:
masks = self._convert_masks(masks)
if num_instances:
assert len(masks) == num_instances
else:
num_instances = len(masks)
if keypoints is not None:
if num_instances:
assert len(keypoints) == num_instances
else:
num_instances = len(keypoints)
keypoints = self._convert_keypoints(keypoints)
if ext_points is not None:
if isinstance(ext_points, ExtremePoints):
ext_points = ext_points.tensor.numpy()
else:
ext_points = np.asarray(ext_points)
if num_instances:
assert len(ext_points) == num_instances
else:
num_instances = len(ext_points)
if labels is not None:
assert len(labels) == num_instances
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
if boxes is not None and boxes.shape[1] == 5:
return self.overlay_rotated_instances(boxes=boxes, labels=labels, assigned_colors=assigned_colors)
areas = None
if boxes is not None:
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
elif masks is not None:
areas = np.asarray([x.area() for x in masks])
if areas is not None:
sorted_idxs = np.argsort(-areas).tolist()
boxes = boxes[sorted_idxs] if boxes is not None else None
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
keypoints = keypoints[sorted_idxs] if keypoints is not None else None
for i in range(num_instances):
color = assigned_colors[i]
if boxes is not None:
self.draw_box(boxes[i], edge_color=color)
if masks is not None:
for segment in masks[i].polygons:
self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
if ext_points is not None:
for pt in ext_points[i]:
(x, y) = pt
self.draw_circle([x, y], color=color, radius=3)
return self.output
if path is not None:
for (i, poly) in enumerate(path[i]):
if i > 0:
prev_poly = path[i][i - 1]
offsets = poly - prev_poly
for j in range(len(offsets)):
self.output.ax.arrow(prev_poly[j, 0], prev_poly[j, 1], offsets[j, 0], offsets[j, 1], linestyle='-', linewidth=1, alpha=alpha)
self.output.ax.plot(poly[0:, 0], poly[0:, 1], color=color, marker='1', alpha=alpha)
return self.output
if labels is not None:
if boxes is not None:
(x0, y0, x1, y1) = boxes[i]
text_pos = (x0, y0)
horiz_align = 'left'
elif masks is not None:
(x0, y0, x1, y1) = masks[i].bbox()
text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
horiz_align = 'center'
else:
continue
text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
horiz_align = 'center'
instance_area = (y1 - y0) * (x1 - x0)
if instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale or y1 - y0 < 40 * self.output.scale:
if y1 >= self.output.height - 5:
text_pos = (x1, y0)
else:
text_pos = (x0, y1)
height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
font_size = np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
self.draw_text(labels[i], text_pos, color=lighter_color, horizontal_alignment=horiz_align, font_size=font_size)
if keypoints is not None:
for keypoints_per_instance in keypoints:
self.draw_and_connect_keypoints(keypoints_per_instance)
return self.output
|
dance
|
positive
|
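The visualizer pair above sorts instances by descending area before drawing, so larger boxes and masks are rendered first and do not hide smaller ones. A minimal NumPy-only sketch of that reordering step (hypothetical toy boxes, not the repo's Visualizer API):

import numpy as np

boxes = np.array([[0, 0, 10, 10],   # area 100
                  [0, 0, 50, 40],   # area 2000
                  [0, 0, 5, 5]])    # area 25
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
sorted_idxs = np.argsort(-areas).tolist()          # largest area first
assert sorted_idxs == [1, 0, 2]
boxes = boxes[sorted_idxs]   # labels, masks and colors are reordered with the same indices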
def remove_env_vars_with_matching_secrets(self):
"""
In the case of a matching key between env vars and kubernetes secrets,
prefer the Kubernetes Secret and remove the matching env var.
"""
<DeepExtract>
value = self._node['app_data'].get('component_parameters', {}).get(ENV_VARIABLES, default_value)
env_vars = None if value == 'None' else value
</DeepExtract>
<DeepExtract>
value = self._node['app_data'].get('component_parameters', {}).get(KUBERNETES_SECRETS, default_value)
secrets = None if value == 'None' else value
</DeepExtract>
if isinstance(env_vars, ElyraPropertyList) and isinstance(secrets, ElyraPropertyList):
new_list = ElyraPropertyList.difference(minuend=env_vars, subtrahend=secrets)
<DeepExtract>
if not ENV_VARIABLES:
raise ValueError('Key is required')
if new_list is None:
raise ValueError('Value is required')
self._node['app_data']['component_parameters'][ENV_VARIABLES] = new_list
</DeepExtract>
|
def remove_env_vars_with_matching_secrets(self):
"""
In the case of a matching key between env vars and kubernetes secrets,
prefer the Kubernetes Secret and remove the matching env var.
"""
value = self._node['app_data'].get('component_parameters', {}).get(ENV_VARIABLES, default_value)
env_vars = None if value == 'None' else value
value = self._node['app_data'].get('component_parameters', {}).get(KUBERNETES_SECRETS, default_value)
secrets = None if value == 'None' else value
if isinstance(env_vars, ElyraPropertyList) and isinstance(secrets, ElyraPropertyList):
new_list = ElyraPropertyList.difference(minuend=env_vars, subtrahend=secrets)
if not ENV_VARIABLES:
raise ValueError('Key is required')
if new_list is None:
raise ValueError('Value is required')
self._node['app_data']['component_parameters'][ENV_VARIABLES] = new_list
</DeepExtract>
|
elyra
|
positive
|
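The elyra pair above resolves two component parameters and then drops every env var whose key also appears as a Kubernetes secret, preferring the secret. A dependency-free sketch of that key-wise difference (plain dicts; ElyraPropertyList.difference is assumed to behave analogously):

def drop_env_vars_shadowed_by_secrets(env_vars: dict, secrets: dict) -> dict:
    # Keep only env vars whose key is not also provided as a secret.
    return {k: v for k, v in env_vars.items() if k not in secrets}

env = {"DB_HOST": "localhost", "DB_PASSWORD": "plaintext"}
sec = {"DB_PASSWORD": "k8s-secret-ref"}
assert drop_env_vars_shadowed_by_secrets(env, sec) == {"DB_HOST": "localhost"}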
def create_training_file(docs, vocab_list, args, epoch_num):
epoch_filename = args.output_dir / 'epoch_{}.json'.format(epoch_num)
num_instances = 0
with epoch_filename.open('w') as epoch_file:
for doc_idx in trange(len(docs), desc='Document'):
<DeepExtract>
document = docs[doc_idx]
max_num_tokens = args.max_seq_len - 3
target_seq_length = max_num_tokens
if random() < args.short_seq_prob:
target_seq_length = randint(2, max_num_tokens)
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
a_end = 1
if len(current_chunk) >= 2:
a_end = randrange(1, len(current_chunk))
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
if len(current_chunk) == 1 or random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
random_document = docs.sample_doc(current_idx=doc_idx, sentence_weighted=True)
random_start = randrange(0, len(random_document))
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = ['[CLS]'] + tokens_a + ['[SEP]'] + tokens_b + ['[SEP]']
segment_ids = [0 for _ in range(len(tokens_a) + 2)] + [1 for _ in range(len(tokens_b) + 1)]
(tokens, masked_lm_positions, masked_lm_labels) = create_masked_lm_predictions(tokens, args.masked_lm_prob, args.max_predictions_per_seq, args.do_whole_word_mask, vocab_list)
instance = {'tokens': tokens, 'segment_ids': segment_ids, 'is_random_next': is_random_next, 'masked_lm_positions': masked_lm_positions, 'masked_lm_labels': masked_lm_labels}
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
doc_instances = instances
</DeepExtract>
doc_instances = [json.dumps(instance) for instance in doc_instances]
for instance in doc_instances:
epoch_file.write(instance + '\n')
num_instances += 1
metrics_file = args.output_dir / 'epoch_{}_metrics.json'.format(epoch_num)
with metrics_file.open('w') as metrics_file:
metrics = {'num_training_examples': num_instances, 'max_seq_len': args.max_seq_len}
metrics_file.write(json.dumps(metrics))
|
def create_training_file(docs, vocab_list, args, epoch_num):
epoch_filename = args.output_dir / 'epoch_{}.json'.format(epoch_num)
num_instances = 0
with epoch_filename.open('w') as epoch_file:
for doc_idx in trange(len(docs), desc='Document'):
document = docs[doc_idx]
max_num_tokens = args.max_seq_len - 3
target_seq_length = max_num_tokens
if random() < args.short_seq_prob:
target_seq_length = randint(2, max_num_tokens)
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
a_end = 1
if len(current_chunk) >= 2:
a_end = randrange(1, len(current_chunk))
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
if len(current_chunk) == 1 or random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
random_document = docs.sample_doc(current_idx=doc_idx, sentence_weighted=True)
random_start = randrange(0, len(random_document))
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = ['[CLS]'] + tokens_a + ['[SEP]'] + tokens_b + ['[SEP]']
segment_ids = [0 for _ in range(len(tokens_a) + 2)] + [1 for _ in range(len(tokens_b) + 1)]
(tokens, masked_lm_positions, masked_lm_labels) = create_masked_lm_predictions(tokens, args.masked_lm_prob, args.max_predictions_per_seq, args.do_whole_word_mask, vocab_list)
instance = {'tokens': tokens, 'segment_ids': segment_ids, 'is_random_next': is_random_next, 'masked_lm_positions': masked_lm_positions, 'masked_lm_labels': masked_lm_labels}
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
doc_instances = instances
doc_instances = [json.dumps(instance) for instance in doc_instances]
for instance in doc_instances:
epoch_file.write(instance + '\n')
num_instances += 1
metrics_file = args.output_dir / 'epoch_{}_metrics.json'.format(epoch_num)
with metrics_file.open('w') as metrics_file:
metrics = {'num_training_examples': num_instances, 'max_seq_len': args.max_seq_len}
metrics_file.write(json.dumps(metrics))
|
automatic-personality-prediction
|
positive
|
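The pretraining pair above calls truncate_seq_pair to make each sentence pair fit the token budget, but the helper itself is not shown. The sketch below mirrors what the reference BERT helper conventionally does (an assumption, since the repo's own version may differ): repeatedly trim the longer side, removing from the front or the back at random.

from random import random

def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
    # Trim the longer of the two token lists in place until the pair fits.
    while len(tokens_a) + len(tokens_b) > max_num_tokens:
        trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        assert len(trunc_tokens) >= 1
        if random() < 0.5:
            del trunc_tokens[0]   # drop from the front ...
        else:
            trunc_tokens.pop()    # ... or from the back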
def _update_leaf_ready_data(self, leaf_start, start_index, new_nr_of_elements, records_to_rewrite):
self.buckets.seek(leaf_start)
self.buckets.write(struct.pack('<h', new_nr_of_elements))
<DeepExtract>
if 'l' == 'l':
start_position = leaf_start + self.leaf_heading_size + start_index * self.single_leaf_record_size
elif 'l' == 'n':
start_position = leaf_start + self.node_heading_size + start_index * (self.pointer_size + self.key_size)
</DeepExtract>
self.buckets.seek(start_position)
self.buckets.write(struct.pack('<' + (new_nr_of_elements - start_index) * self.single_leaf_record_format, *records_to_rewrite))
self._read_leaf_nr_of_elements.delete(leaf_start)
self._read_leaf_nr_of_elements_and_neighbours.delete(leaf_start)
|
def _update_leaf_ready_data(self, leaf_start, start_index, new_nr_of_elements, records_to_rewrite):
self.buckets.seek(leaf_start)
self.buckets.write(struct.pack('<h', new_nr_of_elements))
if 'l' == 'l':
start_position = leaf_start + self.leaf_heading_size + start_index * self.single_leaf_record_size
elif 'l' == 'n':
start_position = leaf_start + self.node_heading_size + start_index * (self.pointer_size + self.key_size)
self.buckets.seek(start_position)
self.buckets.write(struct.pack('<' + (new_nr_of_elements - start_index) * self.single_leaf_record_format, *records_to_rewrite))
self._read_leaf_nr_of_elements.delete(leaf_start)
self._read_leaf_nr_of_elements_and_neighbours.delete(leaf_start)
|
codernitydb
|
positive
|
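The codernitydb pair above seeks to leaf_start plus a heading size plus start_index times a fixed record size, then bulk-writes the remaining records with struct.pack. The snippet below reproduces just that layout arithmetic with a hypothetical record format ('i10s' stands in for single_leaf_record_format):

import io
import struct

record_fmt = 'i10s'                                   # hypothetical per-record format
record_size = struct.calcsize('<' + record_fmt)       # 14 bytes, no padding with '<'
records = [(1, b'alpha'), (2, b'beta')]

buf = io.BytesIO()
buf.write(struct.pack('<h', len(records)))            # leaf heading: element count
buf.write(struct.pack('<' + len(records) * record_fmt,
                      *[field for rec in records for field in rec]))
assert len(buf.getvalue()) == struct.calcsize('<h') + len(records) * record_size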
def breaches_records_to_pdf(breaches_records, pdf_path=None, figure_width=10, logger='bar'):
"""Plots figures of the breaches annotated in the records into a PDF file.
Parameters
----------
breaches_records
      A list of records annotated with breaches, as returned by the
pdf_path
Either the path to a PDF, or a file handle (open in wb mode) or None
for this method to return binary PDF data.
logger
Either "bar" for a progress bar, None for no logging, or any Proglog
logger. The bar name is "sequence".
"""
pdf_io = BytesIO() if pdf_path is None else pdf_path
logger = proglog.default_bar_logger(logger, min_time_interval=0.2)
with PdfPages(pdf_io) as pdf:
for record in logger.iter_bar(sequence=breaches_records):
<DeepExtract>
translator = GraphicTranslator()
graphic_record = translator.translate_record(record)
(ax, _) = graphic_record.plot(ax=ax, figure_width=figure_width, strand_in_label_threshold=7)
ax.set_title(record.id, loc='left', fontweight='bold')
ax.set_ylim(top=ax.get_ylim()[1] + 1)
ax = ax
</DeepExtract>
pdf.savefig(ax.figure, bbox_inches='tight')
plt.close(ax.figure)
if pdf_path is None:
return pdf_io.getvalue()
|
def breaches_records_to_pdf(breaches_records, pdf_path=None, figure_width=10, logger='bar'):
"""Plots figures of the breaches annotated in the records into a PDF file.
Parameters
----------
breaches_records
      A list of records annotated with breaches, as returned by the
pdf_path
Either the path to a PDF, or a file handle (open in wb mode) or None
for this method to return binary PDF data.
logger
Either "bar" for a progress bar, None for no logging, or any Proglog
logger. The bar name is "sequence".
"""
pdf_io = BytesIO() if pdf_path is None else pdf_path
logger = proglog.default_bar_logger(logger, min_time_interval=0.2)
with PdfPages(pdf_io) as pdf:
for record in logger.iter_bar(sequence=breaches_records):
translator = GraphicTranslator()
graphic_record = translator.translate_record(record)
(ax, _) = graphic_record.plot(ax=ax, figure_width=figure_width, strand_in_label_threshold=7)
ax.set_title(record.id, loc='left', fontweight='bold')
ax.set_ylim(top=ax.get_ylim()[1] + 1)
ax = ax
pdf.savefig(ax.figure, bbox_inches='tight')
plt.close(ax.figure)
if pdf_path is None:
return pdf_io.getvalue()
|
DnaChisel
|
positive
|
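The DnaChisel pair above renders one matplotlib figure per record into a multi-page PDF via PdfPages, returning raw PDF bytes when no path is given. A stripped-down sketch of that pattern (placeholder figures instead of the translated graphic records):

from io import BytesIO
import matplotlib
matplotlib.use('Agg')   # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

def figures_to_pdf(titles, pdf_path=None):
    target = BytesIO() if pdf_path is None else pdf_path
    with PdfPages(target) as pdf:
        for title in titles:
            fig, ax = plt.subplots()
            ax.set_title(title, loc='left', fontweight='bold')
            pdf.savefig(fig, bbox_inches='tight')
            plt.close(fig)
    if pdf_path is None:
        return target.getvalue()

pdf_bytes = figures_to_pdf(['record A', 'record B'])
assert pdf_bytes.startswith(b'%PDF')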
def __getitem__(self, index_tuple):
(index, ratio) = index_tuple
single_db = [self._roidb[index]]
(blobs, valid) = get_minibatch(single_db)
for key in blobs:
if key != 'roidb':
blobs[key] = blobs[key].squeeze(axis=0)
if self._roidb[index]['need_crop']:
<DeepExtract>
(data_height, data_width) = map(int, blobs['im_info'][:2])
boxes = blobs['roidb'][0]['boxes']
if ratio < 1:
size_crop = math.ceil(data_width / ratio)
min_y = math.floor(np.min(boxes[:, 1]))
max_y = math.floor(np.max(boxes[:, 3]))
box_region = max_y - min_y + 1
if min_y == 0:
y_s = 0
elif box_region - size_crop < 0:
y_s_min = max(max_y - size_crop, 0)
y_s_max = min(min_y, data_height - size_crop)
y_s = y_s_min if y_s_min == y_s_max else npr.choice(range(y_s_min, y_s_max + 1))
else:
y_s_add = (box_region - size_crop) // 2
y_s = min_y if y_s_add == 0 else npr.choice(range(min_y, min_y + y_s_add + 1))
blobs['data'] = blobs['data'][:, y_s:y_s + size_crop, :]
blobs['im_info'][0] = size_crop
boxes[:, 1] -= y_s
boxes[:, 3] -= y_s
np.clip(boxes[:, 1], 0, size_crop - 1, out=boxes[:, 1])
np.clip(boxes[:, 3], 0, size_crop - 1, out=boxes[:, 3])
blobs['roidb'][0]['boxes'] = boxes
else:
size_crop = math.ceil(data_height * ratio)
min_x = math.floor(np.min(boxes[:, 0]))
max_x = math.floor(np.max(boxes[:, 2]))
box_region = max_x - min_x + 1
if min_x == 0:
x_s = 0
elif box_region - size_crop < 0:
x_s_min = max(max_x - size_crop, 0)
x_s_max = min(min_x, data_width - size_crop)
x_s = x_s_min if x_s_min == x_s_max else npr.choice(range(x_s_min, x_s_max + 1))
else:
x_s_add = (box_region - size_crop) // 2
x_s = min_x if x_s_add == 0 else npr.choice(range(min_x, min_x + x_s_add + 1))
blobs['data'] = blobs['data'][:, :, x_s:x_s + size_crop]
blobs['im_info'][1] = size_crop
boxes[:, 0] -= x_s
boxes[:, 2] -= x_s
np.clip(boxes[:, 0], 0, size_crop - 1, out=boxes[:, 0])
np.clip(boxes[:, 2], 0, size_crop - 1, out=boxes[:, 2])
blobs['roidb'][0]['boxes'] = boxes
</DeepExtract>
entry = blobs['roidb'][0]
boxes = entry['boxes']
invalid = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
valid_inds = np.nonzero(~invalid)[0]
if len(valid_inds) < len(boxes):
for key in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps', 'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints']:
if key in entry:
entry[key] = entry[key][valid_inds]
entry['segms'] = [entry['segms'][ind] for ind in valid_inds]
blobs['roidb'] = blob_utils.serialize(blobs['roidb'])
return blobs
|
def __getitem__(self, index_tuple):
(index, ratio) = index_tuple
single_db = [self._roidb[index]]
(blobs, valid) = get_minibatch(single_db)
for key in blobs:
if key != 'roidb':
blobs[key] = blobs[key].squeeze(axis=0)
if self._roidb[index]['need_crop']:
(data_height, data_width) = map(int, blobs['im_info'][:2])
boxes = blobs['roidb'][0]['boxes']
if ratio < 1:
size_crop = math.ceil(data_width / ratio)
min_y = math.floor(np.min(boxes[:, 1]))
max_y = math.floor(np.max(boxes[:, 3]))
box_region = max_y - min_y + 1
if min_y == 0:
y_s = 0
elif box_region - size_crop < 0:
y_s_min = max(max_y - size_crop, 0)
y_s_max = min(min_y, data_height - size_crop)
y_s = y_s_min if y_s_min == y_s_max else npr.choice(range(y_s_min, y_s_max + 1))
else:
y_s_add = (box_region - size_crop) // 2
y_s = min_y if y_s_add == 0 else npr.choice(range(min_y, min_y + y_s_add + 1))
blobs['data'] = blobs['data'][:, y_s:y_s + size_crop, :]
blobs['im_info'][0] = size_crop
boxes[:, 1] -= y_s
boxes[:, 3] -= y_s
np.clip(boxes[:, 1], 0, size_crop - 1, out=boxes[:, 1])
np.clip(boxes[:, 3], 0, size_crop - 1, out=boxes[:, 3])
blobs['roidb'][0]['boxes'] = boxes
else:
size_crop = math.ceil(data_height * ratio)
min_x = math.floor(np.min(boxes[:, 0]))
max_x = math.floor(np.max(boxes[:, 2]))
box_region = max_x - min_x + 1
if min_x == 0:
x_s = 0
elif box_region - size_crop < 0:
x_s_min = max(max_x - size_crop, 0)
x_s_max = min(min_x, data_width - size_crop)
x_s = x_s_min if x_s_min == x_s_max else npr.choice(range(x_s_min, x_s_max + 1))
else:
x_s_add = (box_region - size_crop) // 2
x_s = min_x if x_s_add == 0 else npr.choice(range(min_x, min_x + x_s_add + 1))
blobs['data'] = blobs['data'][:, :, x_s:x_s + size_crop]
blobs['im_info'][1] = size_crop
boxes[:, 0] -= x_s
boxes[:, 2] -= x_s
np.clip(boxes[:, 0], 0, size_crop - 1, out=boxes[:, 0])
np.clip(boxes[:, 2], 0, size_crop - 1, out=boxes[:, 2])
blobs['roidb'][0]['boxes'] = boxes
entry = blobs['roidb'][0]
boxes = entry['boxes']
invalid = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
valid_inds = np.nonzero(~invalid)[0]
if len(valid_inds) < len(boxes):
for key in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps', 'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints']:
if key in entry:
entry[key] = entry[key][valid_inds]
entry['segms'] = [entry['segms'][ind] for ind in valid_inds]
blobs['roidb'] = blob_utils.serialize(blobs['roidb'])
return blobs
|
DIoU-pytorch-detectron
|
positive
|
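After the aspect-ratio crop in the pair above, ground-truth boxes are shifted by the crop origin and clipped to the cropped extent. The same adjustment for the vertical axis, in isolation (toy numbers):

import numpy as np

def shift_and_clip_y(boxes, y_start, crop_size):
    boxes = boxes.copy()
    boxes[:, [1, 3]] -= y_start                       # move into crop coordinates
    np.clip(boxes[:, 1], 0, crop_size - 1, out=boxes[:, 1])
    np.clip(boxes[:, 3], 0, crop_size - 1, out=boxes[:, 3])
    return boxes

b = np.array([[5.0, 30.0, 20.0, 120.0]])
assert shift_and_clip_y(b, y_start=25, crop_size=64).tolist() == [[5.0, 5.0, 20.0, 63.0]]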
def calculate_field_type(field_name, field_values, field_position, num_fields, num_samples=100, random=True):
"""
For each field, returns highest-scoring field type of first num_samples non-empty
instances.
"""
field_values = field_values.dropna().apply(unicode)
num_samples = min(len(field_values), num_samples)
field_sample = random_sample(field_values, num_samples) if random else field_values[:num_samples]
<DeepExtract>
type_scores = defaultdict(int)
type_scores[DT.STRING.value] = 0
matched = False
for d in header_string_types:
header_string_type = d['type']
weight = d['weight']
operation = d['operation']
mappings = d['mappings']
for (datatype, strings) in mappings.iteritems():
for s in strings:
if operation(field_name, s):
type_scores[datatype] += weight * num_samples
type_scores_from_name = type_scores
type_scores_from_name = type_scores
</DeepExtract>
<DeepExtract>
type_scores = defaultdict(int)
type_scores[DT.STRING.value] = 0
type_instances = []
for field_type in field_types:
type_instances.extend(field_type.instances())
for field_value in field_sample:
for type_instance in type_instances:
if type_instance.test(field_value):
type_scores[type_instance.name] += type_instance.weight
type_scores_from_values = type_scores
</DeepExtract>
final_type_scores = defaultdict(int)
for (t, score) in type_scores_from_name.iteritems():
final_type_scores[t] += score
for (t, score) in type_scores_from_values.iteritems():
final_type_scores[t] += score
score_tuples = []
normalized_type_scores = {}
total_score = sum(final_type_scores.values())
if total_score:
for (type_name, score) in final_type_scores.iteritems():
score_tuples.append((type_name, score))
normalized_type_scores[type_name] = float(score) / total_score
final_field_type = max(score_tuples, key=lambda t: t[1])[0]
return (final_field_type, normalized_type_scores)
else:
return (DT.STRING.value, normalized_type_scores)
|
def calculate_field_type(field_name, field_values, field_position, num_fields, num_samples=100, random=True):
"""
For each field, returns highest-scoring field type of first num_samples non-empty
instances.
"""
field_values = field_values.dropna().apply(unicode)
num_samples = min(len(field_values), num_samples)
field_sample = random_sample(field_values, num_samples) if random else field_values[:num_samples]
type_scores = defaultdict(int)
type_scores[DT.STRING.value] = 0
matched = False
for d in header_string_types:
header_string_type = d['type']
weight = d['weight']
operation = d['operation']
mappings = d['mappings']
for (datatype, strings) in mappings.iteritems():
for s in strings:
if operation(field_name, s):
type_scores[datatype] += weight * num_samples
type_scores_from_name = type_scores
type_scores_from_name = type_scores
type_scores = defaultdict(int)
type_scores[DT.STRING.value] = 0
type_instances = []
for field_type in field_types:
type_instances.extend(field_type.instances())
for field_value in field_sample:
for type_instance in type_instances:
if type_instance.test(field_value):
type_scores[type_instance.name] += type_instance.weight
type_scores_from_values = type_scores
final_type_scores = defaultdict(int)
for (t, score) in type_scores_from_name.iteritems():
final_type_scores[t] += score
for (t, score) in type_scores_from_values.iteritems():
final_type_scores[t] += score
score_tuples = []
normalized_type_scores = {}
total_score = sum(final_type_scores.values())
if total_score:
for (type_name, score) in final_type_scores.iteritems():
score_tuples.append((type_name, score))
normalized_type_scores[type_name] = float(score) / total_score
final_field_type = max(score_tuples, key=lambda t: t[1])[0]
return (final_field_type, normalized_type_scores)
else:
return (DT.STRING.value, normalized_type_scores)
|
DIVE-backend
|
positive
|
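The DIVE-backend pair above accumulates type scores from the field name and from sampled values, then normalizes the merged scores and picks the top-scoring type. That final reduction on its own (generic dicts; returning None instead of the STRING fallback when nothing scored):

from collections import defaultdict

def merge_and_normalize(*score_dicts):
    totals = defaultdict(int)
    for scores in score_dicts:
        for type_name, score in scores.items():
            totals[type_name] += score
    denom = sum(totals.values())
    if not denom:
        return None, {}
    normalized = {t: s / denom for t, s in totals.items()}
    return max(normalized, key=normalized.get), normalized

best, dist = merge_and_normalize({'integer': 3, 'string': 1}, {'integer': 1})
assert best == 'integer' and abs(sum(dist.values()) - 1.0) < 1e-9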
def __exit__(self, exc_type, exc_value, traceback):
<DeepExtract>
if not self.has_lock():
return False
return self.signal(self._local_tokens.pop())
</DeepExtract>
return True if exc_type is None else False
|
def __exit__(self, exc_type, exc_value, traceback):
if not self.has_lock():
return False
return self.signal(self._local_tokens.pop())
return True if exc_type is None else False
|
eoj3
|
positive
|
def get_lidar_with_sweeps(self, index, max_sweeps=1):
info = self.infos[index]
lidar_path = self.root_path / info['lidar_path']
points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
sweep_points_list = [points]
sweep_times_list = [np.zeros((points.shape[0], 1))]
for k in np.random.choice(len(info['sweeps']), max_sweeps - 1, replace=False):
<DeepExtract>
def remove_ego_points(points, center_radius=1.0):
mask = ~((np.abs(points[:, 0]) < center_radius) & (np.abs(points[:, 1]) < center_radius))
(points_sweep, times_sweep) = points[mask]
lidar_path = self.root_path / info['sweeps'][k]['lidar_path']
points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
points_sweep = remove_ego_points(points_sweep).T
if info['sweeps'][k]['transform_matrix'] is not None:
num_points = points_sweep.shape[1]
points_sweep[:3, :] = info['sweeps'][k]['transform_matrix'].dot(np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :]
cur_times = info['sweeps'][k]['time_lag'] * np.ones((1, points_sweep.shape[1]))
(points_sweep, times_sweep) = (points_sweep.T, cur_times.T)
</DeepExtract>
sweep_points_list.append(points_sweep)
sweep_times_list.append(times_sweep)
points = np.concatenate(sweep_points_list, axis=0)
times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype)
points = np.concatenate((points, times), axis=1)
return points
|
def get_lidar_with_sweeps(self, index, max_sweeps=1):
info = self.infos[index]
lidar_path = self.root_path / info['lidar_path']
points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
sweep_points_list = [points]
sweep_times_list = [np.zeros((points.shape[0], 1))]
for k in np.random.choice(len(info['sweeps']), max_sweeps - 1, replace=False):
def remove_ego_points(points, center_radius=1.0):
mask = ~((np.abs(points[:, 0]) < center_radius) & (np.abs(points[:, 1]) < center_radius))
(points_sweep, times_sweep) = points[mask]
lidar_path = self.root_path / info['sweeps'][k]['lidar_path']
points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4]
points_sweep = remove_ego_points(points_sweep).T
if info['sweeps'][k]['transform_matrix'] is not None:
num_points = points_sweep.shape[1]
points_sweep[:3, :] = info['sweeps'][k]['transform_matrix'].dot(np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :]
cur_times = info['sweeps'][k]['time_lag'] * np.ones((1, points_sweep.shape[1]))
(points_sweep, times_sweep) = (points_sweep.T, cur_times.T)
sweep_points_list.append(points_sweep)
sweep_times_list.append(times_sweep)
points = np.concatenate(sweep_points_list, axis=0)
times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype)
points = np.concatenate((points, times), axis=1)
return points
|
CaDDN
|
positive
|
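The CaDDN pair above registers each lidar sweep into the keyframe by applying a 4x4 homogeneous transform to the xyz rows of the point array. That single step in isolation:

import numpy as np

def transform_points(points_3xn, transform_4x4):
    # Apply a rigid-body transform to points stored as a 3xN array.
    n = points_3xn.shape[1]
    homogeneous = np.vstack((points_3xn, np.ones(n)))   # 4xN
    return (transform_4x4 @ homogeneous)[:3, :]

pts = np.array([[1.0, 0.0], [0.0, 1.0], [0.0, 0.0]])    # two points as columns
shift = np.eye(4)
shift[:3, 3] = [10.0, 0.0, 0.0]                          # translate +10 along x
assert np.allclose(transform_points(pts, shift)[0], [11.0, 10.0])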
def create_topic(self, req, resp):
CreateTopicValidator.validate(req)
req_inter = RequestInternal(req.method, '/%s/%s' % (URISEC_TOPIC, req.topic_name))
req_inter.data = TopicEncoder.encode(req)
<DeepExtract>
if req.request_id is not None:
req_inter.header['x-mns-user-request-id'] = req.request_id
if self.http.is_keep_alive():
req_inter.header['Connection'] = 'Keep-Alive'
if req_inter.data != '':
req_inter.header['content-md5'] = base64.b64encode(hashlib.md5(req_inter.data).hexdigest().encode('utf-8')).decode('utf-8')
req_inter.header['content-type'] = 'text/xml;charset=UTF-8'
req_inter.header['x-mns-version'] = self.version
req_inter.header['host'] = self.host
req_inter.header['date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())
req_inter.header['user-agent'] = 'aliyun-sdk-python/%s(%s/%s;%s)' % (pkg_info.version, platform.system(), platform.release(), platform.python_version())
req_inter.header['Authorization'] = self.get_signature(req_inter.method, req_inter.header, req_inter.uri)
if self.security_token != '':
req_inter.header['security-token'] = self.security_token
</DeepExtract>
resp_inter = self.http.send_request(req_inter)
resp.status = resp_inter.status
resp.header = resp_inter.header
<DeepExtract>
if resp_inter.status >= 200 and resp_inter.status < 400:
resp.error_data = ''
else:
resp.error_data = resp_inter.data
if resp_inter.status >= 400 and resp_inter.status <= 600:
(excType, excMessage, reqId, hostId, subErr) = decoder.decodeError(resp.error_data, req_inter.get_req_id())
if reqId is None:
reqId = resp.header['x-mns-request-id']
raise MNSServerException(excType, excMessage, reqId, hostId, subErr)
else:
raise MNSClientNetworkException('UnkownError', resp_inter.data, req_inter.get_req_id())
</DeepExtract>
if resp.error_data == '':
resp.topic_url = self.lower_header(resp.header)['location']
if self.logger:
self.logger.info('CreateTopic RequestId:%s TopicName:%s TopicURl:%s' % (resp.get_requestid(), req.topic_name, resp.topic_url))
|
def create_topic(self, req, resp):
CreateTopicValidator.validate(req)
req_inter = RequestInternal(req.method, '/%s/%s' % (URISEC_TOPIC, req.topic_name))
req_inter.data = TopicEncoder.encode(req)
if req.request_id is not None:
req_inter.header['x-mns-user-request-id'] = req.request_id
if self.http.is_keep_alive():
req_inter.header['Connection'] = 'Keep-Alive'
if req_inter.data != '':
req_inter.header['content-md5'] = base64.b64encode(hashlib.md5(req_inter.data).hexdigest().encode('utf-8')).decode('utf-8')
req_inter.header['content-type'] = 'text/xml;charset=UTF-8'
req_inter.header['x-mns-version'] = self.version
req_inter.header['host'] = self.host
req_inter.header['date'] = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())
req_inter.header['user-agent'] = 'aliyun-sdk-python/%s(%s/%s;%s)' % (pkg_info.version, platform.system(), platform.release(), platform.python_version())
req_inter.header['Authorization'] = self.get_signature(req_inter.method, req_inter.header, req_inter.uri)
if self.security_token != '':
req_inter.header['security-token'] = self.security_token
resp_inter = self.http.send_request(req_inter)
resp.status = resp_inter.status
resp.header = resp_inter.header
if resp_inter.status >= 200 and resp_inter.status < 400:
resp.error_data = ''
else:
resp.error_data = resp_inter.data
if resp_inter.status >= 400 and resp_inter.status <= 600:
(excType, excMessage, reqId, hostId, subErr) = decoder.decodeError(resp.error_data, req_inter.get_req_id())
if reqId is None:
reqId = resp.header['x-mns-request-id']
raise MNSServerException(excType, excMessage, reqId, hostId, subErr)
else:
raise MNSClientNetworkException('UnkownError', resp_inter.data, req_inter.get_req_id())
if resp.error_data == '':
resp.topic_url = self.lower_header(resp.header)['location']
if self.logger:
self.logger.info('CreateTopic RequestId:%s TopicName:%s TopicURl:%s' % (resp.get_requestid(), req.topic_name, resp.topic_url))
|
AutomationTest
|
positive
|
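The AutomationTest pair above (an Aliyun MNS client) fills in request headers before signing; notably, its content-md5 is the base64 encoding of the hex MD5 digest of the body rather than of the raw digest bytes. Just that header derivation, reproduced standalone (date and content-type formats copied from the code above, all provider-specific headers omitted):

import base64
import hashlib
import time

def mns_style_headers(body: bytes) -> dict:
    headers = {
        'content-type': 'text/xml;charset=UTF-8',
        'date': time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime()),
    }
    if body:
        digest_hex = hashlib.md5(body).hexdigest().encode('utf-8')
        headers['content-md5'] = base64.b64encode(digest_hex).decode('utf-8')
    return headers

hdrs = mns_style_headers(b'<Topic/>')
assert 'content-md5' in hdrs and 'date' in hdrs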
def dummy_train_step(self, dummy_batch):
"""Dummy training step for warming caching allocator."""
<DeepExtract>
seed = self.args.seed + self.get_num_updates()
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
self.model.train()
self.zero_grad()
if not True:
self.meters['train_wall'].start()
(logging_outputs, sample_sizes, ooms) = ([], [], 0)
for (i, sample) in enumerate(dummy_batch):
sample = self._prepare_sample(sample)
if sample is None:
sample = self._prepare_sample(self._dummy_batch)
ignore_grad = True
else:
ignore_grad = False
try:
(loss, sample_size, logging_output) = self.task.get_loss(self.model, self.criterion, sample)
if ignore_grad:
loss *= 0
if self.args.distributed_world_size > 1:
if i < len(dummy_batch) - 1:
self.model.need_reduction = False
else:
self.model.need_reduction = True
self.optimizer.backward(loss)
if not ignore_grad:
logging_outputs.append(logging_output)
sample_sizes.append(sample_size)
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory, skipping batch')
ooms += 1
self.zero_grad()
else:
raise e
if True:
return None
if self.args.distributed_world_size > 1:
(logging_outputs, sample_sizes, ooms) = zip(*distributed_utils.all_gather_list([logging_outputs, sample_sizes, ooms]))
logging_outputs = list(chain.from_iterable(logging_outputs))
sample_sizes = list(chain.from_iterable(sample_sizes))
ooms = sum(ooms)
if ooms == self.args.distributed_world_size:
print('| WARNING: OOM in all workers, skipping update')
self.zero_grad()
return None
logging_output = self.criterion.__class__.aggregate_logging_outputs(logging_outputs)
sample_size = self.criterion.__class__.grad_denom(sample_sizes)
if not all((k in logging_output for k in ['ntokens', 'nsentences'])):
raise Exception('Please update the {}.aggregate_logging_outputs() method to return ntokens and nsentences'.format(self.criterion.__class__.__name__))
try:
self.optimizer.multiply_grads(self.args.distributed_world_size / float(sample_size))
grad_norm = self.optimizer.clip_grad_norm(self.args.clip_norm)
self.optimizer.step()
self._num_updates += 1
self.lr_scheduler.step_update(self._num_updates)
ntokens = logging_output.get('ntokens', 0)
nsentences = logging_output.get('nsentences', 0)
self.meters['wps'].update(ntokens)
self.meters['ups'].update(1.0)
self.meters['wpb'].update(ntokens)
self.meters['bsz'].update(nsentences)
self.meters['gnorm'].update(grad_norm)
self.meters['clip'].update(1.0 if grad_norm > self.args.clip_norm and self.args.clip_norm > 0 else 0.0)
self.meters['oom'].update(ooms)
self.meters['train_loss'].update(logging_output.get('loss', 0), sample_size)
if 'nll_loss' in logging_output:
self.meters['train_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens)
except OverflowError as e:
print('| WARNING: overflow detected, ' + str(e))
self.zero_grad()
logging_output = None
if self.args.fp16:
self.meters['loss_scale'].reset()
self.meters['loss_scale'].update(self.optimizer.scaler.loss_scale)
self.meters['train_wall'].stop()
return logging_output
</DeepExtract>
<DeepExtract>
self.optimizer.zero_grad()
</DeepExtract>
|
def dummy_train_step(self, dummy_batch):
"""Dummy training step for warming caching allocator."""
seed = self.args.seed + self.get_num_updates()
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
self.model.train()
self.zero_grad()
if not True:
self.meters['train_wall'].start()
(logging_outputs, sample_sizes, ooms) = ([], [], 0)
for (i, sample) in enumerate(dummy_batch):
sample = self._prepare_sample(sample)
if sample is None:
sample = self._prepare_sample(self._dummy_batch)
ignore_grad = True
else:
ignore_grad = False
try:
(loss, sample_size, logging_output) = self.task.get_loss(self.model, self.criterion, sample)
if ignore_grad:
loss *= 0
if self.args.distributed_world_size > 1:
if i < len(dummy_batch) - 1:
self.model.need_reduction = False
else:
self.model.need_reduction = True
self.optimizer.backward(loss)
if not ignore_grad:
logging_outputs.append(logging_output)
sample_sizes.append(sample_size)
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory, skipping batch')
ooms += 1
self.zero_grad()
else:
raise e
if True:
return None
if self.args.distributed_world_size > 1:
(logging_outputs, sample_sizes, ooms) = zip(*distributed_utils.all_gather_list([logging_outputs, sample_sizes, ooms]))
logging_outputs = list(chain.from_iterable(logging_outputs))
sample_sizes = list(chain.from_iterable(sample_sizes))
ooms = sum(ooms)
if ooms == self.args.distributed_world_size:
print('| WARNING: OOM in all workers, skipping update')
self.zero_grad()
return None
logging_output = self.criterion.__class__.aggregate_logging_outputs(logging_outputs)
sample_size = self.criterion.__class__.grad_denom(sample_sizes)
if not all((k in logging_output for k in ['ntokens', 'nsentences'])):
raise Exception('Please update the {}.aggregate_logging_outputs() method to return ntokens and nsentences'.format(self.criterion.__class__.__name__))
try:
self.optimizer.multiply_grads(self.args.distributed_world_size / float(sample_size))
grad_norm = self.optimizer.clip_grad_norm(self.args.clip_norm)
self.optimizer.step()
self._num_updates += 1
self.lr_scheduler.step_update(self._num_updates)
ntokens = logging_output.get('ntokens', 0)
nsentences = logging_output.get('nsentences', 0)
self.meters['wps'].update(ntokens)
self.meters['ups'].update(1.0)
self.meters['wpb'].update(ntokens)
self.meters['bsz'].update(nsentences)
self.meters['gnorm'].update(grad_norm)
self.meters['clip'].update(1.0 if grad_norm > self.args.clip_norm and self.args.clip_norm > 0 else 0.0)
self.meters['oom'].update(ooms)
self.meters['train_loss'].update(logging_output.get('loss', 0), sample_size)
if 'nll_loss' in logging_output:
self.meters['train_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens)
except OverflowError as e:
print('| WARNING: overflow detected, ' + str(e))
self.zero_grad()
logging_output = None
if self.args.fp16:
self.meters['loss_scale'].reset()
self.meters['loss_scale'].update(self.optimizer.scaler.loss_scale)
self.meters['train_wall'].stop()
return logging_output
self.optimizer.zero_grad()
</DeepExtract>
|
control-length
|
positive
|
def run(self):
if os.name == 'nt':
<DeepExtract>
config_body = textwrap.dedent(' <?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n <configuration>\n <windows>\n <assemblyBinding xmlns="urn:schemas-microsoft-com:asm.v1">\n <probing privatePath="libs" />\n </assemblyBinding>\n </windows>\n </configuration>\n ')
dirpath = os.path.dirname(self.get_ext_fullpath(os.path.join('cocotb', 'simulator')))
os.makedirs(dirpath, exist_ok=True)
with open(self.get_ext_fullpath(os.path.join('cocotb', 'simulator')) + '.2.config', 'w', encoding='utf-8') as f:
f.write(config_body)
</DeepExtract>
super().run()
|
def run(self):
if os.name == 'nt':
config_body = textwrap.dedent(' <?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n <configuration>\n <windows>\n <assemblyBinding xmlns="urn:schemas-microsoft-com:asm.v1">\n <probing privatePath="libs" />\n </assemblyBinding>\n </windows>\n </configuration>\n ')
dirpath = os.path.dirname(self.get_ext_fullpath(os.path.join('cocotb', 'simulator')))
os.makedirs(dirpath, exist_ok=True)
with open(self.get_ext_fullpath(os.path.join('cocotb', 'simulator')) + '.2.config', 'w', encoding='utf-8') as f:
f.write(config_body)
super().run()
|
cocotb
|
positive
|
def localization_now(radius, direction, t, tag=None):
"""Provide localization setup for time t."""
<DeepExtract>
obs_coord = ind2sub(safe_eval(obs_inds, t))
y2x = pairwise_distances(obs_coord, state_coord, shape if periodic else None)
</DeepExtract>
if direction == 'x2y':
def obs_taperer(batch):
x2y = y2x.T
dists = x2y[batch].mean(axis=0)
return inds_and_coeffs(dists, radius, tag=tag)
return (batches, obs_taperer)
elif direction == 'y2x':
def state_taperer(obs_idx):
return inds_and_coeffs(y2x[obs_idx], radius, tag=tag)
return state_taperer
|
def localization_now(radius, direction, t, tag=None):
"""Provide localization setup for time t."""
obs_coord = ind2sub(safe_eval(obs_inds, t))
y2x = pairwise_distances(obs_coord, state_coord, shape if periodic else None)
if direction == 'x2y':
def obs_taperer(batch):
x2y = y2x.T
dists = x2y[batch].mean(axis=0)
return inds_and_coeffs(dists, radius, tag=tag)
return (batches, obs_taperer)
elif direction == 'y2x':
def state_taperer(obs_idx):
return inds_and_coeffs(y2x[obs_idx], radius, tag=tag)
return state_taperer
|
DAPPER
|
positive
|
@mock.patch('eodag.plugins.search.qssearch.QueryStringSearch._request', autospec=True)
def test_plugins_search_querystringseach_discover_product_types(self, mock__request):
"""QueryStringSearch.discover_product_types must return a well formatted dict"""
provider = 'astraea_eod'
<DeepExtract>
search_plugin = next(self.plugins_manager.get_search_plugins(product_type=self.product_type, provider=provider))
</DeepExtract>
results_entry = search_plugin.config.discover_product_types['results_entry']
search_plugin.config.discover_product_types['results_entry'] = cached_parse('collections[?billing=="free"]')
mock__request.return_value = mock.Mock()
mock__request.return_value.json.return_value = {'collections': [{'id': 'foo_collection', 'title': 'The FOO collection', 'billing': 'free'}, {'id': 'bar_collection', 'title': 'The BAR non-free collection', 'billing': 'non-free'}]}
conf_update_dict = search_plugin.discover_product_types()
self.assertIn('foo_collection', conf_update_dict['providers_config'])
self.assertIn('foo_collection', conf_update_dict['product_types_config'])
self.assertNotIn('bar_collection', conf_update_dict['providers_config'])
self.assertNotIn('bar_collection', conf_update_dict['product_types_config'])
self.assertEqual(conf_update_dict['providers_config']['foo_collection']['productType'], 'foo_collection')
self.assertEqual(conf_update_dict['product_types_config']['foo_collection']['title'], 'The FOO collection')
search_plugin.config.discover_product_types['results_entry'] = results_entry
|
@mock.patch('eodag.plugins.search.qssearch.QueryStringSearch._request', autospec=True)
def test_plugins_search_querystringseach_discover_product_types(self, mock__request):
"""QueryStringSearch.discover_product_types must return a well formatted dict"""
provider = 'astraea_eod'
search_plugin = next(self.plugins_manager.get_search_plugins(product_type=self.product_type, provider=provider))
results_entry = search_plugin.config.discover_product_types['results_entry']
search_plugin.config.discover_product_types['results_entry'] = cached_parse('collections[?billing=="free"]')
mock__request.return_value = mock.Mock()
mock__request.return_value.json.return_value = {'collections': [{'id': 'foo_collection', 'title': 'The FOO collection', 'billing': 'free'}, {'id': 'bar_collection', 'title': 'The BAR non-free collection', 'billing': 'non-free'}]}
conf_update_dict = search_plugin.discover_product_types()
self.assertIn('foo_collection', conf_update_dict['providers_config'])
self.assertIn('foo_collection', conf_update_dict['product_types_config'])
self.assertNotIn('bar_collection', conf_update_dict['providers_config'])
self.assertNotIn('bar_collection', conf_update_dict['product_types_config'])
self.assertEqual(conf_update_dict['providers_config']['foo_collection']['productType'], 'foo_collection')
self.assertEqual(conf_update_dict['product_types_config']['foo_collection']['title'], 'The FOO collection')
search_plugin.config.discover_product_types['results_entry'] = results_entry
|
eodag
|
positive
|
def put_file_range(ase, offsets, data, timeout=None):
"""Puts a range of bytes into the remote file
:param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity
:param blobxfer.models.upload.Offsets offsets: upload offsets
:param bytes data: data
:param int timeout: timeout
"""
<DeepExtract>
if not isinstance(ase.name, pathlib.Path):
ase.name = pathlib.Path(ase.name)
dirname = '/'.join(ase.name.parts[:len(ase.name.parts) - 1])
if len(dirname) == 0:
dirname = None
if len(ase.name.parts) > 0:
fname = ase.name.parts[-1]
else:
fname = None
(fname, snapshot) = blobxfer.util.parse_fileshare_or_file_snapshot_parameter(fname)
(dir, fpath, _) = (dirname, fname, snapshot)
</DeepExtract>
ase.client.update_range(share_name=ase.container, directory_name=dir, file_name=fpath, data=data, start_range=offsets.range_start, end_range=offsets.range_end, validate_content=False, timeout=timeout)
|
def put_file_range(ase, offsets, data, timeout=None):
"""Puts a range of bytes into the remote file
:param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity
:param blobxfer.models.upload.Offsets offsets: upload offsets
:param bytes data: data
:param int timeout: timeout
"""
if not isinstance(ase.name, pathlib.Path):
ase.name = pathlib.Path(ase.name)
dirname = '/'.join(ase.name.parts[:len(ase.name.parts) - 1])
if len(dirname) == 0:
dirname = None
if len(ase.name.parts) > 0:
fname = ase.name.parts[-1]
else:
fname = None
(fname, snapshot) = blobxfer.util.parse_fileshare_or_file_snapshot_parameter(fname)
(dir, fpath, _) = (dirname, fname, snapshot)
ase.client.update_range(share_name=ase.container, directory_name=dir, file_name=fpath, data=data, start_range=offsets.range_start, end_range=offsets.range_end, validate_content=False, timeout=timeout)
|
blobxfer
|
positive
|
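The blobxfer pair above splits a share-relative file path into a directory component (or None) and a file name before issuing the ranged upload. The same split with pathlib alone (ignoring the snapshot-parameter parsing, which is blobxfer-specific):

import pathlib

def split_share_path(name):
    path = pathlib.PurePosixPath(name)
    dirname = '/'.join(path.parts[:-1]) or None
    fname = path.parts[-1] if path.parts else None
    return dirname, fname

assert split_share_path('dir/sub/file.bin') == ('dir/sub', 'file.bin')
assert split_share_path('file.bin') == (None, 'file.bin')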
def destroy(self):
"""Close the connection, and close any associated
CBS authentication session.
"""
try:
<DeepExtract>
try:
if not self._lock.acquire(timeout=timeout):
raise compat.TimeoutException('Failed to acquire connection lock.')
except TypeError:
self._lock.acquire()
</DeepExtract>
_logger.debug('Unlocked connection %r to close.', self.container_id)
<DeepExtract>
_logger.info('Shutting down connection %r.', self.container_id)
self._closing = True
if self._cbs:
self.auth.close_authenticator()
self._cbs = None
self._conn.destroy()
self.auth.close()
_logger.info('Connection shutdown complete %r.', self.container_id)
</DeepExtract>
finally:
<DeepExtract>
try:
self._lock.release()
except (RuntimeError, threading.ThreadError):
pass
except:
_logger.debug('Got error when attempting to release connection lock.')
try:
self._lock.release()
except (RuntimeError, threading.ThreadError):
pass
raise
</DeepExtract>
uamqp._Platform.deinitialize()
|
def destroy(self):
"""Close the connection, and close any associated
CBS authentication session.
"""
try:
try:
if not self._lock.acquire(timeout=timeout):
raise compat.TimeoutException('Failed to acquire connection lock.')
except TypeError:
self._lock.acquire()
_logger.debug('Unlocked connection %r to close.', self.container_id)
_logger.info('Shutting down connection %r.', self.container_id)
self._closing = True
if self._cbs:
self.auth.close_authenticator()
self._cbs = None
self._conn.destroy()
self.auth.close()
_logger.info('Connection shutdown complete %r.', self.container_id)
finally:
try:
self._lock.release()
except (RuntimeError, threading.ThreadError):
pass
except:
_logger.debug('Got error when attempting to release connection lock.')
try:
self._lock.release()
except (RuntimeError, threading.ThreadError):
pass
raise
uamqp._Platform.deinitialize()
|
azure-uamqp-python
|
positive
|
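The uamqp pair above guards shutdown with a lock acquire that passes a timeout when the lock supports one and falls back to a plain acquire on TypeError, then releases in a finally block that tolerates double-release errors. The acquire half of that pattern, standalone (TimeoutError stands in for the library's compat.TimeoutException):

import threading

def acquire_with_optional_timeout(lock, timeout=None):
    try:
        if not lock.acquire(timeout=timeout):
            raise TimeoutError('Failed to acquire connection lock.')
    except TypeError:
        # Lock implementations without a usable timeout keyword land here.
        lock.acquire()

lock = threading.Lock()
acquire_with_optional_timeout(lock, timeout=1)
lock.release()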
def get_model_complexity_info(model, input_res, print_per_layer_stat=True, as_strings=True, input_constructor=None, ost=sys.stdout):
assert type(input_res) is tuple
assert len(input_res) >= 2
<DeepExtract>
model.start_flops_count = start_flops_count.__get__(model)
model.stop_flops_count = stop_flops_count.__get__(model)
model.reset_flops_count = reset_flops_count.__get__(model)
model.compute_average_flops_cost = compute_average_flops_cost.__get__(model)
model.reset_flops_count()
model.apply(add_flops_mask_variable_or_reset)
flops_model = model
</DeepExtract>
flops_model.eval().start_flops_count()
if input_constructor:
input = input_constructor(input_res)
_ = flops_model(**input)
else:
batch = torch.ones(()).new_empty((1, *input_res), dtype=next(flops_model.parameters()).dtype, device=next(flops_model.parameters()).device)
flops_model(batch)
if print_per_layer_stat:
<DeepExtract>
total_flops = flops_model.compute_average_flops_cost()
def accumulate_flops(self):
if is_supported_instance(self):
return self.__flops__ / flops_model.__batch_counter__
else:
sum = 0
for m in self.children():
sum += m.accumulate_flops()
return sum
def flops_repr(self):
accumulated_flops_cost = self.accumulate_flops()
return ', '.join([flops_to_string(accumulated_flops_cost, units=units, precision=precision), '{:.3%} MACs'.format(accumulated_flops_cost / total_flops), self.original_extra_repr()])
def add_extra_repr(m):
m.accumulate_flops = accumulate_flops.__get__(m)
flops_extra_repr = flops_repr.__get__(m)
if m.extra_repr != flops_extra_repr:
m.original_extra_repr = m.extra_repr
m.extra_repr = flops_extra_repr
assert m.extra_repr != m.original_extra_repr
def del_extra_repr(m):
if hasattr(m, 'original_extra_repr'):
m.extra_repr = m.original_extra_repr
del m.original_extra_repr
if hasattr(m, 'accumulate_flops'):
del m.accumulate_flops
flops_model.apply(add_extra_repr)
print(flops_model, file=ost)
flops_model.apply(del_extra_repr)
</DeepExtract>
flops_count = flops_model.compute_average_flops_cost()
<DeepExtract>
params_num = sum((p.numel() for p in flops_model.parameters() if p.requires_grad))
params_count = params_num
</DeepExtract>
flops_model.stop_flops_count()
if as_strings:
return (flops_to_string(flops_count), params_to_string(params_count))
return (flops_count, params_count)
|
def get_model_complexity_info(model, input_res, print_per_layer_stat=True, as_strings=True, input_constructor=None, ost=sys.stdout):
assert type(input_res) is tuple
assert len(input_res) >= 2
model.start_flops_count = start_flops_count.__get__(model)
model.stop_flops_count = stop_flops_count.__get__(model)
model.reset_flops_count = reset_flops_count.__get__(model)
model.compute_average_flops_cost = compute_average_flops_cost.__get__(model)
model.reset_flops_count()
model.apply(add_flops_mask_variable_or_reset)
flops_model = model
flops_model.eval().start_flops_count()
if input_constructor:
input = input_constructor(input_res)
_ = flops_model(**input)
else:
batch = torch.ones(()).new_empty((1, *input_res), dtype=next(flops_model.parameters()).dtype, device=next(flops_model.parameters()).device)
flops_model(batch)
if print_per_layer_stat:
total_flops = flops_model.compute_average_flops_cost()
def accumulate_flops(self):
if is_supported_instance(self):
return self.__flops__ / flops_model.__batch_counter__
else:
sum = 0
for m in self.children():
sum += m.accumulate_flops()
return sum
def flops_repr(self):
accumulated_flops_cost = self.accumulate_flops()
return ', '.join([flops_to_string(accumulated_flops_cost, units=units, precision=precision), '{:.3%} MACs'.format(accumulated_flops_cost / total_flops), self.original_extra_repr()])
def add_extra_repr(m):
m.accumulate_flops = accumulate_flops.__get__(m)
flops_extra_repr = flops_repr.__get__(m)
if m.extra_repr != flops_extra_repr:
m.original_extra_repr = m.extra_repr
m.extra_repr = flops_extra_repr
assert m.extra_repr != m.original_extra_repr
def del_extra_repr(m):
if hasattr(m, 'original_extra_repr'):
m.extra_repr = m.original_extra_repr
del m.original_extra_repr
if hasattr(m, 'accumulate_flops'):
del m.accumulate_flops
flops_model.apply(add_extra_repr)
print(flops_model, file=ost)
flops_model.apply(del_extra_repr)
flops_count = flops_model.compute_average_flops_cost()
params_num = sum((p.numel() for p in flops_model.parameters() if p.requires_grad))
params_count = params_num
flops_model.stop_flops_count()
if as_strings:
return (flops_to_string(flops_count), params_to_string(params_count))
return (flops_count, params_count)
|
D2Det
|
positive
|
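The D2Det flops counter above grafts new methods onto an existing model instance with function.__get__(model), so the counters travel with that one object rather than with its class. The binding trick in miniature, without any torch dependency:

class Model:
    def __init__(self):
        self.calls = 0

def count_call(self):
    self.calls += 1
    return self.calls

m = Model()
m.count_call = count_call.__get__(m)   # bind the free function to this instance only
assert m.count_call() == 1 and m.count_call() == 2
assert not hasattr(Model, 'count_call')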
def _eval(self, model, test_set, val_set, user_based):
metric_avg_results = OrderedDict()
metric_user_results = OrderedDict()
<DeepExtract>
if len(self.rating_metrics) == 0:
(avg_results, user_results) = ([], [])
avg_results = []
user_results = []
(u_indices, i_indices, r_values) = test_set.uir_tuple
r_preds = np.fromiter(tqdm((model.rate(user_idx, item_idx).item() for (user_idx, item_idx) in zip(u_indices, i_indices)), desc='Rating', disable=not self.verbose, miniters=100, total=len(u_indices)), dtype='float')
gt_mat = test_set.csr_matrix
pd_mat = csr_matrix((r_preds, (u_indices, i_indices)), shape=gt_mat.shape)
for mt in self.rating_metrics:
if user_based:
user_results.append({user_idx: mt.compute(gt_ratings=gt_mat.getrow(user_idx).data, pd_ratings=pd_mat.getrow(user_idx).data).item() for user_idx in test_set.user_indices})
avg_results.append(sum(user_results[-1].values()) / len(user_results[-1]))
else:
user_results.append({})
avg_results.append(mt.compute(gt_ratings=r_values, pd_ratings=r_preds))
(avg_results, user_results) = (avg_results, user_results)
</DeepExtract>
for (i, mt) in enumerate(self.rating_metrics):
metric_avg_results[mt.name] = avg_results[i]
metric_user_results[mt.name] = user_results[i]
<DeepExtract>
if len(self.ranking_metrics) == 0:
(avg_results, user_results) = ([], [])
avg_results = []
user_results = [{} for _ in enumerate(self.ranking_metrics)]
gt_mat = test_set.csr_matrix
train_mat = self.train_set.csr_matrix
val_mat = None if val_set is None else val_set.csr_matrix
def pos_items(csr_row):
(avg_results, user_results) = [item_idx for (item_idx, rating) in zip(csr_row.indices, csr_row.data) if rating >= self.rating_threshold]
for user_idx in tqdm(test_set.user_indices, desc='Ranking', disable=not self.verbose, miniters=100):
test_pos_items = pos_items(gt_mat.getrow(user_idx))
if len(test_pos_items) == 0:
continue
u_gt_pos = np.zeros(test_set.num_items, dtype='int')
u_gt_pos[test_pos_items] = 1
val_pos_items = [] if val_mat is None else pos_items(val_mat.getrow(user_idx))
train_pos_items = [] if self.train_set.is_unk_user(user_idx) else pos_items(train_mat.getrow(user_idx))
u_gt_neg = np.ones(test_set.num_items, dtype='int')
u_gt_neg[test_pos_items + val_pos_items + train_pos_items] = 0
item_indices = None if self.exclude_unknowns else np.arange(test_set.num_items)
(item_rank, item_scores) = model.rank(user_idx, item_indices)
for (i, mt) in enumerate(self.ranking_metrics):
mt_score = mt.compute(gt_pos=u_gt_pos, gt_neg=u_gt_neg, pd_rank=item_rank, pd_scores=item_scores)
user_results[i][user_idx] = mt_score
for (i, mt) in enumerate(self.ranking_metrics):
avg_results.append(sum(user_results[i].values()) / len(user_results[i]))
(avg_results, user_results) = (avg_results, user_results)
</DeepExtract>
for (i, mt) in enumerate(self.ranking_metrics):
metric_avg_results[mt.name] = avg_results[i]
metric_user_results[mt.name] = user_results[i]
return Result(model.name, metric_avg_results, metric_user_results)
|
def _eval(self, model, test_set, val_set, user_based):
metric_avg_results = OrderedDict()
metric_user_results = OrderedDict()
if len(self.rating_metrics) == 0:
(avg_results, user_results) = ([], [])
avg_results = []
user_results = []
(u_indices, i_indices, r_values) = test_set.uir_tuple
r_preds = np.fromiter(tqdm((model.rate(user_idx, item_idx).item() for (user_idx, item_idx) in zip(u_indices, i_indices)), desc='Rating', disable=not self.verbose, miniters=100, total=len(u_indices)), dtype='float')
gt_mat = test_set.csr_matrix
pd_mat = csr_matrix((r_preds, (u_indices, i_indices)), shape=gt_mat.shape)
for mt in self.rating_metrics:
if user_based:
user_results.append({user_idx: mt.compute(gt_ratings=gt_mat.getrow(user_idx).data, pd_ratings=pd_mat.getrow(user_idx).data).item() for user_idx in test_set.user_indices})
avg_results.append(sum(user_results[-1].values()) / len(user_results[-1]))
else:
user_results.append({})
avg_results.append(mt.compute(gt_ratings=r_values, pd_ratings=r_preds))
(avg_results, user_results) = (avg_results, user_results)
for (i, mt) in enumerate(self.rating_metrics):
metric_avg_results[mt.name] = avg_results[i]
metric_user_results[mt.name] = user_results[i]
if len(self.ranking_metrics) == 0:
(avg_results, user_results) = ([], [])
avg_results = []
user_results = [{} for _ in enumerate(self.ranking_metrics)]
gt_mat = test_set.csr_matrix
train_mat = self.train_set.csr_matrix
val_mat = None if val_set is None else val_set.csr_matrix
def pos_items(csr_row):
(avg_results, user_results) = [item_idx for (item_idx, rating) in zip(csr_row.indices, csr_row.data) if rating >= self.rating_threshold]
for user_idx in tqdm(test_set.user_indices, desc='Ranking', disable=not self.verbose, miniters=100):
test_pos_items = pos_items(gt_mat.getrow(user_idx))
if len(test_pos_items) == 0:
continue
u_gt_pos = np.zeros(test_set.num_items, dtype='int')
u_gt_pos[test_pos_items] = 1
val_pos_items = [] if val_mat is None else pos_items(val_mat.getrow(user_idx))
train_pos_items = [] if self.train_set.is_unk_user(user_idx) else pos_items(train_mat.getrow(user_idx))
u_gt_neg = np.ones(test_set.num_items, dtype='int')
u_gt_neg[test_pos_items + val_pos_items + train_pos_items] = 0
item_indices = None if self.exclude_unknowns else np.arange(test_set.num_items)
(item_rank, item_scores) = model.rank(user_idx, item_indices)
for (i, mt) in enumerate(self.ranking_metrics):
mt_score = mt.compute(gt_pos=u_gt_pos, gt_neg=u_gt_neg, pd_rank=item_rank, pd_scores=item_scores)
user_results[i][user_idx] = mt_score
for (i, mt) in enumerate(self.ranking_metrics):
avg_results.append(sum(user_results[i].values()) / len(user_results[i]))
(avg_results, user_results) = (avg_results, user_results)
for (i, mt) in enumerate(self.ranking_metrics):
metric_avg_results[mt.name] = avg_results[i]
metric_user_results[mt.name] = user_results[i]
return Result(model.name, metric_avg_results, metric_user_results)
|
cornac
|
positive
|
def perf_index_throughput(self):
"""Test throughput of indexed access."""
for logN in range(3, 6):
items = 10 ** logN
<DeepExtract>
sl = SkipList()
maxkey = 100 * items
for i in range(items):
sl.insert(random.randint(0, maxkey), i)
sl = sl
</DeepExtract>
load = random.sample(range(items), int(0.2 * len(sl)))
count = 0
t0 = t1 = time.time()
while count < len(load) and t1 - t0 < 1:
sl[load[count]]
count += 1
if count % 100 == 0:
t1 = time.time()
throughput = count / (t1 - t0)
self.add_result(throughput, params={'logN': logN})
|
def perf_index_throughput(self):
"""Test throughput of indexed access."""
for logN in range(3, 6):
items = 10 ** logN
sl = SkipList()
maxkey = 100 * items
for i in range(items):
sl.insert(random.randint(0, maxkey), i)
sl = sl
load = random.sample(range(items), int(0.2 * len(sl)))
count = 0
t0 = t1 = time.time()
while count < len(load) and t1 - t0 < 1:
sl[load[count]]
count += 1
if count % 100 == 0:
t1 = time.time()
throughput = count / (t1 - t0)
self.add_result(throughput, params={'logN': logN})
|
bluepass
|
positive
|
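The bluepass benchmark above measures indexed-access throughput by running lookups until either the sampled load is exhausted or roughly one second of wall-clock time has passed. A compact version of that loop (checking a high-resolution clock every iteration instead of every 100, which is a simplification):

import time

def measure_throughput(op, load, budget_s=1.0):
    count = 0
    t0 = time.perf_counter()
    deadline = t0 + budget_s
    while count < len(load) and time.perf_counter() < deadline:
        op(load[count])
        count += 1
    elapsed = time.perf_counter() - t0
    return count / elapsed if elapsed > 0 else 0.0

rate = measure_throughput(lambda x: x * x, list(range(10000)))
assert rate > 0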
def forward(self, inputs, outputs):
"""See modeling.detector.GenerateProposals for inputs/outputs
documentation.
"""
scores = inputs[0].data
bbox_deltas = inputs[1].data
im_info = inputs[2].data
(height, width) = scores.shape[-2:]
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
(shift_x, shift_y) = np.meshgrid(shift_x, shift_y, copy=False)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
num_images = inputs[0].shape[0]
A = self._num_anchors
K = shifts.shape[0]
all_anchors = self._anchors[np.newaxis, :, :] + shifts[:, np.newaxis, :]
all_anchors = all_anchors.reshape((K * A, 4))
rois = np.empty((0, 5), dtype=np.float32)
roi_probs = np.empty((0, 1), dtype=np.float32)
for im_i in range(num_images):
<DeepExtract>
cfg_key = 'TRAIN' if self._train else 'TEST'
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
min_size = cfg[cfg_key].RPN_MIN_SIZE
bbox_deltas[im_i, :, :, :] = bbox_deltas[im_i, :, :, :].transpose((1, 2, 0)).reshape((-1, 4))
scores[im_i, :, :, :] = scores[im_i, :, :, :].transpose((1, 2, 0)).reshape((-1, 1))
if pre_nms_topN <= 0 or pre_nms_topN >= len(scores[im_i, :, :, :]):
order = np.argsort(-scores[im_i, :, :, :].squeeze())
else:
inds = np.argpartition(-scores[im_i, :, :, :].squeeze(), pre_nms_topN)[:pre_nms_topN]
order = np.argsort(-scores[im_i, :, :, :][inds].squeeze())
order = inds[order]
bbox_deltas[im_i, :, :, :] = bbox_deltas[im_i, :, :, :][order, :]
all_anchors = all_anchors[order, :]
scores[im_i, :, :, :] = scores[im_i, :, :, :][order]
proposals = box_utils.bbox_transform(all_anchors, bbox_deltas[im_i, :, :, :], (1.0, 1.0, 1.0, 1.0))
proposals = box_utils.clip_tiled_boxes(proposals, im_info[im_i, :][:2])
keep = _filter_boxes(proposals, min_size, im_info[im_i, :])
proposals = proposals[keep, :]
scores[im_i, :, :, :] = scores[im_i, :, :, :][keep]
if nms_thresh > 0:
keep = box_utils.nms(np.hstack((proposals, scores[im_i, :, :, :])), nms_thresh)
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores[im_i, :, :, :] = scores[im_i, :, :, :][keep]
(im_i_boxes, im_i_probs) = (proposals, scores[im_i, :, :, :])
</DeepExtract>
batch_inds = im_i * np.ones((im_i_boxes.shape[0], 1), dtype=np.float32)
im_i_rois = np.hstack((batch_inds, im_i_boxes))
rois = np.append(rois, im_i_rois, axis=0)
roi_probs = np.append(roi_probs, im_i_probs, axis=0)
outputs[0].reshape(rois.shape)
outputs[0].data[...] = rois
if len(outputs) > 1:
outputs[1].reshape(roi_probs.shape)
outputs[1].data[...] = roi_probs
|
def forward(self, inputs, outputs):
"""See modeling.detector.GenerateProposals for inputs/outputs
documentation.
"""
scores = inputs[0].data
bbox_deltas = inputs[1].data
im_info = inputs[2].data
(height, width) = scores.shape[-2:]
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
(shift_x, shift_y) = np.meshgrid(shift_x, shift_y, copy=False)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
num_images = inputs[0].shape[0]
A = self._num_anchors
K = shifts.shape[0]
all_anchors = self._anchors[np.newaxis, :, :] + shifts[:, np.newaxis, :]
all_anchors = all_anchors.reshape((K * A, 4))
rois = np.empty((0, 5), dtype=np.float32)
roi_probs = np.empty((0, 1), dtype=np.float32)
for im_i in range(num_images):
cfg_key = 'TRAIN' if self._train else 'TEST'
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
min_size = cfg[cfg_key].RPN_MIN_SIZE
bbox_deltas[im_i, :, :, :] = bbox_deltas[im_i, :, :, :].transpose((1, 2, 0)).reshape((-1, 4))
scores[im_i, :, :, :] = scores[im_i, :, :, :].transpose((1, 2, 0)).reshape((-1, 1))
if pre_nms_topN <= 0 or pre_nms_topN >= len(scores[im_i, :, :, :]):
order = np.argsort(-scores[im_i, :, :, :].squeeze())
else:
inds = np.argpartition(-scores[im_i, :, :, :].squeeze(), pre_nms_topN)[:pre_nms_topN]
order = np.argsort(-scores[im_i, :, :, :][inds].squeeze())
order = inds[order]
bbox_deltas[im_i, :, :, :] = bbox_deltas[im_i, :, :, :][order, :]
all_anchors = all_anchors[order, :]
scores[im_i, :, :, :] = scores[im_i, :, :, :][order]
proposals = box_utils.bbox_transform(all_anchors, bbox_deltas[im_i, :, :, :], (1.0, 1.0, 1.0, 1.0))
proposals = box_utils.clip_tiled_boxes(proposals, im_info[im_i, :][:2])
keep = _filter_boxes(proposals, min_size, im_info[im_i, :])
proposals = proposals[keep, :]
scores[im_i, :, :, :] = scores[im_i, :, :, :][keep]
if nms_thresh > 0:
keep = box_utils.nms(np.hstack((proposals, scores[im_i, :, :, :])), nms_thresh)
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores[im_i, :, :, :] = scores[im_i, :, :, :][keep]
(im_i_boxes, im_i_probs) = (proposals, scores[im_i, :, :, :])
batch_inds = im_i * np.ones((im_i_boxes.shape[0], 1), dtype=np.float32)
im_i_rois = np.hstack((batch_inds, im_i_boxes))
rois = np.append(rois, im_i_rois, axis=0)
roi_probs = np.append(roi_probs, im_i_probs, axis=0)
outputs[0].reshape(rois.shape)
outputs[0].data[...] = rois
if len(outputs) > 1:
outputs[1].reshape(roi_probs.shape)
outputs[1].data[...] = roi_probs
|
AIC2018_iamai
|
positive
|
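The GenerateProposals pair above tiles a small set of base anchors across every feature-map cell by broadcasting them against per-cell shifts. A numpy-only sketch of that broadcasting step; the feature-map size and base anchor boxes below are made up for illustration:

import numpy as np

feat_stride = 16
height, width = 3, 4  # hypothetical feature-map size
# Hypothetical base anchors in (x1, y1, x2, y2) form.
anchors = np.array([[-8, -8, 8, 8], [-16, -16, 16, 16]], dtype=np.float32)

shift_x = np.arange(0, width) * feat_stride
shift_y = np.arange(0, height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()

K, A = shifts.shape[0], anchors.shape[0]
all_anchors = (anchors[np.newaxis, :, :] + shifts[:, np.newaxis, :]).reshape(K * A, 4)
print(all_anchors.shape)  # (24, 4): one box per anchor per feature-map cell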
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
        A 2-tuple of sets. The first set contains the
        futures that completed (finished or were cancelled) before the wait
        completed. The second set contains the uncompleted futures.
"""
with _AcquireFutures(fs):
done = set((f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]))
not_done = set(fs) - done
if return_when == FIRST_COMPLETED and done:
return (done, not_done)
elif return_when == FIRST_EXCEPTION and done:
if any((f for f in done if not f.cancelled() and f.exception() is not None)):
return (done, not_done)
if len(done) == len(fs):
return (done, not_done)
<DeepExtract>
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum((f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs))
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError('Invalid return condition: %r' % return_when)
for f in fs:
f._waiters.append(waiter)
waiter = waiter
</DeepExtract>
waiter.event.wait(timeout)
for f in fs:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return (done, set(fs) - done)
|
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
        A 2-tuple of sets. The first set contains the
        futures that completed (finished or were cancelled) before the wait
        completed. The second set contains the uncompleted futures.
"""
with _AcquireFutures(fs):
done = set((f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]))
not_done = set(fs) - done
if return_when == FIRST_COMPLETED and done:
return (done, not_done)
elif return_when == FIRST_EXCEPTION and done:
if any((f for f in done if not f.cancelled() and f.exception() is not None)):
return (done, not_done)
if len(done) == len(fs):
return (done, not_done)
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum((f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs))
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError('Invalid return condition: %r' % return_when)
for f in fs:
f._waiters.append(waiter)
waiter = waiter
waiter.event.wait(timeout)
for f in fs:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return (done, set(fs) - done)
|
evergreen
|
positive
|
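The wait pair above mirrors the concurrent.futures implementation of waiting on a set of futures. The public standard-library API it corresponds to can be exercised directly; a short usage sketch (timings, and therefore the exact counts printed, may vary):

import time
from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED

def work(seconds):
    time.sleep(seconds)
    return seconds

with ThreadPoolExecutor(max_workers=2) as pool:
    futures = [pool.submit(work, s) for s in (0.1, 0.5)]
    # Return as soon as the first future finishes.
    done, not_done = wait(futures, timeout=2, return_when=FIRST_COMPLETED)
    print(len(done), len(not_done))  # typically 1 1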
def export_deploy_viz(codebuild_execution_id, group_by_pid, puppet_account_id):
output_file_name_prefix = codebuild_execution_id.split(':')[1]
<DeepExtract>
with betterboto_client.ClientContextManager('codebuild') as codebuild:
build = codebuild.batch_get_builds(ids=[codebuild_execution_id]).get('builds')[0]
location = build.get('artifacts').get('location').split(':')
bucket = location[5].split('/')[0]
key = location[5].replace(f'{bucket}/', '')
zip_file_location = f'{output_file_name_prefix}.zip'
if os.path.exists(zip_file_location):
print(f'Found zip file, skipping download')
else:
print(f'Downloading zip file')
with betterboto_client.ClientContextManager('s3') as s3:
print(f'getting {bucket} {key}')
s3.download_file(Bucket=bucket, Key=key, Filename=zip_file_location)
if os.path.exists(output_file_name_prefix):
print(f'Found output folder, skipping unzip')
else:
        print(f'Unzipping')
os.makedirs(output_file_name_prefix)
with zipfile.ZipFile(zip_file_location, 'r') as zip_ref:
zip_ref.extractall(output_file_name_prefix)
path_to_results = f'{output_file_name_prefix}/results'
</DeepExtract>
<DeepExtract>
results = list()
groups = dict()
time_format = '%Y-%m-%d %H:%M:%S'
earliest_time = datetime.strptime('4022-09-14 00:54:33', time_format)
latest_time = datetime.strptime('2000-09-14 00:54:33', time_format)
for starter in glob.glob(f'{path_to_results}/start/*.json'):
start = serialisation_utils.json_loads(open(starter, 'r').read())
name = os.path.basename(starter)
end = None
task_id = name.replace('.json', '')
task_id = '-'.join(task_id.split('-')[1:])
processing_time = dict(duration='unknown')
if os.path.exists(f'{path_to_results}/success/{name}'):
result = 'success'
end = serialisation_utils.json_loads(open(f'{path_to_results}/success/{name}', 'r').read())
elif os.path.exists(f'{path_to_results}/failure/{name}'):
result = 'failure'
end = serialisation_utils.json_loads(open(f'{path_to_results}/failure/{name}', 'r').read())
if os.path.exists(f'{path_to_results}/processing_time/{name}'):
processing_time = serialisation_utils.json_loads(open(f'{path_to_results}/processing_time/{name}', 'r').read())
if end:
body = ''
starting_time = start.get('datetime')
ending_time = end.get('datetime')
pid = start.get('pid')
groups[pid] = dict(id=pid, content=pid, value=pid)
for (name2, value2) in start.get('params_for_results', {}).items():
body += f'<b>{name2}</b>:{value2}<br />'
body += f'<b>pid</b>:{pid}<br />'
body += f'<b>start</b>:{starting_time}<br />'
body += f'<b>end</b>:{ending_time}<br />'
body += f"<b>duration</b>:{processing_time.get('duration')}<br />"
content = f'<h2>{task_id}</h2><dl>{body}</dl>'
task_type = task_id.split('_')[0]
class_name = f'{result} {task_type}'
starting_time = datetime.strptime(starting_time, time_format)
ending_time = datetime.strptime(ending_time, time_format)
if starting_time < earliest_time:
earliest_time = starting_time
if ending_time > latest_time:
latest_time = ending_time
if group_by_pid:
d = dict(id=task_id, group=pid, content=task_id, title=content, start=start.get('datetime'), end=end.get('datetime'), className=class_name)
else:
d = dict(id=task_id, content=task_id, title=content, start=start.get('datetime'), end=end.get('datetime'), className=class_name)
results.append(d)
else:
print_utils.warn(f'Did not find an end for {name}')
earliest_time = earliest_time - timedelta(minutes=2)
latest_time = latest_time + timedelta(minutes=2)
groups = list(groups.values())
(results, groups, start, end) = (results, groups, earliest_time.strftime(time_format), latest_time.strftime(time_format))
</DeepExtract>
if group_by_pid:
params = 'container, items, groups, options'
else:
params = 'container, items, options'
output = viz_template.CONTENT.format(DATASET=results, START=start, END=end, GROUPS=groups, PARAMS=params)
f = open(f'{output_file_name_prefix}.html', 'w')
f.write(output)
f.close()
|
def export_deploy_viz(codebuild_execution_id, group_by_pid, puppet_account_id):
output_file_name_prefix = codebuild_execution_id.split(':')[1]
with betterboto_client.ClientContextManager('codebuild') as codebuild:
build = codebuild.batch_get_builds(ids=[codebuild_execution_id]).get('builds')[0]
location = build.get('artifacts').get('location').split(':')
bucket = location[5].split('/')[0]
key = location[5].replace(f'{bucket}/', '')
zip_file_location = f'{output_file_name_prefix}.zip'
if os.path.exists(zip_file_location):
print(f'Found zip file, skipping download')
else:
print(f'Downloading zip file')
with betterboto_client.ClientContextManager('s3') as s3:
print(f'getting {bucket} {key}')
s3.download_file(Bucket=bucket, Key=key, Filename=zip_file_location)
if os.path.exists(output_file_name_prefix):
print(f'Found output folder, skipping unzip')
else:
        print(f'Unzipping')
os.makedirs(output_file_name_prefix)
with zipfile.ZipFile(zip_file_location, 'r') as zip_ref:
zip_ref.extractall(output_file_name_prefix)
path_to_results = f'{output_file_name_prefix}/results'
results = list()
groups = dict()
time_format = '%Y-%m-%d %H:%M:%S'
earliest_time = datetime.strptime('4022-09-14 00:54:33', time_format)
latest_time = datetime.strptime('2000-09-14 00:54:33', time_format)
for starter in glob.glob(f'{path_to_results}/start/*.json'):
start = serialisation_utils.json_loads(open(starter, 'r').read())
name = os.path.basename(starter)
end = None
task_id = name.replace('.json', '')
task_id = '-'.join(task_id.split('-')[1:])
processing_time = dict(duration='unknown')
if os.path.exists(f'{path_to_results}/success/{name}'):
result = 'success'
end = serialisation_utils.json_loads(open(f'{path_to_results}/success/{name}', 'r').read())
elif os.path.exists(f'{path_to_results}/failure/{name}'):
result = 'failure'
end = serialisation_utils.json_loads(open(f'{path_to_results}/failure/{name}', 'r').read())
if os.path.exists(f'{path_to_results}/processing_time/{name}'):
processing_time = serialisation_utils.json_loads(open(f'{path_to_results}/processing_time/{name}', 'r').read())
if end:
body = ''
starting_time = start.get('datetime')
ending_time = end.get('datetime')
pid = start.get('pid')
groups[pid] = dict(id=pid, content=pid, value=pid)
for (name2, value2) in start.get('params_for_results', {}).items():
body += f'<b>{name2}</b>:{value2}<br />'
body += f'<b>pid</b>:{pid}<br />'
body += f'<b>start</b>:{starting_time}<br />'
body += f'<b>end</b>:{ending_time}<br />'
body += f"<b>duration</b>:{processing_time.get('duration')}<br />"
content = f'<h2>{task_id}</h2><dl>{body}</dl>'
task_type = task_id.split('_')[0]
class_name = f'{result} {task_type}'
starting_time = datetime.strptime(starting_time, time_format)
ending_time = datetime.strptime(ending_time, time_format)
if starting_time < earliest_time:
earliest_time = starting_time
if ending_time > latest_time:
latest_time = ending_time
if group_by_pid:
d = dict(id=task_id, group=pid, content=task_id, title=content, start=start.get('datetime'), end=end.get('datetime'), className=class_name)
else:
d = dict(id=task_id, content=task_id, title=content, start=start.get('datetime'), end=end.get('datetime'), className=class_name)
results.append(d)
else:
print_utils.warn(f'Did not find an end for {name}')
earliest_time = earliest_time - timedelta(minutes=2)
latest_time = latest_time + timedelta(minutes=2)
groups = list(groups.values())
(results, groups, start, end) = (results, groups, earliest_time.strftime(time_format), latest_time.strftime(time_format))
if group_by_pid:
params = 'container, items, groups, options'
else:
params = 'container, items, options'
output = viz_template.CONTENT.format(DATASET=results, START=start, END=end, GROUPS=groups, PARAMS=params)
f = open(f'{output_file_name_prefix}.html', 'w')
f.write(output)
f.close()
|
aws-service-catalog-puppet
|
positive
|
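The export_deploy_viz pair above downloads a build artifact once, then unzips it only when the output folder is missing. A stripped-down sketch of that extract-if-missing step using only the standard library (ensure_extracted and its arguments are hypothetical names, not part of the project):

import os
import zipfile

def ensure_extracted(zip_path, out_dir):
    # Unzip only when the output folder is not already present.
    if os.path.exists(out_dir):
        print('Found output folder, skipping unzip')
    else:
        os.makedirs(out_dir)
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(out_dir)
    return out_dir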
def test():
<DeepExtract>
net = ResNet(BasicBlock, [2, 2, 2, 2])
</DeepExtract>
y = net(torch.randn(1, 3, 32, 32))
print(y.size())
|
def test():
net = ResNet(BasicBlock, [2, 2, 2, 2])
y = net(torch.randn(1, 3, 32, 32))
print(y.size())
|
awesome-attention-mechanism-in-cv
|
positive
|
def Process(self, request_iterator, context):
"""Processes incoming EnvironmentRequests.
For each EnvironmentRequest the internal message is extracted and handled.
    The response for that message is then placed in an EnvironmentResponse, which
is returned to the client.
An error status will be returned if an unknown message type is received or
if the message is invalid for the current world state.
Args:
request_iterator: Message iterator provided by gRPC.
context: Context provided by gRPC.
Yields:
EnvironmentResponse: Response for each incoming EnvironmentRequest.
"""
env_factory = CatchGameFactory(_INITIAL_SEED)
env = None
is_joined = False
skip_next_frame = False
action_manager = spec_manager.SpecManager(_action_spec())
observation_manager = spec_manager.SpecManager(_observation_spec())
for request in request_iterator:
environment_response = dm_env_rpc_pb2.EnvironmentResponse()
try:
message_type = request.WhichOneof('payload')
internal_request = getattr(request, message_type)
<DeepExtract>
if not env:
if message_type not in ['create_world', 'leave_world']:
raise RuntimeError('Cannot {} when no world exists.'.format(message_type))
else:
if message_type == 'create_world':
raise RuntimeError('This example does not support creating multiple worlds.')
if is_joined:
if message_type == 'destroy_world':
raise RuntimeError('Cannot destroy world when still joined.')
elif message_type == 'reset_world':
raise RuntimeError('This example does not support reset_world when not joined.')
elif message_type in ['step', 'reset']:
raise RuntimeError('Cannot {} when world not joined.'.format(message_type))
</DeepExtract>
if message_type == 'create_world':
<DeepExtract>
unrecognized_settings = [setting for setting in request.create_world.settings if setting not in _VALID_CREATE_AND_RESET_SETTINGS]
if unrecognized_settings:
raise ValueError(f'Unrecognized settings provided! Invalid settings: {unrecognized_settings}')
</DeepExtract>
seed = request.create_world.settings.get('seed', None)
if seed is not None:
env_factory.reset_seed(tensor_utils.unpack_tensor(seed))
env = env_factory.new_game()
skip_next_frame = True
response = dm_env_rpc_pb2.CreateWorldResponse(world_name=_WORLD_NAME)
elif message_type == 'join_world':
<DeepExtract>
unrecognized_settings = [setting for setting in request.join_world.settings if setting not in []]
if unrecognized_settings:
raise ValueError(f'Unrecognized settings provided! Invalid settings: {unrecognized_settings}')
</DeepExtract>
if is_joined:
raise RuntimeError(f'Tried to join world "{internal_request.world_name}" but already joined to world "{_WORLD_NAME}"')
if internal_request.world_name != _WORLD_NAME:
raise RuntimeError(f'Tried to join world "{internal_request.world_name}" but the only supported world is "{_WORLD_NAME}"')
response = dm_env_rpc_pb2.JoinWorldResponse()
for (uid, action) in _action_spec().items():
response.specs.actions[uid].CopyFrom(action)
for (uid, observation) in _observation_spec().items():
response.specs.observations[uid].CopyFrom(observation)
is_joined = True
elif message_type == 'step':
if skip_next_frame:
skip_next_frame = False
else:
unpacked_actions = action_manager.unpack(internal_request.actions)
paddle_action = unpacked_actions.get(_ACTION_PADDLE, _DEFAULT_ACTION)
if paddle_action not in _VALID_ACTIONS:
raise RuntimeError(f'Invalid paddle action value: "{paddle_action}"!')
env.update(paddle_action)
response = dm_env_rpc_pb2.StepResponse()
packed_observations = observation_manager.pack({_OBSERVATION_BOARD: env.draw_board(), _OBSERVATION_REWARD: env.reward()})
for requested_observation in internal_request.requested_observations:
response.observations[requested_observation].CopyFrom(packed_observations[requested_observation])
if env.has_terminated():
response.state = dm_env_rpc_pb2.EnvironmentStateType.TERMINATED
else:
response.state = dm_env_rpc_pb2.EnvironmentStateType.RUNNING
if env.has_terminated():
env = env_factory.new_game()
skip_next_frame = True
elif message_type == 'reset':
<DeepExtract>
unrecognized_settings = [setting for setting in request.reset.settings if setting not in _VALID_CREATE_AND_RESET_SETTINGS]
if unrecognized_settings:
raise ValueError(f'Unrecognized settings provided! Invalid settings: {unrecognized_settings}')
</DeepExtract>
seed = request.reset.settings.get('seed', None)
if seed is not None:
env_factory.reset_seed(tensor_utils.unpack_tensor(seed))
env = env_factory.new_game()
skip_next_frame = True
response = dm_env_rpc_pb2.ResetResponse()
for (uid, action) in _action_spec().items():
response.specs.actions[uid].CopyFrom(action)
for (uid, observation) in _observation_spec().items():
response.specs.observations[uid].CopyFrom(observation)
elif message_type == 'reset_world':
<DeepExtract>
unrecognized_settings = [setting for setting in request.reset_world.settings if setting not in _VALID_CREATE_AND_RESET_SETTINGS]
if unrecognized_settings:
raise ValueError(f'Unrecognized settings provided! Invalid settings: {unrecognized_settings}')
</DeepExtract>
seed = request.reset_world.settings.get('seed', None)
if seed is not None:
env_factory.reset_seed(tensor_utils.unpack_tensor(seed))
env = env_factory.new_game()
skip_next_frame = True
response = dm_env_rpc_pb2.ResetWorldResponse()
elif message_type == 'leave_world':
is_joined = False
response = dm_env_rpc_pb2.LeaveWorldResponse()
elif message_type == 'destroy_world':
if internal_request.world_name != _WORLD_NAME:
raise RuntimeError('Tried to destroy world "{}" but we only support world "{}"'.format(internal_request.world_name, _WORLD_NAME))
env = None
response = dm_env_rpc_pb2.DestroyWorldResponse()
else:
raise RuntimeError('Unhandled message: {}'.format(message_type))
getattr(environment_response, message_type).CopyFrom(response)
except Exception as e:
environment_response.error.CopyFrom(status_pb2.Status(code=code_pb2.INTERNAL, message=str(e)))
yield environment_response
|
def Process(self, request_iterator, context):
"""Processes incoming EnvironmentRequests.
For each EnvironmentRequest the internal message is extracted and handled.
    The response for that message is then placed in an EnvironmentResponse, which
is returned to the client.
An error status will be returned if an unknown message type is received or
if the message is invalid for the current world state.
Args:
request_iterator: Message iterator provided by gRPC.
context: Context provided by gRPC.
Yields:
EnvironmentResponse: Response for each incoming EnvironmentRequest.
"""
env_factory = CatchGameFactory(_INITIAL_SEED)
env = None
is_joined = False
skip_next_frame = False
action_manager = spec_manager.SpecManager(_action_spec())
observation_manager = spec_manager.SpecManager(_observation_spec())
for request in request_iterator:
environment_response = dm_env_rpc_pb2.EnvironmentResponse()
try:
message_type = request.WhichOneof('payload')
internal_request = getattr(request, message_type)
if not env:
if message_type not in ['create_world', 'leave_world']:
raise RuntimeError('Cannot {} when no world exists.'.format(message_type))
else:
if message_type == 'create_world':
raise RuntimeError('This example does not support creating multiple worlds.')
if is_joined:
if message_type == 'destroy_world':
raise RuntimeError('Cannot destroy world when still joined.')
elif message_type == 'reset_world':
raise RuntimeError('This example does not support reset_world when not joined.')
elif message_type in ['step', 'reset']:
raise RuntimeError('Cannot {} when world not joined.'.format(message_type))
if message_type == 'create_world':
unrecognized_settings = [setting for setting in request.create_world.settings if setting not in _VALID_CREATE_AND_RESET_SETTINGS]
if unrecognized_settings:
raise ValueError(f'Unrecognized settings provided! Invalid settings: {unrecognized_settings}')
seed = request.create_world.settings.get('seed', None)
if seed is not None:
env_factory.reset_seed(tensor_utils.unpack_tensor(seed))
env = env_factory.new_game()
skip_next_frame = True
response = dm_env_rpc_pb2.CreateWorldResponse(world_name=_WORLD_NAME)
elif message_type == 'join_world':
unrecognized_settings = [setting for setting in request.join_world.settings if setting not in []]
if unrecognized_settings:
raise ValueError(f'Unrecognized settings provided! Invalid settings: {unrecognized_settings}')
if is_joined:
raise RuntimeError(f'Tried to join world "{internal_request.world_name}" but already joined to world "{_WORLD_NAME}"')
if internal_request.world_name != _WORLD_NAME:
raise RuntimeError(f'Tried to join world "{internal_request.world_name}" but the only supported world is "{_WORLD_NAME}"')
response = dm_env_rpc_pb2.JoinWorldResponse()
for (uid, action) in _action_spec().items():
response.specs.actions[uid].CopyFrom(action)
for (uid, observation) in _observation_spec().items():
response.specs.observations[uid].CopyFrom(observation)
is_joined = True
elif message_type == 'step':
if skip_next_frame:
skip_next_frame = False
else:
unpacked_actions = action_manager.unpack(internal_request.actions)
paddle_action = unpacked_actions.get(_ACTION_PADDLE, _DEFAULT_ACTION)
if paddle_action not in _VALID_ACTIONS:
raise RuntimeError(f'Invalid paddle action value: "{paddle_action}"!')
env.update(paddle_action)
response = dm_env_rpc_pb2.StepResponse()
packed_observations = observation_manager.pack({_OBSERVATION_BOARD: env.draw_board(), _OBSERVATION_REWARD: env.reward()})
for requested_observation in internal_request.requested_observations:
response.observations[requested_observation].CopyFrom(packed_observations[requested_observation])
if env.has_terminated():
response.state = dm_env_rpc_pb2.EnvironmentStateType.TERMINATED
else:
response.state = dm_env_rpc_pb2.EnvironmentStateType.RUNNING
if env.has_terminated():
env = env_factory.new_game()
skip_next_frame = True
elif message_type == 'reset':
unrecognized_settings = [setting for setting in request.reset.settings if setting not in _VALID_CREATE_AND_RESET_SETTINGS]
if unrecognized_settings:
raise ValueError(f'Unrecognized settings provided! Invalid settings: {unrecognized_settings}')
seed = request.reset.settings.get('seed', None)
if seed is not None:
env_factory.reset_seed(tensor_utils.unpack_tensor(seed))
env = env_factory.new_game()
skip_next_frame = True
response = dm_env_rpc_pb2.ResetResponse()
for (uid, action) in _action_spec().items():
response.specs.actions[uid].CopyFrom(action)
for (uid, observation) in _observation_spec().items():
response.specs.observations[uid].CopyFrom(observation)
elif message_type == 'reset_world':
unrecognized_settings = [setting for setting in request.reset_world.settings if setting not in _VALID_CREATE_AND_RESET_SETTINGS]
if unrecognized_settings:
raise ValueError(f'Unrecognized settings provided! Invalid settings: {unrecognized_settings}')
seed = request.reset_world.settings.get('seed', None)
if seed is not None:
env_factory.reset_seed(tensor_utils.unpack_tensor(seed))
env = env_factory.new_game()
skip_next_frame = True
response = dm_env_rpc_pb2.ResetWorldResponse()
elif message_type == 'leave_world':
is_joined = False
response = dm_env_rpc_pb2.LeaveWorldResponse()
elif message_type == 'destroy_world':
if internal_request.world_name != _WORLD_NAME:
raise RuntimeError('Tried to destroy world "{}" but we only support world "{}"'.format(internal_request.world_name, _WORLD_NAME))
env = None
response = dm_env_rpc_pb2.DestroyWorldResponse()
else:
raise RuntimeError('Unhandled message: {}'.format(message_type))
getattr(environment_response, message_type).CopyFrom(response)
except Exception as e:
environment_response.error.CopyFrom(status_pb2.Status(code=code_pb2.INTERNAL, message=str(e)))
yield environment_response
|
dm_env_rpc
|
positive
|
def get_ppdb_data(self):
if not os.path.exists(os.path.join(_pickled_data_folder, 'ppdb.pickle')):
print('PPDB data did not exist, now generating ppdb.pickle')
<DeepExtract>
if not os.path.exists(_pickled_data_folder):
print('Creating base-directory: ' + _pickled_data_folder)
os.makedirs(_pickled_data_folder)
ppdb = self.process_ppdb_data()
if ppdb:
with open(os.path.join(_pickled_data_folder, 'ppdb.pickle'), 'wb') as f:
pickle.dump(ppdb, f, pickle.HIGHEST_PROTOCOL)
print('Done generating ppdb.pickle')
else:
quit('Aborting - an error occurred')
</DeepExtract>
with open(os.path.join(_pickled_data_folder, 'ppdb.pickle'), 'rb') as f:
return pickle.load(f, encoding='utf-8')
|
def get_ppdb_data(self):
if not os.path.exists(os.path.join(_pickled_data_folder, 'ppdb.pickle')):
print('PPDB data did not exist, now generating ppdb.pickle')
if not os.path.exists(_pickled_data_folder):
print('Creating base-directory: ' + _pickled_data_folder)
os.makedirs(_pickled_data_folder)
ppdb = self.process_ppdb_data()
if ppdb:
with open(os.path.join(_pickled_data_folder, 'ppdb.pickle'), 'wb') as f:
pickle.dump(ppdb, f, pickle.HIGHEST_PROTOCOL)
print('Done generating ppdb.pickle')
else:
quit('Aborting - an error occurred')
with open(os.path.join(_pickled_data_folder, 'ppdb.pickle'), 'rb') as f:
return pickle.load(f, encoding='utf-8')
|
coling2018_fake-news-challenge
|
positive
|
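The get_ppdb_data pair above builds a dataset once, pickles it, and reloads the pickle on later calls. A generic sketch of that cache-on-first-use idiom; load_cached and build_data are hypothetical stand-ins for the project-specific helpers:

import os
import pickle

def load_cached(path, build_data):
    # Build and pickle the data the first time, then reuse the pickle afterwards.
    if not os.path.exists(path):
        data = build_data()
        os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
        with open(path, 'wb') as f:
            pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
    with open(path, 'rb') as f:
        return pickle.load(f)

print(load_cached('cache/example.pickle', lambda: {'hello': 'world'}))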
def ReadFemResp(dct_file='2002FemResp.dct', dat_file='2002FemResp.dat.gz', **options):
"""Reads the NSFG respondent data.
dct_file: string file name
dat_file: string file name
returns: DataFrame
"""
dct = thinkstats2.ReadStataDct(dct_file, encoding='iso-8859-1')
df = dct.ReadFixedWidth(dat_file, compression='gzip', **options)
<DeepExtract>
df.cmmarrhx.replace([9997, 9998, 9999], np.nan, inplace=True)
df['agemarry'] = (df.cmmarrhx - df.cmbirth) / 12.0
df['age'] = (df.cmintvw - df.cmbirth) / 12.0
month0 = pd.to_datetime('1899-12-15')
dates = [month0 + pd.DateOffset(months=cm) for cm in df.cmbirth]
df['year'] = pd.DatetimeIndex(dates).year - 1900
df['decade'] = df.year // 10
df['fives'] = df.year // 5
</DeepExtract>
return df
|
def ReadFemResp(dct_file='2002FemResp.dct', dat_file='2002FemResp.dat.gz', **options):
"""Reads the NSFG respondent data.
dct_file: string file name
dat_file: string file name
returns: DataFrame
"""
dct = thinkstats2.ReadStataDct(dct_file, encoding='iso-8859-1')
df = dct.ReadFixedWidth(dat_file, compression='gzip', **options)
df.cmmarrhx.replace([9997, 9998, 9999], np.nan, inplace=True)
df['agemarry'] = (df.cmmarrhx - df.cmbirth) / 12.0
df['age'] = (df.cmintvw - df.cmbirth) / 12.0
month0 = pd.to_datetime('1899-12-15')
dates = [month0 + pd.DateOffset(months=cm) for cm in df.cmbirth]
df['year'] = pd.DatetimeIndex(dates).year - 1900
df['decade'] = df.year // 10
df['fives'] = df.year // 5
return df
|
DataExploration
|
positive
|
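The ReadFemResp pair above recodes NSFG sentinel values to NaN and derives age and cohort columns from century-month counts. A toy pandas sketch of the same recode on a two-row frame, reusing the 1899-12-15 epoch convention from the record:

import numpy as np
import pandas as pd

df = pd.DataFrame({'cmbirth': [500, 800], 'cmmarrhx': [900, 9998]})
# Sentinel codes mean "not applicable / refused"; treat them as missing.
df['cmmarrhx'] = df['cmmarrhx'].replace([9997, 9998, 9999], np.nan)
df['agemarry'] = (df['cmmarrhx'] - df['cmbirth']) / 12.0

month0 = pd.to_datetime('1899-12-15')
dates = [month0 + pd.DateOffset(months=int(cm)) for cm in df['cmbirth']]
df['year'] = pd.DatetimeIndex(dates).year - 1900
df['decade'] = df['year'] // 10
print(df)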
def test_acquire_by_app_ids(self, client):
<DeepExtract>
Site = client.Site
App = client.App
site = Site.objects.create(name='theta', path='/projects/foo')
app = App.objects.create(site_id=site.id, name='one', serialized_class='txt', source_code='txt')
(site, app1) = (site, app)
</DeepExtract>
app2 = client.App.objects.create(site_id=site.id, name='two', serialized_class='txt', source_code='txt')
<DeepExtract>
jobs = [self.job(client, i, app1) for i in range(5)]
jobs = client.Job.objects.bulk_create(jobs)
for job in jobs:
job.state = state
client.Job.objects.bulk_update(jobs)
</DeepExtract>
<DeepExtract>
jobs = [self.job(client, i, app2) for i in range(5)]
jobs = client.Job.objects.bulk_create(jobs)
for job in jobs:
job.state = state
client.Job.objects.bulk_update(jobs)
</DeepExtract>
<DeepExtract>
batch_job = client.BatchJob.objects.create(site_id=site.id, project='datascience', queue='default', num_nodes=128, wall_time_min=30, job_mode='mpi')
sess = client.Session.objects.create(batch_job_id=batch_job.id, site_id=site.id)
sess = sess
</DeepExtract>
acquired = sess.acquire_jobs(max_num_jobs=20, app_ids={app2.id})
assert len(acquired) == 5
for job in acquired:
assert job.app_id == app2.id
|
def test_acquire_by_app_ids(self, client):
Site = client.Site
App = client.App
site = Site.objects.create(name='theta', path='/projects/foo')
app = App.objects.create(site_id=site.id, name='one', serialized_class='txt', source_code='txt')
(site, app1) = (site, app)
app2 = client.App.objects.create(site_id=site.id, name='two', serialized_class='txt', source_code='txt')
jobs = [self.job(client, i, app1) for i in range(5)]
jobs = client.Job.objects.bulk_create(jobs)
for job in jobs:
job.state = state
client.Job.objects.bulk_update(jobs)
jobs = [self.job(client, i, app2) for i in range(5)]
jobs = client.Job.objects.bulk_create(jobs)
for job in jobs:
job.state = state
client.Job.objects.bulk_update(jobs)
batch_job = client.BatchJob.objects.create(site_id=site.id, project='datascience', queue='default', num_nodes=128, wall_time_min=30, job_mode='mpi')
sess = client.Session.objects.create(batch_job_id=batch_job.id, site_id=site.id)
sess = sess
acquired = sess.acquire_jobs(max_num_jobs=20, app_ids={app2.id})
assert len(acquired) == 5
for job in acquired:
assert job.app_id == app2.id
|
balsam
|
positive
|
def multigammaln(self, a, p):
<DeepExtract>
p = torch.tensor(p).type(self.floatx())
</DeepExtract>
<DeepExtract>
p_ = p.type('int32')
</DeepExtract>
a = a[..., None]
<DeepExtract>
i = torch.tensor(self.range(1, p_ + 1)).type(self.floatx())
</DeepExtract>
term1 = p * (p - 1) / 4.0 * self.log(np.pi)
<DeepExtract>
raise NotImplementedError
</DeepExtract>
return term1 + self.sum(term2, axis=-1)
|
def multigammaln(self, a, p):
p = torch.tensor(p).type(self.floatx())
p_ = p.type('int32')
a = a[..., None]
i = torch.tensor(self.range(1, p_ + 1)).type(self.floatx())
term1 = p * (p - 1) / 4.0 * self.log(np.pi)
raise NotImplementedError
return term1 + self.sum(term2, axis=-1)
|
deepx
|
positive
|
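The multigammaln pair above never assigns term2 before raising NotImplementedError, so only the p(p-1)/4 * ln(pi) term is visible. The quantity it is building toward is the multivariate log-gamma, ln Gamma_p(a) = p(p-1)/4 * ln(pi) + sum over i=1..p of ln Gamma(a + (1 - i)/2); a numpy/scipy sketch of that formula, checked against scipy.special.multigammaln:

import numpy as np
from scipy.special import gammaln, multigammaln

def my_multigammaln(a, p):
    i = np.arange(1, p + 1)
    term1 = p * (p - 1) / 4.0 * np.log(np.pi)
    term2 = gammaln(a + (1.0 - i) / 2.0)  # broadcasts to shape (..., p)
    return term1 + term2.sum(axis=-1)

a = np.array([3.5, 7.0])[..., None]
print(my_multigammaln(a, 3))
print(multigammaln(np.array([3.5, 7.0]), 3))  # scipy reference values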
@cli.action('run-tests', XcodeProjectArgument.XCODE_PROJECT_PATH, XcodeProjectArgument.XCODE_WORKSPACE_PATH, XcodeProjectArgument.TARGET_NAME, XcodeProjectArgument.CONFIGURATION_NAME, XcodeProjectArgument.SCHEME_NAME, XcodeProjectArgument.CLEAN, TestArgument.DISABLE_CODE_COVERAGE, TestArgument.GRACEFUL_EXIT, TestArgument.MAX_CONCURRENT_DEVICES, TestArgument.MAX_CONCURRENT_SIMULATORS, TestArgument.TEST_DEVICES, TestArgument.TEST_ONLY, TestArgument.TEST_SDK, TestResultArgument.OUTPUT_DIRECTORY, TestResultArgument.OUTPUT_EXTENSION, XcodeArgument.TEST_FLAGS, XcodeArgument.TEST_XCARGS, XcprettyArgument.DISABLE, XcprettyArgument.OPTIONS)
def run_test(self, xcode_project_path: Optional[pathlib.Path]=None, xcode_workspace_path: Optional[pathlib.Path]=None, target_name: Optional[str]=None, configuration_name: Optional[str]=None, scheme_name: Optional[str]=None, clean: bool=False, devices: Optional[List[str]]=None, disable_code_coverage: bool=False, max_concurrent_devices: Optional[int]=TestArgument.MAX_CONCURRENT_DEVICES.get_default(), max_concurrent_simulators: Optional[int]=TestArgument.MAX_CONCURRENT_SIMULATORS.get_default(), test_only: Optional[str]=TestArgument.TEST_ONLY.get_default(), test_sdk: str=TestArgument.TEST_SDK.get_default(), test_xcargs: Optional[str]=XcodeArgument.TEST_XCARGS.get_default(), test_flags: Optional[str]=XcodeArgument.TEST_FLAGS.get_default(), disable_xcpretty: bool=False, xcpretty_options: str=XcprettyArgument.OPTIONS.get_default(), output_dir: pathlib.Path=TestResultArgument.OUTPUT_DIRECTORY.get_default(), output_extension: str=TestResultArgument.OUTPUT_EXTENSION.get_default(), graceful_exit: bool=False):
"""
Run unit or UI tests for given Xcode project or workspace
"""
<DeepExtract>
        if xcode_workspace_path is None and xcode_project_path is None:
XcodeProjectArgument.XCODE_WORKSPACE_PATH.raise_argument_error('Workspace or project argument needs to be specified')
</DeepExtract>
<DeepExtract>
if devices:
try:
simulators = Simulator.find_simulators(devices)
except ValueError as ve:
raise TestArgument.TEST_DEVICES.raise_argument_error(str(ve)) from ve
elif 'macos' in test_sdk.lower():
simulators = []
else:
simulators = [self.get_default_test_destination(should_print=False)]
self.echo(Colors.BLUE(f"Running tests on {('simulators:' if simulators else 'macOS')}"))
for s in simulators:
self.echo('- %s %s (%s)', s.runtime, s.name, s.udid)
self.echo('')
simulators = simulators
</DeepExtract>
<DeepExtract>
try:
xcodebuild = Xcodebuild(xcode_workspace=xcode_workspace_path, xcode_project=xcode_project_path, scheme_name=scheme_name, target_name=target_name, configuration_name=configuration_name, xcpretty=Xcpretty(xcpretty_options) if not disable_xcpretty else None)
except ValueError as error:
raise XcodeProjectException(*error.args) from error
</DeepExtract>
clean and self._clean(xcodebuild)
self.echo(Colors.BLUE(f'Run tests for {(xcodebuild.workspace or xcodebuild.xcode_project).name}\n'))
xcresult_collector = XcResultCollector()
xcresult_collector.ignore_results(Xcode.DERIVED_DATA_PATH)
try:
xcodebuild.test(test_sdk, simulators, enable_code_coverage=not disable_code_coverage, only_testing=test_only, xcargs=test_xcargs, custom_flags=test_flags, max_concurrent_devices=max_concurrent_devices, max_concurrent_simulators=max_concurrent_simulators)
except IOError:
testing_failed = True
self.echo(Colors.RED('\nTest run failed\n'))
else:
testing_failed = False
self.echo(Colors.GREEN('\nTest run completed successfully\n'))
xcresult_collector.gather_results(Xcode.DERIVED_DATA_PATH)
output_dir.mkdir(parents=True, exist_ok=True)
<DeepExtract>
for simulator in simulators:
simulator_description = f'{simulator.runtime}_{simulator.name}'
log_path = simulator.get_logs_path()
if not log_path.exists():
self.logger.debug('No logs found for simulator %s', simulator)
continue
unsafe_destination_name = f'{simulator_description}{log_path.suffix}'
destination_path = output_dir / re.sub('[^\\w.]', '_', unsafe_destination_name)
try:
shutil.copy(log_path, destination_path)
except OSError:
self.logger.exception('Saving simulator %s logs to %s failed', simulator_description, destination_path)
else:
self.logger.debug('Saved simulator %s logs to %s', simulator_description, destination_path)
</DeepExtract>
if not xcresult_collector.get_collected_results():
raise XcodeProjectException('Did not find any test results')
<DeepExtract>
if True:
self.logger.info(Colors.GREEN('Found test results at'))
for xcresult in xcresult_collector.get_collected_results():
self.logger.info('- %s', xcresult)
self.logger.info('')
xcresult = xcresult_collector.get_merged_xcresult()
try:
test_suites = XcResultConverter.xcresult_to_junit(xcresult)
finally:
if output_dir:
shutil.copytree(xcresult, output_dir / xcresult.name)
xcresult_collector.forget_merged_result()
(test_suites, xcresult) = (test_suites, xcresult)
</DeepExtract>
message = f'Executed {test_suites.tests} tests with {test_suites.failures} failures and {test_suites.errors} errors in {test_suites.time:.2f} seconds.\n'
self.echo(Colors.BLUE(message))
TestSuitePrinter(self.echo).print_test_suites(test_suites)
<DeepExtract>
result_path = output_dir / f'{xcresult.stem}.{output_extension}'
test_suites.save_xml(result_path)
self.echo(Colors.GREEN('Saved JUnit XML report to %s'), result_path)
</DeepExtract>
if not graceful_exit:
if testing_failed or (test_suites and test_suites.has_failed_tests()):
raise XcodeProjectException('Tests failed')
|
@cli.action('run-tests', XcodeProjectArgument.XCODE_PROJECT_PATH, XcodeProjectArgument.XCODE_WORKSPACE_PATH, XcodeProjectArgument.TARGET_NAME, XcodeProjectArgument.CONFIGURATION_NAME, XcodeProjectArgument.SCHEME_NAME, XcodeProjectArgument.CLEAN, TestArgument.DISABLE_CODE_COVERAGE, TestArgument.GRACEFUL_EXIT, TestArgument.MAX_CONCURRENT_DEVICES, TestArgument.MAX_CONCURRENT_SIMULATORS, TestArgument.TEST_DEVICES, TestArgument.TEST_ONLY, TestArgument.TEST_SDK, TestResultArgument.OUTPUT_DIRECTORY, TestResultArgument.OUTPUT_EXTENSION, XcodeArgument.TEST_FLAGS, XcodeArgument.TEST_XCARGS, XcprettyArgument.DISABLE, XcprettyArgument.OPTIONS)
def run_test(self, xcode_project_path: Optional[pathlib.Path]=None, xcode_workspace_path: Optional[pathlib.Path]=None, target_name: Optional[str]=None, configuration_name: Optional[str]=None, scheme_name: Optional[str]=None, clean: bool=False, devices: Optional[List[str]]=None, disable_code_coverage: bool=False, max_concurrent_devices: Optional[int]=TestArgument.MAX_CONCURRENT_DEVICES.get_default(), max_concurrent_simulators: Optional[int]=TestArgument.MAX_CONCURRENT_SIMULATORS.get_default(), test_only: Optional[str]=TestArgument.TEST_ONLY.get_default(), test_sdk: str=TestArgument.TEST_SDK.get_default(), test_xcargs: Optional[str]=XcodeArgument.TEST_XCARGS.get_default(), test_flags: Optional[str]=XcodeArgument.TEST_FLAGS.get_default(), disable_xcpretty: bool=False, xcpretty_options: str=XcprettyArgument.OPTIONS.get_default(), output_dir: pathlib.Path=TestResultArgument.OUTPUT_DIRECTORY.get_default(), output_extension: str=TestResultArgument.OUTPUT_EXTENSION.get_default(), graceful_exit: bool=False):
"""
Run unit or UI tests for given Xcode project or workspace
"""
        if xcode_workspace_path is None and xcode_project_path is None:
XcodeProjectArgument.XCODE_WORKSPACE_PATH.raise_argument_error('Workspace or project argument needs to be specified')
if devices:
try:
simulators = Simulator.find_simulators(devices)
except ValueError as ve:
raise TestArgument.TEST_DEVICES.raise_argument_error(str(ve)) from ve
elif 'macos' in test_sdk.lower():
simulators = []
else:
simulators = [self.get_default_test_destination(should_print=False)]
self.echo(Colors.BLUE(f"Running tests on {('simulators:' if simulators else 'macOS')}"))
for s in simulators:
self.echo('- %s %s (%s)', s.runtime, s.name, s.udid)
self.echo('')
simulators = simulators
try:
xcodebuild = Xcodebuild(xcode_workspace=xcode_workspace_path, xcode_project=xcode_project_path, scheme_name=scheme_name, target_name=target_name, configuration_name=configuration_name, xcpretty=Xcpretty(xcpretty_options) if not disable_xcpretty else None)
except ValueError as error:
raise XcodeProjectException(*error.args) from error
clean and self._clean(xcodebuild)
self.echo(Colors.BLUE(f'Run tests for {(xcodebuild.workspace or xcodebuild.xcode_project).name}\n'))
xcresult_collector = XcResultCollector()
xcresult_collector.ignore_results(Xcode.DERIVED_DATA_PATH)
try:
xcodebuild.test(test_sdk, simulators, enable_code_coverage=not disable_code_coverage, only_testing=test_only, xcargs=test_xcargs, custom_flags=test_flags, max_concurrent_devices=max_concurrent_devices, max_concurrent_simulators=max_concurrent_simulators)
except IOError:
testing_failed = True
self.echo(Colors.RED('\nTest run failed\n'))
else:
testing_failed = False
self.echo(Colors.GREEN('\nTest run completed successfully\n'))
xcresult_collector.gather_results(Xcode.DERIVED_DATA_PATH)
output_dir.mkdir(parents=True, exist_ok=True)
for simulator in simulators:
simulator_description = f'{simulator.runtime}_{simulator.name}'
log_path = simulator.get_logs_path()
if not log_path.exists():
self.logger.debug('No logs found for simulator %s', simulator)
continue
unsafe_destination_name = f'{simulator_description}{log_path.suffix}'
destination_path = output_dir / re.sub('[^\\w.]', '_', unsafe_destination_name)
try:
shutil.copy(log_path, destination_path)
except OSError:
self.logger.exception('Saving simulator %s logs to %s failed', simulator_description, destination_path)
else:
self.logger.debug('Saved simulator %s logs to %s', simulator_description, destination_path)
if not xcresult_collector.get_collected_results():
raise XcodeProjectException('Did not find any test results')
if True:
self.logger.info(Colors.GREEN('Found test results at'))
for xcresult in xcresult_collector.get_collected_results():
self.logger.info('- %s', xcresult)
self.logger.info('')
xcresult = xcresult_collector.get_merged_xcresult()
try:
test_suites = XcResultConverter.xcresult_to_junit(xcresult)
finally:
if output_dir:
shutil.copytree(xcresult, output_dir / xcresult.name)
xcresult_collector.forget_merged_result()
(test_suites, xcresult) = (test_suites, xcresult)
message = f'Executed {test_suites.tests} tests with {test_suites.failures} failures and {test_suites.errors} errors in {test_suites.time:.2f} seconds.\n'
self.echo(Colors.BLUE(message))
TestSuitePrinter(self.echo).print_test_suites(test_suites)
result_path = output_dir / f'{xcresult.stem}.{output_extension}'
test_suites.save_xml(result_path)
self.echo(Colors.GREEN('Saved JUnit XML report to %s'), result_path)
if not graceful_exit:
if testing_failed or (test_suites and test_suites.has_failed_tests()):
raise XcodeProjectException('Tests failed')
|
cli-tools
|
positive
|
def _data_parallel_master(self, intermediates):
"""Reduce the sum and square-sum, compute the statistics, and broadcast it."""
intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
to_reduce = [i[1][:2] for i in intermediates]
to_reduce = [j for i in to_reduce for j in i]
target_gpus = [i[1].sum.get_device() for i in intermediates]
sum_size = sum([i[1].sum_size for i in intermediates])
(sum_, ssum) = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
<DeepExtract>
assert sum_size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
mean = sum_ / sum_size
sumvar = ssum - sum_ * mean
unbias_var = sumvar / (sum_size - 1)
bias_var = sumvar / sum_size
self._tmp_running_mean = self._add_weighted(self._tmp_running_mean, mean.data, alpha=self._moving_average_fraction)
self._tmp_running_var = self._add_weighted(self._tmp_running_var, unbias_var.data, alpha=self._moving_average_fraction)
self._running_iter = self._add_weighted(self._running_iter, 1, alpha=self._moving_average_fraction)
self.running_mean = self._tmp_running_mean / self._running_iter
self.running_var = self._tmp_running_var / self._running_iter
(mean, inv_std) = (mean, bias_var.clamp(self.eps) ** (-0.5))
</DeepExtract>
broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
outputs = []
for (i, rec) in enumerate(intermediates):
outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2:i * 2 + 2])))
return outputs
|
def _data_parallel_master(self, intermediates):
"""Reduce the sum and square-sum, compute the statistics, and broadcast it."""
intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())
to_reduce = [i[1][:2] for i in intermediates]
to_reduce = [j for i in to_reduce for j in i]
target_gpus = [i[1].sum.get_device() for i in intermediates]
sum_size = sum([i[1].sum_size for i in intermediates])
(sum_, ssum) = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
assert sum_size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
mean = sum_ / sum_size
sumvar = ssum - sum_ * mean
unbias_var = sumvar / (sum_size - 1)
bias_var = sumvar / sum_size
self._tmp_running_mean = self._add_weighted(self._tmp_running_mean, mean.data, alpha=self._moving_average_fraction)
self._tmp_running_var = self._add_weighted(self._tmp_running_var, unbias_var.data, alpha=self._moving_average_fraction)
self._running_iter = self._add_weighted(self._running_iter, 1, alpha=self._moving_average_fraction)
self.running_mean = self._tmp_running_mean / self._running_iter
self.running_var = self._tmp_running_var / self._running_iter
(mean, inv_std) = (mean, bias_var.clamp(self.eps) ** (-0.5))
broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
outputs = []
for (i, rec) in enumerate(intermediates):
outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2:i * 2 + 2])))
return outputs
|
3D-SDN
|
positive
|
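The _data_parallel_master pair above reduces per-device (sum, sum-of-squares) pairs into a global mean and inverse standard deviation, keeping an unbiased variance for the running estimate and a biased one for normalizing the current batch. A small numpy sketch of that arithmetic on a single array:

import numpy as np

def stats_from_sums(sum_, ssum, n, eps=1e-5):
    # Mean and variance recovered from a sum and a sum of squares over n samples.
    mean = sum_ / n
    sumvar = ssum - sum_ * mean          # equals sum((x - mean) ** 2)
    unbias_var = sumvar / (n - 1)        # used for the running estimate
    bias_var = sumvar / n                # used to normalize the current batch
    inv_std = 1.0 / np.sqrt(np.maximum(bias_var, eps))
    return mean, unbias_var, inv_std

x = np.random.randn(1000, 4)
print(stats_from_sums(x.sum(0), (x ** 2).sum(0), x.shape[0]))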
def test_clean_up_interrupt(self):
<DeepExtract>
cmd_mgr = commandmanager.CommandManager('cliff.tests')
command = mock.MagicMock(spec=c_cmd.Command)
command_inst = mock.MagicMock(spec=c_cmd.Command)
command_inst.run.return_value = 0
command.return_value = command_inst
cmd_mgr.add_command('mock', command)
err_command = mock.Mock(name='err_command', spec=c_cmd.Command)
err_command_inst = mock.Mock(spec=c_cmd.Command)
err_command_inst.run = mock.Mock(side_effect=RuntimeError('test exception'))
err_command.return_value = err_command_inst
cmd_mgr.add_command('error', err_command)
interrupt_command = mock.Mock(name='interrupt_command', spec=c_cmd.Command)
interrupt_command_inst = mock.Mock(spec=c_cmd.Command)
interrupt_command_inst.run = mock.Mock(side_effect=KeyboardInterrupt)
interrupt_command.return_value = interrupt_command_inst
cmd_mgr.add_command('interrupt', interrupt_command)
pipeclose_command = mock.Mock(name='pipeclose_command', spec=c_cmd.Command)
pipeclose_command_inst = mock.Mock(spec=c_cmd.Command)
pipeclose_command_inst.run = mock.Mock(side_effect=BrokenPipeError)
pipeclose_command.return_value = pipeclose_command_inst
cmd_mgr.add_command('pipe-close', pipeclose_command)
app = application.App('testing interactive mode', '1', cmd_mgr, stderr=mock.Mock(), **kwargs)
(app, command) = (app, command)
</DeepExtract>
app.clean_up = mock.MagicMock(name='clean_up')
ret = app.run(['interrupt'])
self.assertNotEqual(ret, 0)
app.clean_up.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
call_args = app.clean_up.call_args_list[0]
self.assertEqual(mock.call(mock.ANY, 130, mock.ANY), call_args)
(args, kwargs) = call_args
self.assertIsInstance(args[2], KeyboardInterrupt)
|
def test_clean_up_interrupt(self):
cmd_mgr = commandmanager.CommandManager('cliff.tests')
command = mock.MagicMock(spec=c_cmd.Command)
command_inst = mock.MagicMock(spec=c_cmd.Command)
command_inst.run.return_value = 0
command.return_value = command_inst
cmd_mgr.add_command('mock', command)
err_command = mock.Mock(name='err_command', spec=c_cmd.Command)
err_command_inst = mock.Mock(spec=c_cmd.Command)
err_command_inst.run = mock.Mock(side_effect=RuntimeError('test exception'))
err_command.return_value = err_command_inst
cmd_mgr.add_command('error', err_command)
interrupt_command = mock.Mock(name='interrupt_command', spec=c_cmd.Command)
interrupt_command_inst = mock.Mock(spec=c_cmd.Command)
interrupt_command_inst.run = mock.Mock(side_effect=KeyboardInterrupt)
interrupt_command.return_value = interrupt_command_inst
cmd_mgr.add_command('interrupt', interrupt_command)
pipeclose_command = mock.Mock(name='pipeclose_command', spec=c_cmd.Command)
pipeclose_command_inst = mock.Mock(spec=c_cmd.Command)
pipeclose_command_inst.run = mock.Mock(side_effect=BrokenPipeError)
pipeclose_command.return_value = pipeclose_command_inst
cmd_mgr.add_command('pipe-close', pipeclose_command)
app = application.App('testing interactive mode', '1', cmd_mgr, stderr=mock.Mock(), **kwargs)
(app, command) = (app, command)
app.clean_up = mock.MagicMock(name='clean_up')
ret = app.run(['interrupt'])
self.assertNotEqual(ret, 0)
app.clean_up.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
call_args = app.clean_up.call_args_list[0]
self.assertEqual(mock.call(mock.ANY, 130, mock.ANY), call_args)
(args, kwargs) = call_args
self.assertIsInstance(args[2], KeyboardInterrupt)
|
cliff
|
positive
|
def plot(self, ax, gr: GenomeRange, **kwargs):
<DeepExtract>
gr = to_gr(gr)
if gr.chrom not in list(self.interval_tree):
gr.change_chrom_names()
regions = [(region.begin, region.end, region.data) for region in sorted(self.interval_tree[gr.chrom][gr.start - 10000:gr.end + 10000])]
</DeepExtract>
for (start, end, color) in regions:
if self.properties['color'] != 'bed_rgb':
color = self.properties['color']
if type(color) is not str:
color = rgb2hex(*color)
ax.axvspan(start, end, color=color, alpha=self.properties['alpha'])
if self.properties['border_line'] == 'yes':
(ymin, ymax) = ax.get_ylim()
ax.vlines([start, end], ymin, ymax, linestyle=self.properties['border_line_style'], linewidth=self.properties['border_line_width'], color=self.properties['border_line_color'], alpha=self.properties['border_line_alpha'])
|
def plot(self, ax, gr: GenomeRange, **kwargs):
gr = to_gr(gr)
if gr.chrom not in list(self.interval_tree):
gr.change_chrom_names()
regions = [(region.begin, region.end, region.data) for region in sorted(self.interval_tree[gr.chrom][gr.start - 10000:gr.end + 10000])]
for (start, end, color) in regions:
if self.properties['color'] != 'bed_rgb':
color = self.properties['color']
if type(color) is not str:
color = rgb2hex(*color)
ax.axvspan(start, end, color=color, alpha=self.properties['alpha'])
if self.properties['border_line'] == 'yes':
(ymin, ymax) = ax.get_ylim()
ax.vlines([start, end], ymin, ymax, linestyle=self.properties['border_line_style'], linewidth=self.properties['border_line_width'], color=self.properties['border_line_color'], alpha=self.properties['border_line_alpha'])
|
CoolBox
|
positive
|
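The plot pair above shades genome intervals with ax.axvspan and optionally draws vertical border lines at the interval edges. A minimal matplotlib sketch of that highlighting idiom on made-up intervals and colors:

import matplotlib
matplotlib.use('Agg')  # render off-screen
import matplotlib.pyplot as plt

regions = [(10, 25, '#ff9999'), (40, 55, '#99ccff')]  # hypothetical intervals
fig, ax = plt.subplots()
ax.plot(range(70), [i % 7 for i in range(70)])
for start, end, color in regions:
    ax.axvspan(start, end, color=color, alpha=0.4)
    ymin, ymax = ax.get_ylim()
    ax.vlines([start, end], ymin, ymax, colors='#444444', linestyles='dashed', linewidth=0.5)
fig.savefig('regions.png')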
def _do_auth_hmac_cb(self, uuid):
"""Perform mutual HMAC_CB authentication."""
<DeepExtract>
result = ['HMAC_CB']
    for (key, value) in kwargs.items():
        result.append(sep1)
        result.append(' ' if sep1 != ' ' else '')
        result.append(key)
        result.append('="')
        value = str(value)
        value = value.replace('\\', '\\\\').replace('"', '\\"')
        result.append(value)
result.append('"')
sep1 = sep2
wwwauth = ''.join(result)
</DeepExtract>
headers = [('WWW-Authenticate', wwwauth)]
auth = self.environ.get('HTTP_AUTHORIZATION')
if auth is None:
raise HTTPReturn(http.UNAUTHORIZED, headers)
try:
<DeepExtract>
options = {}
p1 = auth.find(sep1)
if p1 == -1:
(method, options) = (auth, options)
head = auth[:p1].strip()
optvals = auth[p1 + 1:].split(sep2)
for optval in optvals:
optval = optval.strip()
mobj = _re_optval.match(optval)
if mobj is None:
raise ValueError('Illegal option string')
key = mobj.group(1)
value = mobj.group(2)
if value.startswith('"'):
value = value[1:-1]
options[key] = value
(method, options) = (head, options)
</DeepExtract>
except ValueError:
raise HTTPReturn(http.UNAUTHORIZED, headers)
if method != 'HMAC_CB':
raise HTTPReturn(http.UNAUTHORIZED, headers)
if 'name' in options:
name = options['name']
if not self.allow_pairing:
raise HTTPReturn('403 Pairing Disabled')
from bluepass.ctrlapi import ControlApiServer
bus = instance(ControlApiServer)
kxid = crypto.random_cookie()
pin = '{0:06d}'.format(crypto.random_int(1000000))
for (_, protocol) in bus.connections:
if not getattr(protocol, '_ctrlapi_authenticated', False):
continue
approved = protocol.call_method('get_pairing_approval', name, uuid, pin, kxid)
break
if not approved:
raise HTTPReturn('403 Approval Denied')
restrictions = {}
self.key_exchanges[kxid] = (time.time(), restrictions, pin)
<DeepExtract>
result = ['HMAC_CB']
        for (key, value) in kwargs.items():
            result.append(sep1)
            result.append(' ' if sep1 != ' ' else '')
            result.append(key)
            result.append('="')
            value = str(value)
            value = value.replace('\\', '\\\\').replace('"', '\\"')
            result.append(value)
result.append('"')
sep1 = sep2
wwwauth = ''.join(result)
</DeepExtract>
headers = [('WWW-Authenticate', wwwauth)]
raise HTTPReturn(http.UNAUTHORIZED, headers)
elif 'kxid' in options:
kxid = options['kxid']
if kxid not in self.key_exchanges:
raise HTTPReturn(http.FORBIDDEN)
(starttime, restrictions, pin) = self.key_exchanges.pop(kxid)
signature = base64.try_decode(options.get('signature', ''))
if not signature:
raise HTTPReturn(http.FORBIDDEN)
now = time.time()
if now - starttime > 60:
raise HTTPReturn('403 Request Timeout')
<DeepExtract>
transport = self.environ['gruvi.transport']
sslinfo = transport.get_extra_info('sslinfo')
cb = sslinfo.get_channel_binding('tls-unique')
</DeepExtract>
check = crypto.hmac(adjust_pin(pin, +1).encode('ascii'), cb, 'sha1')
if check != signature:
raise HTTPReturn('403 Invalid PIN')
from bluepass.ctrlapi import ControlApiServer
bus = instance(ControlApiServer)
for (_, protocol) in bus.connections:
protocol.send_notification('PairingComplete', kxid)
signature = crypto.hmac(adjust_pin(pin, -1).encode('ascii'), cb, 'sha1')
signature = base64.encode(signature)
<DeepExtract>
result = ['HMAC_CB']
        for (key, value) in kwargs.items():
            result.append(sep1)
            result.append(' ' if sep1 != ' ' else '')
            result.append(key)
            result.append('="')
            value = str(value)
            value = value.replace('\\', '\\\\').replace('"', '\\"')
            result.append(value)
result.append('"')
sep1 = sep2
authinfo = ''.join(result)
</DeepExtract>
self.headers.append(('Authentication-Info', authinfo))
else:
raise HTTPReturn(http.UNAUTHORIZED, headers)
|
def _do_auth_hmac_cb(self, uuid):
"""Perform mutual HMAC_CB authentication."""
result = ['HMAC_CB']
    for (key, value) in kwargs.items():
        result.append(sep1)
        result.append(' ' if sep1 != ' ' else '')
        result.append(key)
        result.append('="')
        value = str(value)
        value = value.replace('\\', '\\\\').replace('"', '\\"')
        result.append(value)
result.append('"')
sep1 = sep2
wwwauth = ''.join(result)
headers = [('WWW-Authenticate', wwwauth)]
auth = self.environ.get('HTTP_AUTHORIZATION')
if auth is None:
raise HTTPReturn(http.UNAUTHORIZED, headers)
try:
options = {}
p1 = auth.find(sep1)
if p1 == -1:
(method, options) = (auth, options)
head = auth[:p1].strip()
optvals = auth[p1 + 1:].split(sep2)
for optval in optvals:
optval = optval.strip()
mobj = _re_optval.match(optval)
if mobj is None:
raise ValueError('Illegal option string')
key = mobj.group(1)
value = mobj.group(2)
if value.startswith('"'):
value = value[1:-1]
options[key] = value
(method, options) = (head, options)
except ValueError:
raise HTTPReturn(http.UNAUTHORIZED, headers)
if method != 'HMAC_CB':
raise HTTPReturn(http.UNAUTHORIZED, headers)
if 'name' in options:
name = options['name']
if not self.allow_pairing:
raise HTTPReturn('403 Pairing Disabled')
from bluepass.ctrlapi import ControlApiServer
bus = instance(ControlApiServer)
kxid = crypto.random_cookie()
pin = '{0:06d}'.format(crypto.random_int(1000000))
for (_, protocol) in bus.connections:
if not getattr(protocol, '_ctrlapi_authenticated', False):
continue
approved = protocol.call_method('get_pairing_approval', name, uuid, pin, kxid)
break
if not approved:
raise HTTPReturn('403 Approval Denied')
restrictions = {}
self.key_exchanges[kxid] = (time.time(), restrictions, pin)
result = ['HMAC_CB']
for (key, 'HMAC_CB') in kwargs.items():
result.append(sep1)
result.append(' ' if sep1 != ' ' else '')
result.append(key)
result.append('="')
'HMAC_CB' = str('HMAC_CB')
'HMAC_CB' = 'HMAC_CB'.replace('\\', '\\\\').replace('"', '\\"')
result.append('HMAC_CB')
result.append('"')
sep1 = sep2
wwwauth = ''.join(result)
headers = [('WWW-Authenticate', wwwauth)]
raise HTTPReturn(http.UNAUTHORIZED, headers)
elif 'kxid' in options:
kxid = options['kxid']
if kxid not in self.key_exchanges:
raise HTTPReturn(http.FORBIDDEN)
(starttime, restrictions, pin) = self.key_exchanges.pop(kxid)
signature = base64.try_decode(options.get('signature', ''))
if not signature:
raise HTTPReturn(http.FORBIDDEN)
now = time.time()
if now - starttime > 60:
raise HTTPReturn('403 Request Timeout')
transport = self.environ['gruvi.transport']
sslinfo = transport.get_extra_info('sslinfo')
cb = sslinfo.get_channel_binding('tls-unique')
check = crypto.hmac(adjust_pin(pin, +1).encode('ascii'), cb, 'sha1')
if check != signature:
raise HTTPReturn('403 Invalid PIN')
from bluepass.ctrlapi import ControlApiServer
bus = instance(ControlApiServer)
for (_, protocol) in bus.connections:
protocol.send_notification('PairingComplete', kxid)
signature = crypto.hmac(adjust_pin(pin, -1).encode('ascii'), cb, 'sha1')
signature = base64.encode(signature)
result = ['HMAC_CB']
for (key, 'HMAC_CB') in kwargs.items():
result.append(sep1)
result.append(' ' if sep1 != ' ' else '')
result.append(key)
result.append('="')
'HMAC_CB' = str('HMAC_CB')
'HMAC_CB' = 'HMAC_CB'.replace('\\', '\\\\').replace('"', '\\"')
result.append('HMAC_CB')
result.append('"')
sep1 = sep2
authinfo = ''.join(result)
self.headers.append(('Authentication-Info', authinfo))
else:
raise HTTPReturn(http.UNAUTHORIZED, headers)
|
bluepass
|
positive
|
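The inlined helper above that assembles the WWW-Authenticate and Authentication-Info header values had its loop variables replaced by the literal 'HMAC_CB' during extraction, so it no longer parses. A minimal standalone sketch of the pattern it appears to implement (the name make_auth_header and the separator handling are assumptions, not the original bluepass API):
def make_auth_header(scheme, **kwargs):
    # Build e.g. 'HMAC_CB realm="bluepass", kxid="1234"' from a scheme name
    # and keyword options, escaping backslashes and double quotes.
    parts = [scheme]
    sep = ' '
    for key, value in kwargs.items():
        value = str(value).replace('\\', '\\\\').replace('"', '\\"')
        parts.append('{}{}="{}"'.format(sep, key, value))
        sep = ', '
    return ''.join(parts)
print(make_auth_header('HMAC_CB', realm='bluepass'))  # HMAC_CB realm="bluepass"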
def load(self, dtype_out_time, dtype_out_vert=False, region=False, plot_units=False, mask_unphysical=False):
"""Load the data from the object if possible or from disk."""
msg = 'Loading data from disk for object={0}, dtype_out_time={1}, dtype_out_vert={2}, and region={3}'.format(self, dtype_out_time, dtype_out_vert, region)
logging.info(msg + ' ({})'.format(ctime()))
try:
data = self.data_out[dtype_out_time]
except (AttributeError, KeyError):
try:
<DeepExtract>
ds = xr.open_dataset(self.path_out[dtype_out_time])
if region:
arr = ds[region.name]
if self.dtype_in_vert == internal_names.ETA_STR and (not dtype_out_vert):
reg_pfull_str = region.name + '_pressure'
arr = arr.drop_vars([r for r in arr.coords.iterkeys() if r not in (internal_names.PFULL_STR, reg_pfull_str)])
arr = arr.rename({internal_names.PFULL_STR: internal_names.PFULL_STR + '_ref'})
if hasattr(arr, reg_pfull_str):
data = arr.rename({reg_pfull_str: internal_names.PFULL_STR})
data = arr
data = arr
data = ds[self.name]
</DeepExtract>
except IOError:
<DeepExtract>
path = os.path.join(self.dir_tar_out, 'data.tar')
utils.io.dmget([path])
with tarfile.open(path, 'r') as data_tar:
ds = xr.open_dataset(data_tar.extractfile(self.file_name[dtype_out_time]))
data = ds[self.name]
</DeepExtract>
<DeepExtract>
try:
self.data_out.update({dtype_out_time: data})
except AttributeError:
self.data_out = {dtype_out_time: data}
</DeepExtract>
if mask_unphysical:
data = self.var.mask_unphysical(data)
if plot_units:
data = self.var.to_plot_units(data, dtype_vert=dtype_out_vert)
return data
|
def load(self, dtype_out_time, dtype_out_vert=False, region=False, plot_units=False, mask_unphysical=False):
"""Load the data from the object if possible or from disk."""
msg = 'Loading data from disk for object={0}, dtype_out_time={1}, dtype_out_vert={2}, and region={3}'.format(self, dtype_out_time, dtype_out_vert, region)
logging.info(msg + ' ({})'.format(ctime()))
try:
data = self.data_out[dtype_out_time]
except (AttributeError, KeyError):
try:
ds = xr.open_dataset(self.path_out[dtype_out_time])
if region:
arr = ds[region.name]
if self.dtype_in_vert == internal_names.ETA_STR and (not dtype_out_vert):
reg_pfull_str = region.name + '_pressure'
arr = arr.drop_vars([r for r in arr.coords.iterkeys() if r not in (internal_names.PFULL_STR, reg_pfull_str)])
arr = arr.rename({internal_names.PFULL_STR: internal_names.PFULL_STR + '_ref'})
if hasattr(arr, reg_pfull_str):
data = arr.rename({reg_pfull_str: internal_names.PFULL_STR})
data = arr
data = arr
data = ds[self.name]
except IOError:
path = os.path.join(self.dir_tar_out, 'data.tar')
utils.io.dmget([path])
with tarfile.open(path, 'r') as data_tar:
ds = xr.open_dataset(data_tar.extractfile(self.file_name[dtype_out_time]))
data = ds[self.name]
try:
self.data_out.update({dtype_out_time: data})
except AttributeError:
self.data_out = {dtype_out_time: data}
if mask_unphysical:
data = self.var.mask_unphysical(data)
if plot_units:
data = self.var.to_plot_units(data, dtype_vert=dtype_out_vert)
return data
|
aospy
|
positive
|
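The load() pair above follows a cache-first, disk-fallback pattern around self.data_out. A minimal sketch of that pattern in isolation, with a plain callable standing in for the xarray/tar loading branches (CachedLoader is an illustrative name, not part of aospy):
class CachedLoader:
    def __init__(self, read_from_disk):
        # read_from_disk: callable mapping a key to freshly loaded data.
        self._read_from_disk = read_from_disk
    def load(self, key):
        try:
            return self._cache[key]
        except (AttributeError, KeyError):
            data = self._read_from_disk(key)
        # Lazily create the cache dict on first use, as the original does
        # with self.data_out.
        try:
            self._cache.update({key: data})
        except AttributeError:
            self._cache = {key: data}
        return data
loader = CachedLoader(lambda key: key.upper())
assert loader.load('av') == 'AV'
assert loader.load('av') == 'AV'  # second call is served from the cache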
def shell(self, node: Optional[str], command: Optional[str]) -> int:
cmd = 'vagrant ssh'
if node:
cmd += f' {node}'
if command:
cmd += f" -c '{command}'"
found = False
for (node_name, node_state) in self.nodes_status:
if node == node_name:
if node_state != VagrantStateEnum.RUNNING:
raise DeploymentNodeNotRunningError(node_name)
found = True
break
if node and (not found):
raise DeploymentNodeDoesNotExistError(node)
<DeepExtract>
with self.safeenv() as env:
capture: Optional[int] = subprocess.PIPE
if True:
capture = None
proc = subprocess.run(shlex.split(cmd), stderr=capture, stdout=capture, env=env)
stdout = '' if True else proc.stdout.decode('utf-8')
stderr = '' if True else proc.stderr.decode('utf-8')
(retcode, _, _) = (proc.returncode, stdout, stderr)
</DeepExtract>
return retcode
|
def shell(self, node: Optional[str], command: Optional[str]) -> int:
cmd = 'vagrant ssh'
if node:
cmd += f' {node}'
if command:
cmd += f" -c '{command}'"
found = False
for (node_name, node_state) in self.nodes_status:
if node == node_name:
if node_state != VagrantStateEnum.RUNNING:
raise DeploymentNodeNotRunningError(node_name)
found = True
break
if node and (not found):
raise DeploymentNodeDoesNotExistError(node)
with self.safeenv() as env:
capture: Optional[int] = subprocess.PIPE
if True:
capture = None
proc = subprocess.run(shlex.split(cmd), stderr=capture, stdout=capture, env=env)
stdout = '' if True else proc.stdout.decode('utf-8')
stderr = '' if True else proc.stderr.decode('utf-8')
(retcode, _, _) = (proc.returncode, stdout, stderr)
return retcode
|
aquarium
|
positive
|
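The shell() sample above ultimately delegates to subprocess.run without capturing output, so the Vagrant SSH session stays attached to the terminal. A small sketch of that branch (run_interactive is an illustrative name, not part of aquarium):
import shlex
import subprocess
def run_interactive(cmd: str) -> int:
    # Run the command attached to the current terminal (stdout/stderr are not
    # captured), mirroring the capture=None branch of the inlined helper.
    proc = subprocess.run(shlex.split(cmd))
    return proc.returncode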
def _do_autolabel_torrent(torrent_id):
assert torrent_id in self._torrents
<DeepExtract>
assert torrent_id in self._torrents
label_ids = self._get_sorted_labels(cmp_length_then_value)
for id in label_ids:
if self._labels[id]['options']['autolabel_settings']:
if self._has_autolabel_match(torrent_id, id):
label_id = id
label_id = labelplus.common.label.ID_NONE
</DeepExtract>
if label_id != self._get_torrent_label_id(torrent_id):
<DeepExtract>
assert torrent_id in self._torrents
assert label_id == labelplus.common.label.ID_NONE or label_id in self._labels
if torrent_id in self._mappings:
self._remove_torrent_label(torrent_id)
if label_id == labelplus.common.label.ID_NONE:
if self._prefs['options']['reset_on_label_unset']:
self._reset_torrent_options(torrent_id)
else:
self._mappings[torrent_id] = label_id
self._index[label_id]['torrents'].append(torrent_id)
self._apply_torrent_options(torrent_id)
</DeepExtract>
log.debug('Setting torrent %r to label %r', torrent_id, label_id)
self._timestamp['mappings_changed'] = datetime.datetime.now()
return label_id
|
def _do_autolabel_torrent(torrent_id):
assert torrent_id in self._torrents
assert torrent_id in self._torrents
label_ids = self._get_sorted_labels(cmp_length_then_value)
for id in label_ids:
if self._labels[id]['options']['autolabel_settings']:
if self._has_autolabel_match(torrent_id, id):
label_id = id
label_id = labelplus.common.label.ID_NONE
if label_id != self._get_torrent_label_id(torrent_id):
assert torrent_id in self._torrents
assert label_id == labelplus.common.label.ID_NONE or label_id in self._labels
if torrent_id in self._mappings:
self._remove_torrent_label(torrent_id)
if label_id == labelplus.common.label.ID_NONE:
if self._prefs['options']['reset_on_label_unset']:
self._reset_torrent_options(torrent_id)
else:
self._mappings[torrent_id] = label_id
self._index[label_id]['torrents'].append(torrent_id)
self._apply_torrent_options(torrent_id)
log.debug('Setting torrent %r to label %r', torrent_id, label_id)
self._timestamp['mappings_changed'] = datetime.datetime.now()
return label_id
|
deluge-labelplus
|
positive
|
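In the autolabel sample above, the inlined matching block assigns label_id twice in a row because the original helper's early return was lost in extraction. The intended "first match or default" behaviour is roughly the following (illustrative helper, not the labelplus API):
def first_match(candidates, predicate, default=None):
    # Return the first candidate accepted by predicate, otherwise the default.
    for candidate in candidates:
        if predicate(candidate):
            return candidate
    return default
assert first_match([1, 2, 3], lambda n: n % 2 == 0) == 2
assert first_match([1, 3], lambda n: n % 2 == 0, default='NONE') == 'NONE'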
def process_template(self, template_name, data_map, handler_input, **kwargs):
"""Process template and data using provided list of
:py:class:`ask_sdk_core.view_resolver.AbstractTemplateLoader` and
:py:class:`ask_sdk_core.view_resolver.AbstractTemplateRenderer` to
generate skill response output.
The additional arguments can contain information for the loader
for eg: file extension of the templates.
:param template_name: name of response template
:type template_name: str
:param data_map: map contains injecting data
:type data_map: Dict[str, object]
:param handler_input: Handler Input instance with Request Envelope
containing Request.
:type handler_input: :py:class:`ask_sdk_core.handler_input.HandlerInput`
:param kwargs: Additional keyword arguments for loader and renderer.
:return: Skill Response output
:rtype: :py:class:`ask_sdk_model.response.Response`
"""
assert_not_null(template_name, 'Template Name')
assert_not_null(data_map, 'Data Map')
assert_not_null(self.template_loaders, 'Template Loaders list')
assert_not_null(self.renderer, 'Template Renderer')
<DeepExtract>
for template_loader in self.template_loaders:
try:
template_content = template_loader.load(handler_input, template_name, **kwargs)
if template_content is not None:
template_content = template_content
except Exception as e:
raise TemplateLoaderException('Failed to load the template: {} using {} with error : {}'.format(template_name, template_loader, str(e)))
raise TemplateLoaderException('Unable to load template: {} using provided loaders.'.format(template_name))
</DeepExtract>
<DeepExtract>
try:
response = self.renderer.render(template_content, data_map, **kwargs)
except Exception as e:
raise TemplateRendererException('Failed to render template: {} using {} with error: {}'.format(template_content, self.renderer, str(e)))
</DeepExtract>
return response
|
def process_template(self, template_name, data_map, handler_input, **kwargs):
"""Process template and data using provided list of
:py:class:`ask_sdk_core.view_resolver.AbstractTemplateLoader` and
:py:class:`ask_sdk_core.view_resolver.AbstractTemplateRenderer` to
generate skill response output.
The additional arguments can contain information for the loader
for eg: file extension of the templates.
:param template_name: name of response template
:type template_name: str
:param data_map: map contains injecting data
:type data_map: Dict[str, object]
:param handler_input: Handler Input instance with Request Envelope
containing Request.
:type handler_input: :py:class:`ask_sdk_core.handler_input.HandlerInput`
:param kwargs: Additional keyword arguments for loader and renderer.
:return: Skill Response output
:rtype: :py:class:`ask_sdk_model.response.Response`
"""
assert_not_null(template_name, 'Template Name')
assert_not_null(data_map, 'Data Map')
assert_not_null(self.template_loaders, 'Template Loaders list')
assert_not_null(self.renderer, 'Template Renderer')
for template_loader in self.template_loaders:
try:
template_content = template_loader.load(handler_input, template_name, **kwargs)
if template_content is not None:
template_content = template_content
except Exception as e:
raise TemplateLoaderException('Failed to load the template: {} using {} with error : {}'.format(template_name, template_loader, str(e)))
raise TemplateLoaderException('Unable to load template: {} using provided loaders.'.format(template_name))
try:
response = self.renderer.render(template_content, data_map, **kwargs)
except Exception as e:
raise TemplateRendererException('Failed to render template: {} using {} with error: {}'.format(template_content, self.renderer, str(e)))
return response
|
alexa-skills-kit-sdk-for-python
|
positive
|
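process_template() above walks a list of template loaders and uses the first one that yields content. A simplified sketch of that fallback loop (error wrapping into TemplateLoaderException is omitted; load_with_fallback is an illustrative name):
def load_with_fallback(loaders, name):
    # Try each loader in turn; a loader may return None to signal "not found".
    # The first non-None result wins, otherwise a LookupError is raised.
    for loader in loaders:
        content = loader(name)
        if content is not None:
            return content
    raise LookupError('unable to load template: {!r}'.format(name))
loaders = [lambda n: None, lambda n: n + '.jinja']
assert load_with_fallback(loaders, 'welcome') == 'welcome.jinja'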
def set_mesos_api_version(service_name, api_version, timeout=600):
"""Sets the mesos API version to the provided value, and then verifies that the scheduler comes back successfully"""
<DeepExtract>
@retrying.retry(wait_fixed=1000, stop_max_delay=timeout * 1000)
def wait_for_response():
config = _get_config_once(service_name).json()['app']
config = wait_for_response()
if 'uris' in config:
del config['uris']
if 'version' in config:
del config['version']
config = config
</DeepExtract>
config['env']['MESOS_API_VERSION'] = api_version
<DeepExtract>
if 'env' in config:
log.info('Environment for marathon app {} ({} values):'.format(service_name, len(config['env'])))
for k in sorted(config['env']):
log.info(' {}={}'.format(k, config['env'][k]))
query_string = '?force=true' if force else ''
sdk_cmd.cluster_request('PUT', _api_url('apps/{}{}'.format(service_name, query_string)), log_args=False, json=config)
if wait_for_completed_deployment:
log.info('Waiting for Marathon deployment of {} to complete...'.format(service_name))
shakedown.deployment_wait(app_id=service_name, timeout=timeout)
</DeepExtract>
sdk_metrics.wait_for_scheduler_counter_value(service_name, 'offers.processed', 1, timeout_seconds=timeout)
|
def set_mesos_api_version(service_name, api_version, timeout=600):
"""Sets the mesos API version to the provided value, and then verifies that the scheduler comes back successfully"""
@retrying.retry(wait_fixed=1000, stop_max_delay=timeout * 1000)
def wait_for_response():
config = _get_config_once(service_name).json()['app']
config = wait_for_response()
if 'uris' in config:
del config['uris']
if 'version' in config:
del config['version']
config = config
config['env']['MESOS_API_VERSION'] = api_version
if 'env' in config:
log.info('Environment for marathon app {} ({} values):'.format(service_name, len(config['env'])))
for k in sorted(config['env']):
log.info(' {}={}'.format(k, config['env'][k]))
query_string = '?force=true' if force else ''
sdk_cmd.cluster_request('PUT', _api_url('apps/{}{}'.format(service_name, query_string)), log_args=False, json=config)
if wait_for_completed_deployment:
log.info('Waiting for Marathon deployment of {} to complete...'.format(service_name))
shakedown.deployment_wait(app_id=service_name, timeout=timeout)
sdk_metrics.wait_for_scheduler_counter_value(service_name, 'offers.processed', 1, timeout_seconds=timeout)
|
dcos-jenkins-service
|
positive
|
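set_mesos_api_version() above relies on the retrying library to poll Marathon until the config request succeeds. A plain-Python stand-in for that decorator, shown only to illustrate the polling pattern (retry_until is an assumed name):
import time
def retry_until(fn, timeout=600, interval=1.0):
    # Poll fn until it returns without raising or the timeout elapses.
    deadline = time.time() + timeout
    while True:
        try:
            return fn()
        except Exception:
            if time.time() >= deadline:
                raise
            time.sleep(interval)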
@pytest.mark.usefixtures('products')
def test_get_only_aggregate(get_product_flat):
<DeepExtract>
def helper(queries, *args, **kwargs):
res = 1(queries, *args, **kwargs)
if res['body'] == []:
assert res['cols'] == []
assert res['rows'] == []
data = []
else:
assert res['cols'] == [[]]
data = [r + b for (r, b) in zip(res['rows'], res['body'][0])]
data = helper
</DeepExtract>
assert data == [[3]]
|
@pytest.mark.usefixtures('products')
def test_get_only_aggregate(get_product_flat):
def helper(queries, *args, **kwargs):
res = 1(queries, *args, **kwargs)
if res['body'] == []:
assert res['cols'] == []
assert res['rows'] == []
data = []
else:
assert res['cols'] == [[]]
data = [r + b for (r, b) in zip(res['rows'], res['body'][0])]
data = helper
assert data == [[3]]
|
django-data-browser
|
positive
|
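In the test above, the fixture callable was replaced by the literal 1 during extraction (res = 1(queries, ...)), which cannot run; the surviving logic pairs each row with the single aggregate column. That zip step in isolation, with made-up sample data:
rows = [['apple'], ['banana']]
body = [[[1], [2]]]          # a single aggregate column
# Append the aggregate value(s) to each row, as the helper above does when
# exactly one aggregate column is returned.
data = [r + b for r, b in zip(rows, body[0])]
assert data == [['apple', 1], ['banana', 2]]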
def make_gt(img, labels, sigma=10, one_mask_per_point=False):
""" Make the ground-truth for landmark.
img: the original color image
labels: label with the Gaussian center(s) [[x0, y0],[x1, y1],...]
sigma: sigma of the Gaussian.
one_mask_per_point: masks for each point in different channels?
"""
(h, w) = img.shape[:2]
if labels is None:
<DeepExtract>
x = np.arange(0, (h, w)[1], 1, float)
y = np.arange(0, (h, w)[0], 1, float)
y = y[:, np.newaxis]
if (h // 2, w // 2) is None:
x0 = y0 = (h, w)[0] // 2
else:
x0 = (h // 2, w // 2)[0]
y0 = (h // 2, w // 2)[1]
gt = np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2).astype(d_type)
</DeepExtract>
else:
labels = np.array(labels)
if labels.ndim == 1:
labels = labels[np.newaxis]
if one_mask_per_point:
gt = np.zeros(shape=(h, w, labels.shape[0]))
for ii in range(labels.shape[0]):
<DeepExtract>
x = np.arange(0, (h, w)[1], 1, float)
y = np.arange(0, (h, w)[0], 1, float)
y = y[:, np.newaxis]
if labels[ii, :] is None:
x0 = y0 = (h, w)[0] // 2
else:
x0 = labels[ii, :][0]
y0 = labels[ii, :][1]
gt[:, :, ii] = np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2).astype(d_type)
</DeepExtract>
else:
gt = np.zeros(shape=(h, w), dtype=np.float64)
for ii in range(labels.shape[0]):
gt = np.maximum(gt, make_gaussian((h, w), center=labels[ii, :], sigma=sigma))
gt = gt.astype(dtype=img.dtype)
return gt
|
def make_gt(img, labels, sigma=10, one_mask_per_point=False):
""" Make the ground-truth for landmark.
img: the original color image
labels: label with the Gaussian center(s) [[x0, y0],[x1, y1],...]
sigma: sigma of the Gaussian.
one_mask_per_point: masks for each point in different channels?
"""
(h, w) = img.shape[:2]
if labels is None:
x = np.arange(0, (h, w)[1], 1, float)
y = np.arange(0, (h, w)[0], 1, float)
y = y[:, np.newaxis]
if (h // 2, w // 2) is None:
x0 = y0 = (h, w)[0] // 2
else:
x0 = (h // 2, w // 2)[0]
y0 = (h // 2, w // 2)[1]
gt = np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2).astype(d_type)
else:
labels = np.array(labels)
if labels.ndim == 1:
labels = labels[np.newaxis]
if one_mask_per_point:
gt = np.zeros(shape=(h, w, labels.shape[0]))
for ii in range(labels.shape[0]):
x = np.arange(0, (h, w)[1], 1, float)
y = np.arange(0, (h, w)[0], 1, float)
y = y[:, np.newaxis]
if labels[ii, :] is None:
x0 = y0 = (h, w)[0] // 2
else:
x0 = labels[ii, :][0]
y0 = labels[ii, :][1]
gt[:, :, ii] = np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2).astype(d_type)
else:
gt = np.zeros(shape=(h, w), dtype=np.float64)
for ii in range(labels.shape[0]):
gt = np.maximum(gt, make_gaussian((h, w), center=labels[ii, :], sigma=sigma))
gt = gt.astype(dtype=img.dtype)
return gt
|
CvStudio
|
positive
|
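make_gt() above inlines a 2-D Gaussian heatmap helper. A hedged reconstruction of that helper (make_gaussian is the name the non-inlined branch still calls; the default-center convention is an assumption):
import numpy as np
def make_gaussian(size, center=None, sigma=10, d_type=np.float64):
    # 2-D Gaussian heatmap of the given (h, w) size, peaking at `center`.
    h, w = size
    x = np.arange(0, w, 1, float)
    y = np.arange(0, h, 1, float)[:, np.newaxis]
    if center is None:
        x0 = y0 = h // 2
    else:
        x0, y0 = center[0], center[1]
    return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2).astype(d_type)
heatmap = make_gaussian((64, 64), center=(32, 16), sigma=10)
assert heatmap.shape == (64, 64) and heatmap[16, 32] == 1.0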
@_check_for_oracledb
def read_sql_table(table: str, con: 'oracledb.Connection', schema: Optional[str]=None, index_col: Optional[Union[str, List[str]]]=None, params: Optional[Union[List[Any], Tuple[Any, ...], Dict[Any, Any]]]=None, chunksize: Optional[int]=None, dtype: Optional[Dict[str, pa.DataType]]=None, safe: bool=True, timestamp_as_object: bool=False) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Return a DataFrame corresponding the table.
Parameters
----------
table : str
Table name.
con : oracledb.Connection
Use oracledb.connect() to use credentials directly or wr.oracle.connect() to fetch it from the Glue Catalog.
schema : str, optional
Name of SQL schema in database to query (if database flavor supports this).
Uses default schema if None (default).
index_col : Union[str, List[str]], optional
Column(s) to set as index(MultiIndex).
params : Union[List, Tuple, Dict], optional
List of parameters to pass to execute method.
The syntax used to pass parameters is database driver dependent.
Check your database driver documentation for which of the five syntax styles,
described in PEP 249’s paramstyle, is supported.
chunksize : int, optional
If specified, return an iterator where chunksize is the number of rows to include in each chunk.
dtype : Dict[str, pyarrow.DataType], optional
Specifying the datatype for columns.
The keys should be the column names and the values should be the PyArrow types.
safe : bool
Check for overflows or other unsafe data type conversions.
timestamp_as_object : bool
Cast non-nanosecond timestamps (np.datetime64) to objects.
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Result as Pandas DataFrame(s).
Examples
--------
Reading from Oracle Database using a Glue Catalog Connections
>>> import awswrangler as wr
>>> con = wr.oracle.connect(connection="MY_GLUE_CONNECTION")
>>> df = wr.oracle.read_sql_table(
... table="my_table",
... schema="test",
... con=con
... )
>>> con.close()
"""
<DeepExtract>
schema_str = f'"{schema}".' if schema else ''
table_identifier = f'{schema_str}"{table}"'
table_identifier = table_identifier
</DeepExtract>
sql: str = f'SELECT * FROM {table_identifier}'
return read_sql_query(sql=sql, con=con, index_col=index_col, params=params, chunksize=chunksize, dtype=dtype, safe=safe, timestamp_as_object=timestamp_as_object)
|
@_check_for_oracledb
def read_sql_table(table: str, con: 'oracledb.Connection', schema: Optional[str]=None, index_col: Optional[Union[str, List[str]]]=None, params: Optional[Union[List[Any], Tuple[Any, ...], Dict[Any, Any]]]=None, chunksize: Optional[int]=None, dtype: Optional[Dict[str, pa.DataType]]=None, safe: bool=True, timestamp_as_object: bool=False) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Return a DataFrame corresponding the table.
Parameters
----------
table : str
Table name.
con : oracledb.Connection
Use oracledb.connect() to use credentials directly or wr.oracle.connect() to fetch it from the Glue Catalog.
schema : str, optional
Name of SQL schema in database to query (if database flavor supports this).
Uses default schema if None (default).
index_col : Union[str, List[str]], optional
Column(s) to set as index(MultiIndex).
params : Union[List, Tuple, Dict], optional
List of parameters to pass to execute method.
The syntax used to pass parameters is database driver dependent.
Check your database driver documentation for which of the five syntax styles,
described in PEP 249’s paramstyle, is supported.
chunksize : int, optional
If specified, return an iterator where chunksize is the number of rows to include in each chunk.
dtype : Dict[str, pyarrow.DataType], optional
Specifying the datatype for columns.
The keys should be the column names and the values should be the PyArrow types.
safe : bool
Check for overflows or other unsafe data type conversions.
timestamp_as_object : bool
Cast non-nanosecond timestamps (np.datetime64) to objects.
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Result as Pandas DataFrame(s).
Examples
--------
Reading from Oracle Database using a Glue Catalog Connections
>>> import awswrangler as wr
>>> con = wr.oracle.connect(connection="MY_GLUE_CONNECTION")
>>> df = wr.oracle.read_sql_table(
... table="my_table",
... schema="test",
... con=con
... )
>>> con.close()
"""
schema_str = f'"{schema}".' if schema else ''
table_identifier = f'{schema_str}"{table}"'
table_identifier = table_identifier
sql: str = f'SELECT * FROM {table_identifier}'
return read_sql_query(sql=sql, con=con, index_col=index_col, params=params, chunksize=chunksize, dtype=dtype, safe=safe, timestamp_as_object=timestamp_as_object)
|
aws-data-wrangler
|
positive
|
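read_sql_table() above builds a double-quoted Oracle table identifier inline. The same logic as a standalone helper (the function name is illustrative):
from typing import Optional
def build_table_identifier(table: str, schema: Optional[str] = None) -> str:
    # Double-quote the schema and table name for the generated SELECT.
    schema_str = f'"{schema}".' if schema else ''
    return f'{schema_str}"{table}"'
assert build_table_identifier('my_table', 'test') == '"test"."my_table"'
assert build_table_identifier('my_table') == '"my_table"'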
def _create_table(database: str, table: str, description: Optional[str], parameters: Optional[Dict[str, str]], mode: str, catalog_versioning: bool, boto3_session: Optional[boto3.Session], table_input: Dict[str, Any], table_type: Optional[str], table_exist: bool, projection_enabled: bool, partitions_types: Optional[Dict[str, str]], columns_comments: Optional[Dict[str, str]], transaction_id: Optional[str], projection_types: Optional[Dict[str, str]], projection_ranges: Optional[Dict[str, str]], projection_values: Optional[Dict[str, str]], projection_intervals: Optional[Dict[str, str]], projection_digits: Optional[Dict[str, str]], projection_formats: Optional[Dict[str, str]], projection_storage_location_template: Optional[str], catalog_id: Optional[str]) -> None:
<DeepExtract>
if description is not None:
if 'Description' not in table_input or table_input['Description'] != description:
table_input['Description'] = description
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
</DeepExtract>
if 'Parameters' not in table_input:
table_input['Parameters'] = {}
parameters = parameters if parameters else {}
for (k, v) in parameters.items():
<DeepExtract>
if v is not None:
if k not in table_input['Parameters'] or table_input['Parameters'][k] != v:
table_input['Parameters'][k] = v
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
</DeepExtract>
if projection_enabled is True:
table_input['Parameters']['projection.enabled'] = 'true'
partitions_types = partitions_types if partitions_types else {}
projection_types = projection_types if projection_types else {}
projection_ranges = projection_ranges if projection_ranges else {}
projection_values = projection_values if projection_values else {}
projection_intervals = projection_intervals if projection_intervals else {}
projection_digits = projection_digits if projection_digits else {}
projection_formats = projection_formats if projection_formats else {}
projection_types = {sanitize_column_name(k): v for (k, v) in projection_types.items()}
projection_ranges = {sanitize_column_name(k): v for (k, v) in projection_ranges.items()}
projection_values = {sanitize_column_name(k): v for (k, v) in projection_values.items()}
projection_intervals = {sanitize_column_name(k): v for (k, v) in projection_intervals.items()}
projection_digits = {sanitize_column_name(k): v for (k, v) in projection_digits.items()}
projection_formats = {sanitize_column_name(k): v for (k, v) in projection_formats.items()}
for (k, v) in projection_types.items():
dtype: Optional[str] = partitions_types.get(k)
if dtype is None and projection_storage_location_template is None:
raise exceptions.InvalidArgumentCombination(f'Column {k} appears as projected column but not as partitioned column.')
if dtype == 'date':
table_input['Parameters'][f'projection.{k}.format'] = 'yyyy-MM-dd'
elif dtype == 'timestamp':
table_input['Parameters'][f'projection.{k}.format'] = 'yyyy-MM-dd HH:mm:ss'
table_input['Parameters'][f'projection.{k}.interval.unit'] = 'SECONDS'
table_input['Parameters'][f'projection.{k}.interval'] = '1'
for (k, v) in projection_types.items():
<DeepExtract>
if v is not None:
if f'projection.{k}.type' not in table_input['Parameters'] or table_input['Parameters'][f'projection.{k}.type'] != v:
table_input['Parameters'][f'projection.{k}.type'] = v
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
</DeepExtract>
for (k, v) in projection_ranges.items():
<DeepExtract>
if v is not None:
if f'projection.{k}.range' not in table_input['Parameters'] or table_input['Parameters'][f'projection.{k}.range'] != v:
table_input['Parameters'][f'projection.{k}.range'] = v
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
</DeepExtract>
for (k, v) in projection_values.items():
<DeepExtract>
if v is not None:
if f'projection.{k}.values' not in table_input['Parameters'] or table_input['Parameters'][f'projection.{k}.values'] != v:
table_input['Parameters'][f'projection.{k}.values'] = v
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
</DeepExtract>
for (k, v) in projection_intervals.items():
<DeepExtract>
if str(v) is not None:
if f'projection.{k}.interval' not in table_input['Parameters'] or table_input['Parameters'][f'projection.{k}.interval'] != str(v):
table_input['Parameters'][f'projection.{k}.interval'] = str(v)
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
</DeepExtract>
for (k, v) in projection_digits.items():
<DeepExtract>
if str(v) is not None:
if f'projection.{k}.digits' not in table_input['Parameters'] or table_input['Parameters'][f'projection.{k}.digits'] != str(v):
table_input['Parameters'][f'projection.{k}.digits'] = str(v)
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
</DeepExtract>
for (k, v) in projection_formats.items():
<DeepExtract>
if str(v) is not None:
if f'projection.{k}.format' not in table_input['Parameters'] or table_input['Parameters'][f'projection.{k}.format'] != str(v):
table_input['Parameters'][f'projection.{k}.format'] = str(v)
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
</DeepExtract>
<DeepExtract>
if projection_storage_location_template is not None:
if 'storage.location.template' not in table_input['Parameters'] or table_input['Parameters']['storage.location.template'] != projection_storage_location_template:
table_input['Parameters']['storage.location.template'] = projection_storage_location_template
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
</DeepExtract>
else:
table_input['Parameters']['projection.enabled'] = 'false'
columns_comments = columns_comments if columns_comments else {}
columns_comments = {sanitize_column_name(k): v for (k, v) in columns_comments.items()}
if columns_comments:
for col in table_input['StorageDescriptor']['Columns']:
name: str = col['Name']
if name in columns_comments:
<DeepExtract>
if columns_comments[name] is not None:
if 'Comment' not in col or col['Comment'] != columns_comments[name]:
col['Comment'] = columns_comments[name]
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
</DeepExtract>
for par in table_input['PartitionKeys']:
name = par['Name']
if name in columns_comments:
<DeepExtract>
if columns_comments[name] is not None:
if 'Comment' not in par or par['Comment'] != columns_comments[name]:
par['Comment'] = columns_comments[name]
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
</DeepExtract>
_logger.debug('table_input: %s', table_input)
client_glue: boto3.client = _utils.client(service_name='glue', session=boto3_session)
skip_archive: bool = not catalog_versioning
if mode not in ('overwrite', 'append', 'overwrite_partitions', 'update'):
raise exceptions.InvalidArgument(f"{mode} is not a valid mode. It must be 'overwrite', 'append' or 'overwrite_partitions'.")
args: Dict[str, Any] = _catalog_id(catalog_id=catalog_id, **_transaction_id(transaction_id=transaction_id, DatabaseName=database, TableInput=table_input))
if table_exist:
_logger.debug('Updating table (%s)...', mode)
args['SkipArchive'] = skip_archive
if mode == 'overwrite':
if table_type != 'GOVERNED':
delete_all_partitions(table=table, database=database, catalog_id=catalog_id, boto3_session=boto3_session)
client_glue.update_table(**args)
elif mode == 'update':
client_glue.update_table(**args)
else:
try:
_logger.debug('Creating table (%s)...', mode)
client_glue.create_table(**args)
except client_glue.exceptions.AlreadyExistsException:
if mode == 'overwrite':
_utils.try_it(f=_overwrite_table, ex=client_glue.exceptions.AlreadyExistsException, client_glue=client_glue, catalog_id=catalog_id, database=database, table=table, table_input=table_input, transaction_id=transaction_id, boto3_session=boto3_session)
_logger.debug('Leaving table as is (%s)...', mode)
|
def _create_table(database: str, table: str, description: Optional[str], parameters: Optional[Dict[str, str]], mode: str, catalog_versioning: bool, boto3_session: Optional[boto3.Session], table_input: Dict[str, Any], table_type: Optional[str], table_exist: bool, projection_enabled: bool, partitions_types: Optional[Dict[str, str]], columns_comments: Optional[Dict[str, str]], transaction_id: Optional[str], projection_types: Optional[Dict[str, str]], projection_ranges: Optional[Dict[str, str]], projection_values: Optional[Dict[str, str]], projection_intervals: Optional[Dict[str, str]], projection_digits: Optional[Dict[str, str]], projection_formats: Optional[Dict[str, str]], projection_storage_location_template: Optional[str], catalog_id: Optional[str]) -> None:
if description is not None:
if 'Description' not in table_input or table_input['Description'] != description:
table_input['Description'] = description
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
if 'Parameters' not in table_input:
table_input['Parameters'] = {}
parameters = parameters if parameters else {}
for (k, v) in parameters.items():
if v is not None:
if k not in table_input['Parameters'] or table_input['Parameters'][k] != v:
table_input['Parameters'][k] = v
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
if projection_enabled is True:
table_input['Parameters']['projection.enabled'] = 'true'
partitions_types = partitions_types if partitions_types else {}
projection_types = projection_types if projection_types else {}
projection_ranges = projection_ranges if projection_ranges else {}
projection_values = projection_values if projection_values else {}
projection_intervals = projection_intervals if projection_intervals else {}
projection_digits = projection_digits if projection_digits else {}
projection_formats = projection_formats if projection_formats else {}
projection_types = {sanitize_column_name(k): v for (k, v) in projection_types.items()}
projection_ranges = {sanitize_column_name(k): v for (k, v) in projection_ranges.items()}
projection_values = {sanitize_column_name(k): v for (k, v) in projection_values.items()}
projection_intervals = {sanitize_column_name(k): v for (k, v) in projection_intervals.items()}
projection_digits = {sanitize_column_name(k): v for (k, v) in projection_digits.items()}
projection_formats = {sanitize_column_name(k): v for (k, v) in projection_formats.items()}
for (k, v) in projection_types.items():
dtype: Optional[str] = partitions_types.get(k)
if dtype is None and projection_storage_location_template is None:
raise exceptions.InvalidArgumentCombination(f'Column {k} appears as projected column but not as partitioned column.')
if dtype == 'date':
table_input['Parameters'][f'projection.{k}.format'] = 'yyyy-MM-dd'
elif dtype == 'timestamp':
table_input['Parameters'][f'projection.{k}.format'] = 'yyyy-MM-dd HH:mm:ss'
table_input['Parameters'][f'projection.{k}.interval.unit'] = 'SECONDS'
table_input['Parameters'][f'projection.{k}.interval'] = '1'
for (k, v) in projection_types.items():
if v is not None:
if f'projection.{k}.type' not in table_input['Parameters'] or table_input['Parameters'][f'projection.{k}.type'] != v:
table_input['Parameters'][f'projection.{k}.type'] = v
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
for (k, v) in projection_ranges.items():
if v is not None:
if f'projection.{k}.range' not in table_input['Parameters'] or table_input['Parameters'][f'projection.{k}.range'] != v:
table_input['Parameters'][f'projection.{k}.range'] = v
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
for (k, v) in projection_values.items():
if v is not None:
if f'projection.{k}.values' not in table_input['Parameters'] or table_input['Parameters'][f'projection.{k}.values'] != v:
table_input['Parameters'][f'projection.{k}.values'] = v
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
for (k, v) in projection_intervals.items():
if str(v) is not None:
if f'projection.{k}.interval' not in table_input['Parameters'] or table_input['Parameters'][f'projection.{k}.interval'] != str(v):
table_input['Parameters'][f'projection.{k}.interval'] = str(v)
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
for (k, v) in projection_digits.items():
if str(v) is not None:
if f'projection.{k}.digits' not in table_input['Parameters'] or table_input['Parameters'][f'projection.{k}.digits'] != str(v):
table_input['Parameters'][f'projection.{k}.digits'] = str(v)
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
for (k, v) in projection_formats.items():
if str(v) is not None:
if f'projection.{k}.format' not in table_input['Parameters'] or table_input['Parameters'][f'projection.{k}.format'] != str(v):
table_input['Parameters'][f'projection.{k}.format'] = str(v)
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
if projection_storage_location_template is not None:
if 'storage.location.template' not in table_input['Parameters'] or table_input['Parameters']['storage.location.template'] != projection_storage_location_template:
table_input['Parameters']['storage.location.template'] = projection_storage_location_template
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
else:
table_input['Parameters']['projection.enabled'] = 'false'
columns_comments = columns_comments if columns_comments else {}
columns_comments = {sanitize_column_name(k): v for (k, v) in columns_comments.items()}
if columns_comments:
for col in table_input['StorageDescriptor']['Columns']:
name: str = col['Name']
if name in columns_comments:
if columns_comments[name] is not None:
if 'Comment' not in col or col['Comment'] != columns_comments[name]:
col['Comment'] = columns_comments[name]
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
for par in table_input['PartitionKeys']:
name = par['Name']
if name in columns_comments:
if columns_comments[name] is not None:
if 'Comment' not in par or par['Comment'] != columns_comments[name]:
par['Comment'] = columns_comments[name]
if mode in ('append', 'overwrite_partitions'):
mode = 'update'
mode = mode
_logger.debug('table_input: %s', table_input)
client_glue: boto3.client = _utils.client(service_name='glue', session=boto3_session)
skip_archive: bool = not catalog_versioning
if mode not in ('overwrite', 'append', 'overwrite_partitions', 'update'):
raise exceptions.InvalidArgument(f"{mode} is not a valid mode. It must be 'overwrite', 'append' or 'overwrite_partitions'.")
args: Dict[str, Any] = _catalog_id(catalog_id=catalog_id, **_transaction_id(transaction_id=transaction_id, DatabaseName=database, TableInput=table_input))
if table_exist:
_logger.debug('Updating table (%s)...', mode)
args['SkipArchive'] = skip_archive
if mode == 'overwrite':
if table_type != 'GOVERNED':
delete_all_partitions(table=table, database=database, catalog_id=catalog_id, boto3_session=boto3_session)
client_glue.update_table(**args)
elif mode == 'update':
client_glue.update_table(**args)
else:
try:
_logger.debug('Creating table (%s)...', mode)
client_glue.create_table(**args)
except client_glue.exceptions.AlreadyExistsException:
if mode == 'overwrite':
_utils.try_it(f=_overwrite_table, ex=client_glue.exceptions.AlreadyExistsException, client_glue=client_glue, catalog_id=catalog_id, database=database, table=table, table_input=table_input, transaction_id=transaction_id, boto3_session=boto3_session)
_logger.debug('Leaving table as is (%s)...', mode)
|
aws-data-wrangler
|
positive
|
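_create_table() above repeats one inlined block many times: write a Glue table parameter only when it is new or changed, and promote an 'append'/'overwrite_partitions' mode to 'update'. A compact sketch of that block (the exact promotion rule is an assumption, since extraction flattened the original structure):
from typing import Any, Dict
def upsert_table_parameter(parameters: Dict[str, Any], key: str, value: Any, mode: str) -> str:
    # Set the parameter only when a value is provided and differs from what
    # is stored, then promote the mode so the catalog entry gets rewritten.
    if value is not None:
        if parameters.get(key) != value:
            parameters[key] = value
        if mode in ('append', 'overwrite_partitions'):
            mode = 'update'
    return mode
params: Dict[str, Any] = {}
mode = upsert_table_parameter(params, 'projection.enabled', 'true', 'append')
assert params == {'projection.enabled': 'true'} and mode == 'update'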
def save(self, *args, **kwargs):
<DeepExtract>
if self.object_id and self.content_type:
return
container_exists = self.__class__.objects.filter(name=self.name, object_id=None, content_type=None).exists()
if container_exists:
raise ValidationError("a container with name '{0}' already exists".format(self.name))
</DeepExtract>
if not self.uuid:
self.uuid = unicode(uuid())
if not self.name:
self.name = '{}-{}'.format(self._meta.module_name, self.uuid)
return super(AbstractContainer, self).save(*args, **kwargs)
|
def save(self, *args, **kwargs):
if self.object_id and self.content_type:
return
container_exists = self.__class__.objects.filter(name=self.name, object_id=None, content_type=None).exists()
if container_exists:
raise ValidationError("a container with name '{0}' already exists".format(self.name))
if not self.uuid:
self.uuid = unicode(uuid())
if not self.name:
self.name = '{}-{}'.format(self._meta.module_name, self.uuid)
return super(AbstractContainer, self).save(*args, **kwargs)
|
django-fancypages
|
positive
|
def get_test_tree():
root = Node.add_root()
var_def = VariableDefinition(name='A')
root.add_child(content_object=var_def)
root = Node.objects.get(id=root.id)
assignment_node = root.add_child(content_object=Assignment())
var = Variable(definition=var_def)
var_node = assignment_node.add_child(content_object=var)
<DeepExtract>
add_operator = BinaryOperator(operator='+')
add_operator.save()
mul_operator = BinaryOperator(operator='*')
if assignment_node is None:
add_node = Node.add_root(content_object=add_operator)
else:
add_node = Node.objects.get(id=assignment_node.id).add_child(content_object=add_operator)
number_const1 = NumberConstant(value=1)
number_const_node1 = add_node.add_child(content_object=number_const1)
add_node = Node.objects.get(id=add_node.id)
number_const2 = NumberConstant(value=2)
number_const3 = NumberConstant(value=3)
mul_node = add_node.add_child(content_object=mul_operator)
mul_node = Node.objects.get(id=mul_node.id)
mul_node.add_child(content_object=number_const2)
mul_node = Node.objects.get(id=mul_node.id)
mul_node.add_child(content_object=number_const3)
return Node.objects.get(id=add_node.id)
</DeepExtract>
root = Node.objects.get(id=root.id)
return root
|
def get_test_tree():
root = Node.add_root()
var_def = VariableDefinition(name='A')
root.add_child(content_object=var_def)
root = Node.objects.get(id=root.id)
assignment_node = root.add_child(content_object=Assignment())
var = Variable(definition=var_def)
var_node = assignment_node.add_child(content_object=var)
add_operator = BinaryOperator(operator='+')
add_operator.save()
mul_operator = BinaryOperator(operator='*')
if assignment_node is None:
add_node = Node.add_root(content_object=add_operator)
else:
add_node = Node.objects.get(id=assignment_node.id).add_child(content_object=add_operator)
number_const1 = NumberConstant(value=1)
number_const_node1 = add_node.add_child(content_object=number_const1)
add_node = Node.objects.get(id=add_node.id)
number_const2 = NumberConstant(value=2)
number_const3 = NumberConstant(value=3)
mul_node = add_node.add_child(content_object=mul_operator)
mul_node = Node.objects.get(id=mul_node.id)
mul_node.add_child(content_object=number_const2)
mul_node = Node.objects.get(id=mul_node.id)
mul_node.add_child(content_object=number_const3)
return Node.objects.get(id=add_node.id)
root = Node.objects.get(id=root.id)
return root
|
django-business-logic
|
positive
|
def test_percent(ansi_io: BufferedIO) -> None:
bar = ProgressBar(ansi_io, 50, 0)
bar.start()
bar.display()
bar.advance()
bar.advance()
output = [' 0/50 [>---------------------------] 0%', ' 1/50 [>---------------------------] 2%', ' 2/50 [=>--------------------------] 4%']
<DeepExtract>
output = ''
for (i, line) in enumerate(output):
if i:
count = line.count('\n')
if count:
output += f'\x1b[{count}A\x1b[1G\x1b[2K'
else:
output += '\x1b[1G\x1b[2K'
output += line
output = output
</DeepExtract>
assert expected == ansi_io.fetch_error()
|
def test_percent(ansi_io: BufferedIO) -> None:
bar = ProgressBar(ansi_io, 50, 0)
bar.start()
bar.display()
bar.advance()
bar.advance()
output = [' 0/50 [>---------------------------] 0%', ' 1/50 [>---------------------------] 2%', ' 2/50 [=>--------------------------] 4%']
output = ''
for (i, line) in enumerate(output):
if i:
count = line.count('\n')
if count:
output += f'\x1b[{count}A\x1b[1G\x1b[2K'
else:
output += '\x1b[1G\x1b[2K'
output += line
output = output
assert expected == ansi_io.fetch_error()
|
cleo
|
positive
|
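The inlined helper in test_percent() above shadows its own accumulator (output = '' followed by iterating over output), so the expected string is never built. Its apparent intent, joining frames with cursor-reset and clear-line ANSI codes so each frame overwrites the last, looks like this (join_overwriting_frames is an assumed name):
def join_overwriting_frames(frames):
    # Join progress-bar frames with the ANSI sequences that move the cursor
    # up (for multi-line frames), return to column 1 and clear the line.
    output = ''
    for i, frame in enumerate(frames):
        if i:
            count = frame.count('\n')
            if count:
                output += '\x1b[{}A\x1b[1G\x1b[2K'.format(count)
            else:
                output += '\x1b[1G\x1b[2K'
        output += frame
    return output
expected = join_overwriting_frames([' 0/50 [>---] 0%', ' 1/50 [>---] 2%'])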
def add_fpn_retinanet_outputs(model, blobs_in, dim_in, spatial_scales):
"""RetinaNet head. For classification and box regression, we can chose to
have the same conv tower or a separate tower. "bl_feat_list" stores the list
of feature blobs for bbox prediction. These blobs can be shared cls feature
blobs if we share the tower or else are independent blobs.
"""
dim_out = dim_in
k_max = cfg.FPN.RPN_MAX_LEVEL
k_min = cfg.FPN.RPN_MIN_LEVEL
A = len(cfg.RETINANET.ASPECT_RATIOS) * cfg.RETINANET.SCALES_PER_OCTAVE
<DeepExtract>
prior_prob = cfg.RETINANET.PRIOR_PROB
scales_per_octave = cfg.RETINANET.SCALES_PER_OCTAVE
aspect_ratios = len(cfg.RETINANET.ASPECT_RATIOS)
if cfg.RETINANET.SOFTMAX:
bias = np.zeros((model.num_classes, 1), dtype=np.float32)
bias[0] = np.log((model.num_classes - 1) * (1 - prior_prob) / prior_prob)
bias = np.vstack([bias for _ in range(scales_per_octave * aspect_ratios)])
bias_init = ('GivenTensorFill', {'values': bias.astype(dtype=np.float32)})
else:
bias_init = ('ConstantFill', {'value': -np.log((1 - prior_prob) / prior_prob)})
bias_init = bias_init
</DeepExtract>
assert len(blobs_in) == k_max - k_min + 1
bbox_feat_list = []
cls_pred_dim = model.num_classes if cfg.RETINANET.SOFTMAX else model.num_classes - 1
bbox_regr_dim = 4 * (model.num_classes - 1) if cfg.RETINANET.CLASS_SPECIFIC_BBOX else 4
for lvl in range(k_min, k_max + 1):
bl_in = blobs_in[k_max - lvl]
for nconv in range(cfg.RETINANET.NUM_CONVS):
suffix = 'n{}_fpn{}'.format(nconv, lvl)
(dim_in, dim_out) = (dim_in, dim_in)
if lvl == k_min:
bl_out = model.Conv(bl_in, 'retnet_cls_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=('ConstantFill', {'value': 0.0}))
else:
bl_out = model.ConvShared(bl_in, 'retnet_cls_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight='retnet_cls_conv_n{}_fpn{}_w'.format(nconv, k_min), bias='retnet_cls_conv_n{}_fpn{}_b'.format(nconv, k_min))
bl_in = model.Relu(bl_out, bl_out)
bl_feat = bl_in
if lvl == k_min:
retnet_cls_pred = model.Conv(bl_feat, 'retnet_cls_pred_fpn{}'.format(lvl), dim_in, cls_pred_dim * A, 3, pad=1, stride=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=bias_init)
else:
retnet_cls_pred = model.ConvShared(bl_feat, 'retnet_cls_pred_fpn{}'.format(lvl), dim_in, cls_pred_dim * A, 3, pad=1, stride=1, weight='retnet_cls_pred_fpn{}_w'.format(k_min), bias='retnet_cls_pred_fpn{}_b'.format(k_min))
if not model.train:
if cfg.RETINANET.SOFTMAX:
model.net.GroupSpatialSoftmax(retnet_cls_pred, 'retnet_cls_prob_fpn{}'.format(lvl), num_classes=cls_pred_dim)
else:
model.net.Sigmoid(retnet_cls_pred, 'retnet_cls_prob_fpn{}'.format(lvl))
if cfg.RETINANET.SHARE_CLS_BBOX_TOWER:
bbox_feat_list.append(bl_feat)
if not cfg.RETINANET.SHARE_CLS_BBOX_TOWER:
for lvl in range(k_min, k_max + 1):
bl_in = blobs_in[k_max - lvl]
for nconv in range(cfg.RETINANET.NUM_CONVS):
suffix = 'n{}_fpn{}'.format(nconv, lvl)
(dim_in, dim_out) = (dim_in, dim_in)
if lvl == k_min:
bl_out = model.Conv(bl_in, 'retnet_bbox_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=('ConstantFill', {'value': 0.0}))
else:
bl_out = model.ConvShared(bl_in, 'retnet_bbox_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight='retnet_bbox_conv_n{}_fpn{}_w'.format(nconv, k_min), bias='retnet_bbox_conv_n{}_fpn{}_b'.format(nconv, k_min))
bl_in = model.Relu(bl_out, bl_out)
bl_feat = bl_in
bbox_feat_list.append(bl_feat)
for (i, lvl) in enumerate(range(k_min, k_max + 1)):
bbox_pred = 'retnet_bbox_pred_fpn{}'.format(lvl)
bl_feat = bbox_feat_list[i]
if lvl == k_min:
model.Conv(bl_feat, bbox_pred, dim_in, bbox_regr_dim * A, 3, pad=1, stride=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=('ConstantFill', {'value': 0.0}))
else:
model.ConvShared(bl_feat, bbox_pred, dim_in, bbox_regr_dim * A, 3, pad=1, stride=1, weight='retnet_bbox_pred_fpn{}_w'.format(k_min), bias='retnet_bbox_pred_fpn{}_b'.format(k_min))
|
def add_fpn_retinanet_outputs(model, blobs_in, dim_in, spatial_scales):
"""RetinaNet head. For classification and box regression, we can chose to
have the same conv tower or a separate tower. "bl_feat_list" stores the list
of feature blobs for bbox prediction. These blobs can be shared cls feature
blobs if we share the tower or else are independent blobs.
"""
dim_out = dim_in
k_max = cfg.FPN.RPN_MAX_LEVEL
k_min = cfg.FPN.RPN_MIN_LEVEL
A = len(cfg.RETINANET.ASPECT_RATIOS) * cfg.RETINANET.SCALES_PER_OCTAVE
prior_prob = cfg.RETINANET.PRIOR_PROB
scales_per_octave = cfg.RETINANET.SCALES_PER_OCTAVE
aspect_ratios = len(cfg.RETINANET.ASPECT_RATIOS)
if cfg.RETINANET.SOFTMAX:
bias = np.zeros((model.num_classes, 1), dtype=np.float32)
bias[0] = np.log((model.num_classes - 1) * (1 - prior_prob) / prior_prob)
bias = np.vstack([bias for _ in range(scales_per_octave * aspect_ratios)])
bias_init = ('GivenTensorFill', {'values': bias.astype(dtype=np.float32)})
else:
bias_init = ('ConstantFill', {'value': -np.log((1 - prior_prob) / prior_prob)})
bias_init = bias_init
assert len(blobs_in) == k_max - k_min + 1
bbox_feat_list = []
cls_pred_dim = model.num_classes if cfg.RETINANET.SOFTMAX else model.num_classes - 1
bbox_regr_dim = 4 * (model.num_classes - 1) if cfg.RETINANET.CLASS_SPECIFIC_BBOX else 4
for lvl in range(k_min, k_max + 1):
bl_in = blobs_in[k_max - lvl]
for nconv in range(cfg.RETINANET.NUM_CONVS):
suffix = 'n{}_fpn{}'.format(nconv, lvl)
(dim_in, dim_out) = (dim_in, dim_in)
if lvl == k_min:
bl_out = model.Conv(bl_in, 'retnet_cls_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=('ConstantFill', {'value': 0.0}))
else:
bl_out = model.ConvShared(bl_in, 'retnet_cls_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight='retnet_cls_conv_n{}_fpn{}_w'.format(nconv, k_min), bias='retnet_cls_conv_n{}_fpn{}_b'.format(nconv, k_min))
bl_in = model.Relu(bl_out, bl_out)
bl_feat = bl_in
if lvl == k_min:
retnet_cls_pred = model.Conv(bl_feat, 'retnet_cls_pred_fpn{}'.format(lvl), dim_in, cls_pred_dim * A, 3, pad=1, stride=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=bias_init)
else:
retnet_cls_pred = model.ConvShared(bl_feat, 'retnet_cls_pred_fpn{}'.format(lvl), dim_in, cls_pred_dim * A, 3, pad=1, stride=1, weight='retnet_cls_pred_fpn{}_w'.format(k_min), bias='retnet_cls_pred_fpn{}_b'.format(k_min))
if not model.train:
if cfg.RETINANET.SOFTMAX:
model.net.GroupSpatialSoftmax(retnet_cls_pred, 'retnet_cls_prob_fpn{}'.format(lvl), num_classes=cls_pred_dim)
else:
model.net.Sigmoid(retnet_cls_pred, 'retnet_cls_prob_fpn{}'.format(lvl))
if cfg.RETINANET.SHARE_CLS_BBOX_TOWER:
bbox_feat_list.append(bl_feat)
if not cfg.RETINANET.SHARE_CLS_BBOX_TOWER:
for lvl in range(k_min, k_max + 1):
bl_in = blobs_in[k_max - lvl]
for nconv in range(cfg.RETINANET.NUM_CONVS):
suffix = 'n{}_fpn{}'.format(nconv, lvl)
(dim_in, dim_out) = (dim_in, dim_in)
if lvl == k_min:
bl_out = model.Conv(bl_in, 'retnet_bbox_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=('ConstantFill', {'value': 0.0}))
else:
bl_out = model.ConvShared(bl_in, 'retnet_bbox_conv_' + suffix, dim_in, dim_out, 3, stride=1, pad=1, weight='retnet_bbox_conv_n{}_fpn{}_w'.format(nconv, k_min), bias='retnet_bbox_conv_n{}_fpn{}_b'.format(nconv, k_min))
bl_in = model.Relu(bl_out, bl_out)
bl_feat = bl_in
bbox_feat_list.append(bl_feat)
for (i, lvl) in enumerate(range(k_min, k_max + 1)):
bbox_pred = 'retnet_bbox_pred_fpn{}'.format(lvl)
bl_feat = bbox_feat_list[i]
if lvl == k_min:
model.Conv(bl_feat, bbox_pred, dim_in, bbox_regr_dim * A, 3, pad=1, stride=1, weight_init=('GaussianFill', {'std': 0.01}), bias_init=('ConstantFill', {'value': 0.0}))
else:
model.ConvShared(bl_feat, bbox_pred, dim_in, bbox_regr_dim * A, 3, pad=1, stride=1, weight='retnet_bbox_pred_fpn{}_w'.format(k_min), bias='retnet_bbox_pred_fpn{}_b'.format(k_min))
|
CBNet
|
positive
|
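The RetinaNet head above initialises the classification bias from RETINANET.PRIOR_PROB so that, at the start of training, every anchor predicts foreground with that probability. The sigmoid-branch computation in isolation (helper name is illustrative):
import numpy as np
def retinanet_cls_bias_init(prior_prob=0.01):
    # Focal-loss style bias initialisation: sigmoid(bias) == prior_prob.
    return float(-np.log((1 - prior_prob) / prior_prob))
b = retinanet_cls_bias_init(0.01)
assert abs(1.0 / (1.0 + np.exp(-b)) - 0.01) < 1e-9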
def plot_data():
import pickle
pkl_name = 'comp_planning.pkl'
offline_data = pickle.load(open(pkl_name, 'rb'))
rapid_traj = Rapid_trajectory_generator()
rapid_traj.input_data = offline_data['in']
rapid_traj.output_data = offline_data['tr']
tf_out = offline_data['dl']
<DeepExtract>
LINE_SIZE = 8
SIZE_BIAS = 32
SIZE_DIFF = 6
SMALL_SIZE = SIZE_BIAS - SIZE_DIFF
MEDIUM_SIZE = SIZE_BIAS
BIGGER_SIZE = SIZE_BIAS + SIZE_DIFF
plt.rc('font', size=SMALL_SIZE)
plt.rc('axes', titlesize=SMALL_SIZE)
plt.rc('axes', labelsize=MEDIUM_SIZE)
plt.rc('xtick', labelsize=SMALL_SIZE)
plt.rc('ytick', labelsize=SMALL_SIZE)
plt.rc('legend', fontsize=SMALL_SIZE)
plt.rc('figure', titlesize=BIGGER_SIZE)
plt.figure('comparison')
plt.subplot(3, 1, 1)
alias_tr = 'Tr'
alias_dl = 'Dl'
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 0], 'r--', label='%s_pos_x' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 1], 'k--', label='%s_pos_y' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 2], 'b--', label='%s_pos_z' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 0], 'r-', label='%s_pos_x' % alias_dl, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 1], 'k-', label='%s_pos_y' % alias_dl, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 2], 'b-', label='%s_pos_z' % alias_dl, linewidth=LINE_SIZE)
plt.grid()
plt.legend(loc='upper left')
plt.subplot(3, 1, 2)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 3], 'r--', label='%s_vel_x' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 4], 'k--', label='%s_vel_y' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 5], 'b--', label='%s_vel_z' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 3], 'r-', label='%s_vel_x' % alias_dl, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 4], 'k-', label='%s_vel_y' % alias_dl, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 5], 'b-', label='%s_vel_z' % alias_dl, linewidth=LINE_SIZE)
plt.grid()
plt.legend(loc='upper left')
plt.subplot(3, 1, 3)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 6], 'r--', label='%s_acc_x' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 7], 'k--', label='%s_acc_y' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 8], 'b--', label='%s_acc_z' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 6], 'r-', label='%s_acc_x' % alias_dl, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 7], 'k-', label='%s_acc_y' % alias_dl, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 8], 'b-', label='%s_acc_z' % alias_dl, linewidth=LINE_SIZE)
plt.grid()
plt.legend(loc='upper left')
plt.pause(0.1)
</DeepExtract>
plt.show()
|
def plot_data():
import pickle
pkl_name = 'comp_planning.pkl'
offline_data = pickle.load(open(pkl_name, 'rb'))
rapid_traj = Rapid_trajectory_generator()
rapid_traj.input_data = offline_data['in']
rapid_traj.output_data = offline_data['tr']
tf_out = offline_data['dl']
LINE_SIZE = 8
SIZE_BIAS = 32
SIZE_DIFF = 6
SMALL_SIZE = SIZE_BIAS - SIZE_DIFF
MEDIUM_SIZE = SIZE_BIAS
BIGGER_SIZE = SIZE_BIAS + SIZE_DIFF
plt.rc('font', size=SMALL_SIZE)
plt.rc('axes', titlesize=SMALL_SIZE)
plt.rc('axes', labelsize=MEDIUM_SIZE)
plt.rc('xtick', labelsize=SMALL_SIZE)
plt.rc('ytick', labelsize=SMALL_SIZE)
plt.rc('legend', fontsize=SMALL_SIZE)
plt.rc('figure', titlesize=BIGGER_SIZE)
plt.figure('comparison')
plt.subplot(3, 1, 1)
alias_tr = 'Tr'
alias_dl = 'Dl'
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 0], 'r--', label='%s_pos_x' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 1], 'k--', label='%s_pos_y' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 2], 'b--', label='%s_pos_z' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 0], 'r-', label='%s_pos_x' % alias_dl, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 1], 'k-', label='%s_pos_y' % alias_dl, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 2], 'b-', label='%s_pos_z' % alias_dl, linewidth=LINE_SIZE)
plt.grid()
plt.legend(loc='upper left')
plt.subplot(3, 1, 2)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 3], 'r--', label='%s_vel_x' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 4], 'k--', label='%s_vel_y' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 5], 'b--', label='%s_vel_z' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 3], 'r-', label='%s_vel_x' % alias_dl, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 4], 'k-', label='%s_vel_y' % alias_dl, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 5], 'b-', label='%s_vel_z' % alias_dl, linewidth=LINE_SIZE)
plt.grid()
plt.legend(loc='upper left')
plt.subplot(3, 1, 3)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 6], 'r--', label='%s_acc_x' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 7], 'k--', label='%s_acc_y' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['tr'][:, 8], 'b--', label='%s_acc_z' % alias_tr, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 6], 'r-', label='%s_acc_x' % alias_dl, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 7], 'k-', label='%s_acc_y' % alias_dl, linewidth=LINE_SIZE)
plt.plot(offline_data['in'][:, 0], offline_data['dl'][:, 8], 'b-', label='%s_acc_z' % alias_dl, linewidth=LINE_SIZE)
plt.grid()
plt.legend(loc='upper left')
plt.pause(0.1)
plt.show()
|
crossgap_il_rl
|
positive
|
def __call__(self, masks, boxes):
if isinstance(boxes, BoxList):
boxes = [boxes]
if isinstance(masks, list):
masks = torch.stack(masks, dim=0)
assert len(masks.size()) == 4
scores = boxes[0].get_field('scores')
<DeepExtract>
offset_x = boxes[0].bbox.cpu().numpy()[:, 0]
offset_y = boxes[0].bbox.cpu().numpy()[:, 1]
widths = boxes[0].bbox.cpu().numpy()[:, 2] - boxes[0].bbox.cpu().numpy()[:, 0]
heights = boxes[0].bbox.cpu().numpy()[:, 3] - boxes[0].bbox.cpu().numpy()[:, 1]
widths = np.maximum(widths, 1)
heights = np.maximum(heights, 1)
widths_ceil = np.ceil(widths)
heights_ceil = np.ceil(heights)
resol = self.cfg.MODEL.ROI_KE_HEAD.RESOLUTION
if masks.detach().cpu().numpy().shape[-2:] == (1, resol):
xory_mode = 0
elif masks.detach().cpu().numpy().shape[-2:] == (resol, 1):
xory_mode = 1
else:
assert 0, 'invalid mode.'
masks.detach().cpu().numpy() = np.transpose(masks.detach().cpu().numpy(), [0, 2, 3, 1])
min_size = 0
num_kes = int(self.cfg.MODEL.ROI_KE_HEAD.NUM_KES / 2) + 2
d_preds = np.zeros((len(boxes[0].bbox.cpu().numpy()), 2, num_kes), dtype=np.float32)
d_scores = np.zeros(scores.cpu().numpy().shape, dtype=np.float32)
assert len(boxes[0].bbox.cpu().numpy()) == masks.detach().cpu().numpy().shape[0], 'shape mismatch {}, {}, {}, {}'.format(str(len(boxes[0].bbox.cpu().numpy())), str(boxes[0].bbox.cpu().numpy().shape), str(masks.detach().cpu().numpy().shape[0]), str(masks.detach().cpu().numpy().shape))
normal = 0
innormal = 0
for i in range(len(boxes[0].bbox.cpu().numpy())):
if min_size > 0:
roi_map_width = int(np.maximum(widths_ceil[i], min_size))
roi_map_height = int(np.maximum(heights_ceil[i], min_size))
else:
roi_map_width = widths_ceil[i]
roi_map_height = heights_ceil[i]
width_correction = widths[i] / roi_map_width
height_correction = heights[i] / roi_map_height
np.set_printoptions(suppress=True)
if not xory_mode:
roi_map = cv2.resize(masks.detach().cpu().numpy()[i], (roi_map_width, 1), interpolation=cv2.INTER_CUBIC)
else:
roi_map = cv2.resize(masks.detach().cpu().numpy()[i], (1, roi_map_height), interpolation=cv2.INTER_CUBIC)
roi_map = np.transpose(roi_map, [2, 0, 1])
roi_map_probs = scores_to_probs(roi_map.copy())
map_vis = np.transpose(masks.detach().cpu().numpy()[i], [2, 0, 1])
map_vis = scores_to_probs(map_vis.copy())
sum_score = []
if self.cfg.MODEL.ROI_KE_HEAD.RESCORING:
for k in range(num_kes):
if map_vis[k].shape[0] == 1:
x = np.arange(0, len(map_vis[k][0]), 1)
y = map_vis[k][0]
else:
x = np.arange(0, len(map_vis[k][:, 0]), 1)
y = map_vis[k][:, 0]
top = y.max()
atop = y.argmax()
lf2 = max(atop - 2, 0)
lf1 = max(atop - 1, 0)
rt2 = min(atop + 2, 55)
rt1 = min(atop + 1, 55)
sum_score.append(top + y[lf2] + y[lf1] + y[rt1] + y[rt2])
kes_score_mean = sum(sum_score) * 1.0 / len(sum_score)
gama = self.cfg.MODEL.ROI_KE_HEAD.RESCORING_GAMA
final_score = (scores.cpu().numpy()[i] * (2.0 - gama) + gama * kes_score_mean) * 0.5
d_scores[i] = final_score
else:
d_scores[i] = scores.cpu().numpy()[i]
w = roi_map.shape[2]
for k in range(num_kes):
pos = roi_map[k, :, :].argmax()
x_int = pos % w
y_int = (pos - x_int) // w
assert roi_map_probs[k, y_int, x_int] == roi_map_probs[k, :, :].max()
x = (x_int + 0.5) * width_correction
y = (y_int + 0.5) * height_correction
if not xory_mode:
d_preds[i, 0, k] = x + offset_x[i]
d_preds[i, 1, k] = roi_map_probs[k, y_int, x_int]
else:
d_preds[i, 0, k] = y + offset_y[i]
d_preds[i, 1, k] = roi_map_probs[k, y_int, x_int]
out_kes_d = kes_decode(d_preds)
(result, rescores) = (np.transpose(out_kes_d, [0, 2, 1]), d_scores)
</DeepExtract>
return (torch.from_numpy(result).to(masks.device), torch.from_numpy(rescores).to(masks.device))
|
def __call__(self, masks, boxes):
if isinstance(boxes, BoxList):
boxes = [boxes]
if isinstance(masks, list):
masks = torch.stack(masks, dim=0)
assert len(masks.size()) == 4
scores = boxes[0].get_field('scores')
offset_x = boxes[0].bbox.cpu().numpy()[:, 0]
offset_y = boxes[0].bbox.cpu().numpy()[:, 1]
widths = boxes[0].bbox.cpu().numpy()[:, 2] - boxes[0].bbox.cpu().numpy()[:, 0]
heights = boxes[0].bbox.cpu().numpy()[:, 3] - boxes[0].bbox.cpu().numpy()[:, 1]
widths = np.maximum(widths, 1)
heights = np.maximum(heights, 1)
widths_ceil = np.ceil(widths)
heights_ceil = np.ceil(heights)
resol = self.cfg.MODEL.ROI_KE_HEAD.RESOLUTION
if masks.detach().cpu().numpy().shape[-2:] == (1, resol):
xory_mode = 0
elif masks.detach().cpu().numpy().shape[-2:] == (resol, 1):
xory_mode = 1
else:
assert 0, 'invalid mode.'
masks.detach().cpu().numpy() = np.transpose(masks.detach().cpu().numpy(), [0, 2, 3, 1])
min_size = 0
num_kes = int(self.cfg.MODEL.ROI_KE_HEAD.NUM_KES / 2) + 2
d_preds = np.zeros((len(boxes[0].bbox.cpu().numpy()), 2, num_kes), dtype=np.float32)
d_scores = np.zeros(scores.cpu().numpy().shape, dtype=np.float32)
assert len(boxes[0].bbox.cpu().numpy()) == masks.detach().cpu().numpy().shape[0], 'shape mismatch {}, {}, {}, {}'.format(str(len(boxes[0].bbox.cpu().numpy())), str(boxes[0].bbox.cpu().numpy().shape), str(masks.detach().cpu().numpy().shape[0]), str(masks.detach().cpu().numpy().shape))
normal = 0
innormal = 0
for i in range(len(boxes[0].bbox.cpu().numpy())):
if min_size > 0:
roi_map_width = int(np.maximum(widths_ceil[i], min_size))
roi_map_height = int(np.maximum(heights_ceil[i], min_size))
else:
roi_map_width = widths_ceil[i]
roi_map_height = heights_ceil[i]
width_correction = widths[i] / roi_map_width
height_correction = heights[i] / roi_map_height
np.set_printoptions(suppress=True)
if not xory_mode:
roi_map = cv2.resize(masks.detach().cpu().numpy()[i], (roi_map_width, 1), interpolation=cv2.INTER_CUBIC)
else:
roi_map = cv2.resize(masks.detach().cpu().numpy()[i], (1, roi_map_height), interpolation=cv2.INTER_CUBIC)
roi_map = np.transpose(roi_map, [2, 0, 1])
roi_map_probs = scores_to_probs(roi_map.copy())
map_vis = np.transpose(masks.detach().cpu().numpy()[i], [2, 0, 1])
map_vis = scores_to_probs(map_vis.copy())
sum_score = []
if self.cfg.MODEL.ROI_KE_HEAD.RESCORING:
for k in range(num_kes):
if map_vis[k].shape[0] == 1:
x = np.arange(0, len(map_vis[k][0]), 1)
y = map_vis[k][0]
else:
x = np.arange(0, len(map_vis[k][:, 0]), 1)
y = map_vis[k][:, 0]
top = y.max()
atop = y.argmax()
lf2 = max(atop - 2, 0)
lf1 = max(atop - 1, 0)
rt2 = min(atop + 2, 55)
rt1 = min(atop + 1, 55)
sum_score.append(top + y[lf2] + y[lf1] + y[rt1] + y[rt2])
kes_score_mean = sum(sum_score) * 1.0 / len(sum_score)
gama = self.cfg.MODEL.ROI_KE_HEAD.RESCORING_GAMA
final_score = (scores.cpu().numpy()[i] * (2.0 - gama) + gama * kes_score_mean) * 0.5
d_scores[i] = final_score
else:
d_scores[i] = scores.cpu().numpy()[i]
w = roi_map.shape[2]
for k in range(num_kes):
pos = roi_map[k, :, :].argmax()
x_int = pos % w
y_int = (pos - x_int) // w
assert roi_map_probs[k, y_int, x_int] == roi_map_probs[k, :, :].max()
x = (x_int + 0.5) * width_correction
y = (y_int + 0.5) * height_correction
if not xory_mode:
d_preds[i, 0, k] = x + offset_x[i]
d_preds[i, 1, k] = roi_map_probs[k, y_int, x_int]
else:
d_preds[i, 0, k] = y + offset_y[i]
d_preds[i, 1, k] = roi_map_probs[k, y_int, x_int]
out_kes_d = kes_decode(d_preds)
(result, rescores) = (np.transpose(out_kes_d, [0, 2, 1]), d_scores)
return (torch.from_numpy(result).to(masks.device), torch.from_numpy(rescores).to(masks.device))
|
Box_Discretization_Network
|
positive
|
def generate_treatment_col(self, X_parents, link_type, snr, prob_category, method='logistic', var_name=None):
"""
Generates a single treatment variable column.
Args:
X_parents (pd.DataFrame): Sub-dataset containing only the relevant columns (features which are topological
parents to the current covariate being created)
link_type (str): How the parents variables (parents covariate columns) influence the current generated
column. What relation is there between them.
snr (float): Signal to noise ratio that controls the amount of noise to add (value of 1.0 will not generate
noise)
prob_category (pd.Series|None): A k-length distribution vector over k-1 treatments with the probability
of being untreated in prob_category[0] (prob_category.iloc[0]) and all
other k-1 probabilities corresponds to k-1 treatments.
**Notes**: vector must sum to 1. If None - the covariate column is left
untouched (i.e. continuous)
method (str): A type of method to generate the treatment signal and the corresponding propensities.
var_name (int|str): The name of the variable currently being generated. Optional.
Returns:
(pd.Series, pd.DataFrame, pd.Series): 3-element tuple containing:
- **treatment** (*pd.Series*): Treatment assignment to each sample.
- **propensity** (*pd.DataFrame*): The marginal conditional probability of treatment given covariates.
A DataFrame shaped (num_samples x num_of_possible_treatment_categories).
- **beta** (*pd.Series*): The coefficients used to generate current variable from it predecessors.
Raises:
ValueError: if prob_category is None (treatment must be categorical)
ValueError: If prob_category is not a legitimate probability vector (non negative, sums to 1)
"""
if prob_category is None:
raise ValueError('Treatment variable must be categorical, therefore it must have a legitimate distribution over its possible values. Got None instead.')
CausalSimulator3._check_for_legitimate_probabilities(prob_category)
<DeepExtract>
if X_parents.empty:
X_new = pd.Series(np.random.normal(loc=0.0, scale=1.0, size=X_parents.index.size), index=X_parents.index)
beta = pd.Series(dtype=np.float64)
else:
linking_method = self.G_LINKING_METHODS.get(link_type)
if linking_method is None:
raise KeyError('link type must be one of {},got {} instead.'.format(list(self.G_LINKING_METHODS.keys()), link_type))
beta = self.linking_coefs.get(var_name)
(X_new, beta) = linking_method(X_parents, beta=beta)
(X_noised_cont, _, _) = self._noise_col(X_new, snr=snr)
X_final = self._discretize_col(X_noised_cont, None)
(x_continuous, beta) = (X_final, beta)
</DeepExtract>
generation_method = self.TREATMENT_METHODS.get(method)
if generation_method is None:
raise KeyError('The given method {method} is not supported, only {valid_methods}.'.format(valid_methods=list(self.TREATMENT_METHODS.keys()), method=method))
else:
params = self.params.get(var_name, {})
(propensity, treatment) = generation_method(x_continuous, prob_category, snr=snr, params=params)
return (treatment.astype(int), propensity.astype(float), beta)
|
def generate_treatment_col(self, X_parents, link_type, snr, prob_category, method='logistic', var_name=None):
"""
Generates a single treatment variable column.
Args:
X_parents (pd.DataFrame): Sub-dataset containing only the relevant columns (features which are topological
parents to the current covariate being created)
link_type (str): How the parents variables (parents covariate columns) influence the current generated
column. What relation is there between them.
snr (float): Signal to noise ratio that controls the amount of noise to add (value of 1.0 will not generate
noise)
prob_category (pd.Series|None): A k-length distribution vector over k-1 treatments with the probability
of being untreated in prob_category[0] (prob_category.iloc[0]) and all
other k-1 probabilities corresponds to k-1 treatments.
**Notes**: vector must sum to 1. If None - the covariate column is left
untouched (i.e. continuous)
method (str): A type of method to generate the treatment signal and the corresponding propensities.
var_name (int|str): The name of the variable currently being generated. Optional.
Returns:
(pd.Series, pd.DataFrame, pd.Series): 3-element tuple containing:
- **treatment** (*pd.Series*): Treatment assignment to each sample.
- **propensity** (*pd.DataFrame*): The marginal conditional probability of treatment given covariates.
A DataFrame shaped (num_samples x num_of_possible_treatment_categories).
- **beta** (*pd.Series*): The coefficients used to generate current variable from it predecessors.
Raises:
ValueError: if prob_category is None (treatment must be categorical)
ValueError: If prob_category is not a legitimate probability vector (non negative, sums to 1)
"""
if prob_category is None:
raise ValueError('Treatment variable must be categorical, therefore it must have a legitimate distribution over its possible values. Got None instead.')
CausalSimulator3._check_for_legitimate_probabilities(prob_category)
if X_parents.empty:
X_new = pd.Series(np.random.normal(loc=0.0, scale=1.0, size=X_parents.index.size), index=X_parents.index)
beta = pd.Series(dtype=np.float64)
else:
linking_method = self.G_LINKING_METHODS.get(link_type)
if linking_method is None:
raise KeyError('link type must be one of {},got {} instead.'.format(list(self.G_LINKING_METHODS.keys()), link_type))
beta = self.linking_coefs.get(var_name)
(X_new, beta) = linking_method(X_parents, beta=beta)
(X_noised_cont, _, _) = self._noise_col(X_new, snr=snr)
X_final = self._discretize_col(X_noised_cont, None)
(x_continuous, beta) = (X_final, beta)
generation_method = self.TREATMENT_METHODS.get(method)
if generation_method is None:
raise KeyError('The given method {method} is not supported, only {valid_methods}.'.format(valid_methods=list(self.TREATMENT_METHODS.keys()), method=method))
else:
params = self.params.get(var_name, {})
(propensity, treatment) = generation_method(x_continuous, prob_category, snr=snr, params=params)
return (treatment.astype(int), propensity.astype(float), beta)
|
causallib
|
positive
|
def do_lazy_bind(self, blob):
"""
Performs lazy binding
"""
if blob is None:
return
log.debug('Binding lazy symbols')
s = BindingState(self.binary.arch.bits == 64)
s.index = 0
s.bind_handler = default_binding_handler
end = len(blob)
while s.index < end:
s.binding_type = 1
s.address = 0
s.sym_name = ''
s.sym_flags = 0
s.lib_ord = 0
s.done = False
s.addend = 0
s.segment_index = 0
s.seg_end_address = 0
<DeepExtract>
s = s
seg = self.binary.segments[s.segment_index]
s.seg_end_address = seg.vaddr + seg.memsize
end = len(blob)
while not s.done and s.index < end:
log.debug('Current address: %#x, blob index (offset): %#x', s.address, s.index)
raw_opcode = blob[s.index]
opcode = raw_opcode & OPCODE_MASK
immediate = raw_opcode & IMM_MASK
s.index += 1
try:
h = {0: n_opcode_done, 16: n_opcode_set_dylib_ordinal_imm, 32: n_opcode_set_dylib_ordinal_uleb, 48: n_opcode_set_dylib_special_imm, 64: n_opcode_set_trailing_flags_imm, 80: n_opcode_set_type_imm, 112: l_opcode_set_segment_and_offset_uleb, 144: l_opcode_do_bind}[opcode]
s = h(s, self.binary, immediate, blob)
except KeyError:
log.error('Invalid opcode for current binding: %#x', opcode)
return s
</DeepExtract>
log.debug('Done binding lazy symbols')
|
def do_lazy_bind(self, blob):
"""
Performs lazy binding
"""
if blob is None:
return
log.debug('Binding lazy symbols')
s = BindingState(self.binary.arch.bits == 64)
s.index = 0
s.bind_handler = default_binding_handler
end = len(blob)
while s.index < end:
s.binding_type = 1
s.address = 0
s.sym_name = ''
s.sym_flags = 0
s.lib_ord = 0
s.done = False
s.addend = 0
s.segment_index = 0
s.seg_end_address = 0
s = s
seg = self.binary.segments[s.segment_index]
s.seg_end_address = seg.vaddr + seg.memsize
end = len(blob)
while not s.done and s.index < end:
log.debug('Current address: %#x, blob index (offset): %#x', s.address, s.index)
raw_opcode = blob[s.index]
opcode = raw_opcode & OPCODE_MASK
immediate = raw_opcode & IMM_MASK
s.index += 1
try:
h = {0: n_opcode_done, 16: n_opcode_set_dylib_ordinal_imm, 32: n_opcode_set_dylib_ordinal_uleb, 48: n_opcode_set_dylib_special_imm, 64: n_opcode_set_trailing_flags_imm, 80: n_opcode_set_type_imm, 112: l_opcode_set_segment_and_offset_uleb, 144: l_opcode_do_bind}[opcode]
s = h(s, self.binary, immediate, blob)
except KeyError:
log.error('Invalid opcode for current binding: %#x', opcode)
return s
log.debug('Done binding lazy symbols')
|
cle
|
positive
|
def __init__(self):
<DeepExtract>
data = sys.stdin.read().strip().split('\n')
n = int(data[0])
j = int(data[1])
distMatrix = [[0] * n for _ in range(n)]
for i in range(n):
d = data[i + 2].split()
for k in range(n):
distMatrix[i][k] = int(d[k])
(n, j, distMatrix) = (n, j, distMatrix)
</DeepExtract>
<DeepExtract>
limbLength = float('inf')
if j > 0:
i = j - 1
else:
i = j + 1
for k in range(n):
if i != k and k != j:
currLength = (distMatrix[i][j] + distMatrix[j][k] - distMatrix[i][k]) // 2
if currLength < limbLength:
limbLength = currLength
limbLength = limbLength
</DeepExtract>
print(limbLength)
|
def __init__(self):
data = sys.stdin.read().strip().split('\n')
n = int(data[0])
j = int(data[1])
distMatrix = [[0] * n for _ in range(n)]
for i in range(n):
d = data[i + 2].split()
for k in range(n):
distMatrix[i][k] = int(d[k])
(n, j, distMatrix) = (n, j, distMatrix)
limbLength = float('inf')
if j > 0:
i = j - 1
else:
i = j + 1
for k in range(n):
if i != k and k != j:
currLength = (distMatrix[i][j] + distMatrix[j][k] - distMatrix[i][k]) // 2
if currLength < limbLength:
limbLength = currLength
limbLength = limbLength
print(limbLength)
|
Coursera-Bioinformatics
|
positive
|
def conv2d(inputs, num_output_channels, kernel_size, scope, stride=[1, 1], padding='SAME', data_format='NHWC', use_xavier=True, stddev=0.001, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None, focal_loss=False, biases=True):
""" 2D convolution with non-linear operation.
Args:
inputs: 4-D tensor variable BxHxWxC
num_output_channels: int
kernel_size: a list of 2 ints
scope: string
stride: a list of 2 ints
padding: 'SAME' or 'VALID'
data_format: 'NHWC' or 'NCHW'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
(kernel_h, kernel_w) = kernel_size
assert data_format == 'NHWC' or data_format == 'NCHW'
if data_format == 'NHWC':
num_in_channels = inputs.get_shape()[-1].value
elif data_format == 'NCHW':
num_in_channels = inputs.get_shape()[1].value
kernel_shape = [kernel_h, kernel_w, num_in_channels, num_output_channels]
<DeepExtract>
if focal_loss:
initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
elif use_xavier:
initializer = tf.contrib.layers.xavier_initializer()
else:
initializer = tf.truncated_normal_initializer(stddev=stddev)
var = _variable_on_cpu('weights', kernel_shape, initializer)
if weight_decay is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), weight_decay, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
kernel = var
</DeepExtract>
(stride_h, stride_w) = stride
outputs = tf.nn.conv2d(inputs, kernel, [1, stride_h, stride_w, 1], padding=padding, data_format=data_format)
if biases:
if focal_loss:
biases_initializer = tf.constant_initializer(-np.log((1 - 0.01) / 0.01))
else:
biases_initializer = tf.constant_initializer(0.0)
<DeepExtract>
with tf.device('/cpu:0'):
dtype = tf.float16 if use_fp16 else tf.float32
var = tf.get_variable('biases', [num_output_channels], initializer=biases_initializer, dtype=dtype)
biases = var
</DeepExtract>
outputs = tf.nn.bias_add(outputs, biases, data_format=data_format)
if bn and cfg.MODEL.NETWORK.USE_GN:
<DeepExtract>
dim_size = len(outputs.get_shape().as_list())
if dim_size == 3:
reduction_axes = (-2,)
elif dim_size == 4:
reduction_axes = (-3, -2)
else:
raise Exception('Non Implementation Fault!!!')
outputs = group_norm(outputs, groups=G, channels_axis=-1, reduction_axes=reduction_axes, epsilon=eps, scope='gn')
</DeepExtract>
elif bn:
<DeepExtract>
if cfg.MODEL.NETWORK.SYNC_BN:
outputs = sync_batch_norm(outputs, decay=bn_decay, is_training=is_training, scope='bn', num_dev=7, red_axises=[0, 1, 2])
outputs = batch_norm_template(outputs, is_training, 'bn', [0, 1, 2], bn_decay, data_format)
</DeepExtract>
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
|
def conv2d(inputs, num_output_channels, kernel_size, scope, stride=[1, 1], padding='SAME', data_format='NHWC', use_xavier=True, stddev=0.001, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None, focal_loss=False, biases=True):
""" 2D convolution with non-linear operation.
Args:
inputs: 4-D tensor variable BxHxWxC
num_output_channels: int
kernel_size: a list of 2 ints
scope: string
stride: a list of 2 ints
padding: 'SAME' or 'VALID'
data_format: 'NHWC' or 'NCHW'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
(kernel_h, kernel_w) = kernel_size
assert data_format == 'NHWC' or data_format == 'NCHW'
if data_format == 'NHWC':
num_in_channels = inputs.get_shape()[-1].value
elif data_format == 'NCHW':
num_in_channels = inputs.get_shape()[1].value
kernel_shape = [kernel_h, kernel_w, num_in_channels, num_output_channels]
if focal_loss:
initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
elif use_xavier:
initializer = tf.contrib.layers.xavier_initializer()
else:
initializer = tf.truncated_normal_initializer(stddev=stddev)
var = _variable_on_cpu('weights', kernel_shape, initializer)
if weight_decay is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), weight_decay, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
kernel = var
(stride_h, stride_w) = stride
outputs = tf.nn.conv2d(inputs, kernel, [1, stride_h, stride_w, 1], padding=padding, data_format=data_format)
if biases:
if focal_loss:
biases_initializer = tf.constant_initializer(-np.log((1 - 0.01) / 0.01))
else:
biases_initializer = tf.constant_initializer(0.0)
with tf.device('/cpu:0'):
dtype = tf.float16 if use_fp16 else tf.float32
var = tf.get_variable('biases', [num_output_channels], initializer=biases_initializer, dtype=dtype)
biases = var
outputs = tf.nn.bias_add(outputs, biases, data_format=data_format)
if bn and cfg.MODEL.NETWORK.USE_GN:
dim_size = len(outputs.get_shape().as_list())
if dim_size == 3:
reduction_axes = (-2,)
elif dim_size == 4:
reduction_axes = (-3, -2)
else:
raise Exception('Non Implementation Fault!!!')
outputs = group_norm(outputs, groups=G, channels_axis=-1, reduction_axes=reduction_axes, epsilon=eps, scope='gn')
elif bn:
if cfg.MODEL.NETWORK.SYNC_BN:
outputs = sync_batch_norm(outputs, decay=bn_decay, is_training=is_training, scope='bn', num_dev=7, red_axises=[0, 1, 2])
outputs = batch_norm_template(outputs, is_training, 'bn', [0, 1, 2], bn_decay, data_format)
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
|
3DSSD
|
positive
|
@property
def output_shape(self):
if self._output_shape is None:
if K._BACKEND == 'tensorflow':
<DeepExtract>
x = self.get_input(train)
</DeepExtract>
return K.int_shape(x)
return self.input_shape
elif type(self._output_shape) in {tuple, list}:
nb_samples = self.input_shape[0] if self.input_shape else None
return (nb_samples,) + tuple(self._output_shape)
else:
shape = self._output_shape(self.input_shape)
if type(shape) not in {list, tuple}:
raise Exception('output_shape function must return a tuple')
return tuple(shape)
|
@property
def output_shape(self):
if self._output_shape is None:
if K._BACKEND == 'tensorflow':
x = self.get_input(train)
return K.int_shape(x)
return self.input_shape
elif type(self._output_shape) in {tuple, list}:
nb_samples = self.input_shape[0] if self.input_shape else None
return (nb_samples,) + tuple(self._output_shape)
else:
shape = self._output_shape(self.input_shape)
if type(shape) not in {list, tuple}:
raise Exception('output_shape function must return a tuple')
return tuple(shape)
|
encoder_decoder
|
positive
|
@validator
def executable_file(v):
<DeepExtract>
try:
f = os.path.expanduser(v)
except Exception as err:
raise ValueError('invalid file name `{0}`: {1}'.format(v, err))
</DeepExtract>
if os.access(f, os.R_OK | os.X_OK):
return f
else:
raise ValueError('cannot execute file `{v}`'.format(v=v))
|
@validator
def executable_file(v):
try:
f = os.path.expanduser(v)
except Exception as err:
raise ValueError('invalid file name `{0}`: {1}'.format(v, err))
if os.access(f, os.R_OK | os.X_OK):
return f
else:
raise ValueError('cannot execute file `{v}`'.format(v=v))
|
elasticluster
|
positive
|
def test_403_if_not_task(self):
@task_only
def view(request):
return HttpResponse('Hello')
request = self.factory.get('/')
<DeepExtract>
response = HttpResponse('Hello')
</DeepExtract>
self.assertEqual(response.status_code, 403)
|
def test_403_if_not_task(self):
@task_only
def view(request):
return HttpResponse('Hello')
request = self.factory.get('/')
response = HttpResponse('Hello')
self.assertEqual(response.status_code, 403)
|
djangae
|
positive
|
def test_exec_():
def f():
l = []
six.exec_('l.append(1)')
assert l == [1]
<DeepExtract>
pass
</DeepExtract>
ns = {}
six.exec_('x = 42', ns)
assert ns['x'] == 42
glob = {}
loc = {}
six.exec_('global y; y = 42; x = 12', glob, loc)
assert glob['y'] == 42
assert 'x' not in glob
assert loc['x'] == 12
assert 'y' not in loc
|
def test_exec_():
def f():
l = []
six.exec_('l.append(1)')
assert l == [1]
pass
ns = {}
six.exec_('x = 42', ns)
assert ns['x'] == 42
glob = {}
loc = {}
six.exec_('global y; y = 42; x = 12', glob, loc)
assert glob['y'] == 42
assert 'x' not in glob
assert loc['x'] == 12
assert 'y' not in loc
|
c4ddev
|
positive
|