| before (stringlengths 0–955k) | after (stringlengths 0–877k) | repo (stringlengths 1–74) | type (stringclasses: 1 value) |
|---|---|---|---|
def submit_job(path: str, cluster_soft: Optional[str]=None, submit_cmd: Optional[str]=None, submit_filename: Optional[str]=None, recursion: bool=False) -> Tuple[Optional[str], Optional[str]]:
"""
Submit a job.
Args:
path (str): The job's folder path, where the submit script is located (just the folder path, w/o the filename).
cluster_soft (str, optional): The server cluster software.
submit_cmd (str, optional): The submit command.
submit_filename (str, optional): The submit script file name.
recursion (bool, optional): Whether this call is within a recursion.
Returns:
Tuple[Optional[str], Optional[str]]: job_status, job_id
"""
cluster_soft = cluster_soft or servers['local']['cluster_soft']
(job_status, job_id) = ('', '')
submit_cmd = submit_cmd or submit_command[cluster_soft]
submit_filename = submit_filename or submit_filenames[cluster_soft]
cmd = f'cd {path}; {submit_cmd} {submit_filename}'
<DeepExtract>
error = None
if not isinstance(cmd, list):
cmd = [cmd]
cmd = [' && '.join(cmd)]
(i, max_times_to_try) = (1, 30)
sleep_time = 60
while i < max_times_to_try:
try:
if executable is None:
completed_process = subprocess.run(cmd, shell=shell, capture_output=True)
else:
completed_process = subprocess.run(cmd, shell=shell, capture_output=True, executable=executable)
(stdout, stderr) = (_format_stdout(completed_process.stdout), _format_stdout(completed_process.stderr))
except subprocess.CalledProcessError as e:
error = e
if no_fail:
_output_command_error_message(cmd, e, logger.warning)
(stdout, stderr) = (None, None)
else:
_output_command_error_message(cmd, e, logger.error)
logger.error(f'ARC is sleeping for {sleep_time * i} seconds before retrying.\nPlease check whether this is a server issue by executing the command manually on the server.')
logger.info('ZZZZZ..... ZZZZZ.....')
time.sleep(sleep_time * i)
i += 1
raise SettingsError(f'The command "{cmd}" is erroneous, got: \n{error}\nThis maybe either a server issue or the command is wrong.\nTo check if this is a server issue, please run the command on server and restart ARC.\nTo correct the command, modify settings.py\nTips: use "which" command to locate cluster software commands on server.\nExample: type "which sbatch" on a server running Slurm to find the correct sbatch path required in the submit_command dictionary.')
</DeepExtract>
if not len(stdout):
time.sleep(10)
<DeepExtract>
error = None
if not isinstance(cmd, list):
cmd = [cmd]
cmd = [' && '.join(cmd)]
(i, max_times_to_try) = (1, 30)
sleep_time = 60
while i < max_times_to_try:
try:
if executable is None:
completed_process = subprocess.run(cmd, shell=shell, capture_output=True)
else:
completed_process = subprocess.run(cmd, shell=shell, capture_output=True, executable=executable)
(stdout, stderr) = (_format_stdout(completed_process.stdout), _format_stdout(completed_process.stderr))
except subprocess.CalledProcessError as e:
error = e
if no_fail:
_output_command_error_message(cmd, e, logger.warning)
(stdout, stderr) = (None, None)
else:
_output_command_error_message(cmd, e, logger.error)
logger.error(f'ARC is sleeping for {sleep_time * i} seconds before retrying.\nPlease check whether this is a server issue by executing the command manually on the server.')
logger.info('ZZZZZ..... ZZZZZ.....')
time.sleep(sleep_time * i)
i += 1
raise SettingsError(f'The command "{cmd}" is erroneous, got: \n{error}\nThis maybe either a server issue or the command is wrong.\nTo check if this is a server issue, please run the command on server and restart ARC.\nTo correct the command, modify settings.py\nTips: use "which" command to locate cluster software commands on server.\nExample: type "which sbatch" on a server running Slurm to find the correct sbatch path required in the submit_command dictionary.')
</DeepExtract>
if stderr:
if cluster_soft.lower() == 'slurm' and any(('AssocMaxSubmitJobLimit' in err_line for err_line in stderr)):
logger.warning(f'Max number of submitted jobs was reached, sleeping...')
time.sleep(5 * 60)
<DeepExtract>
cluster_soft = cluster_soft or servers['local']['cluster_soft']
(job_status, job_id) = ('', '')
submit_cmd = submit_cmd or submit_command[cluster_soft]
submit_filename = submit_filename or submit_filenames[cluster_soft]
cmd = f'cd {path}; {submit_cmd} {submit_filename}'
(stdout, stderr) = execute_command(cmd)
if not len(stdout):
time.sleep(10)
(stdout, stderr) = execute_command(cmd)
if stderr:
if cluster_soft.lower() == 'slurm' and any(('AssocMaxSubmitJobLimit' in err_line for err_line in stderr)):
logger.warning(f'Max number of submitted jobs was reached, sleeping...')
time.sleep(5 * 60)
submit_job(path=path, cluster_soft=cluster_soft, submit_cmd=submit_cmd, submit_filename=submit_filename, recursion=True)
if not len(stdout) or True:
return (None, None)
if len(stderr) > 0 or len(stdout) == 0:
logger.warning(f'Got the following error when trying to submit job:\n{stderr}.')
job_status = 'errored'
else:
job_id = _determine_job_id(stdout=stdout, cluster_soft=cluster_soft)
job_status = 'running' if job_id else job_status
return (job_status, job_id)
</DeepExtract>
if not len(stdout) or recursion:
return (None, None)
if len(stderr) > 0 or len(stdout) == 0:
logger.warning(f'Got the following error when trying to submit job:\n{stderr}.')
job_status = 'errored'
else:
<DeepExtract>
job_id = ''
cluster_soft = cluster_soft or servers['local']['cluster_soft']
cluster_soft = cluster_soft.lower() if cluster_soft is not None else None
if cluster_soft in ['oge', 'sge'] and 'submitted' in stdout[0].lower():
job_id = stdout[0].split()[2]
elif cluster_soft == 'slurm' and 'submitted' in stdout[0].lower():
job_id = stdout[0].split()[3]
elif cluster_soft == 'pbs':
job_id = stdout[0].split('.')[0]
elif cluster_soft == 'htcondor' and 'submitting' in stdout[0].lower():
if len(stdout) and len(stdout[1].split()) and len(stdout[1].split()[-1].split('.')):
job_id = stdout[1].split()[-1].split('.')[0]
else:
raise ValueError(f'Unrecognized cluster software: {cluster_soft}')
job_id = job_id
</DeepExtract>
job_status = 'running' if job_id else job_status
return (job_status, job_id)
|
def submit_job(path: str, cluster_soft: Optional[str]=None, submit_cmd: Optional[str]=None, submit_filename: Optional[str]=None, recursion: bool=False) -> Tuple[Optional[str], Optional[str]]:
"""
Submit a job.
Args:
path (str): The job's folder path, where the submit script is located (just the folder path, w/o the filename).
cluster_soft (str, optional): The server cluster software.
submit_cmd (str, optional): The submit command.
submit_filename (str, optional): The submit script file name.
recursion (bool, optional): Whether this call is within a recursion.
Returns:
Tuple[Optional[str], Optional[str]]: job_status, job_id
"""
cluster_soft = cluster_soft or servers['local']['cluster_soft']
(job_status, job_id) = ('', '')
submit_cmd = submit_cmd or submit_command[cluster_soft]
submit_filename = submit_filename or submit_filenames[cluster_soft]
cmd = f'cd {path}; {submit_cmd} {submit_filename}'
error = None
if not isinstance(cmd, list):
cmd = [cmd]
cmd = [' && '.join(cmd)]
(i, max_times_to_try) = (1, 30)
sleep_time = 60
while i < max_times_to_try:
try:
if executable is None:
completed_process = subprocess.run(cmd, shell=shell, capture_output=True)
else:
completed_process = subprocess.run(cmd, shell=shell, capture_output=True, executable=executable)
(stdout, stderr) = (_format_stdout(completed_process.stdout), _format_stdout(completed_process.stderr))
except subprocess.CalledProcessError as e:
error = e
if no_fail:
_output_command_error_message(cmd, e, logger.warning)
(stdout, stderr) = (None, None)
else:
_output_command_error_message(cmd, e, logger.error)
logger.error(f'ARC is sleeping for {sleep_time * i} seconds before retrying.\nPlease check whether this is a server issue by executing the command manually on the server.')
logger.info('ZZZZZ..... ZZZZZ.....')
time.sleep(sleep_time * i)
i += 1
raise SettingsError(f'The command "{cmd}" is erroneous, got: \n{error}\nThis maybe either a server issue or the command is wrong.\nTo check if this is a server issue, please run the command on server and restart ARC.\nTo correct the command, modify settings.py\nTips: use "which" command to locate cluster software commands on server.\nExample: type "which sbatch" on a server running Slurm to find the correct sbatch path required in the submit_command dictionary.')
if not len(stdout):
time.sleep(10)
error = None
if not isinstance(cmd, list):
cmd = [cmd]
cmd = [' && '.join(cmd)]
(i, max_times_to_try) = (1, 30)
sleep_time = 60
while i < max_times_to_try:
try:
if executable is None:
completed_process = subprocess.run(cmd, shell=shell, capture_output=True)
else:
completed_process = subprocess.run(cmd, shell=shell, capture_output=True, executable=executable)
(stdout, stderr) = (_format_stdout(completed_process.stdout), _format_stdout(completed_process.stderr))
except subprocess.CalledProcessError as e:
error = e
if no_fail:
_output_command_error_message(cmd, e, logger.warning)
(stdout, stderr) = (None, None)
else:
_output_command_error_message(cmd, e, logger.error)
logger.error(f'ARC is sleeping for {sleep_time * i} seconds before retrying.\nPlease check whether this is a server issue by executing the command manually on the server.')
logger.info('ZZZZZ..... ZZZZZ.....')
time.sleep(sleep_time * i)
i += 1
raise SettingsError(f'The command "{cmd}" is erroneous, got: \n{error}\nThis maybe either a server issue or the command is wrong.\nTo check if this is a server issue, please run the command on server and restart ARC.\nTo correct the command, modify settings.py\nTips: use "which" command to locate cluster software commands on server.\nExample: type "which sbatch" on a server running Slurm to find the correct sbatch path required in the submit_command dictionary.')
if stderr:
if cluster_soft.lower() == 'slurm' and any(('AssocMaxSubmitJobLimit' in err_line for err_line in stderr)):
logger.warning(f'Max number of submitted jobs was reached, sleeping...')
time.sleep(5 * 60)
cluster_soft = cluster_soft or servers['local']['cluster_soft']
(job_status, job_id) = ('', '')
submit_cmd = submit_cmd or submit_command[cluster_soft]
submit_filename = submit_filename or submit_filenames[cluster_soft]
cmd = f'cd {path}; {submit_cmd} {submit_filename}'
(stdout, stderr) = execute_command(cmd)
if not len(stdout):
time.sleep(10)
(stdout, stderr) = execute_command(cmd)
if stderr:
if cluster_soft.lower() == 'slurm' and any(('AssocMaxSubmitJobLimit' in err_line for err_line in stderr)):
logger.warning(f'Max number of submitted jobs was reached, sleeping...')
time.sleep(5 * 60)
submit_job(path=path, cluster_soft=cluster_soft, submit_cmd=submit_cmd, submit_filename=submit_filename, recursion=True)
if not len(stdout) or True:
return (None, None)
if len(stderr) > 0 or len(stdout) == 0:
logger.warning(f'Got the following error when trying to submit job:\n{stderr}.')
job_status = 'errored'
else:
job_id = _determine_job_id(stdout=stdout, cluster_soft=cluster_soft)
job_status = 'running' if job_id else job_status
return (job_status, job_id)
if not len(stdout) or recursion:
return (None, None)
if len(stderr) > 0 or len(stdout) == 0:
logger.warning(f'Got the following error when trying to submit job:\n{stderr}.')
job_status = 'errored'
else:
job_id = ''
cluster_soft = cluster_soft or servers['local']['cluster_soft']
cluster_soft = cluster_soft.lower() if cluster_soft is not None else None
if cluster_soft in ['oge', 'sge'] and 'submitted' in stdout[0].lower():
job_id = stdout[0].split()[2]
elif cluster_soft == 'slurm' and 'submitted' in stdout[0].lower():
job_id = stdout[0].split()[3]
elif cluster_soft == 'pbs':
job_id = stdout[0].split('.')[0]
elif cluster_soft == 'htcondor' and 'submitting' in stdout[0].lower():
if len(stdout) and len(stdout[1].split()) and len(stdout[1].split()[-1].split('.')):
job_id = stdout[1].split()[-1].split('.')[0]
else:
raise ValueError(f'Unrecognized cluster software: {cluster_soft}')
job_id = job_id
job_status = 'running' if job_id else job_status
return (job_status, job_id)
|
ARC
|
positive
|
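For orientation, a hedged usage sketch of the `submit_job` pair above. It presumes ARC's runtime environment (`execute_command`, `logger`, the settings dictionaries); the `servers`, `submit_command`, and `submit_filenames` values shown here are illustrative only.

```python
# Illustrative settings only; real values come from ARC's settings.py.
servers = {'local': {'cluster_soft': 'Slurm'}}
submit_command = {'Slurm': 'sbatch'}
submit_filenames = {'Slurm': 'submit.sub'}

# Submit the script found in the given folder and inspect the outcome.
job_status, job_id = submit_job(path='/home/user/runs/job_42')
if job_status == 'running':
    print(f'Job {job_id} was submitted successfully.')
elif job_status == 'errored':
    print('Submission failed; see the log for the captured stderr.')
```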
def regenerate_recovery_code(self, user_id):
"""Removes the current recovery token, generates and returns a new one
Args:
user_id (str): The user_id of the user identity.
See: https://auth0.com/docs/api/management/v2#!/Users/post_recovery_code_regeneration
"""
<DeepExtract>
url = f'{self.protocol}://{self.domain}/api/v2/users'
if f'{user_id}/recovery-code-regeneration' is not None:
url = f"{url}/{f'{user_id}/recovery-code-regeneration'}"
url = url
</DeepExtract>
return self.client.post(url)
|
def regenerate_recovery_code(self, user_id):
"""Removes the current recovery token, generates and returns a new one
Args:
user_id (str): The user_id of the user identity.
See: https://auth0.com/docs/api/management/v2#!/Users/post_recovery_code_regeneration
"""
url = f'{self.protocol}://{self.domain}/api/v2/users'
if f'{user_id}/recovery-code-regeneration' is not None:
url = f"{url}/{f'{user_id}/recovery-code-regeneration'}"
url = url
return self.client.post(url)
|
auth0-python
|
positive
|
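The inlined block in the pair above just assembles the endpoint URL; a standalone sketch of that logic with illustrative protocol and domain values (not part of the library):

```python
def build_recovery_code_url(protocol: str, domain: str, user_id: str) -> str:
    # Base users endpoint plus the per-user recovery-code-regeneration path,
    # mirroring the inlined logic shown above.
    url = f'{protocol}://{domain}/api/v2/users'
    return f'{url}/{user_id}/recovery-code-regeneration'

print(build_recovery_code_url('https', 'example.auth0.com', 'auth0|123'))
# https://example.auth0.com/api/v2/users/auth0|123/recovery-code-regeneration
```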
def test_neb_default(fixture_localhost, generate_calc_job_node, generate_parser, data_regression, num_regression):
"""Test a NEB calculation with symmetric images and automatic climbing image."""
name = 'default'
entry_point_calc_job = 'quantumespresso.neb'
entry_point_parser = 'quantumespresso.neb'
<DeepExtract>
inputs = {'parameters': orm.Dict({'PATH': {'num_of_images': 3}}), 'pw': {'parameters': orm.Dict()}, 'settings': orm.Dict({'parser_options': None})}
inputs = AttributeDict(inputs)
</DeepExtract>
node = generate_calc_job_node(entry_point_calc_job, fixture_localhost, name, inputs)
parser = generate_parser(entry_point_parser)
(results, calcfunction) = parser.parse_from_node(node, store_provenance=False)
assert calcfunction.is_finished, calcfunction.exception
assert calcfunction.is_finished_ok, calcfunction.exit_message
assert not orm.Log.collection.get_logs_for(node)
assert 'output_parameters' in results
assert 'output_mep' in results
assert 'output_trajectory' in results
assert 'iteration_array' not in results
data = {'parameters': results['output_parameters'].get_dict(), 'output_mep': results['output_mep'].base.attributes.all, 'output_trajectory': results['output_trajectory'].base.attributes.all}
data_regression.check(data)
<DeepExtract>
if len([results['output_mep']]) != len([['mep', 'interpolated_mep']]):
raise ValueError('length of `arrays` and `array_names` should be equal.')
result = {}
for (index, array) in enumerate([results['output_mep']]):
for name in [['mep', 'interpolated_mep']][index]:
result[name] = array.get_array(name).flatten()
for (key, val) in result.items():
if not (np.issubdtype(val.dtype, np.floating) or np.issubdtype(val.dtype, np.complexfloating)):
result[key] = val.astype(np.float64)
data = result
</DeepExtract>
num_regression.check(data, default_tolerance=dict(atol=0, rtol=1e-18))
|
def test_neb_default(fixture_localhost, generate_calc_job_node, generate_parser, data_regression, num_regression):
"""Test a NEB calculation with symmetric images and automatic climbing image."""
name = 'default'
entry_point_calc_job = 'quantumespresso.neb'
entry_point_parser = 'quantumespresso.neb'
inputs = {'parameters': orm.Dict({'PATH': {'num_of_images': 3}}), 'pw': {'parameters': orm.Dict()}, 'settings': orm.Dict({'parser_options': None})}
inputs = AttributeDict(inputs)
node = generate_calc_job_node(entry_point_calc_job, fixture_localhost, name, inputs)
parser = generate_parser(entry_point_parser)
(results, calcfunction) = parser.parse_from_node(node, store_provenance=False)
assert calcfunction.is_finished, calcfunction.exception
assert calcfunction.is_finished_ok, calcfunction.exit_message
assert not orm.Log.collection.get_logs_for(node)
assert 'output_parameters' in results
assert 'output_mep' in results
assert 'output_trajectory' in results
assert 'iteration_array' not in results
data = {'parameters': results['output_parameters'].get_dict(), 'output_mep': results['output_mep'].base.attributes.all, 'output_trajectory': results['output_trajectory'].base.attributes.all}
data_regression.check(data)
if len([results['output_mep']]) != len([['mep', 'interpolated_mep']]):
raise ValueError('length of `arrays` and `array_names` should be equal.')
result = {}
for (index, array) in enumerate([results['output_mep']]):
for name in [['mep', 'interpolated_mep']][index]:
result[name] = array.get_array(name).flatten()
for (key, val) in result.items():
if not (np.issubdtype(val.dtype, np.floating) or np.issubdtype(val.dtype, np.complexfloating)):
result[key] = val.astype(np.float64)
data = result
num_regression.check(data, default_tolerance=dict(atol=0, rtol=1e-18))
|
aiida-quantumespresso
|
positive
|
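The inlined loop in the test above flattens named arrays into a dict before handing them to `num_regression`; a self-contained sketch of that idea using plain NumPy arrays in place of AiiDA's `ArrayData` (names and data are illustrative):

```python
import numpy as np

def flatten_named_arrays(arrays, array_names):
    """Flatten each named array and force a floating-point dtype."""
    if len(arrays) != len(array_names):
        raise ValueError('length of `arrays` and `array_names` should be equal.')
    result = {}
    for index, array in enumerate(arrays):
        for name in array_names[index]:
            result[name] = np.asarray(array[name]).flatten()
    for key, val in result.items():
        if not (np.issubdtype(val.dtype, np.floating) or np.issubdtype(val.dtype, np.complexfloating)):
            result[key] = val.astype(np.float64)
    return result

data = flatten_named_arrays(
    [{'mep': np.arange(6).reshape(2, 3), 'interpolated_mep': np.ones((2, 3))}],
    [['mep', 'interpolated_mep']],
)
```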
def next_batch(self, batch_size):
if self._next_id >= self.n and self.enable_shuffle:
<DeepExtract>
if self.deterministic:
return
perm = np.arange(self.n)
np.random.shuffle(perm)
for key in self.data_map:
self.data_map[key] = self.data_map[key][perm]
self._next_id = 0
</DeepExtract>
cur_id = self._next_id
cur_batch_size = min(batch_size, self.n - self._next_id)
self._next_id += cur_batch_size
data_map = dict()
for key in self.data_map:
data_map[key] = self.data_map[key][cur_id:cur_id + cur_batch_size]
return data_map
|
def next_batch(self, batch_size):
if self._next_id >= self.n and self.enable_shuffle:
if self.deterministic:
return
perm = np.arange(self.n)
np.random.shuffle(perm)
for key in self.data_map:
self.data_map[key] = self.data_map[key][perm]
self._next_id = 0
cur_id = self._next_id
cur_batch_size = min(batch_size, self.n - self._next_id)
self._next_id += cur_batch_size
data_map = dict()
for key in self.data_map:
data_map[key] = self.data_map[key][cur_id:cur_id + cur_batch_size]
return data_map
|
BackpropThroughTheVoidRL
|
positive
|
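The inlined block in `next_batch` above reshuffles every array in `data_map` with one shared permutation so rows stay aligned across keys; a minimal standalone sketch of that step (names are illustrative):

```python
import numpy as np

def shuffle_in_unison(data_map, n, deterministic=False):
    # One permutation applied to every array keeps corresponding rows together.
    if deterministic:
        return data_map
    perm = np.arange(n)
    np.random.shuffle(perm)
    return {key: value[perm] for key, value in data_map.items()}

batch_source = {'obs': np.arange(4), 'labels': np.array([10, 11, 12, 13])}
print(shuffle_in_unison(batch_source, n=4))
```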
def content_to_html(content, article_id):
"""Returns article/page content as HTML"""
def render_node(html, node, index):
"""Renders node as HTML"""
if node['type'] == 'paragraph':
return html + '<p>%s</p>' % node['data']
else:
if node['type'] == 'ad':
id = 'div-gpt-ad-1443288719995-' + str(10 + index) + '-' + str(article_id)
dfp_type = 'Intra_Article_' + str(index + 1)
size = 'banner'
if node['data'] == 'mobile':
size = 'box'
newString = '<div class="o-article-embed__advertisement"><div class="o-advertisement o-advertisment--banner o-advertisement--center"><div class="adslot" id="' + id + '" data-size="' + size + '" data-dfp="' + dfp_type + '"></div></div></div>'
return html + '<div class="o-article-embed o-article-embed--advertisement">%s</div>\n' % newString
try:
if node['type'] == 'poll':
node['type'] = 'widget'
node['data']['data'] = node['data']
if node['type'] == 'interactive map':
return html + node['data']['svg'] + node['data']['initScript']
return html + embeds.render(node['type'], node['data'])
except EmbedException:
return html
html = ''
index = 0
for node in content:
<DeepExtract>
if node['type'] == 'paragraph':
html = html + '<p>%s</p>' % node['data']
else:
if node['type'] == 'ad':
id = 'div-gpt-ad-1443288719995-' + str(10 + index) + '-' + str(article_id)
dfp_type = 'Intra_Article_' + str(index + 1)
size = 'banner'
if node['data'] == 'mobile':
size = 'box'
newString = '<div class="o-article-embed__advertisement"><div class="o-advertisement o-advertisment--banner o-advertisement--center"><div class="adslot" id="' + id + '" data-size="' + size + '" data-dfp="' + dfp_type + '"></div></div></div>'
html = html + '<div class="o-article-embed o-article-embed--advertisement">%s</div>\n' % newString
try:
if node['type'] == 'poll':
node['type'] = 'widget'
node['data']['data'] = node['data']
if node['type'] == 'interactive map':
html = html + node['data']['svg'] + node['data']['initScript']
html = html + embeds.render(node['type'], node['data'])
except EmbedException:
html = html
</DeepExtract>
if node['type'] == 'ad':
index += 1
return mark_safe(html)
|
def content_to_html(content, article_id):
"""Returns article/page content as HTML"""
def render_node(html, node, index):
"""Renders node as HTML"""
if node['type'] == 'paragraph':
return html + '<p>%s</p>' % node['data']
else:
if node['type'] == 'ad':
id = 'div-gpt-ad-1443288719995-' + str(10 + index) + '-' + str(article_id)
dfp_type = 'Intra_Article_' + str(index + 1)
size = 'banner'
if node['data'] == 'mobile':
size = 'box'
newString = '<div class="o-article-embed__advertisement"><div class="o-advertisement o-advertisment--banner o-advertisement--center"><div class="adslot" id="' + id + '" data-size="' + size + '" data-dfp="' + dfp_type + '"></div></div></div>'
return html + '<div class="o-article-embed o-article-embed--advertisement">%s</div>\n' % newString
try:
if node['type'] == 'poll':
node['type'] = 'widget'
node['data']['data'] = node['data']
if node['type'] == 'interactive map':
return html + node['data']['svg'] + node['data']['initScript']
return html + embeds.render(node['type'], node['data'])
except EmbedException:
return html
html = ''
index = 0
for node in content:
if node['type'] == 'paragraph':
html = html + '<p>%s</p>' % node['data']
else:
if node['type'] == 'ad':
id = 'div-gpt-ad-1443288719995-' + str(10 + index) + '-' + str(article_id)
dfp_type = 'Intra_Article_' + str(index + 1)
size = 'banner'
if node['data'] == 'mobile':
size = 'box'
newString = '<div class="o-article-embed__advertisement"><div class="o-advertisement o-advertisment--banner o-advertisement--center"><div class="adslot" id="' + id + '" data-size="' + size + '" data-dfp="' + dfp_type + '"></div></div></div>'
html = html + '<div class="o-article-embed o-article-embed--advertisement">%s</div>\n' % newString
try:
if node['type'] == 'poll':
node['type'] = 'widget'
node['data']['data'] = node['data']
if node['type'] == 'interactive map':
html = html + node['data']['svg'] + node['data']['initScript']
html = html + embeds.render(node['type'], node['data'])
except EmbedException:
html = html
if node['type'] == 'ad':
index += 1
return mark_safe(html)
|
dispatch
|
positive
|
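To show the simplest path through the rendering loop above, a dependency-free sketch that handles only 'paragraph' nodes; the real function additionally renders ads, polls, interactive maps, and embeds via `embeds.render`:

```python
def paragraphs_to_html(content):
    # Only the 'paragraph' branch of the loop above; other node types are ignored here.
    html = ''
    for node in content:
        if node['type'] == 'paragraph':
            html += '<p>%s</p>' % node['data']
    return html

print(paragraphs_to_html([{'type': 'paragraph', 'data': 'Hello'},
                          {'type': 'paragraph', 'data': 'World'}]))
# -> <p>Hello</p><p>World</p>
```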
def patch_strptime():
"""Monkey patching _strptime to avoid problems related with non-english
locale changes on the system.
For example, if system's locale is set to fr_FR. Parser won't recognize
any date since all languages are translated to english dates.
"""
_strptime_spec = importlib.util.find_spec('_strptime')
_strptime = importlib.util.module_from_spec(_strptime_spec)
<DeepExtract>
if hasattr(_strptime_spec.loader, 'exec_module'):
_strptime_spec.loader.exec_module(_strptime)
else:
code = _strptime_spec.loader.get_code(_strptime.__name__)
exec(code, _strptime.__dict__)
</DeepExtract>
sys.modules['strptime_patched'] = _strptime
_calendar = importlib.util.module_from_spec(_strptime_spec)
<DeepExtract>
if hasattr(_strptime_spec.loader, 'exec_module'):
_strptime_spec.loader.exec_module(_calendar)
else:
code = _strptime_spec.loader.get_code(_calendar.__name__)
exec(code, _calendar.__dict__)
</DeepExtract>
sys.modules['calendar_patched'] = _calendar
_strptime._getlang = lambda : ('en_US', 'UTF-8')
_strptime.calendar = _calendar
_strptime.calendar.day_abbr = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
_strptime.calendar.day_name = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
_strptime.calendar.month_abbr = ['', 'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
_strptime.calendar.month_name = ['', 'january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']
return _strptime._strptime_time
|
def patch_strptime():
"""Monkey patching _strptime to avoid problems related with non-english
locale changes on the system.
For example, if system's locale is set to fr_FR. Parser won't recognize
any date since all languages are translated to english dates.
"""
_strptime_spec = importlib.util.find_spec('_strptime')
_strptime = importlib.util.module_from_spec(_strptime_spec)
if hasattr(_strptime_spec.loader, 'exec_module'):
_strptime_spec.loader.exec_module(_strptime)
else:
code = _strptime_spec.loader.get_code(_strptime.__name__)
exec(code, _strptime.__dict__)
sys.modules['strptime_patched'] = _strptime
_calendar = importlib.util.module_from_spec(_strptime_spec)
if hasattr(_strptime_spec.loader, 'exec_module'):
_strptime_spec.loader.exec_module(_calendar)
else:
code = _strptime_spec.loader.get_code(_calendar.__name__)
exec(code, _calendar.__dict__)
sys.modules['calendar_patched'] = _calendar
_strptime._getlang = lambda : ('en_US', 'UTF-8')
_strptime.calendar = _calendar
_strptime.calendar.day_abbr = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
_strptime.calendar.day_name = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
_strptime.calendar.month_abbr = ['', 'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
_strptime.calendar.month_name = ['', 'january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']
return _strptime._strptime_time
|
dateparser
|
positive
|
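The block inlined twice in `patch_strptime` above is a "load a fresh, private copy of a module" pattern; a standalone sketch of it that leaves `sys.modules['_strptime']` untouched:

```python
import importlib.util

def load_private_copy(module_name):
    # Build an independent module object from the spec and execute it, falling
    # back to exec() of the code object for loaders without exec_module().
    spec = importlib.util.find_spec(module_name)
    module = importlib.util.module_from_spec(spec)
    if hasattr(spec.loader, 'exec_module'):
        spec.loader.exec_module(module)
    else:
        code = spec.loader.get_code(module.__name__)
        exec(code, module.__dict__)
    return module

private_strptime = load_private_copy('_strptime')
```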
def test_asynchronous(self):
"""Call a DBus method asynchronously."""
returned = Mock()
def callback(call, number):
returned(number, call())
def test():
<DeepExtract>
proxy = self._get_service_proxy(self.message_bus, **proxy_args)
</DeepExtract>
proxy.Hello('Foo', callback=callback, callback_args=(1,))
proxy.Hello('Foo', callback=callback, callback_args=(2,))
proxy.Hello('Bar', callback=callback, callback_args=(3,))
self._add_client(test)
self._run_test()
returned.assert_has_calls([mock.call(1, 'Hello, Foo!'), mock.call(2, 'Hello, Foo!'), mock.call(3, 'Hello, Bar!')])
|
def test_asynchronous(self):
"""Call a DBus method asynchronously."""
returned = Mock()
def callback(call, number):
returned(number, call())
def test():
proxy = self._get_service_proxy(self.message_bus, **proxy_args)
proxy.Hello('Foo', callback=callback, callback_args=(1,))
proxy.Hello('Foo', callback=callback, callback_args=(2,))
proxy.Hello('Bar', callback=callback, callback_args=(3,))
self._add_client(test)
self._run_test()
returned.assert_has_calls([mock.call(1, 'Hello, Foo!'), mock.call(2, 'Hello, Foo!'), mock.call(3, 'Hello, Bar!')])
|
dasbus
|
positive
|
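A toy illustration of the callback convention the test above exercises: each reply invokes `callback(call, *callback_args)`, where `call()` yields the method's return value (the stand-ins below are illustrative, not the dasbus API):

```python
from unittest.mock import Mock, call as mock_call

returned = Mock()

def callback(call, number):
    # Pair the request number with the result delivered by call().
    returned(number, call())

# Simulate two asynchronous replies arriving.
callback(lambda: 'Hello, Foo!', 1)
callback(lambda: 'Hello, Bar!', 2)
returned.assert_has_calls([mock_call(1, 'Hello, Foo!'), mock_call(2, 'Hello, Bar!')])
```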
def getRotationAngle(self):
"""
get the current angle the
robot body is rotated off the ground
"""
<DeepExtract>
(armCos, armSin) = (math.cos(self.armAngle), math.sin(self.armAngle))
</DeepExtract>
<DeepExtract>
(handCos, handSin) = (math.cos(self.handAngle), math.sin(self.handAngle))
</DeepExtract>
x = self.armLength * armCos + self.handLength * handCos + self.robotWidth
y = self.armLength * armSin + self.handLength * handSin + self.robotHeight
if y < 0:
return math.atan(-y / x)
return 0.0
|
def getRotationAngle(self):
"""
get the current angle the
robot body is rotated off the ground
"""
(armCos, armSin) = (math.cos(self.armAngle), math.sin(self.armAngle))
(handCos, handSin) = (math.cos(self.handAngle), math.sin(self.handAngle))
x = self.armLength * armCos + self.handLength * handCos + self.robotWidth
y = self.armLength * armSin + self.handLength * handSin + self.robotHeight
if y < 0:
return math.atan(-y / x)
return 0.0
|
deepbootcamp
|
positive
|
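A quick numeric check of the geometry in `getRotationAngle` above, with made-up link lengths and joint angles chosen so the arm/hand chain dips below the ground (y < 0) and a non-zero tilt results:

```python
import math

armLength, handLength = 1.0, 0.5
robotWidth, robotHeight = 0.3, 0.2
armAngle, handAngle = math.radians(-30), math.radians(-120)

x = armLength * math.cos(armAngle) + handLength * math.cos(handAngle) + robotWidth
y = armLength * math.sin(armAngle) + handLength * math.sin(handAngle) + robotHeight

# The body tilts only when the end of the arm/hand chain would be underground.
rotation = math.atan(-y / x) if y < 0 else 0.0
print(rotation)  # ~0.67 rad for these illustrative values
```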
def get_doubling_time(sim, series=None, interval=None, start_day=None, end_day=None, moving_window=None, exp_approx=False, max_doubling_time=100, eps=0.001, verbose=None):
"""
Alternate method to calculate doubling time (one is already implemented in
the sim object).
**Examples**::
cv.get_doubling_time(sim, interval=[3,30]) # returns the doubling time over the given interval (single float)
cv.get_doubling_time(sim, interval=[3,30], moving_window=3) # returns doubling times calculated over moving windows (array)
"""
if verbose is None:
verbose = sim['verbose']
if series is None or isinstance(series, str):
if not sim.results_ready:
raise Exception('Results not ready, cannot calculate doubling time')
else:
if series is None or series not in sim.result_keys():
sc.printv('Series not supplied or not found in results; defaulting to use cumulative exposures', 1, verbose)
series = 'cum_infections'
series = sim.results[series].values
else:
series = sc.toarray(series)
if interval is not None:
if len(interval) != 2:
sc.printv(f'Interval should be a list/array/tuple of length 2, not {len(interval)}. Resetting to length of series.', 1, verbose)
interval = [0, len(series)]
(start_day, end_day) = (interval[0], interval[1])
if len(series) < end_day:
sc.printv(f'End day {end_day} is after the series ends ({len(series)}). Resetting to length of series.', 1, verbose)
end_day = len(series)
int_length = end_day - start_day
if moving_window is not None:
if not sc.isnumber(moving_window):
sc.printv('Moving window should be an integer; ignoring and calculating single result', 1, verbose)
<DeepExtract>
if verbose is None:
verbose = sim['verbose']
if series is None or isinstance(series, str):
if not sim.results_ready:
raise Exception('Results not ready, cannot calculate doubling time')
else:
if series is None or series not in sim.result_keys():
sc.printv('Series not supplied or not found in results; defaulting to use cumulative exposures', 1, verbose)
series = 'cum_infections'
series = sim.results[series].values
else:
series = sc.toarray(series)
if interval is not None:
if len(interval) != 2:
sc.printv(f'Interval should be a list/array/tuple of length 2, not {len(interval)}. Resetting to length of series.', 1, verbose)
interval = [0, len(series)]
(start_day, end_day) = (interval[0], interval[1])
if len(series) < end_day:
sc.printv(f'End day {end_day} is after the series ends ({len(series)}). Resetting to length of series.', 1, verbose)
end_day = len(series)
int_length = end_day - start_day
if None is not None:
if not sc.isnumber(None):
sc.printv('Moving window should be an integer; ignoring and calculating single result', 1, verbose)
doubling_time = get_doubling_time(sim, series=series, start_day=start_day, end_day=end_day, moving_window=None, exp_approx=exp_approx)
else:
if not isinstance(None, int):
sc.printv(f'Moving window should be an integer; recasting {None} the nearest integer... ', 1, verbose)
None = int(None)
if None < 2:
sc.printv(f'Moving window should be greater than 1; recasting {None} to 2', 1, verbose)
None = 2
doubling_time = []
for w in range(int_length - None + 1):
this_start = start_day + w
this_end = this_start + None
this_doubling_time = get_doubling_time(sim, series=series, start_day=this_start, end_day=this_end, exp_approx=exp_approx)
doubling_time.append(this_doubling_time)
else:
if not exp_approx:
try:
import statsmodels.api as sm
except ModuleNotFoundError as E:
errormsg = f'Could not import statsmodels ({E}), falling back to exponential approximation'
print(errormsg)
exp_approx = True
if exp_approx:
if series[start_day] > 0:
r = series[end_day] / series[start_day]
if r > 1:
doubling_time = int_length * np.log(2) / np.log(r)
doubling_time = min(doubling_time, max_doubling_time)
else:
raise ValueError("Can't calculate doubling time with exponential approximation when initial value is zero.")
elif np.any(series[start_day:end_day]):
nonzero = np.nonzero(series[start_day:end_day])[0]
if len(nonzero) >= 2:
exog = sm.add_constant(np.arange(len(nonzero)))
endog = np.log2(series[start_day:end_day][nonzero])
model = sm.OLS(endog, exog)
doubling_rate = model.fit().params[1]
if doubling_rate > eps:
doubling_time = 1.0 / doubling_rate
else:
doubling_time = max_doubling_time
else:
raise ValueError(f"Can't calculate doubling time for series {series[start_day:end_day]}. Check whether series is growing.")
else:
raise ValueError(f"Can't calculate doubling time for series {series[start_day:end_day]}. Check whether series is growing.")
doubling_time = doubling_time
</DeepExtract>
else:
if not isinstance(moving_window, int):
sc.printv(f'Moving window should be an integer; recasting {moving_window} the nearest integer... ', 1, verbose)
moving_window = int(moving_window)
if moving_window < 2:
sc.printv(f'Moving window should be greater than 1; recasting {moving_window} to 2', 1, verbose)
moving_window = 2
doubling_time = []
for w in range(int_length - moving_window + 1):
this_start = start_day + w
this_end = this_start + moving_window
<DeepExtract>
if verbose is None:
verbose = sim['verbose']
if series is None or isinstance(series, str):
if not sim.results_ready:
raise Exception('Results not ready, cannot calculate doubling time')
else:
if series is None or series not in sim.result_keys():
sc.printv('Series not supplied or not found in results; defaulting to use cumulative exposures', 1, verbose)
series = 'cum_infections'
series = sim.results[series].values
else:
series = sc.toarray(series)
if interval is not None:
if len(interval) != 2:
sc.printv(f'Interval should be a list/array/tuple of length 2, not {len(interval)}. Resetting to length of series.', 1, verbose)
interval = [0, len(series)]
(this_start, this_end) = (interval[0], interval[1])
if len(series) < this_end:
sc.printv(f'End day {this_end} is after the series ends ({len(series)}). Resetting to length of series.', 1, verbose)
this_end = len(series)
int_length = this_end - this_start
if moving_window is not None:
if not sc.isnumber(moving_window):
sc.printv('Moving window should be an integer; ignoring and calculating single result', 1, verbose)
doubling_time = get_doubling_time(sim, series=series, start_day=this_start, end_day=this_end, moving_window=None, exp_approx=exp_approx)
else:
if not isinstance(moving_window, int):
sc.printv(f'Moving window should be an integer; recasting {moving_window} the nearest integer... ', 1, verbose)
moving_window = int(moving_window)
if moving_window < 2:
sc.printv(f'Moving window should be greater than 1; recasting {moving_window} to 2', 1, verbose)
moving_window = 2
doubling_time = []
for w in range(int_length - moving_window + 1):
this_start = this_start + w
this_end = this_start + moving_window
this_doubling_time = get_doubling_time(sim, series=series, start_day=this_start, end_day=this_end, exp_approx=exp_approx)
doubling_time.append(this_doubling_time)
else:
if not exp_approx:
try:
import statsmodels.api as sm
except ModuleNotFoundError as E:
errormsg = f'Could not import statsmodels ({E}), falling back to exponential approximation'
print(errormsg)
exp_approx = True
if exp_approx:
if series[this_start] > 0:
r = series[this_end] / series[this_start]
if r > 1:
doubling_time = int_length * np.log(2) / np.log(r)
doubling_time = min(doubling_time, max_doubling_time)
else:
raise ValueError("Can't calculate doubling time with exponential approximation when initial value is zero.")
elif np.any(series[this_start:this_end]):
nonzero = np.nonzero(series[this_start:this_end])[0]
if len(nonzero) >= 2:
exog = sm.add_constant(np.arange(len(nonzero)))
endog = np.log2(series[this_start:this_end][nonzero])
model = sm.OLS(endog, exog)
doubling_rate = model.fit().params[1]
if doubling_rate > eps:
doubling_time = 1.0 / doubling_rate
else:
doubling_time = max_doubling_time
else:
raise ValueError(f"Can't calculate doubling time for series {series[this_start:this_end]}. Check whether series is growing.")
else:
raise ValueError(f"Can't calculate doubling time for series {series[this_start:this_end]}. Check whether series is growing.")
this_doubling_time = doubling_time
</DeepExtract>
doubling_time.append(this_doubling_time)
else:
if not exp_approx:
try:
import statsmodels.api as sm
except ModuleNotFoundError as E:
errormsg = f'Could not import statsmodels ({E}), falling back to exponential approximation'
print(errormsg)
exp_approx = True
if exp_approx:
if series[start_day] > 0:
r = series[end_day] / series[start_day]
if r > 1:
doubling_time = int_length * np.log(2) / np.log(r)
doubling_time = min(doubling_time, max_doubling_time)
else:
raise ValueError("Can't calculate doubling time with exponential approximation when initial value is zero.")
elif np.any(series[start_day:end_day]):
nonzero = np.nonzero(series[start_day:end_day])[0]
if len(nonzero) >= 2:
exog = sm.add_constant(np.arange(len(nonzero)))
endog = np.log2(series[start_day:end_day][nonzero])
model = sm.OLS(endog, exog)
doubling_rate = model.fit().params[1]
if doubling_rate > eps:
doubling_time = 1.0 / doubling_rate
else:
doubling_time = max_doubling_time
else:
raise ValueError(f"Can't calculate doubling time for series {series[start_day:end_day]}. Check whether series is growing.")
else:
raise ValueError(f"Can't calculate doubling time for series {series[start_day:end_day]}. Check whether series is growing.")
return doubling_time
|
def get_doubling_time(sim, series=None, interval=None, start_day=None, end_day=None, moving_window=None, exp_approx=False, max_doubling_time=100, eps=0.001, verbose=None):
"""
Alternate method to calculate doubling time (one is already implemented in
the sim object).
**Examples**::
cv.get_doubling_time(sim, interval=[3,30]) # returns the doubling time over the given interval (single float)
cv.get_doubling_time(sim, interval=[3,30], moving_window=3) # returns doubling times calculated over moving windows (array)
"""
if verbose is None:
verbose = sim['verbose']
if series is None or isinstance(series, str):
if not sim.results_ready:
raise Exception('Results not ready, cannot calculate doubling time')
else:
if series is None or series not in sim.result_keys():
sc.printv('Series not supplied or not found in results; defaulting to use cumulative exposures', 1, verbose)
series = 'cum_infections'
series = sim.results[series].values
else:
series = sc.toarray(series)
if interval is not None:
if len(interval) != 2:
sc.printv(f'Interval should be a list/array/tuple of length 2, not {len(interval)}. Resetting to length of series.', 1, verbose)
interval = [0, len(series)]
(start_day, end_day) = (interval[0], interval[1])
if len(series) < end_day:
sc.printv(f'End day {end_day} is after the series ends ({len(series)}). Resetting to length of series.', 1, verbose)
end_day = len(series)
int_length = end_day - start_day
if moving_window is not None:
if not sc.isnumber(moving_window):
sc.printv('Moving window should be an integer; ignoring and calculating single result', 1, verbose)
if verbose is None:
verbose = sim['verbose']
if series is None or isinstance(series, str):
if not sim.results_ready:
raise Exception('Results not ready, cannot calculate doubling time')
else:
if series is None or series not in sim.result_keys():
sc.printv('Series not supplied or not found in results; defaulting to use cumulative exposures', 1, verbose)
series = 'cum_infections'
series = sim.results[series].values
else:
series = sc.toarray(series)
if interval is not None:
if len(interval) != 2:
sc.printv(f'Interval should be a list/array/tuple of length 2, not {len(interval)}. Resetting to length of series.', 1, verbose)
interval = [0, len(series)]
(start_day, end_day) = (interval[0], interval[1])
if len(series) < end_day:
sc.printv(f'End day {end_day} is after the series ends ({len(series)}). Resetting to length of series.', 1, verbose)
end_day = len(series)
int_length = end_day - start_day
if None is not None:
if not sc.isnumber(None):
sc.printv('Moving window should be an integer; ignoring and calculating single result', 1, verbose)
doubling_time = get_doubling_time(sim, series=series, start_day=start_day, end_day=end_day, moving_window=None, exp_approx=exp_approx)
else:
if not isinstance(None, int):
sc.printv(f'Moving window should be an integer; recasting {None} the nearest integer... ', 1, verbose)
None = int(None)
if None < 2:
sc.printv(f'Moving window should be greater than 1; recasting {None} to 2', 1, verbose)
None = 2
doubling_time = []
for w in range(int_length - None + 1):
this_start = start_day + w
this_end = this_start + None
this_doubling_time = get_doubling_time(sim, series=series, start_day=this_start, end_day=this_end, exp_approx=exp_approx)
doubling_time.append(this_doubling_time)
else:
if not exp_approx:
try:
import statsmodels.api as sm
except ModuleNotFoundError as E:
errormsg = f'Could not import statsmodels ({E}), falling back to exponential approximation'
print(errormsg)
exp_approx = True
if exp_approx:
if series[start_day] > 0:
r = series[end_day] / series[start_day]
if r > 1:
doubling_time = int_length * np.log(2) / np.log(r)
doubling_time = min(doubling_time, max_doubling_time)
else:
raise ValueError("Can't calculate doubling time with exponential approximation when initial value is zero.")
elif np.any(series[start_day:end_day]):
nonzero = np.nonzero(series[start_day:end_day])[0]
if len(nonzero) >= 2:
exog = sm.add_constant(np.arange(len(nonzero)))
endog = np.log2(series[start_day:end_day][nonzero])
model = sm.OLS(endog, exog)
doubling_rate = model.fit().params[1]
if doubling_rate > eps:
doubling_time = 1.0 / doubling_rate
else:
doubling_time = max_doubling_time
else:
raise ValueError(f"Can't calculate doubling time for series {series[start_day:end_day]}. Check whether series is growing.")
else:
raise ValueError(f"Can't calculate doubling time for series {series[start_day:end_day]}. Check whether series is growing.")
doubling_time = doubling_time
else:
if not isinstance(moving_window, int):
sc.printv(f'Moving window should be an integer; recasting {moving_window} the nearest integer... ', 1, verbose)
moving_window = int(moving_window)
if moving_window < 2:
sc.printv(f'Moving window should be greater than 1; recasting {moving_window} to 2', 1, verbose)
moving_window = 2
doubling_time = []
for w in range(int_length - moving_window + 1):
this_start = start_day + w
this_end = this_start + moving_window
if verbose is None:
verbose = sim['verbose']
if series is None or isinstance(series, str):
if not sim.results_ready:
raise Exception('Results not ready, cannot calculate doubling time')
else:
if series is None or series not in sim.result_keys():
sc.printv('Series not supplied or not found in results; defaulting to use cumulative exposures', 1, verbose)
series = 'cum_infections'
series = sim.results[series].values
else:
series = sc.toarray(series)
if interval is not None:
if len(interval) != 2:
sc.printv(f'Interval should be a list/array/tuple of length 2, not {len(interval)}. Resetting to length of series.', 1, verbose)
interval = [0, len(series)]
(this_start, this_end) = (interval[0], interval[1])
if len(series) < this_end:
sc.printv(f'End day {this_end} is after the series ends ({len(series)}). Resetting to length of series.', 1, verbose)
this_end = len(series)
int_length = this_end - this_start
if moving_window is not None:
if not sc.isnumber(moving_window):
sc.printv('Moving window should be an integer; ignoring and calculating single result', 1, verbose)
doubling_time = get_doubling_time(sim, series=series, start_day=this_start, end_day=this_end, moving_window=None, exp_approx=exp_approx)
else:
if not isinstance(moving_window, int):
sc.printv(f'Moving window should be an integer; recasting {moving_window} the nearest integer... ', 1, verbose)
moving_window = int(moving_window)
if moving_window < 2:
sc.printv(f'Moving window should be greater than 1; recasting {moving_window} to 2', 1, verbose)
moving_window = 2
doubling_time = []
for w in range(int_length - moving_window + 1):
this_start = this_start + w
this_end = this_start + moving_window
this_doubling_time = get_doubling_time(sim, series=series, start_day=this_start, end_day=this_end, exp_approx=exp_approx)
doubling_time.append(this_doubling_time)
else:
if not exp_approx:
try:
import statsmodels.api as sm
except ModuleNotFoundError as E:
errormsg = f'Could not import statsmodels ({E}), falling back to exponential approximation'
print(errormsg)
exp_approx = True
if exp_approx:
if series[this_start] > 0:
r = series[this_end] / series[this_start]
if r > 1:
doubling_time = int_length * np.log(2) / np.log(r)
doubling_time = min(doubling_time, max_doubling_time)
else:
raise ValueError("Can't calculate doubling time with exponential approximation when initial value is zero.")
elif np.any(series[this_start:this_end]):
nonzero = np.nonzero(series[this_start:this_end])[0]
if len(nonzero) >= 2:
exog = sm.add_constant(np.arange(len(nonzero)))
endog = np.log2(series[this_start:this_end][nonzero])
model = sm.OLS(endog, exog)
doubling_rate = model.fit().params[1]
if doubling_rate > eps:
doubling_time = 1.0 / doubling_rate
else:
doubling_time = max_doubling_time
else:
raise ValueError(f"Can't calculate doubling time for series {series[this_start:this_end]}. Check whether series is growing.")
else:
raise ValueError(f"Can't calculate doubling time for series {series[this_start:this_end]}. Check whether series is growing.")
this_doubling_time = doubling_time
doubling_time.append(this_doubling_time)
else:
if not exp_approx:
try:
import statsmodels.api as sm
except ModuleNotFoundError as E:
errormsg = f'Could not import statsmodels ({E}), falling back to exponential approximation'
print(errormsg)
exp_approx = True
if exp_approx:
if series[start_day] > 0:
r = series[end_day] / series[start_day]
if r > 1:
doubling_time = int_length * np.log(2) / np.log(r)
doubling_time = min(doubling_time, max_doubling_time)
else:
raise ValueError("Can't calculate doubling time with exponential approximation when initial value is zero.")
elif np.any(series[start_day:end_day]):
nonzero = np.nonzero(series[start_day:end_day])[0]
if len(nonzero) >= 2:
exog = sm.add_constant(np.arange(len(nonzero)))
endog = np.log2(series[start_day:end_day][nonzero])
model = sm.OLS(endog, exog)
doubling_rate = model.fit().params[1]
if doubling_rate > eps:
doubling_time = 1.0 / doubling_rate
else:
doubling_time = max_doubling_time
else:
raise ValueError(f"Can't calculate doubling time for series {series[start_day:end_day]}. Check whether series is growing.")
else:
raise ValueError(f"Can't calculate doubling time for series {series[start_day:end_day]}. Check whether series is growing.")
return doubling_time
|
covasim
|
positive
|
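A minimal numeric illustration of the exponential-approximation branch of `get_doubling_time` above, computed directly on a made-up series rather than through a `sim` object:

```python
import numpy as np

series = np.array([10., 20., 40., 80., 160.])  # doubles once per day
start_day, end_day = 0, 4
int_length = end_day - start_day

r = series[end_day] / series[start_day]            # 16.0
doubling_time = int_length * np.log(2) / np.log(r)
print(doubling_time)                               # 1.0 day, as expected
```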
def __init__(self, loop=asyncio.get_event_loop()):
self.loop = loop
self.start_time = time.time()
self.running = True
self.stopped_future = asyncio.Future(loop=self.loop)
self.connections = {}
self.logger = logger
self.memory = collections.defaultdict()
self.data_dir = os.path.abspath('data')
if not os.path.exists(self.data_dir):
logger.debug('Data folder not found, creating.')
os.mkdir(self.data_dir)
self.config = Config(self)
logger.debug('Config system initialised.')
self.plugin_reloading_enabled = self.config.get('reloading', {}).get('plugin_reloading', False)
self.config_reloading_enabled = self.config.get('reloading', {}).get('config_reloading', True)
self.user_agent = self.config.get('user_agent', 'CloudBot/3.0 - CloudBot Refresh <https://github.com/CloudBotIRC/CloudBot/>')
db_path = self.config.get('database', 'sqlite:///cloudbot.db')
self.db_engine = create_engine(db_path)
self.db_factory = sessionmaker(bind=self.db_engine)
self.db_session = scoped_session(self.db_factory)
self.db_metadata = MetaData()
self.db_base = declarative_base(metadata=self.db_metadata, bind=self.db_engine)
if self.config.get('web', {}).get('enabled', False) and web_installed:
self.web = WebInterface(self)
database.metadata = self.db_metadata
database.base = self.db_base
logger.debug('Database system initialised.')
logger.debug('Bot setup completed.')
<DeepExtract>
for config in self.config['connections']:
name = clean_name(config['name'])
nick = config['nick']
server = config['connection']['server']
port = config['connection'].get('port', 6667)
local_bind = (config['connection'].get('bind_addr', False), config['connection'].get('bind_port', 0))
if local_bind[0] is False:
local_bind = False
self.connections[name] = IrcClient(self, name, nick, config=config, channels=config['channels'], server=server, port=port, use_ssl=config['connection'].get('ssl', False), local_bind=local_bind)
logger.debug('[{}] Created connection.'.format(name))
</DeepExtract>
if self.plugin_reloading_enabled:
self.reloader = PluginReloader(self)
self.plugin_manager = PluginManager(self)
|
def __init__(self, loop=asyncio.get_event_loop()):
self.loop = loop
self.start_time = time.time()
self.running = True
self.stopped_future = asyncio.Future(loop=self.loop)
self.connections = {}
self.logger = logger
self.memory = collections.defaultdict()
self.data_dir = os.path.abspath('data')
if not os.path.exists(self.data_dir):
logger.debug('Data folder not found, creating.')
os.mkdir(self.data_dir)
self.config = Config(self)
logger.debug('Config system initialised.')
self.plugin_reloading_enabled = self.config.get('reloading', {}).get('plugin_reloading', False)
self.config_reloading_enabled = self.config.get('reloading', {}).get('config_reloading', True)
self.user_agent = self.config.get('user_agent', 'CloudBot/3.0 - CloudBot Refresh <https://github.com/CloudBotIRC/CloudBot/>')
db_path = self.config.get('database', 'sqlite:///cloudbot.db')
self.db_engine = create_engine(db_path)
self.db_factory = sessionmaker(bind=self.db_engine)
self.db_session = scoped_session(self.db_factory)
self.db_metadata = MetaData()
self.db_base = declarative_base(metadata=self.db_metadata, bind=self.db_engine)
if self.config.get('web', {}).get('enabled', False) and web_installed:
self.web = WebInterface(self)
database.metadata = self.db_metadata
database.base = self.db_base
logger.debug('Database system initialised.')
logger.debug('Bot setup completed.')
for config in self.config['connections']:
name = clean_name(config['name'])
nick = config['nick']
server = config['connection']['server']
port = config['connection'].get('port', 6667)
local_bind = (config['connection'].get('bind_addr', False), config['connection'].get('bind_port', 0))
if local_bind[0] is False:
local_bind = False
self.connections[name] = IrcClient(self, name, nick, config=config, channels=config['channels'], server=server, port=port, use_ssl=config['connection'].get('ssl', False), local_bind=local_bind)
logger.debug('[{}] Created connection.'.format(name))
if self.plugin_reloading_enabled:
self.reloader = PluginReloader(self)
self.plugin_manager = PluginManager(self)
|
CloudBot
|
positive
|
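Judging only from the keys read by the inlined loop above, a single entry of `config['connections']` would look roughly like this; the values are illustrative, not CloudBot defaults:

```python
# Illustrative connection entry; only keys accessed by the loop above are shown.
connection_entry = {
    'name': 'libera',
    'nick': 'ExampleBot',
    'channels': ['#example'],
    'connection': {
        'server': 'irc.example.net',
        'port': 6667,   # optional, defaults to 6667
        'ssl': False,   # optional, defaults to False
        # 'bind_addr' / 'bind_port' are optional; local binding is skipped when absent
    },
}
```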
def resume(self, checkpoint_path):
<DeepExtract>
state_dicts = load(checkpoint_path)
for (net_name, net) in self.model.nets.items():
if net_name in state_dicts:
net.set_state_dict(state_dicts[net_name])
self.logger.info('Loaded pretrained weight for net {}'.format(net_name))
else:
self.logger.warning('Can not find state dict of net {}. Skip load pretrained weight for net {}'.format(net_name, net_name))
</DeepExtract>
if state_dicts.get('epoch', None) is not None:
self.start_epoch = state_dicts['epoch'] + 1
self.global_steps = self.iters_per_epoch * state_dicts['epoch']
self.current_iter = state_dicts['epoch'] + 1
for (net_name, net) in self.model.nets.items():
net.set_state_dict(state_dicts[net_name])
for (opt_name, opt) in self.model.optimizers.items():
opt.set_state_dict(state_dicts[opt_name])
|
def resume(self, checkpoint_path):
state_dicts = load(checkpoint_path)
for (net_name, net) in self.model.nets.items():
if net_name in state_dicts:
net.set_state_dict(state_dicts[net_name])
self.logger.info('Loaded pretrained weight for net {}'.format(net_name))
else:
self.logger.warning('Can not find state dict of net {}. Skip load pretrained weight for net {}'.format(net_name, net_name))
if state_dicts.get('epoch', None) is not None:
self.start_epoch = state_dicts['epoch'] + 1
self.global_steps = self.iters_per_epoch * state_dicts['epoch']
self.current_iter = state_dicts['epoch'] + 1
for (net_name, net) in self.model.nets.items():
net.set_state_dict(state_dicts[net_name])
for (opt_name, opt) in self.model.optimizers.items():
opt.set_state_dict(state_dicts[opt_name])
|
-AI-emmmm
|
positive
|
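For reference, a hedged sketch of the checkpoint dictionary `resume` above expects: per-network and per-optimizer state dicts plus an optional `epoch`; the key names below are purely illustrative:

```python
# Illustrative checkpoint layout consumed by resume(); real key names depend on the model.
state_dicts = {
    'epoch': 9,     # optional; when present, training resumes at epoch 10
    'netG': {},     # state dict for self.model.nets['netG']
    'netD': {},     # state dict for self.model.nets['netD']
    'optimG': {},   # state dict for self.model.optimizers['optimG']
    'optimD': {},   # state dict for self.model.optimizers['optimD']
}
```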
def local_pairwise_distances(x, y, max_distance=9):
"""Computes pairwise squared l2 distances using a local search window.
Optimized implementation using correlation_cost.
Args:
x: Float32 tensor of shape [height, width, feature_dim].
y: Float32 tensor of shape [height, width, feature_dim].
max_distance: Integer, the maximum distance in pixel coordinates
per dimension which is considered to be in the search window.
Returns:
Float32 distances tensor of shape
[height, width, (2 * max_distance + 1) ** 2].
"""
<DeepExtract>
corr_op = Correlation(pad_size=max_distance, kernel_size=1, max_displacement=max_distance, stride1=1, stride2=1, corr_multiply=1)
xs = x.permute(2, 0, 1)
xs = torch.unsqueeze(xs, 0)
ys = y.permute(2, 0, 1)
ys = torch.unsqueeze(ys, 0)
corr = corr_op(xs, ys)
corr = torch.squeeze(corr, 0)
corr = corr.permute(1, 2, 0)
feature_dim = x.size()[-1]
corr *= feature_dim
corr = corr
</DeepExtract>
xs = torch.sum(x * x, 2, keepdim=True)
ys = torch.sum(y * y, 2, keepdim=True)
ones_ys = torch.ones_like(ys)
<DeepExtract>
corr_op = Correlation(pad_size=max_distance, kernel_size=1, max_displacement=max_distance, stride1=1, stride2=1, corr_multiply=1)
xs = ones_ys.permute(2, 0, 1)
xs = torch.unsqueeze(xs, 0)
ys = ys.permute(2, 0, 1)
ys = torch.unsqueeze(ys, 0)
corr = corr_op(xs, ys)
corr = torch.squeeze(corr, 0)
corr = corr.permute(1, 2, 0)
feature_dim = ones_ys.size()[-1]
corr *= feature_dim
ys = corr
</DeepExtract>
d = xs + ys - 2 * corr
tmp = torch.zeros_like(d)
boundary = torch.eq(cross_correlate(ones_ys, ones_ys, max_distance=max_distance), 0)
d = torch.where(boundary, tmp.fill_(float('inf')), d)
return d
|
def local_pairwise_distances(x, y, max_distance=9):
"""Computes pairwise squared l2 distances using a local search window.
Optimized implementation using correlation_cost.
Args:
x: Float32 tensor of shape [height, width, feature_dim].
y: Float32 tensor of shape [height, width, feature_dim].
max_distance: Integer, the maximum distance in pixel coordinates
per dimension which is considered to be in the search window.
Returns:
Float32 distances tensor of shape
[height, width, (2 * max_distance + 1) ** 2].
"""
corr_op = Correlation(pad_size=max_distance, kernel_size=1, max_displacement=max_distance, stride1=1, stride2=1, corr_multiply=1)
xs = x.permute(2, 0, 1)
xs = torch.unsqueeze(xs, 0)
ys = y.permute(2, 0, 1)
ys = torch.unsqueeze(ys, 0)
corr = corr_op(xs, ys)
corr = torch.squeeze(corr, 0)
corr = corr.permute(1, 2, 0)
feature_dim = x.size()[-1]
corr *= feature_dim
corr = corr
xs = torch.sum(x * x, 2, keepdim=True)
ys = torch.sum(y * y, 2, keepdim=True)
ones_ys = torch.ones_like(ys)
corr_op = Correlation(pad_size=max_distance, kernel_size=1, max_displacement=max_distance, stride1=1, stride2=1, corr_multiply=1)
xs = ones_ys.permute(2, 0, 1)
xs = torch.unsqueeze(xs, 0)
ys = ys.permute(2, 0, 1)
ys = torch.unsqueeze(ys, 0)
corr = corr_op(xs, ys)
corr = torch.squeeze(corr, 0)
corr = corr.permute(1, 2, 0)
feature_dim = ones_ys.size()[-1]
corr *= feature_dim
ys = corr
d = xs + ys - 2 * corr
tmp = torch.zeros_like(d)
boundary = torch.eq(cross_correlate(ones_ys, ones_ys, max_distance=max_distance), 0)
d = torch.where(boundary, tmp.fill_(float('inf')), d)
return d
|
CVPR2020_MANet
|
positive
|
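The distance computation above uses the identity ‖x − y‖² = ‖x‖² + ‖y‖² − 2·x·y, with the cross term supplied by the correlation op; a tiny NumPy check of that identity, independent of `Correlation`:

```python
import numpy as np

x = np.random.rand(8)
y = np.random.rand(8)

direct = np.sum((x - y) ** 2)
via_identity = np.sum(x * x) + np.sum(y * y) - 2.0 * np.dot(x, y)
assert np.isclose(direct, via_identity)
```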
def test():
import pickle
def checks(x):
o = x.my_ondemand
c = x.my_cached
l = x.my_lazy
assert x.my_ondemand is o
assert x.my_cached is c
assert x.my_lazy is l
print('x.log:', x.log, sep=' ')
assert min(x.log.values()) == max(x.log.values()) == 1
ll = x.my_lazy_list
assert ll == list(range(10)), ll
assert ll is x.my_lazy_list
print(x.log)
assert all((v == 1 for v in x.log.values()))
foo = Foo('XXX')
<DeepExtract>
o = foo.my_ondemand
c = foo.my_cached
l = foo.my_lazy
assert foo.my_ondemand is o
assert foo.my_cached is c
assert foo.my_lazy is l
print('x.log:', foo.log, sep=' ')
assert min(foo.log.values()) == max(foo.log.values()) == 1
ll = foo.my_lazy_list
assert ll == list(range(10)), ll
assert ll is foo.my_lazy_list
print(foo.log)
assert all((v == 1 for v in foo.log.values()))
</DeepExtract>
foo.log.clear()
print('pickling and unpickling..')
foo2 = pickle.loads(pickle.dumps(foo))
assert not foo2.log
|
def test():
import pickle
def checks(x):
o = x.my_ondemand
c = x.my_cached
l = x.my_lazy
assert x.my_ondemand is o
assert x.my_cached is c
assert x.my_lazy is l
print('x.log:', x.log, sep=' ')
assert min(x.log.values()) == max(x.log.values()) == 1
ll = x.my_lazy_list
assert ll == list(range(10)), ll
assert ll is x.my_lazy_list
print(x.log)
assert all((v == 1 for v in x.log.values()))
foo = Foo('XXX')
o = foo.my_ondemand
c = foo.my_cached
l = foo.my_lazy
assert foo.my_ondemand is o
assert foo.my_cached is c
assert foo.my_lazy is l
print('x.log:', foo.log, sep=' ')
assert min(foo.log.values()) == max(foo.log.values()) == 1
ll = foo.my_lazy_list
assert ll == list(range(10)), ll
assert ll is foo.my_lazy_list
print(foo.log)
assert all((v == 1 for v in foo.log.values()))
foo.log.clear()
print('pickling and unpickling..')
foo2 = pickle.loads(pickle.dumps(foo))
assert not foo2.log
|
arsenal
|
positive
|
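A small sketch of the caching behaviour the test above asserts (the computation runs once and the same object is returned on every access), shown here with the standard-library `functools.cached_property` rather than arsenal's decorators:

```python
from functools import cached_property

class Example:
    def __init__(self):
        self.log = {}

    @cached_property
    def my_cached(self):
        # Count how many times the underlying computation actually runs.
        self.log['my_cached'] = self.log.get('my_cached', 0) + 1
        return object()

ex = Example()
assert ex.my_cached is ex.my_cached
assert ex.log['my_cached'] == 1
```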
def parseDetailedMetricsFromThisLog(logFile, classesFromThisLog, movingAverSubeps):
regExprForEachClassAndMetric = [[], []]
sentencesToLookForEachClassAndMetric = [[], []]
regExprForDscFullSeg = '.*ACCURACY:.*Validation.*The Per-Class average DICE Coefficients over all subjects are:.*DICE2='
sentenceForDscFullSeg = 'DICE2='
for val0orTrain1 in [0, 1]:
for classInt in classesFromThisLog:
regExprForClassAllMetrics = []
sentencesForClassAllMetrics = []
for metric_i in [0, 1, 2, 4]:
<DeepExtract>
validationOrTrainingString = VALIDATION_PATT if val0orTrain1 == 0 else TRAINING_PATT
if 1 == 0:
classPrefixString = OVERALLCLASS_PATT
if metric_i == 0:
sentenceToLookFor = MEANACC_OVERALL_SENTENCE
elif metric_i == 1:
sentenceToLookFor = COST_OVERALL_SENTENCE
else:
classPrefixString = CLASS_PREFIX_PATT + str(classInt)
if metric_i == 0:
sentenceToLookFor = MEANACC_SENTENCE
elif metric_i == 1:
sentenceToLookFor = SENS_SENTENCE
elif metric_i == 2:
sentenceToLookFor = PREC_SENTENCE
elif metric_i == 3:
sentenceToLookFor = SPEC_SENTENCE
elif metric_i == 4:
sentenceToLookFor = DSC_SAMPLES_SENTENCE
regExp1 = '.*' + validationOrTrainingString + '.*' + classPrefixString + ':.*' + sentenceToLookFor
(reg_expr, metric) = (regExp1, sentenceToLookFor)
</DeepExtract>
regExprForClassAllMetrics.append(reg_expr)
sentencesForClassAllMetrics.append(metric)
regExprForEachClassAndMetric[val0orTrain1].append(regExprForClassAllMetrics)
sentencesToLookForEachClassAndMetric[val0orTrain1].append(sentencesForClassAllMetrics)
measurementsForEachClassAndMetric = [[], []]
previousMeasurementForEachClassAndMetric = [[], []]
for val0orTrain1 in [0, 1]:
for class_i in xrange(len(classesFromThisLog)):
measurementsForEachClassAndMetric[val0orTrain1].append([])
previousMeasurementForEachClassAndMetric[val0orTrain1].append([])
for metric_i in xrange(0, 5):
measurementsForEachClassAndMetric[val0orTrain1][class_i].append([])
if metric_i == 4:
measurementsForEachClassAndMetric[val0orTrain1][class_i][-1].append(0)
previousMeasurementForEachClassAndMetric[val0orTrain1][class_i].append(0)
f = open(logFile, 'r')
newLine = f.readline()
while newLine:
<DeepExtract>
for val0orTrain1 in xrange(len(regExprForEachClassAndMetric)):
for class_i in xrange(len(regExprForEachClassAndMetric[val0orTrain1])):
for metric_i in xrange(len(regExprForEachClassAndMetric[val0orTrain1][class_i])):
regExp1 = regExprForEachClassAndMetric[val0orTrain1][class_i][metric_i]
matchObj = re.match(regExp1, newLine, re.M | re.I)
if matchObj:
(matchObj, matchVal0Train1, matchClass_i, matchMetric_i) = (matchObj, val0orTrain1, class_i, metric_i)
(matchObj, matchVal0Train1, matchClass_i, matchMetric_i) = (None, None, None, None)
</DeepExtract>
if matchObj:
sentenceToLookFor = sentencesToLookForEachClassAndMetric[matchVal0Train1][matchClass_i][matchMetric_i]
restOfLineAfterPattern = newLine[newLine.find(sentenceToLookFor) + len(sentenceToLookFor):]
<DeepExtract>
indexWhereListStartsInThisLine = restOfLineAfterPattern.find('[')
indexWhereListEndsInThisLine = restOfLineAfterPattern.find(']')
if indexWhereListEndsInThisLine > -1:
theListInString = restOfLineAfterPattern[indexWhereListStartsInThisLine:indexWhereListEndsInThisLine + 1]
endOfListFound = True
else:
theListInString = restOfLineAfterPattern[indexWhereListStartsInThisLine:]
endOfListFound = False
while endOfListFound == False:
newLine = f.readline()
if newLine:
indexWhereListEndsInThisLine = newLine.find(']')
if indexWhereListEndsInThisLine > -1:
theListInString += newLine[:indexWhereListEndsInThisLine + 1]
endOfListFound = True
else:
theListInString += newLine[:]
theListInString = theListInString.strip()
theListInString = theListInString
</DeepExtract>
<DeepExtract>
numbersOfListInString = theListInString.strip()
numbersOfListInString = numbersOfListInString.lstrip('[')
numbersOfListInString = numbersOfListInString.rstrip(']')
if '' == '':
listOfstringNumbersSplitted = numbersOfListInString.split()
else:
listOfstringNumbersSplitted = numbersOfListInString.split('')
listOfstringNumbersSplitted = listOfstringNumbersSplitted
</DeepExtract>
previousMeasurementForClassAndMetric = previousMeasurementForEachClassAndMetric[matchVal0Train1][matchClass_i][matchMetric_i]
<DeepExtract>
listOfAccNumbers = []
for stringNumber in listOfstringNumbersSplitted:
stringNumberStrippedOfWhiteSpace = stringNumber.strip()
parseFloatNumber = float(stringNumberStrippedOfWhiteSpace) if stringNumberStrippedOfWhiteSpace != NA_PATTERN else previousMeasurementForClassAndMetric
previousMeasurementForClassAndMetric = parseFloatNumber
listOfAccNumbers.append(parseFloatNumber)
listOfMeasurements = listOfAccNumbers
</DeepExtract>
previousMeasurementForEachClassAndMetric[matchVal0Train1][matchClass_i][matchMetric_i] = listOfMeasurements[-1]
measurementsForEachClassAndMetric[matchVal0Train1][matchClass_i][matchMetric_i] += listOfMeasurements
elif re.match(regExprForDscFullSeg, newLine, re.M | re.I):
sentenceToLookFor = sentenceForDscFullSeg
restOfLineAfterPattern = newLine[newLine.find(sentenceToLookFor) + len(sentenceToLookFor):]
<DeepExtract>
indexWhereListStartsInThisLine = restOfLineAfterPattern.find('[')
indexWhereListEndsInThisLine = restOfLineAfterPattern.find(']')
if indexWhereListEndsInThisLine > -1:
theListInString = restOfLineAfterPattern[indexWhereListStartsInThisLine:indexWhereListEndsInThisLine + 1]
endOfListFound = True
else:
theListInString = restOfLineAfterPattern[indexWhereListStartsInThisLine:]
endOfListFound = False
while endOfListFound == False:
newLine = f.readline()
if newLine:
indexWhereListEndsInThisLine = newLine.find(']')
if indexWhereListEndsInThisLine > -1:
theListInString += newLine[:indexWhereListEndsInThisLine + 1]
endOfListFound = True
else:
theListInString += newLine[:]
theListInString = theListInString.strip()
theListInString = theListInString
</DeepExtract>
<DeepExtract>
numbersOfListInString = theListInString.strip()
numbersOfListInString = numbersOfListInString.lstrip('[')
numbersOfListInString = numbersOfListInString.rstrip(']')
if '' == '':
listOfstringNumbersSplitted = numbersOfListInString.split()
else:
listOfstringNumbersSplitted = numbersOfListInString.split('')
listOfstringNumbersSplitted = listOfstringNumbersSplitted
</DeepExtract>
for class_i in xrange(len(classesFromThisLog)):
previousMeasurement = previousMeasurementForEachClassAndMetric[0][class_i][4]
dscForTheWantedClassInString = listOfstringNumbersSplitted[classesFromThisLog[class_i]]
<DeepExtract>
listOfAccNumbers = []
for stringNumber in [dscForTheWantedClassInString]:
stringNumberStrippedOfWhiteSpace = stringNumber.strip()
parseFloatNumber = float(stringNumberStrippedOfWhiteSpace) if stringNumberStrippedOfWhiteSpace != NA_PATTERN else previousMeasurement
previousMeasurement = parseFloatNumber
listOfAccNumbers.append(parseFloatNumber)
listOfMeasurements = listOfAccNumbers
</DeepExtract>
previousMeasurementForEachClassAndMetric[0][class_i][4] = listOfMeasurements[-1]
measurementsForEachClassAndMetric[0][class_i][4] += listOfMeasurements
newLine = f.readline()
f.close()
return (measurementsForEachClassAndMetric[0], measurementsForEachClassAndMetric[1])
|
def parseDetailedMetricsFromThisLog(logFile, classesFromThisLog, movingAverSubeps):
regExprForEachClassAndMetric = [[], []]
sentencesToLookForEachClassAndMetric = [[], []]
regExprForDscFullSeg = '.*ACCURACY:.*Validation.*The Per-Class average DICE Coefficients over all subjects are:.*DICE2='
sentenceForDscFullSeg = 'DICE2='
for val0orTrain1 in [0, 1]:
for classInt in classesFromThisLog:
regExprForClassAllMetrics = []
sentencesForClassAllMetrics = []
for metric_i in [0, 1, 2, 4]:
validationOrTrainingString = VALIDATION_PATT if val0orTrain1 == 0 else TRAINING_PATT
if 1 == 0:
classPrefixString = OVERALLCLASS_PATT
if metric_i == 0:
sentenceToLookFor = MEANACC_OVERALL_SENTENCE
elif metric_i == 1:
sentenceToLookFor = COST_OVERALL_SENTENCE
else:
classPrefixString = CLASS_PREFIX_PATT + str(classInt)
if metric_i == 0:
sentenceToLookFor = MEANACC_SENTENCE
elif metric_i == 1:
sentenceToLookFor = SENS_SENTENCE
elif metric_i == 2:
sentenceToLookFor = PREC_SENTENCE
elif metric_i == 3:
sentenceToLookFor = SPEC_SENTENCE
elif metric_i == 4:
sentenceToLookFor = DSC_SAMPLES_SENTENCE
regExp1 = '.*' + validationOrTrainingString + '.*' + classPrefixString + ':.*' + sentenceToLookFor
(reg_expr, metric) = (regExp1, sentenceToLookFor)
regExprForClassAllMetrics.append(reg_expr)
sentencesForClassAllMetrics.append(metric)
regExprForEachClassAndMetric[val0orTrain1].append(regExprForClassAllMetrics)
sentencesToLookForEachClassAndMetric[val0orTrain1].append(sentencesForClassAllMetrics)
measurementsForEachClassAndMetric = [[], []]
previousMeasurementForEachClassAndMetric = [[], []]
for val0orTrain1 in [0, 1]:
for class_i in xrange(len(classesFromThisLog)):
measurementsForEachClassAndMetric[val0orTrain1].append([])
previousMeasurementForEachClassAndMetric[val0orTrain1].append([])
for metric_i in xrange(0, 5):
measurementsForEachClassAndMetric[val0orTrain1][class_i].append([])
if metric_i == 4:
measurementsForEachClassAndMetric[val0orTrain1][class_i][-1].append(0)
previousMeasurementForEachClassAndMetric[val0orTrain1][class_i].append(0)
f = open(logFile, 'r')
newLine = f.readline()
while newLine:
for val0orTrain1 in xrange(len(regExprForEachClassAndMetric)):
for class_i in xrange(len(regExprForEachClassAndMetric[val0orTrain1])):
for metric_i in xrange(len(regExprForEachClassAndMetric[val0orTrain1][class_i])):
regExp1 = regExprForEachClassAndMetric[val0orTrain1][class_i][metric_i]
matchObj = re.match(regExp1, newLine, re.M | re.I)
if matchObj:
(matchObj, matchVal0Train1, matchClass_i, matchMetric_i) = (matchObj, val0orTrain1, class_i, metric_i)
(matchObj, matchVal0Train1, matchClass_i, matchMetric_i) = (None, None, None, None)
if matchObj:
sentenceToLookFor = sentencesToLookForEachClassAndMetric[matchVal0Train1][matchClass_i][matchMetric_i]
restOfLineAfterPattern = newLine[newLine.find(sentenceToLookFor) + len(sentenceToLookFor):]
indexWhereListStartsInThisLine = restOfLineAfterPattern.find('[')
indexWhereListEndsInThisLine = restOfLineAfterPattern.find(']')
if indexWhereListEndsInThisLine > -1:
theListInString = restOfLineAfterPattern[indexWhereListStartsInThisLine:indexWhereListEndsInThisLine + 1]
endOfListFound = True
else:
theListInString = restOfLineAfterPattern[indexWhereListStartsInThisLine:]
endOfListFound = False
while endOfListFound == False:
newLine = f.readline()
if newLine:
indexWhereListEndsInThisLine = newLine.find(']')
if indexWhereListEndsInThisLine > -1:
theListInString += newLine[:indexWhereListEndsInThisLine + 1]
endOfListFound = True
else:
theListInString += newLine[:]
theListInString = theListInString.strip()
theListInString = theListInString
numbersOfListInString = theListInString.strip()
numbersOfListInString = numbersOfListInString.lstrip('[')
numbersOfListInString = numbersOfListInString.rstrip(']')
if '' == '':
listOfstringNumbersSplitted = numbersOfListInString.split()
else:
listOfstringNumbersSplitted = numbersOfListInString.split('')
listOfstringNumbersSplitted = listOfstringNumbersSplitted
previousMeasurementForClassAndMetric = previousMeasurementForEachClassAndMetric[matchVal0Train1][matchClass_i][matchMetric_i]
listOfAccNumbers = []
for stringNumber in listOfstringNumbersSplitted:
stringNumberStrippedOfWhiteSpace = stringNumber.strip()
parseFloatNumber = float(stringNumberStrippedOfWhiteSpace) if stringNumberStrippedOfWhiteSpace != NA_PATTERN else previousMeasurementForClassAndMetric
previousMeasurementForClassAndMetric = parseFloatNumber
listOfAccNumbers.append(parseFloatNumber)
listOfMeasurements = listOfAccNumbers
previousMeasurementForEachClassAndMetric[matchVal0Train1][matchClass_i][matchMetric_i] = listOfMeasurements[-1]
measurementsForEachClassAndMetric[matchVal0Train1][matchClass_i][matchMetric_i] += listOfMeasurements
elif re.match(regExprForDscFullSeg, newLine, re.M | re.I):
sentenceToLookFor = sentenceForDscFullSeg
restOfLineAfterPattern = newLine[newLine.find(sentenceToLookFor) + len(sentenceToLookFor):]
indexWhereListStartsInThisLine = restOfLineAfterPattern.find('[')
indexWhereListEndsInThisLine = restOfLineAfterPattern.find(']')
if indexWhereListEndsInThisLine > -1:
theListInString = restOfLineAfterPattern[indexWhereListStartsInThisLine:indexWhereListEndsInThisLine + 1]
endOfListFound = True
else:
theListInString = restOfLineAfterPattern[indexWhereListStartsInThisLine:]
endOfListFound = False
while endOfListFound == False:
newLine = f.readline()
if newLine:
indexWhereListEndsInThisLine = newLine.find(']')
if indexWhereListEndsInThisLine > -1:
theListInString += newLine[:indexWhereListEndsInThisLine + 1]
endOfListFound = True
else:
theListInString += newLine[:]
theListInString = theListInString.strip()
theListInString = theListInString
numbersOfListInString = theListInString.strip()
numbersOfListInString = numbersOfListInString.lstrip('[')
numbersOfListInString = numbersOfListInString.rstrip(']')
if '' == '':
listOfstringNumbersSplitted = numbersOfListInString.split()
else:
listOfstringNumbersSplitted = numbersOfListInString.split('')
listOfstringNumbersSplitted = listOfstringNumbersSplitted
for class_i in xrange(len(classesFromThisLog)):
previousMeasurement = previousMeasurementForEachClassAndMetric[0][class_i][4]
dscForTheWantedClassInString = listOfstringNumbersSplitted[classesFromThisLog[class_i]]
listOfAccNumbers = []
for stringNumber in [dscForTheWantedClassInString]:
stringNumberStrippedOfWhiteSpace = stringNumber.strip()
parseFloatNumber = float(stringNumberStrippedOfWhiteSpace) if stringNumberStrippedOfWhiteSpace != NA_PATTERN else previousMeasurement
previousMeasurement = parseFloatNumber
listOfAccNumbers.append(parseFloatNumber)
listOfMeasurements = listOfAccNumbers
previousMeasurementForEachClassAndMetric[0][class_i][4] = listOfMeasurements[-1]
measurementsForEachClassAndMetric[0][class_i][4] += listOfMeasurements
newLine = f.readline()
f.close()
return (measurementsForEachClassAndMetric[0], measurementsForEachClassAndMetric[1])
|
deepmedic
|
positive
|
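When a bracketed list in the log contains the not-available marker, the parser above substitutes the previous measurement for that class and metric. A minimal stand-alone sketch of that carry-forward step (NA_PATTERN's value is not shown in the snippet, so the 'N/A' literal here is an assumption):

NA_PATTERN = 'N/A'  # assumed marker string

def parse_bracketed_numbers(text, previous=0.0):
    numbers = []
    for token in text.strip().lstrip('[').rstrip(']').split():
        previous = previous if token == NA_PATTERN else float(token)
        numbers.append(previous)
    return numbers

print(parse_bracketed_numbers('[0.1 N/A 0.3]'))  # [0.1, 0.1, 0.3]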
def search_users(self, q):
rw = self._user_cache.get(q)
if rw is None:
<DeepExtract>
'/search/users' = self.url_root + '/search/users'
try:
r = get_url('/search/users', params=params)
r.raise_for_status()
rw = r.json()
except Exception as e:
raise APIError('API error')
</DeepExtract>
self._user_cache.set(q, rw)
return rw
|
def search_users(self, q):
rw = self._user_cache.get(q)
if rw is None:
'/search/users' = self.url_root + '/search/users'
try:
r = get_url('/search/users', params=params)
r.raise_for_status()
rw = r.json()
except Exception as e:
raise APIError('API error')
self._user_cache.set(q, rw)
return rw
|
botnet
|
positive
|
def __call__(self, results):
""" Main process.
Args:
results(dict): Data flow used in DavarCustomDataset.
Returns:
dict: output data flow.
"""
<DeepExtract>
for per_ann in ['ann_info', 'ann_info_2']:
if per_ann in results.keys():
ann = results[per_ann]
bboxes_length = len(ann['bboxes']) if 'bboxes' in ann else 1
if self.with_care:
cares = np.array(ann.get('cares', np.ones(bboxes_length)))
else:
cares = np.ones(bboxes_length)
ann['cares'] = cares
results = results
</DeepExtract>
if self.custom_classes is not None:
<DeepExtract>
if ann_idx == 1:
ann = results['ann_info']
custom_classes_list = self.custom_classes
else:
ann = results['ann_info_2']
custom_classes_list = self.custom_classes_2
cares = ann['cares']
tmp_labels = ann.get('labels', None)
for (idx, per_label) in enumerate(tmp_labels):
if per_label[0] in custom_classes_list:
continue
else:
cares[idx] = 0
if ann_idx == 1:
results['ann_info']['cares'] = cares
else:
results['ann_info_2']['cares'] = cares
results = results
</DeepExtract>
if self.with_text:
<DeepExtract>
ann = results['ann_info']
tmp_gt_texts = []
tmp_texts = ann.get('texts', [])
cares = ann['cares']
for (i, text) in enumerate(tmp_texts):
if self.filtered:
text = [c for c in text if c in self.character]
text = ''.join(text)
if self.sensitive == 'upper':
text = text.upper()
elif self.sensitive == 'lower':
text = text.lower()
if len(text) > self.text_max_length:
text = text[:self.text_max_length]
if cares[i] == 1:
tmp_gt_texts.append(text)
ann['cares'] = cares
results['gt_texts'] = tmp_gt_texts
if len(results['gt_texts']) == 1:
results['gt_text'] = tmp_gt_texts[0]
results = results
</DeepExtract>
if self.with_bbox:
<DeepExtract>
if ann_idx == 1:
ann = results['ann_info']
else:
ann = results['ann_info_2']
cares = ann['cares']
tmp_gt_bboxes = ann['bboxes']
gt_bboxes = []
gt_bboxes_ignore = []
for (i, box) in enumerate(tmp_gt_bboxes):
box_i = np.array(box, dtype=np.double)
x_coords = box_i[0::2]
y_coords = box_i[1::2]
aligned_box = [np.min(x_coords), np.min(y_coords), np.max(x_coords), np.max(y_coords)]
if cares[i] == 1:
gt_bboxes.append(aligned_box)
else:
gt_bboxes_ignore.append(aligned_box)
if len(gt_bboxes) == 0:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
else:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
if len(gt_bboxes_ignore) == 0:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
else:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
if ann_idx == 1:
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
results['bbox_fields'].append('gt_bboxes')
results['bbox_fields'].append('gt_bboxes_ignore')
else:
results['gt_bboxes_2'] = gt_bboxes
results['gt_bboxes_ignore_2'] = gt_bboxes_ignore
results['bbox_fields'].append('gt_bboxes_2')
results['bbox_fields'].append('gt_bboxes_ignore_2')
results = results
</DeepExtract>
if self.with_label:
<DeepExtract>
if ann_idx == 1:
ann = results['ann_info']
else:
ann = results['ann_info_2']
tmp_labels = ann.get('labels', None)
if tmp_labels is None:
if 'bboxes' in ann:
tmp_labels = [[1]] * len(ann['bboxes'])
else:
tmp_labels = [[1]]
gt_labels = []
gt_labels_ignore = []
cares = ann['cares']
for (i, label) in enumerate(tmp_labels):
if cares[i] == 1:
gt_labels.append(label[0])
else:
gt_labels_ignore.append(label[0])
if len(gt_labels) > 0 and isinstance(gt_labels[0], str):
assert results['classes_config'] is not None
assert 'classes' in results['classes_config'] or 'classes_0' in results['classes_config']
for (i, _) in enumerate(gt_labels):
if 'classes_0' in results['classes_config']:
classes_config = results['classes_config']['classes_0']
else:
classes_config = results['classes_config']['classes']
if self.label_start_index == -1:
assert 'NotLabeled' in classes_config or 'NoLabel' in classes_config
if 'NotLabeled' in classes_config:
notlabeled_index = classes_config.index('NotLabeled')
else:
notlabeled_index = classes_config.index('NoLabel')
if notlabeled_index > 0:
(classes_config[notlabeled_index], classes_config[0]) = (classes_config[0], classes_config[notlabeled_index])
gt_labels[i] = classes_config.index(gt_labels[i]) + self.label_start_index
if len(gt_labels_ignore) > 0 and isinstance(gt_labels_ignore[0], str):
assert results['classes_config'] is not None
assert 'classes' in results['classes_config'] or 'classes_0' in results['classes_config']
for (i, _) in enumerate(gt_labels_ignore):
if 'classes_0' in results['classes_config']:
classes_config = results['classes_config']['classes_0']
else:
classes_config = results['classes_config']['classes']
if self.label_start_index == -1:
assert 'NotLabeled' in classes_config or 'NoLabel' in classes_config
if 'NotLabeled' in classes_config:
notlabeled_index = classes_config.index('NotLabeled')
else:
notlabeled_index = classes_config.index('NoLabel')
if notlabeled_index > 0:
(classes_config[notlabeled_index], classes_config[0]) = (classes_config[0], classes_config[notlabeled_index])
gt_labels_ignore[i] = classes_config.index(gt_labels_ignore[i]) + self.label_start_index
if ann_idx == 1:
results['gt_labels'] = gt_labels
results['gt_labels_ignore'] = gt_labels_ignore
else:
results['gt_labels_2'] = gt_labels
results['gt_labels_ignore_2'] = gt_labels_ignore
results = results
</DeepExtract>
if self.with_cbbox:
<DeepExtract>
ann = results['ann_info']
cares = ann['cares']
assert 'cbboxes' in ann
tmp_cbboxes = ann.get('cbboxes', [])
tmp_gt_cbboxes = []
tmp_gt_cbboxes_ignore = []
for (i, cbboxes) in enumerate(tmp_cbboxes):
if cares[i] == 1:
tmp_gt_cbboxes.append(cbboxes)
else:
tmp_gt_cbboxes_ignore.append(cbboxes)
results['gt_cbboxes'] = tmp_gt_cbboxes
results['gt_cbboxes_ignore'] = tmp_gt_cbboxes_ignore
results['cbbox_fields'].append('gt_cbboxes')
results['cbbox_fields'].append('gt_cbboxes_ignore')
results = results
</DeepExtract>
if self.with_cattribute:
<DeepExtract>
ann = results['ann_info']
cattributes = ann.get('cattributes', [])
results['gt_cattributes'] = cattributes
results = results
</DeepExtract>
if self.with_ctexts:
<DeepExtract>
ann = results['ann_info']
tmp_texts = ann.get('ctexts', [])
results['gt_ctexts'] = tmp_texts
results = results
</DeepExtract>
if self.custom_classes_2 is not None:
<DeepExtract>
if 2 == 1:
ann = results['ann_info']
custom_classes_list = self.custom_classes
else:
ann = results['ann_info_2']
custom_classes_list = self.custom_classes_2
cares = ann['cares']
tmp_labels = ann.get('labels', None)
for (idx, per_label) in enumerate(tmp_labels):
if per_label[0] in custom_classes_list:
continue
else:
cares[idx] = 0
if 2 == 1:
results['ann_info']['cares'] = cares
else:
results['ann_info_2']['cares'] = cares
results = results
</DeepExtract>
if self.with_bbox_2:
<DeepExtract>
if 2 == 1:
ann = results['ann_info']
else:
ann = results['ann_info_2']
cares = ann['cares']
tmp_gt_bboxes = ann['bboxes']
gt_bboxes = []
gt_bboxes_ignore = []
for (i, box) in enumerate(tmp_gt_bboxes):
box_i = np.array(box, dtype=np.double)
x_coords = box_i[0::2]
y_coords = box_i[1::2]
aligned_box = [np.min(x_coords), np.min(y_coords), np.max(x_coords), np.max(y_coords)]
if cares[i] == 1:
gt_bboxes.append(aligned_box)
else:
gt_bboxes_ignore.append(aligned_box)
if len(gt_bboxes) == 0:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
else:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
if len(gt_bboxes_ignore) == 0:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
else:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
if 2 == 1:
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
results['bbox_fields'].append('gt_bboxes')
results['bbox_fields'].append('gt_bboxes_ignore')
else:
results['gt_bboxes_2'] = gt_bboxes
results['gt_bboxes_ignore_2'] = gt_bboxes_ignore
results['bbox_fields'].append('gt_bboxes_2')
results['bbox_fields'].append('gt_bboxes_ignore_2')
results = results
</DeepExtract>
if self.with_poly_mask_2:
<DeepExtract>
(height, width) = (results['img_info']['height'], results['img_info']['width'])
if 2 == 1:
gt_masks = results['ann_info']['segboxes']
else:
cares = results['ann_info_2']['cares']
ori_masks = results['ann_info_2']['segboxes']
gt_masks = []
for idx in range(len(ori_masks)):
if cares[idx] == 1:
gt_masks.append(ori_masks[idx])
if self.poly2mask:
gt_masks = BitmapMasks([self._poly2mask(mask, height, width) for mask in gt_masks], height, width)
else:
gt_masks = PolygonMasks([self.process_polygons(polygons) for polygons in gt_masks], height, width)
if 2 == 1:
results['gt_masks'] = gt_masks
results['mask_fields'].append('gt_masks')
else:
results['gt_masks_2'] = gt_masks
results['mask_fields'].append('gt_masks_2')
results = results
</DeepExtract>
if self.with_label_2:
<DeepExtract>
if 2 == 1:
ann = results['ann_info']
else:
ann = results['ann_info_2']
tmp_labels = ann.get('labels', None)
if tmp_labels is None:
if 'bboxes' in ann:
tmp_labels = [[1]] * len(ann['bboxes'])
else:
tmp_labels = [[1]]
gt_labels = []
gt_labels_ignore = []
cares = ann['cares']
for (i, label) in enumerate(tmp_labels):
if cares[i] == 1:
gt_labels.append(label[0])
else:
gt_labels_ignore.append(label[0])
if len(gt_labels) > 0 and isinstance(gt_labels[0], str):
assert results['classes_config'] is not None
assert 'classes' in results['classes_config'] or 'classes_0' in results['classes_config']
for (i, _) in enumerate(gt_labels):
if 'classes_0' in results['classes_config']:
classes_config = results['classes_config']['classes_0']
else:
classes_config = results['classes_config']['classes']
if self.label_start_index == -1:
assert 'NotLabeled' in classes_config or 'NoLabel' in classes_config
if 'NotLabeled' in classes_config:
notlabeled_index = classes_config.index('NotLabeled')
else:
notlabeled_index = classes_config.index('NoLabel')
if notlabeled_index > 0:
(classes_config[notlabeled_index], classes_config[0]) = (classes_config[0], classes_config[notlabeled_index])
gt_labels[i] = classes_config.index(gt_labels[i]) + self.label_start_index
if len(gt_labels_ignore) > 0 and isinstance(gt_labels_ignore[0], str):
assert results['classes_config'] is not None
assert 'classes' in results['classes_config'] or 'classes_0' in results['classes_config']
for (i, _) in enumerate(gt_labels_ignore):
if 'classes_0' in results['classes_config']:
classes_config = results['classes_config']['classes_0']
else:
classes_config = results['classes_config']['classes']
if self.label_start_index == -1:
assert 'NotLabeled' in classes_config or 'NoLabel' in classes_config
if 'NotLabeled' in classes_config:
notlabeled_index = classes_config.index('NotLabeled')
else:
notlabeled_index = classes_config.index('NoLabel')
if notlabeled_index > 0:
(classes_config[notlabeled_index], classes_config[0]) = (classes_config[0], classes_config[notlabeled_index])
gt_labels_ignore[i] = classes_config.index(gt_labels_ignore[i]) + self.label_start_index
if 2 == 1:
results['gt_labels'] = gt_labels
results['gt_labels_ignore'] = gt_labels_ignore
else:
results['gt_labels_2'] = gt_labels
results['gt_labels_ignore_2'] = gt_labels_ignore
results = results
</DeepExtract>
return results
|
def __call__(self, results):
""" Main process.
Args:
results(dict): Data flow used in DavarCustomDataset.
Returns:
dict: output data flow.
"""
for per_ann in ['ann_info', 'ann_info_2']:
if per_ann in results.keys():
ann = results[per_ann]
bboxes_length = len(ann['bboxes']) if 'bboxes' in ann else 1
if self.with_care:
cares = np.array(ann.get('cares', np.ones(bboxes_length)))
else:
cares = np.ones(bboxes_length)
ann['cares'] = cares
results = results
if self.custom_classes is not None:
if ann_idx == 1:
ann = results['ann_info']
custom_classes_list = self.custom_classes
else:
ann = results['ann_info_2']
custom_classes_list = self.custom_classes_2
cares = ann['cares']
tmp_labels = ann.get('labels', None)
for (idx, per_label) in enumerate(tmp_labels):
if per_label[0] in custom_classes_list:
continue
else:
cares[idx] = 0
if ann_idx == 1:
results['ann_info']['cares'] = cares
else:
results['ann_info_2']['cares'] = cares
results = results
if self.with_text:
ann = results['ann_info']
tmp_gt_texts = []
tmp_texts = ann.get('texts', [])
cares = ann['cares']
for (i, text) in enumerate(tmp_texts):
if self.filtered:
text = [c for c in text if c in self.character]
text = ''.join(text)
if self.sensitive == 'upper':
text = text.upper()
elif self.sensitive == 'lower':
text = text.lower()
if len(text) > self.text_max_length:
text = text[:self.text_max_length]
if cares[i] == 1:
tmp_gt_texts.append(text)
ann['cares'] = cares
results['gt_texts'] = tmp_gt_texts
if len(results['gt_texts']) == 1:
results['gt_text'] = tmp_gt_texts[0]
results = results
if self.with_bbox:
if ann_idx == 1:
ann = results['ann_info']
else:
ann = results['ann_info_2']
cares = ann['cares']
tmp_gt_bboxes = ann['bboxes']
gt_bboxes = []
gt_bboxes_ignore = []
for (i, box) in enumerate(tmp_gt_bboxes):
box_i = np.array(box, dtype=np.double)
x_coords = box_i[0::2]
y_coords = box_i[1::2]
aligned_box = [np.min(x_coords), np.min(y_coords), np.max(x_coords), np.max(y_coords)]
if cares[i] == 1:
gt_bboxes.append(aligned_box)
else:
gt_bboxes_ignore.append(aligned_box)
if len(gt_bboxes) == 0:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
else:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
if len(gt_bboxes_ignore) == 0:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
else:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
if ann_idx == 1:
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
results['bbox_fields'].append('gt_bboxes')
results['bbox_fields'].append('gt_bboxes_ignore')
else:
results['gt_bboxes_2'] = gt_bboxes
results['gt_bboxes_ignore_2'] = gt_bboxes_ignore
results['bbox_fields'].append('gt_bboxes_2')
results['bbox_fields'].append('gt_bboxes_ignore_2')
results = results
if self.with_label:
if ann_idx == 1:
ann = results['ann_info']
else:
ann = results['ann_info_2']
tmp_labels = ann.get('labels', None)
if tmp_labels is None:
if 'bboxes' in ann:
tmp_labels = [[1]] * len(ann['bboxes'])
else:
tmp_labels = [[1]]
gt_labels = []
gt_labels_ignore = []
cares = ann['cares']
for (i, label) in enumerate(tmp_labels):
if cares[i] == 1:
gt_labels.append(label[0])
else:
gt_labels_ignore.append(label[0])
if len(gt_labels) > 0 and isinstance(gt_labels[0], str):
assert results['classes_config'] is not None
assert 'classes' in results['classes_config'] or 'classes_0' in results['classes_config']
for (i, _) in enumerate(gt_labels):
if 'classes_0' in results['classes_config']:
classes_config = results['classes_config']['classes_0']
else:
classes_config = results['classes_config']['classes']
if self.label_start_index == -1:
assert 'NotLabeled' in classes_config or 'NoLabel' in classes_config
if 'NotLabeled' in classes_config:
notlabeled_index = classes_config.index('NotLabeled')
else:
notlabeled_index = classes_config.index('NoLabel')
if notlabeled_index > 0:
(classes_config[notlabeled_index], classes_config[0]) = (classes_config[0], classes_config[notlabeled_index])
gt_labels[i] = classes_config.index(gt_labels[i]) + self.label_start_index
if len(gt_labels_ignore) > 0 and isinstance(gt_labels_ignore[0], str):
assert results['classes_config'] is not None
assert 'classes' in results['classes_config'] or 'classes_0' in results['classes_config']
for (i, _) in enumerate(gt_labels_ignore):
if 'classes_0' in results['classes_config']:
classes_config = results['classes_config']['classes_0']
else:
classes_config = results['classes_config']['classes']
if self.label_start_index == -1:
assert 'NotLabeled' in classes_config or 'NoLabel' in classes_config
if 'NotLabeled' in classes_config:
notlabeled_index = classes_config.index('NotLabeled')
else:
notlabeled_index = classes_config.index('NoLabel')
if notlabeled_index > 0:
(classes_config[notlabeled_index], classes_config[0]) = (classes_config[0], classes_config[notlabeled_index])
gt_labels_ignore[i] = classes_config.index(gt_labels_ignore[i]) + self.label_start_index
if ann_idx == 1:
results['gt_labels'] = gt_labels
results['gt_labels_ignore'] = gt_labels_ignore
else:
results['gt_labels_2'] = gt_labels
results['gt_labels_ignore_2'] = gt_labels_ignore
results = results
if self.with_cbbox:
ann = results['ann_info']
cares = ann['cares']
assert 'cbboxes' in ann
tmp_cbboxes = ann.get('cbboxes', [])
tmp_gt_cbboxes = []
tmp_gt_cbboxes_ignore = []
for (i, cbboxes) in enumerate(tmp_cbboxes):
if cares[i] == 1:
tmp_gt_cbboxes.append(cbboxes)
else:
tmp_gt_cbboxes_ignore.append(cbboxes)
results['gt_cbboxes'] = tmp_gt_cbboxes
results['gt_cbboxes_ignore'] = tmp_gt_cbboxes_ignore
results['cbbox_fields'].append('gt_cbboxes')
results['cbbox_fields'].append('gt_cbboxes_ignore')
results = results
if self.with_cattribute:
ann = results['ann_info']
cattributes = ann.get('cattributes', [])
results['gt_cattributes'] = cattributes
results = results
if self.with_ctexts:
ann = results['ann_info']
tmp_texts = ann.get('ctexts', [])
results['gt_ctexts'] = tmp_texts
results = results
if self.custom_classes_2 is not None:
if 2 == 1:
ann = results['ann_info']
custom_classes_list = self.custom_classes
else:
ann = results['ann_info_2']
custom_classes_list = self.custom_classes_2
cares = ann['cares']
tmp_labels = ann.get('labels', None)
for (idx, per_label) in enumerate(tmp_labels):
if per_label[0] in custom_classes_list:
continue
else:
cares[idx] = 0
if 2 == 1:
results['ann_info']['cares'] = cares
else:
results['ann_info_2']['cares'] = cares
results = results
if self.with_bbox_2:
if 2 == 1:
ann = results['ann_info']
else:
ann = results['ann_info_2']
cares = ann['cares']
tmp_gt_bboxes = ann['bboxes']
gt_bboxes = []
gt_bboxes_ignore = []
for (i, box) in enumerate(tmp_gt_bboxes):
box_i = np.array(box, dtype=np.double)
x_coords = box_i[0::2]
y_coords = box_i[1::2]
aligned_box = [np.min(x_coords), np.min(y_coords), np.max(x_coords), np.max(y_coords)]
if cares[i] == 1:
gt_bboxes.append(aligned_box)
else:
gt_bboxes_ignore.append(aligned_box)
if len(gt_bboxes) == 0:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
else:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
if len(gt_bboxes_ignore) == 0:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
else:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
if 2 == 1:
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
results['bbox_fields'].append('gt_bboxes')
results['bbox_fields'].append('gt_bboxes_ignore')
else:
results['gt_bboxes_2'] = gt_bboxes
results['gt_bboxes_ignore_2'] = gt_bboxes_ignore
results['bbox_fields'].append('gt_bboxes_2')
results['bbox_fields'].append('gt_bboxes_ignore_2')
results = results
if self.with_poly_mask_2:
(height, width) = (results['img_info']['height'], results['img_info']['width'])
if 2 == 1:
gt_masks = results['ann_info']['segboxes']
else:
cares = results['ann_info_2']['cares']
ori_masks = results['ann_info_2']['segboxes']
gt_masks = []
for idx in range(len(ori_masks)):
if cares[idx] == 1:
gt_masks.append(ori_masks[idx])
if self.poly2mask:
gt_masks = BitmapMasks([self._poly2mask(mask, height, width) for mask in gt_masks], height, width)
else:
gt_masks = PolygonMasks([self.process_polygons(polygons) for polygons in gt_masks], height, width)
if 2 == 1:
results['gt_masks'] = gt_masks
results['mask_fields'].append('gt_masks')
else:
results['gt_masks_2'] = gt_masks
results['mask_fields'].append('gt_masks_2')
results = results
if self.with_label_2:
if 2 == 1:
ann = results['ann_info']
else:
ann = results['ann_info_2']
tmp_labels = ann.get('labels', None)
if tmp_labels is None:
if 'bboxes' in ann:
tmp_labels = [[1]] * len(ann['bboxes'])
else:
tmp_labels = [[1]]
gt_labels = []
gt_labels_ignore = []
cares = ann['cares']
for (i, label) in enumerate(tmp_labels):
if cares[i] == 1:
gt_labels.append(label[0])
else:
gt_labels_ignore.append(label[0])
if len(gt_labels) > 0 and isinstance(gt_labels[0], str):
assert results['classes_config'] is not None
assert 'classes' in results['classes_config'] or 'classes_0' in results['classes_config']
for (i, _) in enumerate(gt_labels):
if 'classes_0' in results['classes_config']:
classes_config = results['classes_config']['classes_0']
else:
classes_config = results['classes_config']['classes']
if self.label_start_index == -1:
assert 'NotLabeled' in classes_config or 'NoLabel' in classes_config
if 'NotLabeled' in classes_config:
notlabeled_index = classes_config.index('NotLabeled')
else:
notlabeled_index = classes_config.index('NoLabel')
if notlabeled_index > 0:
(classes_config[notlabeled_index], classes_config[0]) = (classes_config[0], classes_config[notlabeled_index])
gt_labels[i] = classes_config.index(gt_labels[i]) + self.label_start_index
if len(gt_labels_ignore) > 0 and isinstance(gt_labels_ignore[0], str):
assert results['classes_config'] is not None
assert 'classes' in results['classes_config'] or 'classes_0' in results['classes_config']
for (i, _) in enumerate(gt_labels_ignore):
if 'classes_0' in results['classes_config']:
classes_config = results['classes_config']['classes_0']
else:
classes_config = results['classes_config']['classes']
if self.label_start_index == -1:
assert 'NotLabeled' in classes_config or 'NoLabel' in classes_config
if 'NotLabeled' in classes_config:
notlabeled_index = classes_config.index('NotLabeled')
else:
notlabeled_index = classes_config.index('NoLabel')
if notlabeled_index > 0:
(classes_config[notlabeled_index], classes_config[0]) = (classes_config[0], classes_config[notlabeled_index])
gt_labels_ignore[i] = classes_config.index(gt_labels_ignore[i]) + self.label_start_index
if 2 == 1:
results['gt_labels'] = gt_labels
results['gt_labels_ignore'] = gt_labels_ignore
else:
results['gt_labels_2'] = gt_labels
results['gt_labels_ignore_2'] = gt_labels_ignore
results = results
return results
|
DAVAR-Lab-OCR
|
positive
|
def __init__(self, pbounds, random_state=None):
"""
Parameters
----------
pbounds : dict
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
random_state : int, RandomState, or None
optionally specify a seed for a random number generator
"""
<DeepExtract>
if random_state is None:
random_state = np.random.RandomState()
elif isinstance(random_state, int):
random_state = np.random.RandomState(random_state)
else:
assert isinstance(random_state, np.random.RandomState)
self.random_state = random_state
</DeepExtract>
self._keys = sorted(pbounds)
self._bounds = np.array([item[1] for item in sorted(pbounds.items(), key=lambda x: x[0])], dtype=np.float)
self._params = np.empty(shape=(0, self.dim))
self._target = np.empty(shape=0)
self._cache = {}
|
def __init__(self, pbounds, random_state=None):
"""
Parameters
----------
pbounds : dict
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
random_state : int, RandomState, or None
optionally specify a seed for a random number generator
"""
if random_state is None:
random_state = np.random.RandomState()
elif isinstance(random_state, int):
random_state = np.random.RandomState(random_state)
else:
assert isinstance(random_state, np.random.RandomState)
self.random_state = random_state
self._keys = sorted(pbounds)
self._bounds = np.array([item[1] for item in sorted(pbounds.items(), key=lambda x: x[0])], dtype=np.float)
self._params = np.empty(shape=(0, self.dim))
self._target = np.empty(shape=0)
self._cache = {}
|
AutoTorch
|
positive
|
@pytest.mark.parametrize('stall_count', [0, 1, 2, 4])
def test_tail_filter_small_diff(stall_count: int):
values = list(range(1000, 1030)) + [1029] * stall_count
ds_in = test_helpers.build_default_region_dataset({CommonFields.CASES: values})
(tail_filter, ds_out) = TailFilter.run(ds_in, [CommonFields.CASES])
<DeepExtract>
assert tail_filter.skipped_too_short == skipped_too_short
assert tail_filter.skipped_na_mean == skipped_na_mean
assert tail_filter.all_good == 1
assert tail_filter.truncated == truncated
assert tail_filter.long_truncated == long_truncated
</DeepExtract>
test_helpers.assert_dataset_like(ds_out, ds_in, drop_na_dates=True)
|
@pytest.mark.parametrize('stall_count', [0, 1, 2, 4])
def test_tail_filter_small_diff(stall_count: int):
values = list(range(1000, 1030)) + [1029] * stall_count
ds_in = test_helpers.build_default_region_dataset({CommonFields.CASES: values})
(tail_filter, ds_out) = TailFilter.run(ds_in, [CommonFields.CASES])
assert tail_filter.skipped_too_short == skipped_too_short
assert tail_filter.skipped_na_mean == skipped_na_mean
assert tail_filter.all_good == 1
assert tail_filter.truncated == truncated
assert tail_filter.long_truncated == long_truncated
test_helpers.assert_dataset_like(ds_out, ds_in, drop_na_dates=True)
|
covid-data-model
|
positive
|
def findRankOfMatrix(self, Matrix):
rank = self.C
for row in range(0, rank, 1):
if Matrix[row][row] != 0:
for col in range(0, self.R, 1):
if col != row:
multiplier = Matrix[col][row] / Matrix[row][row]
for i in range(rank):
Matrix[col][i] -= multiplier * Matrix[row][i]
else:
reduce = True
for i in range(row + 1, self.R, 1):
if Matrix[i][row] != 0:
<DeepExtract>
for i in range(rank):
temp = Matrix[row][i]
Matrix[row][i] = Matrix[i][i]
Matrix[i][i] = temp
</DeepExtract>
reduce = False
break
if reduce:
rank -= 1
for i in range(0, self.R, 1):
Matrix[i][row] = Matrix[i][rank]
row -= 1
return rank
|
def findRankOfMatrix(self, Matrix):
rank = self.C
for row in range(0, rank, 1):
if Matrix[row][row] != 0:
for col in range(0, self.R, 1):
if col != row:
multiplier = Matrix[col][row] / Matrix[row][row]
for i in range(rank):
Matrix[col][i] -= multiplier * Matrix[row][i]
else:
reduce = True
for i in range(row + 1, self.R, 1):
if Matrix[i][row] != 0:
for i in range(rank):
temp = Matrix[row][i]
Matrix[row][i] = Matrix[i][i]
Matrix[i][i] = temp
reduce = False
break
if reduce:
rank -= 1
for i in range(0, self.R, 1):
Matrix[i][row] = Matrix[i][rank]
row -= 1
return rank
|
Competitive-Coding-Platforms
|
positive
|
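As a quick cross-check of the elimination-based routine above, NumPy's SVD-based rank gives the same result on well-conditioned inputs (the tolerance handling differs near singularity):

import numpy as np

m = np.array([[1.0, 2.0, 3.0],
              [2.0, 4.0, 6.0],   # multiple of the first row
              [1.0, 0.0, 1.0]])
print(np.linalg.matrix_rank(m))  # 2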
def handle(self, *args, **options):
self.verbosity = options['verbosity']
self.excluded_fields = options.get('excluded_fields')
self.base_manager = options.get('base_manager')
to_process = set()
model_strings = options.get('models', []) or args
if model_strings:
for model_pair in self._handle_model_list(*model_strings):
to_process.add(model_pair)
elif options['auto']:
to_process = self._auto_models()
else:
<DeepExtract>
if self.verbosity >= verbosity_level:
self.stdout.write(self.COMMAND_HINT)
</DeepExtract>
<DeepExtract>
if options['minutes']:
stop_date = timezone.now() - timezone.timedelta(minutes=options['minutes'])
else:
stop_date = None
for (model, history_model) in to_process:
m_qs = history_model.objects
if stop_date:
m_qs = m_qs.filter(history_date__gte=stop_date)
found = m_qs.count()
self.log(f'{model} has {found} historical entries', 2)
if not found:
continue
if self.base_manager:
model_query = model._base_manager.all()
else:
model_query = model._default_manager.all()
if stop_date:
model_query = model_query.filter(pk__in=m_qs.values_list(model._meta.pk.name).distinct())
for o in model_query.iterator():
self._process_instance(o, model, stop_date=stop_date, dry_run=options['dry'])
</DeepExtract>
|
def handle(self, *args, **options):
self.verbosity = options['verbosity']
self.excluded_fields = options.get('excluded_fields')
self.base_manager = options.get('base_manager')
to_process = set()
model_strings = options.get('models', []) or args
if model_strings:
for model_pair in self._handle_model_list(*model_strings):
to_process.add(model_pair)
elif options['auto']:
to_process = self._auto_models()
else:
if self.verbosity >= verbosity_level:
self.stdout.write(self.COMMAND_HINT)
if options['minutes']:
stop_date = timezone.now() - timezone.timedelta(minutes=options['minutes'])
else:
stop_date = None
for (model, history_model) in to_process:
m_qs = history_model.objects
if stop_date:
m_qs = m_qs.filter(history_date__gte=stop_date)
found = m_qs.count()
self.log(f'{model} has {found} historical entries', 2)
if not found:
continue
if self.base_manager:
model_query = model._base_manager.all()
else:
model_query = model._default_manager.all()
if stop_date:
model_query = model_query.filter(pk__in=m_qs.values_list(model._meta.pk.name).distinct())
for o in model_query.iterator():
self._process_instance(o, model, stop_date=stop_date, dry_run=options['dry'])
|
django-simple-history
|
positive
|
def _validate(self, *args, **kwargs):
"""
Check that flag are valid. Convert flags to booleans. Default flag to
False. Return a list of errors.
"""
errors = super(BooleanField, self)._validate(*args, **kwargs)
self.about_file_path = kwargs.get('about_file_path')
<DeepExtract>
if self.original_value is None or self.original_value == '':
flag = None
if isinstance(self.original_value, bool):
flag = self.original_value
elif isinstance(self.original_value, str):
self.original_value = self.original_value.strip()
if not self.original_value:
flag = None
self.original_value = self.original_value.lower()
if self.original_value in self.flag_values:
if self.original_value in self.true_flags:
flag = u'yes'
else:
flag = u'no'
else:
flag = False
else:
flag = False
</DeepExtract>
if flag is False:
name = self.name
val = self.original_value
about_file_path = self.about_file_path
flag_values = self.flag_values
msg = u'Path: %(about_file_path)s - Field %(name)s: Invalid flag value: %(val)r is not one of: %(flag_values)s' % locals()
errors.append(Error(ERROR, msg))
self.value = None
elif flag is None:
name = self.name
msg = u'Field %(name)s: field is present but empty. ' % locals()
errors.append(Error(INFO, msg))
self.value = None
elif flag == u'yes' or flag is True:
self.value = True
else:
self.value = False
return errors
|
def _validate(self, *args, **kwargs):
"""
Check that flag are valid. Convert flags to booleans. Default flag to
False. Return a list of errors.
"""
errors = super(BooleanField, self)._validate(*args, **kwargs)
self.about_file_path = kwargs.get('about_file_path')
if self.original_value is None or self.original_value == '':
flag = None
if isinstance(self.original_value, bool):
flag = self.original_value
elif isinstance(self.original_value, str):
self.original_value = self.original_value.strip()
if not self.original_value:
flag = None
self.original_value = self.original_value.lower()
if self.original_value in self.flag_values:
if self.original_value in self.true_flags:
flag = u'yes'
else:
flag = u'no'
else:
flag = False
else:
flag = False
if flag is False:
name = self.name
val = self.original_value
about_file_path = self.about_file_path
flag_values = self.flag_values
msg = u'Path: %(about_file_path)s - Field %(name)s: Invalid flag value: %(val)r is not one of: %(flag_values)s' % locals()
errors.append(Error(ERROR, msg))
self.value = None
elif flag is None:
name = self.name
msg = u'Field %(name)s: field is present but empty. ' % locals()
errors.append(Error(INFO, msg))
self.value = None
elif flag == u'yes' or flag is True:
self.value = True
else:
self.value = False
return errors
|
aboutcode-toolkit
|
positive
|
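The inlined block above normalises a raw flag value before _validate turns it into a boolean, an empty-field INFO, or an invalid-value ERROR. A hedged stand-alone sketch of that coercion; the flag sets below are illustrative, not the library's actual true_flags/flag_values constants:

TRUE_FLAGS = {'yes', 'y', 'true', 'x'}      # assumed values
FALSE_FLAGS = {'no', 'n', 'false'}          # assumed values

def coerce_flag(value):
    if value is None or (isinstance(value, str) and not value.strip()):
        return None            # present but empty -> INFO, value becomes None
    if isinstance(value, bool):
        return value
    value = value.strip().lower()
    if value in TRUE_FLAGS:
        return True
    if value in FALSE_FLAGS:
        return False
    return 'invalid'           # caller reports an ERROR for this case

print(coerce_flag(' Yes '), coerce_flag(''), coerce_flag('maybe'))  # True None invalid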
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
<DeepExtract>
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
</DeepExtract>
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
<DeepExtract>
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
</DeepExtract>
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
|
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
|
cascade-rcnn-fpn-faster_rcnn-pytorch1.0
|
positive
|
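A small usage sketch for the block above (assumes torch is importable and BasicBlock is defined as shown). One thing worth double-checking against the original conv3x3 helper: canonical ResNet BasicBlocks keep stride 1 on the second 3x3 convolution, whereas the inlined conv2 above carries stride=stride.

import torch

block = BasicBlock(inplanes=64, planes=64, stride=1)
x = torch.randn(1, 64, 32, 32)
out = block.relu(block.bn1(block.conv1(x)))   # first conv-bn-relu stage only
print(out.shape)                              # torch.Size([1, 64, 32, 32])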
def main(config: configure_finetuning.FinetuningConfig, split, task_name):
<DeepExtract>
answers = {}
with tf.io.gfile.GFile(os.path.join(config.raw_data_dir(task_name), split + '.jsonl'), 'r') as f:
for (i, line) in enumerate(f):
example = json.loads(line)
if i == 0 and 'header' in example:
continue
for qa in example['qas']:
answers[qa['qid']] = qa['answers']
answers = answers
</DeepExtract>
<DeepExtract>
with tf.io.gfile.GFile(config.qa_preds_file(task_name)) as f:
predictions = json.load(f)
predictions = predictions
</DeepExtract>
return evaluate(answers, predictions, True)
|
def main(config: configure_finetuning.FinetuningConfig, split, task_name):
answers = {}
with tf.io.gfile.GFile(os.path.join(config.raw_data_dir(task_name), split + '.jsonl'), 'r') as f:
for (i, line) in enumerate(f):
example = json.loads(line)
if i == 0 and 'header' in example:
continue
for qa in example['qas']:
answers[qa['qid']] = qa['answers']
answers = answers
with tf.io.gfile.GFile(config.qa_preds_file(task_name)) as f:
predictions = json.load(f)
predictions = predictions
return evaluate(answers, predictions, True)
|
arabert
|
positive
|
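The answers dict above is read straight from a JSON-lines file, skipping an optional header record on the first line. A self-contained sketch of that loading step with plain open() in place of tf.io.gfile (the path in the comment is hypothetical):

import json

def load_answers(path):
    answers = {}
    with open(path) as f:
        for i, line in enumerate(f):
            example = json.loads(line)
            if i == 0 and 'header' in example:
                continue          # some dumps start with a header record
            for qa in example['qas']:
                answers[qa['qid']] = qa['answers']
    return answers

# answers = load_answers('dev.jsonl')  # hypothetical path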
def get_messages(session: Session, event: Event, uninvited_contacts: Set[str]) -> List[str]:
messages = []
if uninvited_contacts:
messages.append(f"Forgot to invite {', '.join(uninvited_contacts)} maybe?")
<DeepExtract>
all_events_with_same_name = session.query(Event).filter(Event.owner_id == event.owner_id, Event.title == event.title).all()
pattern = check_diffs(event, all_events_with_same_name)
</DeepExtract>
for weeks_diff in pattern:
messages.append(f'Same event happened {weeks_diff} weeks before too. Want to create another one {weeks_diff} after too?')
return messages
|
def get_messages(session: Session, event: Event, uninvited_contacts: Set[str]) -> List[str]:
messages = []
if uninvited_contacts:
messages.append(f"Forgot to invite {', '.join(uninvited_contacts)} maybe?")
all_events_with_same_name = session.query(Event).filter(Event.owner_id == event.owner_id, Event.title == event.title).all()
pattern = check_diffs(event, all_events_with_same_name)
for weeks_diff in pattern:
messages.append(f'Same event happened {weeks_diff} weeks before too. Want to create another one {weeks_diff} after too?')
return messages
|
calendar
|
positive
|
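check_diffs is defined elsewhere; the sketch below only illustrates the kind of whole-weeks spacing it is assumed to report for events sharing the same title.

from datetime import datetime

def weeks_between(earlier: datetime, later: datetime) -> int:
    # whole weeks between two event dates (assumption about what check_diffs yields)
    return (later - earlier).days // 7

print(weeks_between(datetime(2021, 1, 4), datetime(2021, 1, 18)))  # 2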
def where_clause(self, table, column, value, param_counter):
<DeepExtract>
if value.startswith('['):
values = json.loads(value)
else:
values = [v.strip() for v in value.split(',')]
</DeepExtract>
params = [f':p{param_counter + i}' for i in range(len(values))]
sql = f"{escape_sqlite(column)} not in ({', '.join(params)})"
return (sql, values)
|
def where_clause(self, table, column, value, param_counter):
if value.startswith('['):
values = json.loads(value)
else:
values = [v.strip() for v in value.split(',')]
params = [f':p{param_counter + i}' for i in range(len(values))]
sql = f"{escape_sqlite(column)} not in ({', '.join(params)})"
return (sql, values)
|
datasette
|
positive
|
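The clause builder above emits one numbered placeholder per value so the caller can bind them positionally. A small sketch of the resulting SQL and parameter mapping, with a plainly quoted column name standing in for escape_sqlite:

values = ['a', 'b', 'c']
param_counter = 3
params = [f':p{param_counter + i}' for i in range(len(values))]
sql = f'"colour" not in ({", ".join(params)})'
print(sql)                                                   # "colour" not in (:p3, :p4, :p5)
print(dict(zip((p.lstrip(':') for p in params), values)))    # {'p3': 'a', 'p4': 'b', 'p5': 'c'}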
def validate_images(framework_directory):
<DeepExtract>
files = [os.path.join(framework_directory, 'universe', 'config.json'), os.path.join(framework_directory, 'universe', 'marathon.json.mustache')]
dist_dir = os.path.join(framework_directory, 'src', 'main', 'dist')
for (dp, dn, filenames) in os.walk(dist_dir):
for file in filenames:
files.append(os.path.join(dp, file))
files = files
</DeepExtract>
for file in files:
with open(file, 'r') as file:
lines = file.readlines()
bad_image = False
for line in lines:
line = line.strip()
if 'image:' in line:
image_matcher = re.compile('image:\\s?(.*)$', re.IGNORECASE)
match = image_matcher.match(line)
image_path = match.group(1)
env_var_matcher = re.compile('\\{\\{[A-Z0-9_]*\\}\\}')
if not env_var_matcher.match(image_path):
print('Bad image found in {}. It is a direct reference instead of a templated reference: {}\n Export images to resource.json to allow packaging for airgapped clusters.'.format(file, image_path))
bad_image = True
return not bad_image
|
def validate_images(framework_directory):
files = [os.path.join(framework_directory, 'universe', 'config.json'), os.path.join(framework_directory, 'universe', 'marathon.json.mustache')]
dist_dir = os.path.join(framework_directory, 'src', 'main', 'dist')
for (dp, dn, filenames) in os.walk(dist_dir):
for file in filenames:
files.append(os.path.join(dp, file))
files = files
for file in files:
with open(file, 'r') as file:
lines = file.readlines()
bad_image = False
for line in lines:
line = line.strip()
if 'image:' in line:
image_matcher = re.compile('image:\\s?(.*)$', re.IGNORECASE)
match = image_matcher.match(line)
image_path = match.group(1)
env_var_matcher = re.compile('\\{\\{[A-Z0-9_]*\\}\\}')
if not env_var_matcher.match(image_path):
print('Bad image found in {}. It is a direct reference instead of a templated reference: {}\n Export images to resource.json to allow packaging for airgapped clusters.'.format(file, image_path))
bad_image = True
return not bad_image
|
dcos-jenkins-service
|
positive
|
def on_success(self, item=None):
<DeepExtract>
params = {'skip_steps': self.endpoint.get_skip_steps(), 'desired_step': self.endpoint.get_desired_step()}
</DeepExtract>
return self.endpoint.wizard.next_step(**params)
|
def on_success(self, item=None):
params = {'skip_steps': self.endpoint.get_skip_steps(), 'desired_step': self.endpoint.get_desired_step()}
return self.endpoint.wizard.next_step(**params)
|
django-hyperadmin
|
positive
|
def __call__(self, func):
""" Decorate a function to start the profiler on function entry and stop
it on function exit.
"""
self.add_function(func)
if is_generator(func):
<DeepExtract>
@functools.wraps(func)
def wrapper(*args, **kwds):
g = func(*args, **kwds)
self.enable_by_count()
try:
item = next(g)
finally:
self.disable_by_count()
input = (yield item)
while True:
self.enable_by_count()
try:
item = g.send(input)
finally:
self.disable_by_count()
input = (yield item)
wrapper = wrapper
</DeepExtract>
else:
<DeepExtract>
@functools.wraps(func)
def wrapper(*args, **kwds):
self.enable_by_count()
try:
result = func(*args, **kwds)
finally:
self.disable_by_count()
wrapper = result
wrapper = wrapper
</DeepExtract>
return wrapper
|
def __call__(self, func):
""" Decorate a function to start the profiler on function entry and stop
it on function exit.
"""
self.add_function(func)
if is_generator(func):
@functools.wraps(func)
def wrapper(*args, **kwds):
g = func(*args, **kwds)
self.enable_by_count()
try:
item = next(g)
finally:
self.disable_by_count()
input = (yield item)
while True:
self.enable_by_count()
try:
item = g.send(input)
finally:
self.disable_by_count()
input = (yield item)
wrapper = wrapper
else:
@functools.wraps(func)
def wrapper(*args, **kwds):
self.enable_by_count()
try:
result = func(*args, **kwds)
finally:
self.disable_by_count()
wrapper = result
wrapper = wrapper
return wrapper
|
blenderaddons
|
positive
|
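The decorator above special-cases generator functions so the profiler counters only bracket the time the generator body is actually running. A stripped-down sketch of that dispatch with no-op bracketing in place of enable_by_count/disable_by_count (plain iteration only; the original also forwards send()):

import functools
import inspect

def traced(func):
    if inspect.isgeneratorfunction(func):
        @functools.wraps(func)
        def gen_wrapper(*args, **kwargs):
            for item in func(*args, **kwargs):
                # enable/disable would bracket each resumption here
                yield item
        return gen_wrapper

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # enable/disable would bracket the whole call here
        return func(*args, **kwargs)
    return wrapper

@traced
def counter(n):
    for i in range(n):
        yield i

print(list(counter(3)))  # [0, 1, 2]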
def _parse_yara_rule(yara_rule: str) -> Optional[Mapping[str, Any]]:
<DeepExtract>
name = _match_regex(_NAME_REGEX, yara_rule)
</DeepExtract>
if name is None:
log.error('No name for rule: %s', yara_rule)
return None
<DeepExtract>
description = _match_regex(_DESCRIPTION_REGEX, yara_rule)
</DeepExtract>
if description is None:
log.error('No description for rule: %s', yara_rule)
return None
<DeepExtract>
report = _match_regex(_REPORT_REGEX, yara_rule)
</DeepExtract>
if report is None:
log.debug('No report for rule: %s', name)
<DeepExtract>
last_modified = _match_regex(_LAST_MODIFIED_REGEX, yara_rule)
</DeepExtract>
if last_modified is None:
log.debug('No last modified for rule: %s', name)
return {'name': name, 'description': description, 'report': report, 'last_modified': last_modified, 'rule': yara_rule}
|
def _parse_yara_rule(yara_rule: str) -> Optional[Mapping[str, Any]]:
name = _match_regex(_NAME_REGEX, yara_rule)
if name is None:
log.error('No name for rule: %s', yara_rule)
return None
description = _match_regex(_DESCRIPTION_REGEX, yara_rule)
if description is None:
log.error('No description for rule: %s', yara_rule)
return None
report = _match_regex(_REPORT_REGEX, yara_rule)
if report is None:
log.debug('No report for rule: %s', name)
last_modified = _match_regex(_LAST_MODIFIED_REGEX, yara_rule)
if last_modified is None:
log.debug('No last modified for rule: %s', name)
return {'name': name, 'description': description, 'report': report, 'last_modified': last_modified, 'rule': yara_rule}
|
connectors
|
positive
|
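_match_regex and the _*_REGEX constants are defined elsewhere in that module; the stand-ins below only illustrate the extract-first-group pattern the parser above relies on.

import re
from typing import Optional

_NAME_REGEX = re.compile(r'rule\s+(\w+)')   # illustrative, not the real constant

def _match_regex(pattern: re.Pattern, text: str) -> Optional[str]:
    match = pattern.search(text)
    return match.group(1) if match else None

print(_match_regex(_NAME_REGEX, 'rule SuspiciousPDF { condition: true }'))  # SuspiciousPDF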
def path_details(self, dimension, path, hierarchy=None):
"""Returns details for `path` in `dimension`. Can be used for
multi-dimensional "breadcrumbs" in a used interface.
Number of SQL queries: 1.
"""
dimension = self.cube.dimension(dimension)
hierarchy = dimension.hierarchy(hierarchy)
cut = PointCut(dimension, path, hierarchy=hierarchy)
cell = Cell(self.cube, [cut])
attributes = []
for level in hierarchy.levels[0:len(path)]:
attributes += level.attributes
<DeepExtract>
attributes = attributes or self.cube.all_fact_attributes
refs = [attr.ref for attr in collect_attributes(attributes, cell)]
context_attributes = self.cube.get_attributes(refs)
context = self._create_context(context_attributes)
if True:
selection = [self.star.fact_key_column]
else:
selection = []
names = [attr.ref for attr in attributes]
selection += context.get_columns(names)
cell_condition = context.condition_for_cell(cell)
statement = sql.expression.select(selection, from_obj=context.star, whereclause=cell_condition)
(statement, labels) = (statement, context.get_labels(statement.columns))
</DeepExtract>
statement = statement.limit(1)
<DeepExtract>
self._log_statement(statement, 'path details')
cursor = self.connectable.execute(statement)
</DeepExtract>
row = cursor.fetchone()
if row:
member = dict(zip(labels, row))
else:
member = None
return member
|
def path_details(self, dimension, path, hierarchy=None):
"""Returns details for `path` in `dimension`. Can be used for
multi-dimensional "breadcrumbs" in a used interface.
Number of SQL queries: 1.
"""
dimension = self.cube.dimension(dimension)
hierarchy = dimension.hierarchy(hierarchy)
cut = PointCut(dimension, path, hierarchy=hierarchy)
cell = Cell(self.cube, [cut])
attributes = []
for level in hierarchy.levels[0:len(path)]:
attributes += level.attributes
attributes = attributes or self.cube.all_fact_attributes
refs = [attr.ref for attr in collect_attributes(attributes, cell)]
context_attributes = self.cube.get_attributes(refs)
context = self._create_context(context_attributes)
if True:
selection = [self.star.fact_key_column]
else:
selection = []
names = [attr.ref for attr in attributes]
selection += context.get_columns(names)
cell_condition = context.condition_for_cell(cell)
statement = sql.expression.select(selection, from_obj=context.star, whereclause=cell_condition)
(statement, labels) = (statement, context.get_labels(statement.columns))
statement = statement.limit(1)
self._log_statement(statement, 'path details')
cursor = self.connectable.execute(statement)
row = cursor.fetchone()
if row:
member = dict(zip(labels, row))
else:
member = None
return member
|
cubes
|
positive
|
def plot2dHistGeneralized(self, names_to_x, names_to_y, filename=None):
"""Plot the 2d histogram for all combinations of x and y"""
n_rows = len(names_to_x)
n_cols = len(names_to_y)
if filename:
self.grdevices.png(file='%s_%s_%s_2dhist.png' % (filename, '-'.join(list(names_to_x.keys())), '-'.join(list(names_to_y.keys()))), width=200 * n_rows, height=400 * n_cols)
ro.r.par(mfrow=ro.IntVector([n_rows, n_cols]))
for x_name in names_to_x:
for y_name in names_to_y:
<DeepExtract>
ro_x = ro.FloatVector(names_to_x[x_name])
ro_y = ro.FloatVector(names_to_y[y_name])
if filename:
self.grdevices.png(file='%s.png' % filename, width=self.widths, height=self.heights)
else:
self.grdevices.png(file='2dHist_%s-%s.png' % (x_name, y_name), width=self.widths, height=self.heights)
ro.globalenv['x'] = ro_x
ro.globalenv['y'] = ro_y
ro.r('regr <- lm(y~x)')
intercept = ro.r('int <- regr$coefficients[1]')[0]
beta = ro.r('beta <- regr$coefficients[2]')[0]
r_squared = ro.r('r_squared <- summary(regr)$r.squared')[0]
n_bins = self._getBins(names_to_x[x_name]) * self._getBins(names_to_y[y_name])
rgb_palette = self.grdevices.colorRampPalette(ro.StrVector(['lightyellow', 'orange']), space='rgb')
ro_colors = ro.StrVector(['white'] + list(rgb_palette(20)))
ro.r.hist2d(ro_x, ro_y, xlab=x_name, ylab=y_name, main='Y ~ %.4g + %.4g x r2: %2.4f' % (intercept, beta, r_squared), col=ro_colors, nbins=n_bins)
ro.r('abline(regr, col="black")')
if filename:
self.grdevices.dev_off()
</DeepExtract>
if filename:
self.grdevices.dev_off()
|
def plot2dHistGeneralized(self, names_to_x, names_to_y, filename=None):
"""Plot the 2d histogram for all combinations of x and y"""
n_rows = len(names_to_x)
n_cols = len(names_to_y)
if filename:
self.grdevices.png(file='%s_%s_%s_2dhist.png' % (filename, '-'.join(list(names_to_x.keys())), '-'.join(list(names_to_y.keys()))), width=200 * n_rows, height=400 * n_cols)
ro.r.par(mfrow=ro.IntVector([n_rows, n_cols]))
for x_name in names_to_x:
for y_name in names_to_y:
ro_x = ro.FloatVector(names_to_x[x_name])
ro_y = ro.FloatVector(names_to_y[y_name])
if filename:
self.grdevices.png(file='%s.png' % filename, width=self.widths, height=self.heights)
else:
self.grdevices.png(file='2dHist_%s-%s.png' % (x_name, y_name), width=self.widths, height=self.heights)
ro.globalenv['x'] = ro_x
ro.globalenv['y'] = ro_y
ro.r('regr <- lm(y~x)')
intercept = ro.r('int <- regr$coefficients[1]')[0]
beta = ro.r('beta <- regr$coefficients[2]')[0]
r_squared = ro.r('r_squared <- summary(regr)$r.squared')[0]
n_bins = self._getBins(names_to_x[x_name]) * self._getBins(names_to_y[y_name])
rgb_palette = self.grdevices.colorRampPalette(ro.StrVector(['lightyellow', 'orange']), space='rgb')
ro_colors = ro.StrVector(['white'] + list(rgb_palette(20)))
ro.r.hist2d(ro_x, ro_y, xlab=x_name, ylab=y_name, main='Y ~ %.4g + %.4g x r2: %2.4f' % (intercept, beta, r_squared), col=ro_colors, nbins=n_bins)
ro.r('abline(regr, col="black")')
if filename:
self.grdevices.dev_off()
if filename:
self.grdevices.dev_off()
|
dlatk
|
positive
|
def render(self, context):
<DeepExtract>
cat = self.category.resolve(context)
if isinstance(cat, basestring):
cat = Category.objects.get_by_tree_path(cat)
cat = cat
</DeepExtract>
pos = Position.objects.get_active_position(cat, self.position, self.nofallback)
if pos:
return pos.render(context, self.nodelist, self.box_type)
return ''
|
def render(self, context):
cat = self.category.resolve(context)
if isinstance(cat, basestring):
cat = Category.objects.get_by_tree_path(cat)
cat = cat
pos = Position.objects.get_active_position(cat, self.position, self.nofallback)
if pos:
return pos.render(context, self.nodelist, self.box_type)
return ''
|
ella
|
positive
|
def seek_to(self, position):
"""Move the Shard's iterator to the earliest record after the :class:`~datetime.datetime` time.
Returns the first records at or past ``position``. If the list is empty,
the seek failed to find records, either because the Shard is exhausted or it
reached the HEAD of an open Shard.
:param position: The position in time to move to.
:type position: :class:`~datetime.datetime`
:returns: A list of the first records found after ``position``. May be empty.
"""
<DeepExtract>
self.iterator_id = self.session.get_shard_iterator(stream_arn=self.stream_arn, shard_id=self.shard_id, iterator_type='trim_horizon', sequence_number=sequence_number)
self.iterator_type = 'trim_horizon'
self.sequence_number = sequence_number
self.empty_responses = 0
</DeepExtract>
position = int(position.timestamp())
while not self.exhausted and self.empty_responses < CALLS_TO_REACH_HEAD:
<DeepExtract>
if self.exhausted:
records = []
if self.empty_responses >= CALLS_TO_REACH_HEAD:
records = self._apply_get_records_response(self.session.get_stream_records(self.iterator_id))
while self.empty_responses < CALLS_TO_REACH_HEAD and (not self.exhausted):
records = self._apply_get_records_response(self.session.get_stream_records(self.iterator_id))
if records:
records = records
records = []
</DeepExtract>
if records and records[-1]['meta']['created_at'].timestamp() >= position:
for (offset, record) in enumerate(reversed(records)):
if record['meta']['created_at'].timestamp() < position:
index = len(records) - offset
return records[index:]
return records
return []
|
def seek_to(self, position):
"""Move the Shard's iterator to the earliest record after the :class:`~datetime.datetime` time.
Returns the first records at or past ``position``. If the list is empty,
the seek failed to find records, either because the Shard is exhausted or it
reached the HEAD of an open Shard.
:param position: The position in time to move to.
:type position: :class:`~datetime.datetime`
:returns: A list of the first records found after ``position``. May be empty.
"""
self.iterator_id = self.session.get_shard_iterator(stream_arn=self.stream_arn, shard_id=self.shard_id, iterator_type='trim_horizon', sequence_number=sequence_number)
self.iterator_type = 'trim_horizon'
self.sequence_number = sequence_number
self.empty_responses = 0
position = int(position.timestamp())
while not self.exhausted and self.empty_responses < CALLS_TO_REACH_HEAD:
if self.exhausted:
records = []
if self.empty_responses >= CALLS_TO_REACH_HEAD:
records = self._apply_get_records_response(self.session.get_stream_records(self.iterator_id))
while self.empty_responses < CALLS_TO_REACH_HEAD and (not self.exhausted):
records = self._apply_get_records_response(self.session.get_stream_records(self.iterator_id))
if records:
records = records
records = []
if records and records[-1]['meta']['created_at'].timestamp() >= position:
for (offset, record) in enumerate(reversed(records)):
if record['meta']['created_at'].timestamp() < position:
index = len(records) - offset
return records[index:]
return records
return []
|
bloop
|
positive
|
def stop_step(self, uuid=None):
<DeepExtract>
uuid = uuid or self._last_item_uuid(item_type=TestStepResult)
step = self._items.pop(uuid, None)
</DeepExtract>
if step and (not step.stop):
step.stop = now()
|
def stop_step(self, uuid=None):
uuid = uuid or self._last_item_uuid(item_type=TestStepResult)
step = self._items.pop(uuid, None)
if step and (not step.stop):
step.stop = now()
|
allure-python
|
positive
|
def action(self, localctx: RuleContext, ruleIndex: int, actionIndex: int):
if self._actions is None:
actions = dict()
actions[2] = self.NEWLINE_action
actions[77] = self.OPEN_PAREN_action
actions[78] = self.CLOSE_PAREN_action
actions[79] = self.OPEN_BRACK_action
actions[80] = self.CLOSE_BRACK_action
actions[81] = self.OPEN_BRACE_action
actions[82] = self.CLOSE_BRACE_action
self._actions = actions
action = self._actions.get(ruleIndex, None)
if action is not None:
<DeepExtract>
if self._actions is None:
actions = dict()
actions[2] = self.NEWLINE_action
actions[77] = self.OPEN_PAREN_action
actions[78] = self.CLOSE_PAREN_action
actions[79] = self.OPEN_BRACK_action
actions[80] = self.CLOSE_BRACK_action
actions[81] = self.OPEN_BRACE_action
actions[82] = self.CLOSE_BRACE_action
self._actions = actions
action = self._actions.get(actionIndex, None)
if action is not None:
action(localctx, actionIndex)
else:
raise Exception('No registered action for:' + str(actionIndex))
</DeepExtract>
else:
raise Exception('No registered action for:' + str(ruleIndex))
|
def action(self, localctx: RuleContext, ruleIndex: int, actionIndex: int):
if self._actions is None:
actions = dict()
actions[2] = self.NEWLINE_action
actions[77] = self.OPEN_PAREN_action
actions[78] = self.CLOSE_PAREN_action
actions[79] = self.OPEN_BRACK_action
actions[80] = self.CLOSE_BRACK_action
actions[81] = self.OPEN_BRACE_action
actions[82] = self.CLOSE_BRACE_action
self._actions = actions
action = self._actions.get(ruleIndex, None)
if action is not None:
if self._actions is None:
actions = dict()
actions[2] = self.NEWLINE_action
actions[77] = self.OPEN_PAREN_action
actions[78] = self.CLOSE_PAREN_action
actions[79] = self.OPEN_BRACK_action
actions[80] = self.CLOSE_BRACK_action
actions[81] = self.OPEN_BRACE_action
actions[82] = self.CLOSE_BRACE_action
self._actions = actions
action = self._actions.get(actionIndex, None)
if action is not None:
action(localctx, actionIndex)
else:
raise Exception('No registered action for:' + str(actionIndex))
else:
raise Exception('No registered action for:' + str(ruleIndex))
|
Basis
|
positive
|
def test_execute_delete_param(tmp_path: pathlib.Path) -> None:
"""Test execute delete param."""
<DeepExtract>
_test_init(tmp_path)
(config, section) = _get_config_section(tmp_path, 'test-csv-to-oscal-cd-bp.config')
</DeepExtract>
section['component-definition'] = 'tests/data/csv/component-definitions/bp/component-definition.json'
<DeepExtract>
rows = []
csv_path = pathlib.Path('tests/data/csv/bp.sample.v2.csv')
with open(csv_path, 'r', newline='') as f:
csv_reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for row in csv_reader:
rows.append(row)
rows = rows
</DeepExtract>
row = rows[3]
assert row[13] == 'allowed_admins_per_account'
row[13] = ''
row[14] = ''
row[15] = ''
row[16] = ''
with mock.patch('trestle.tasks.csv_to_oscal_cd.csv.reader') as mock_csv_reader:
mock_csv_reader.return_value = rows
tgt = csv_to_oscal_cd.CsvToOscalComponentDefinition(section)
retval = tgt.execute()
assert retval == TaskOutcome.SUCCESS
fp = pathlib.Path(tmp_path) / 'component-definition.json'
cd = ComponentDefinition.oscal_read(fp)
component = cd.components[0]
assert len(component.props) == 59
assert component.props[22].name == 'Parameter_Id'
assert component.props[22].ns == 'http://abc.github.io/compliance-trestle/schemas/oscal/cd'
assert component.props[22].value == 'api_keys_rotated_days'
assert component.props[22].class_ == 'scc_class'
assert component.props[22].remarks == 'rule_set_04'
|
def test_execute_delete_param(tmp_path: pathlib.Path) -> None:
"""Test execute delete param."""
_test_init(tmp_path)
(config, section) = _get_config_section(tmp_path, 'test-csv-to-oscal-cd-bp.config')
section['component-definition'] = 'tests/data/csv/component-definitions/bp/component-definition.json'
rows = []
csv_path = pathlib.Path('tests/data/csv/bp.sample.v2.csv')
with open(csv_path, 'r', newline='') as f:
csv_reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for row in csv_reader:
rows.append(row)
rows = rows
row = rows[3]
assert row[13] == 'allowed_admins_per_account'
row[13] = ''
row[14] = ''
row[15] = ''
row[16] = ''
with mock.patch('trestle.tasks.csv_to_oscal_cd.csv.reader') as mock_csv_reader:
mock_csv_reader.return_value = rows
tgt = csv_to_oscal_cd.CsvToOscalComponentDefinition(section)
retval = tgt.execute()
assert retval == TaskOutcome.SUCCESS
fp = pathlib.Path(tmp_path) / 'component-definition.json'
cd = ComponentDefinition.oscal_read(fp)
component = cd.components[0]
assert len(component.props) == 59
assert component.props[22].name == 'Parameter_Id'
assert component.props[22].ns == 'http://abc.github.io/compliance-trestle/schemas/oscal/cd'
assert component.props[22].value == 'api_keys_rotated_days'
assert component.props[22].class_ == 'scc_class'
assert component.props[22].remarks == 'rule_set_04'
|
compliance-trestle
|
positive
|
def test_ba2e(self):
"""BA2E Implement GreedyMotifSearch with Pseudocounts"""
<DeepExtract>
def count_occurrences_of_bases(motifs):
"""
Create an array containing the count of occurrences of
each base at each position, summed over all motifs
"""
matrix = np.ones((len(bases), 3), dtype=int) if True else np.zeros((len(bases), 3), dtype=int)
for kmer in motifs:
for j in range(3):
i = bases.find(kmer[j])
matrix[i, j] += 1
motifs = matrix
def profile(motifs):
motifs = count_occurrences_of_bases(motifs) / float(len(motifs))
def score(motifs):
matrix = count_occurrences_of_bases(motifs)
total = 0
for j in range(3):
m = 0
for i in range(len(bases)):
if m < matrix[i, j]:
m = matrix[i, j]
total += len(bases) - m
motifs = total
bestMotifs = [genome[0:3] for genome in ['GGCGTTCAGGCA', 'AAGAATCAGTCA', 'CAAGGAGTTCGC', 'CACGTCAATCAC', 'CAATAATATTCG']]
for motif in [['GGCGTTCAGGCA', 'AAGAATCAGTCA', 'CAAGGAGTTCGC', 'CACGTCAATCAC', 'CAATAATATTCG'][0][i:i + 3] for i in range(len(['GGCGTTCAGGCA', 'AAGAATCAGTCA', 'CAAGGAGTTCGC', 'CACGTCAATCAC', 'CAATAATATTCG'][0]) - 3 + 1)]:
motifs = [motif]
for i in range(1, 5):
motifs.append(mostProbable(['GGCGTTCAGGCA', 'AAGAATCAGTCA', 'CAAGGAGTTCGC', 'CACGTCAATCAC', 'CAATAATATTCG'][i], 3, profile(motifs)))
if score(motifs) < score(bestMotifs):
bestMotifs = motifs
motifs = bestMotifs
</DeepExtract>
self.assertEqual(['ATC', 'ATC', 'TTC', 'TTC', 'TTC'], sorted(motifs))
|
def test_ba2e(self):
"""BA2E Implement GreedyMotifSearch with Pseudocounts"""
def count_occurrences_of_bases(motifs):
"""
Create an array containing the count of occurrences of
each base at each position, summed over all motifs
"""
matrix = np.ones((len(bases), 3), dtype=int) if True else np.zeros((len(bases), 3), dtype=int)
for kmer in motifs:
for j in range(3):
i = bases.find(kmer[j])
matrix[i, j] += 1
motifs = matrix
def profile(motifs):
motifs = count_occurrences_of_bases(motifs) / float(len(motifs))
def score(motifs):
matrix = count_occurrences_of_bases(motifs)
total = 0
for j in range(3):
m = 0
for i in range(len(bases)):
if m < matrix[i, j]:
m = matrix[i, j]
total += len(bases) - m
motifs = total
bestMotifs = [genome[0:3] for genome in ['GGCGTTCAGGCA', 'AAGAATCAGTCA', 'CAAGGAGTTCGC', 'CACGTCAATCAC', 'CAATAATATTCG']]
for motif in [['GGCGTTCAGGCA', 'AAGAATCAGTCA', 'CAAGGAGTTCGC', 'CACGTCAATCAC', 'CAATAATATTCG'][0][i:i + 3] for i in range(len(['GGCGTTCAGGCA', 'AAGAATCAGTCA', 'CAAGGAGTTCGC', 'CACGTCAATCAC', 'CAATAATATTCG'][0]) - 3 + 1)]:
motifs = [motif]
for i in range(1, 5):
motifs.append(mostProbable(['GGCGTTCAGGCA', 'AAGAATCAGTCA', 'CAAGGAGTTCGC', 'CACGTCAATCAC', 'CAATAATATTCG'][i], 3, profile(motifs)))
if score(motifs) < score(bestMotifs):
bestMotifs = motifs
motifs = bestMotifs
self.assertEqual(['ATC', 'ATC', 'TTC', 'TTC', 'TTC'], sorted(motifs))
|
bioinformatics
|
positive
|
def check_if_remote_is_newer(self, remote, local, headers):
"""
Given a remote file location, and the corresponding local file
this will check the datetime stamp on the files to see if the remote
one is newer.
This is a convenience method to be used so that we don't have to
re-fetch files that we already have saved locally
:param remote: URL of file to fetch from remote server
:param local: pathname to save file to locally
:return: True if the remote file is newer and should be downloaded
"""
is_remote_newer = False
LOG.info('Checking if remote file is newer than local \n(%s)', local)
if os.path.exists(local):
LOG.info('Local File exists as %s', local)
else:
LOG.info('Local File does NOT exist as %s', local)
return True
if headers is None:
<DeepExtract>
headers = {'User-Agent': USER_AGENT}
</DeepExtract>
req = urllib.request.Request(remote, headers=headers)
LOG.info('Request header for %s \n\tis: %s', remote, str(req.header_items()))
try:
response = urllib.request.urlopen(req)
except urllib.error.URLError as err:
resp_headers = None
size = 0
last_modified = None
LOG.error('%s\n\tFor: %s', err, remote)
is_remote_newer = None
if is_remote_newer is not None:
resp_headers = response.info()
size = resp_headers.get('Content-Length')
last_modified = resp_headers.get('Last-Modified')
if size is not None and size != '':
size = int(size)
else:
size = 0
fstat = os.stat(local)
LOG.info('Local File date: %s', datetime.utcfromtimestamp(fstat[ST_CTIME]))
if last_modified is not None:
dt_obj = datetime.strptime(last_modified, '%a, %d %b %Y %H:%M:%S %Z')
self.remote_file_timestamps[remote] = dt_obj
if dt_obj > datetime.utcfromtimestamp(fstat[ST_CTIME]):
if fstat[ST_SIZE] < size:
LOG.info('New Remote File exists')
is_remote_newer = True
if fstat[ST_SIZE] > size:
LOG.warning('New Remote File exists but it is SMALLER')
is_remote_newer = True
LOG.info('New Remote File has same filesize--will not download')
elif fstat[ST_SIZE] != size:
LOG.info('Remote File is %i \t Local File is %i', size, fstat[ST_SIZE])
is_remote_newer = True
response.close()
return is_remote_newer
|
def check_if_remote_is_newer(self, remote, local, headers):
"""
Given a remote file location, and the corresponding local file
this will check the datetime stamp on the files to see if the remote
one is newer.
This is a convenience method to be used so that we don't have to
re-fetch files that we already have saved locally
:param remote: URL of file to fetch from remote server
:param local: pathname to save file to locally
:return: True if the remote file is newer and should be downloaded
"""
is_remote_newer = False
LOG.info('Checking if remote file is newer than local \n(%s)', local)
if os.path.exists(local):
LOG.info('Local File exists as %s', local)
else:
LOG.info('Local File does NOT exist as %s', local)
return True
if headers is None:
headers = {'User-Agent': USER_AGENT}
req = urllib.request.Request(remote, headers=headers)
LOG.info('Request header for %s \n\tis: %s', remote, str(req.header_items()))
try:
response = urllib.request.urlopen(req)
except urllib.error.URLError as err:
resp_headers = None
size = 0
last_modified = None
LOG.error('%s\n\tFor: %s', err, remote)
is_remote_newer = None
if is_remote_newer is not None:
resp_headers = response.info()
size = resp_headers.get('Content-Length')
last_modified = resp_headers.get('Last-Modified')
if size is not None and size != '':
size = int(size)
else:
size = 0
fstat = os.stat(local)
LOG.info('Local File date: %s', datetime.utcfromtimestamp(fstat[ST_CTIME]))
if last_modified is not None:
dt_obj = datetime.strptime(last_modified, '%a, %d %b %Y %H:%M:%S %Z')
self.remote_file_timestamps[remote] = dt_obj
if dt_obj > datetime.utcfromtimestamp(fstat[ST_CTIME]):
if fstat[ST_SIZE] < size:
LOG.info('New Remote File exists')
is_remote_newer = True
if fstat[ST_SIZE] > size:
LOG.warning('New Remote File exists but it is SMALLER')
is_remote_newer = True
LOG.info('New Remote File has same filesize--will not download')
elif fstat[ST_SIZE] != size:
LOG.info('Remote File is %i \t Local File is %i', size, fstat[ST_SIZE])
is_remote_newer = True
response.close()
return is_remote_newer
|
dipper
|
positive
|
def _pretty_report(self, register):
<DeepExtract>
self._hid.write(b'\x00' + register + b'\x00' * (64 - len(register)))
time.sleep(MCP2221_HID_DELAY)
if response:
register = self._hid.read(64)
register = None
</DeepExtract>
print(' 0 1 2 3 4 5 6 7 8 9')
index = 0
for row in range(7):
print('{} : '.format(row), end='')
for _ in range(10):
print('{:02x} '.format(report[index]), end='')
index += 1
if index > 63:
break
print()
|
def _pretty_report(self, register):
self._hid.write(b'\x00' + register + b'\x00' * (64 - len(register)))
time.sleep(MCP2221_HID_DELAY)
if response:
register = self._hid.read(64)
register = None
print(' 0 1 2 3 4 5 6 7 8 9')
index = 0
for row in range(7):
print('{} : '.format(row), end='')
for _ in range(10):
print('{:02x} '.format(report[index]), end='')
index += 1
if index > 63:
break
print()
|
Adafruit_Blinka
|
positive
|
def main():
parser = argparse.ArgumentParser(description='Put a description of your script here')
parser.add_argument('-il', '--input_left', type=str, required=True, help='Input left reads file')
parser.add_argument('-ir', '--input_right', type=str, required=True, help='Input right reads file')
parser.add_argument('-ol', '--output_left', type=str, required=True, help='Output left reads file')
parser.add_argument('-or', '--output_right', type=str, required=True, help='Output right reads file')
parser.add_argument('-os', '--output_singleton', type=str, required=True, help='Output singletons reads file')
args = parser.parse_args()
line_count = 0
current_left_header = None
current_left_cols = list()
current_right_header = None
current_left_cols = list()
global total_reads_read
global left_reads
global right_reads
lifh = open(args.input_left)
rifh = open(args.input_right)
lofh = open(args.output_left, 'wt')
rofh = open(args.output_right, 'wt')
sofh = open(args.output_singleton, 'wt')
for (left_line, right_line) in zip(lifh, rifh):
line_count += 1
if line_count % 4 == 1:
total_reads_read += 2
if current_left_header is not None:
<DeepExtract>
global left_reads
global right_reads
left_reads[current_left_header] = current_left_cols
right_reads[current_right_header] = current_right_cols
if current_left_header in right_reads:
export_reads(current_left_header, current_left_cols, current_left_header, right_reads[current_left_header], lofh, rofh)
del left_reads[current_left_header]
del right_reads[current_left_header]
elif current_right_header in left_reads:
export_reads(current_right_header, left_reads[current_right_header], current_right_header, current_right_cols, lofh, rofh)
del left_reads[current_right_header]
del right_reads[current_right_header]
</DeepExtract>
current_left_header = left_line.lstrip('@').rstrip()
current_left_cols = list()
m = re.match('(.+)\\/1', current_left_header)
if m:
current_left_header = m.group(1)
current_right_header = right_line.lstrip('@').rstrip()
current_right_cols = list()
m = re.match('(.+)\\/1', current_right_header)
if m:
current_right_header = m.group(1)
else:
current_left_cols.append(left_line)
current_right_cols.append(right_line)
if current_left_header is not None:
<DeepExtract>
global left_reads
global right_reads
left_reads[current_left_header] = current_left_cols
right_reads[current_right_header] = current_right_cols
if current_left_header in right_reads:
export_reads(current_left_header, current_left_cols, current_left_header, right_reads[current_left_header], lofh, rofh)
del left_reads[current_left_header]
del right_reads[current_left_header]
elif current_right_header in left_reads:
export_reads(current_right_header, left_reads[current_right_header], current_right_header, current_right_cols, lofh, rofh)
del left_reads[current_right_header]
del right_reads[current_right_header]
</DeepExtract>
for lid in left_reads:
if lid in right_reads:
<DeepExtract>
global left_reads_written
global right_reads_written
lofh.write('@{0}/1\n'.format(lid))
lofh.write(''.join(left_reads[lid]))
left_reads_written += 1
rofh.write('@{0}/2\n'.format(lid))
rofh.write(''.join(right_reads[lid]))
right_reads_written += 1
</DeepExtract>
del right_reads[lid]
else:
<DeepExtract>
global singletons_written
sofh.write('@{0}{1}\n'.format(lid, '/1'))
sofh.write(''.join(left_reads[lid]))
singletons_written += 1
</DeepExtract>
for rid in right_reads:
<DeepExtract>
global singletons_written
sofh.write('@{0}{1}\n'.format(rid, '/2'))
sofh.write(''.join(right_reads[rid]))
singletons_written += 1
</DeepExtract>
print('DEBUG: total reads read: {0}'.format(total_reads_read))
print('DEBUG: left reads written: {0}'.format(left_reads_written))
print('DEBUG: right reads written: {0}'.format(right_reads_written))
print('DEBUG: singletons written: {0}'.format(singletons_written))
|
def main():
parser = argparse.ArgumentParser(description='Put a description of your script here')
parser.add_argument('-il', '--input_left', type=str, required=True, help='Input left reads file')
parser.add_argument('-ir', '--input_right', type=str, required=True, help='Input right reads file')
parser.add_argument('-ol', '--output_left', type=str, required=True, help='Output left reads file')
parser.add_argument('-or', '--output_right', type=str, required=True, help='Output right reads file')
parser.add_argument('-os', '--output_singleton', type=str, required=True, help='Output singletons reads file')
args = parser.parse_args()
line_count = 0
current_left_header = None
current_left_cols = list()
current_right_header = None
current_left_cols = list()
global total_reads_read
global left_reads
global right_reads
lifh = open(args.input_left)
rifh = open(args.input_right)
lofh = open(args.output_left, 'wt')
rofh = open(args.output_right, 'wt')
sofh = open(args.output_singleton, 'wt')
for (left_line, right_line) in zip(lifh, rifh):
line_count += 1
if line_count % 4 == 1:
total_reads_read += 2
if current_left_header is not None:
global left_reads
global right_reads
left_reads[current_left_header] = current_left_cols
right_reads[current_right_header] = current_right_cols
if current_left_header in right_reads:
export_reads(current_left_header, current_left_cols, current_left_header, right_reads[current_left_header], lofh, rofh)
del left_reads[current_left_header]
del right_reads[current_left_header]
elif current_right_header in left_reads:
export_reads(current_right_header, left_reads[current_right_header], current_right_header, current_right_cols, lofh, rofh)
del left_reads[current_right_header]
del right_reads[current_right_header]
current_left_header = left_line.lstrip('@').rstrip()
current_left_cols = list()
m = re.match('(.+)\\/1', current_left_header)
if m:
current_left_header = m.group(1)
current_right_header = right_line.lstrip('@').rstrip()
current_right_cols = list()
m = re.match('(.+)\\/1', current_right_header)
if m:
current_right_header = m.group(1)
else:
current_left_cols.append(left_line)
current_right_cols.append(right_line)
if current_left_header is not None:
global left_reads
global right_reads
left_reads[current_left_header] = current_left_cols
right_reads[current_right_header] = current_right_cols
if current_left_header in right_reads:
export_reads(current_left_header, current_left_cols, current_left_header, right_reads[current_left_header], lofh, rofh)
del left_reads[current_left_header]
del right_reads[current_left_header]
elif current_right_header in left_reads:
export_reads(current_right_header, left_reads[current_right_header], current_right_header, current_right_cols, lofh, rofh)
del left_reads[current_right_header]
del right_reads[current_right_header]
for lid in left_reads:
if lid in right_reads:
global left_reads_written
global right_reads_written
lofh.write('@{0}/1\n'.format(lid))
lofh.write(''.join(left_reads[lid]))
left_reads_written += 1
rofh.write('@{0}/2\n'.format(lid))
rofh.write(''.join(right_reads[lid]))
right_reads_written += 1
del right_reads[lid]
else:
global singletons_written
sofh.write('@{0}{1}\n'.format(lid, '/1'))
sofh.write(''.join(left_reads[lid]))
singletons_written += 1
for rid in right_reads:
global singletons_written
sofh.write('@{0}{1}\n'.format(rid, '/2'))
sofh.write(''.join(right_reads[rid]))
singletons_written += 1
print('DEBUG: total reads read: {0}'.format(total_reads_read))
print('DEBUG: left reads written: {0}'.format(left_reads_written))
print('DEBUG: right reads written: {0}'.format(right_reads_written))
print('DEBUG: singletons written: {0}'.format(singletons_written))
|
biocode
|
positive
|
def _find_gen_dir():
"""Create, if needed, and return a directory where automatically
generated modules will be created.
Usually, this is the directory 'Lib/site-packages/comtypes/gen'.
If the above directory cannot be created, or if it is not a
directory in the file system (when comtypes is imported from a
zip-archive or a zipped egg), or if the current user cannot create
files in this directory, an additional directory is created and
appended to comtypes.gen.__path__ .
For a Python script using comtypes, the additional directory is
'%APPDATA%\\<username>\\Python\\Python25\\comtypes_cache'.
For an executable frozen with py2exe, the additional directory is
'%TEMP%\\comtypes_cache\\<imagebasename>-25'.
"""
<DeepExtract>
try:
import comtypes.gen
logger.info('Imported existing %s', comtypes.gen)
except ImportError:
import comtypes
logger.info('Could not import comtypes.gen, trying to create it.')
try:
comtypes_path = os.path.abspath(os.path.join(comtypes.__path__[0], 'gen'))
if not os.path.isdir(comtypes_path):
os.mkdir(comtypes_path)
logger.info("Created comtypes.gen directory: '%s'", comtypes_path)
comtypes_init = os.path.join(comtypes_path, '__init__.py')
if not os.path.exists(comtypes_init):
logger.info("Writing __init__.py file: '%s'", comtypes_init)
ofi = open(comtypes_init, 'w')
ofi.write('# comtypes.gen package, directory for generated files.\n')
ofi.close()
except (OSError, IOError) as details:
logger.info('Creating comtypes.gen package failed: %s', details)
module = sys.modules['comtypes.gen'] = types.ModuleType('comtypes.gen')
comtypes.gen = module
comtypes.gen.__path__ = []
logger.info('Created a memory-only package.')
</DeepExtract>
from comtypes import gen
<DeepExtract>
gen_path = list(gen.__path__)
</DeepExtract>
if not _is_writeable(gen_path):
ftype = getattr(sys, 'frozen', None)
version_str = '%d%d' % sys.version_info[:2]
if ftype == None:
subdir = 'Python\\Python%s\\comtypes_cache' % version_str
<DeepExtract>
path = ctypes.create_unicode_buffer(MAX_PATH)
SHGetSpecialFolderPath(0, path, CSIDL_APPDATA, True)
basedir = path.value
</DeepExtract>
elif ftype == 'dll':
<DeepExtract>
path = ctypes.create_unicode_buffer(MAX_PATH)
if GetModuleFileName(sys.frozendllhandle, path, MAX_PATH):
path = path.value
raise ctypes.WinError()
</DeepExtract>
base = os.path.splitext(os.path.basename(path))[0]
subdir = 'comtypes_cache\\%s-%s' % (base, version_str)
basedir = tempfile.gettempdir()
else:
base = os.path.splitext(os.path.basename(sys.executable))[0]
subdir = 'comtypes_cache\\%s-%s' % (base, version_str)
basedir = tempfile.gettempdir()
gen_dir = os.path.join(basedir, subdir)
if not os.path.exists(gen_dir):
logger.info("Creating writeable comtypes cache directory: '%s'", gen_dir)
os.makedirs(gen_dir)
gen_path.append(gen_dir)
result = os.path.abspath(gen_path[-1])
logger.info("Using writeable comtypes cache directory: '%s'", result)
return result
|
def _find_gen_dir():
"""Create, if needed, and return a directory where automatically
generated modules will be created.
Usually, this is the directory 'Lib/site-packages/comtypes/gen'.
If the above directory cannot be created, or if it is not a
directory in the file system (when comtypes is imported from a
zip-archive or a zipped egg), or if the current user cannot create
files in this directory, an additional directory is created and
appended to comtypes.gen.__path__ .
For a Python script using comtypes, the additional directory is
'%APPDATA%\\<username>\\Python\\Python25\\comtypes_cache'.
For an executable frozen with py2exe, the additional directory is
'%TEMP%\\comtypes_cache\\<imagebasename>-25'.
"""
try:
import comtypes.gen
logger.info('Imported existing %s', comtypes.gen)
except ImportError:
import comtypes
logger.info('Could not import comtypes.gen, trying to create it.')
try:
comtypes_path = os.path.abspath(os.path.join(comtypes.__path__[0], 'gen'))
if not os.path.isdir(comtypes_path):
os.mkdir(comtypes_path)
logger.info("Created comtypes.gen directory: '%s'", comtypes_path)
comtypes_init = os.path.join(comtypes_path, '__init__.py')
if not os.path.exists(comtypes_init):
logger.info("Writing __init__.py file: '%s'", comtypes_init)
ofi = open(comtypes_init, 'w')
ofi.write('# comtypes.gen package, directory for generated files.\n')
ofi.close()
except (OSError, IOError) as details:
logger.info('Creating comtypes.gen package failed: %s', details)
module = sys.modules['comtypes.gen'] = types.ModuleType('comtypes.gen')
comtypes.gen = module
comtypes.gen.__path__ = []
logger.info('Created a memory-only package.')
from comtypes import gen
gen_path = list(gen.__path__)
if not _is_writeable(gen_path):
ftype = getattr(sys, 'frozen', None)
version_str = '%d%d' % sys.version_info[:2]
if ftype == None:
subdir = 'Python\\Python%s\\comtypes_cache' % version_str
path = ctypes.create_unicode_buffer(MAX_PATH)
SHGetSpecialFolderPath(0, path, CSIDL_APPDATA, True)
basedir = path.value
elif ftype == 'dll':
path = ctypes.create_unicode_buffer(MAX_PATH)
if GetModuleFileName(sys.frozendllhandle, path, MAX_PATH):
path = path.value
raise ctypes.WinError()
base = os.path.splitext(os.path.basename(path))[0]
subdir = 'comtypes_cache\\%s-%s' % (base, version_str)
basedir = tempfile.gettempdir()
else:
base = os.path.splitext(os.path.basename(sys.executable))[0]
subdir = 'comtypes_cache\\%s-%s' % (base, version_str)
basedir = tempfile.gettempdir()
gen_dir = os.path.join(basedir, subdir)
if not os.path.exists(gen_dir):
logger.info("Creating writeable comtypes cache directory: '%s'", gen_dir)
os.makedirs(gen_dir)
gen_path.append(gen_dir)
result = os.path.abspath(gen_path[-1])
logger.info("Using writeable comtypes cache directory: '%s'", result)
return result
|
comtypes
|
positive
|
def _wrapper(wrapped, instance, args, kwargs):
"""
General wrapper for AsyncHTTPClient instrumentation.
:param wrapped: wrapt's wrapped
:param instance: wrapt's instance
:param args: wrapt's args
:param kwargs: wrapt's kwargs
:return: None
"""
print_debug('AsyncHTTPClient init')
try:
<DeepExtract>
if not isinstance(*args, HTTPRequest):
*args = HTTPRequest(*args, **kwargs)
(*args, raise_error) = (*args, raise_error)
</DeepExtract>
except Exception:
return wrapped(*args, **kwargs)
print_debug('AsyncHTTPClient setting header')
trace_header = get_epsagon_http_trace_id()
if isinstance(request.headers, HTTPHeaders):
if not request.headers.get(EPSAGON_HEADER):
request.headers.add(EPSAGON_HEADER, trace_header)
elif isinstance(request.headers, dict):
if EPSAGON_HEADER not in request.headers:
request.headers[EPSAGON_HEADER] = trace_header
print_debug('AsyncHTTPClient running wrapper')
return wrapper(TornadoClientEventFactory, wrapped, instance, (request,), {'raise_error': raise_error})
|
def _wrapper(wrapped, instance, args, kwargs):
"""
General wrapper for AsyncHTTPClient instrumentation.
:param wrapped: wrapt's wrapped
:param instance: wrapt's instance
:param args: wrapt's args
:param kwargs: wrapt's kwargs
:return: None
"""
print_debug('AsyncHTTPClient init')
try:
if not isinstance(*args, HTTPRequest):
*args = HTTPRequest(*args, **kwargs)
(*args, raise_error) = (*args, raise_error)
except Exception:
return wrapped(*args, **kwargs)
print_debug('AsyncHTTPClient setting header')
trace_header = get_epsagon_http_trace_id()
if isinstance(request.headers, HTTPHeaders):
if not request.headers.get(EPSAGON_HEADER):
request.headers.add(EPSAGON_HEADER, trace_header)
elif isinstance(request.headers, dict):
if EPSAGON_HEADER not in request.headers:
request.headers[EPSAGON_HEADER] = trace_header
print_debug('AsyncHTTPClient running wrapper')
return wrapper(TornadoClientEventFactory, wrapped, instance, (request,), {'raise_error': raise_error})
|
epsagon-python
|
positive
|
def band_label(self, name_alias):
<DeepExtract>
if name_alias in self._idx:
canonical_name = self._idx[name_alias]
raise ConfigException(f'Unknown band name/alias: {name_alias} in layer {self.product.name}')
</DeepExtract>
return self.read_local_metadata(canonical_name)
|
def band_label(self, name_alias):
if name_alias in self._idx:
canonical_name = self._idx[name_alias]
raise ConfigException(f'Unknown band name/alias: {name_alias} in layer {self.product.name}')
return self.read_local_metadata(canonical_name)
|
datacube-ows
|
positive
|
def __init__(self, **kwargs):
VGroup.__init__(self, **kwargs)
self.big_square = Square(**self.big_square_kwargs)
self.alpha_1 = TRIANGLE_SIDE_PROPORTION / 4
<DeepExtract>
alpha_1 = self.alpha_1
self.proportion_points = [i * 0.25 + alpha_1 for i in range(4)]
</DeepExtract>
self.inside_point = np.array([self.big_square.point_from_proportion(self.proportion_points[0])[0], self.big_square.point_from_proportion(self.proportion_points[-1])[1], 0])
<DeepExtract>
bs = self.big_square
pp = self.proportion_points
self.triangles = VGroup(*[Polygon(bs.point_from_proportion(pp[i % 4]), bs.point_from_proportion((i + 1) * 0.25), bs.point_from_proportion(pp[(i + 1) % 4]), **self.triangle_kwargs) for i in range(4)])
</DeepExtract>
<DeepExtract>
bs = self.big_square
med_square = Square(side_length=TRIANGLE_SIDE_PROPORTION * BIG_SQUARE_SIDE_LENGTH, **self.small_squares_kwargs)
med_square.align_to(bs, UR)
self.med_square = med_square
</DeepExtract>
<DeepExtract>
bs = self.big_square
small_square = Square(side_length=(1 - TRIANGLE_SIDE_PROPORTION) * BIG_SQUARE_SIDE_LENGTH, **self.small_squares_kwargs)
small_square.align_to(bs, DL)
self.small_square = small_square
</DeepExtract>
self.add(self.big_square, self.triangles, self.med_square, self.small_square)
|
def __init__(self, **kwargs):
VGroup.__init__(self, **kwargs)
self.big_square = Square(**self.big_square_kwargs)
self.alpha_1 = TRIANGLE_SIDE_PROPORTION / 4
alpha_1 = self.alpha_1
self.proportion_points = [i * 0.25 + alpha_1 for i in range(4)]
self.inside_point = np.array([self.big_square.point_from_proportion(self.proportion_points[0])[0], self.big_square.point_from_proportion(self.proportion_points[-1])[1], 0])
bs = self.big_square
pp = self.proportion_points
self.triangles = VGroup(*[Polygon(bs.point_from_proportion(pp[i % 4]), bs.point_from_proportion((i + 1) * 0.25), bs.point_from_proportion(pp[(i + 1) % 4]), **self.triangle_kwargs) for i in range(4)])
bs = self.big_square
med_square = Square(side_length=TRIANGLE_SIDE_PROPORTION * BIG_SQUARE_SIDE_LENGTH, **self.small_squares_kwargs)
med_square.align_to(bs, UR)
self.med_square = med_square
bs = self.big_square
small_square = Square(side_length=(1 - TRIANGLE_SIDE_PROPORTION) * BIG_SQUARE_SIDE_LENGTH, **self.small_squares_kwargs)
small_square.align_to(bs, DL)
self.small_square = small_square
self.add(self.big_square, self.triangles, self.med_square, self.small_square)
|
AnimationsWithManim
|
positive
|
def get_ooc(self, candidates, ignore_attributes=True):
<DeepExtract>
cands = []
for c in candidates:
entities = {}
doc_name = c.get_parent().document.name
abs_start = c.get_parent().abs_char_offsets[0]
span = ((c.implant.char_start + abs_start, c.implant.char_end + abs_start + 1),)
entities['T1'] = Entity('T1', doc_name, 'Implant', span, c.implant.get_attrib_span('words'))
span = ((c.complication.char_start + abs_start, c.complication.char_end + abs_start + 1),)
entities['T2'] = Entity('T2', doc_name, 'Finding', span, c.complication.get_attrib_span('words'))
relation = Relation('R1', doc_name, rela_type='Complication', args=['T1', 'T2'])
relation.init_args(entities)
relation = relation.clone(ignore_attributes=ignore_attributes)
cands.append(relation)
brat_cands = cands
</DeepExtract>
if ignore_attributes:
gold = {c.clone(ignore_attributes=True): self.labels[c] for c in self.labels}
else:
gold = self.labels
return [c for c in gold if c not in brat_cands]
|
def get_ooc(self, candidates, ignore_attributes=True):
cands = []
for c in candidates:
entities = {}
doc_name = c.get_parent().document.name
abs_start = c.get_parent().abs_char_offsets[0]
span = ((c.implant.char_start + abs_start, c.implant.char_end + abs_start + 1),)
entities['T1'] = Entity('T1', doc_name, 'Implant', span, c.implant.get_attrib_span('words'))
span = ((c.complication.char_start + abs_start, c.complication.char_end + abs_start + 1),)
entities['T2'] = Entity('T2', doc_name, 'Finding', span, c.complication.get_attrib_span('words'))
relation = Relation('R1', doc_name, rela_type='Complication', args=['T1', 'T2'])
relation.init_args(entities)
relation = relation.clone(ignore_attributes=ignore_attributes)
cands.append(relation)
brat_cands = cands
if ignore_attributes:
gold = {c.clone(ignore_attributes=True): self.labels[c] for c in self.labels}
else:
gold = self.labels
return [c for c in gold if c not in brat_cands]
|
ehr-rwe
|
positive
|
def execute(self, context):
seed(self.seed)
bpy.ops.mesh.primitive_grid_add(x_subdivisions=self.xsub, y_subdivisions=self.ysub, enter_editmode=True)
obj = bpy.context.edit_object
me = obj.data
bm = bmesh.from_edit_mesh(me)
bm.faces.active = None
for i in range(self.nv):
<DeepExtract>
verts = [vert for vert in bm.verts if fully_connected(vert) and non_corner(vert)]
</DeepExtract>
if len(verts):
vert = choice(verts)
bmesh.ops.dissolve_faces(bm, faces=vert.link_faces, use_verts=False)
for i in range(self.ne):
<DeepExtract>
s = []
for edge in bm.edges:
if vertical(edge) and inside(edge) and non_gap(edge):
s.append(edge)
edges = s
</DeepExtract>
if len(edges):
edge = choice(edges)
bmesh.ops.dissolve_faces(bm, faces=edge.link_faces, use_verts=False)
for edge in get_movable_edges(bm):
x = (random() * 2 - 1) * self.randomedge * (2.0 / self.xsub) * 0.5
edge.verts[0].co.x += x
edge.verts[1].co.x += x
for vert in bm.verts:
x = (noise(vert.co) * 2 - 1) * self.randomvert * (2.0 / self.xsub) * 0.5
y = (noise(vert.co + Vector((11.1, 13.12, 17.14))) * 2 - 1) * self.randomvert * (2.0 / self.ysub) * 0.5
vert.co.x += x
vert.co.y += y
extruded_faces = bmesh.ops.extrude_discrete_faces(bm, faces=bm.faces)['faces']
bm.verts.index_update()
for face in extruded_faces:
vindices = [v.index for v in face.verts]
for vert in face.verts:
for e in vert.link_edges:
overt = e.other_vert(vert)
if overt.index not in vindices:
overt.tag = True
bmesh.ops.delete(bm, geom=[v for v in bm.verts if v.tag], context=1)
for face in bm.faces:
z = (random() * 2 - 1) * self.zrandom
for vert in face.verts:
vert.co.z += z
bmesh.update_edit_mesh(me, True)
bpy.ops.object.editmode_toggle()
obj.scale.x = self.xsub / 10.0
obj.scale.y = self.ysub / 10.0
me.uv_textures.new()
uv_layer = me.uv_layers.active.data
vertex_colors = me.vertex_colors.new().data
for poly in me.polygons:
offset = Vector((random(), random(), 0)) if self.randomuv else Vector((0, 0, 0))
color = [random(), random(), random()]
for loop_index in range(poly.loop_start, poly.loop_start + poly.loop_total):
coords = me.vertices[me.loops[loop_index].vertex_index].co
uv_layer[loop_index].uv = (coords + offset).xy
vertex_colors[loop_index].color = color
bpy.ops.object.modifier_add(type='SOLIDIFY')
bpy.context.object.modifiers['Solidify'].offset = 1
bpy.context.object.modifiers['Solidify'].thickness = 0.1
bpy.ops.object.modifier_add(type='BEVEL')
bpy.context.object.modifiers['Bevel'].width = 0.01
bpy.context.object.modifiers['Bevel'].segments = 2
return {'FINISHED'}
|
def execute(self, context):
seed(self.seed)
bpy.ops.mesh.primitive_grid_add(x_subdivisions=self.xsub, y_subdivisions=self.ysub, enter_editmode=True)
obj = bpy.context.edit_object
me = obj.data
bm = bmesh.from_edit_mesh(me)
bm.faces.active = None
for i in range(self.nv):
verts = [vert for vert in bm.verts if fully_connected(vert) and non_corner(vert)]
if len(verts):
vert = choice(verts)
bmesh.ops.dissolve_faces(bm, faces=vert.link_faces, use_verts=False)
for i in range(self.ne):
s = []
for edge in bm.edges:
if vertical(edge) and inside(edge) and non_gap(edge):
s.append(edge)
edges = s
if len(edges):
edge = choice(edges)
bmesh.ops.dissolve_faces(bm, faces=edge.link_faces, use_verts=False)
for edge in get_movable_edges(bm):
x = (random() * 2 - 1) * self.randomedge * (2.0 / self.xsub) * 0.5
edge.verts[0].co.x += x
edge.verts[1].co.x += x
for vert in bm.verts:
x = (noise(vert.co) * 2 - 1) * self.randomvert * (2.0 / self.xsub) * 0.5
y = (noise(vert.co + Vector((11.1, 13.12, 17.14))) * 2 - 1) * self.randomvert * (2.0 / self.ysub) * 0.5
vert.co.x += x
vert.co.y += y
extruded_faces = bmesh.ops.extrude_discrete_faces(bm, faces=bm.faces)['faces']
bm.verts.index_update()
for face in extruded_faces:
vindices = [v.index for v in face.verts]
for vert in face.verts:
for e in vert.link_edges:
overt = e.other_vert(vert)
if overt.index not in vindices:
overt.tag = True
bmesh.ops.delete(bm, geom=[v for v in bm.verts if v.tag], context=1)
for face in bm.faces:
z = (random() * 2 - 1) * self.zrandom
for vert in face.verts:
vert.co.z += z
bmesh.update_edit_mesh(me, True)
bpy.ops.object.editmode_toggle()
obj.scale.x = self.xsub / 10.0
obj.scale.y = self.ysub / 10.0
me.uv_textures.new()
uv_layer = me.uv_layers.active.data
vertex_colors = me.vertex_colors.new().data
for poly in me.polygons:
offset = Vector((random(), random(), 0)) if self.randomuv else Vector((0, 0, 0))
color = [random(), random(), random()]
for loop_index in range(poly.loop_start, poly.loop_start + poly.loop_total):
coords = me.vertices[me.loops[loop_index].vertex_index].co
uv_layer[loop_index].uv = (coords + offset).xy
vertex_colors[loop_index].color = color
bpy.ops.object.modifier_add(type='SOLIDIFY')
bpy.context.object.modifiers['Solidify'].offset = 1
bpy.context.object.modifiers['Solidify'].thickness = 0.1
bpy.ops.object.modifier_add(type='BEVEL')
bpy.context.object.modifiers['Bevel'].width = 0.01
bpy.context.object.modifiers['Bevel'].segments = 2
return {'FINISHED'}
|
blenderaddons
|
positive
|
def load_test_image(image_filename, test_dir):
if os.path.islink(str(test_dir / image_filename)):
image_path = os.readlink(test_dir / image_filename)
else:
image_path = str(test_dir / image_filename)
image = skimage.io.imread(image_path)
<DeepExtract>
img_crop = np.zeros([image_size[0], image_size[1], 3], dtype=np.float)
img_roi = image[-image_size_crop[0]:, :, :]
if SCALE == 2:
img_resize = skimage.transform.resize(img_roi, (image_size_crop[0] / 2, image_size_crop[1] / 2), order=1, mode='constant', preserve_range=True)
else:
img_resize = img_roi
start_y = int((img_crop.shape[1] - img_resize.shape[1]) / 2)
img_crop[:, start_y:start_y + img_resize.shape[1], :] = img_resize
if flip:
img_crop = np.fliplr(img_crop)
image = img_crop
</DeepExtract>
return image
|
def load_test_image(image_filename, test_dir):
if os.path.islink(str(test_dir / image_filename)):
image_path = os.readlink(test_dir / image_filename)
else:
image_path = str(test_dir / image_filename)
image = skimage.io.imread(image_path)
img_crop = np.zeros([image_size[0], image_size[1], 3], dtype=np.float)
img_roi = image[-image_size_crop[0]:, :, :]
if SCALE == 2:
img_resize = skimage.transform.resize(img_roi, (image_size_crop[0] / 2, image_size_crop[1] / 2), order=1, mode='constant', preserve_range=True)
else:
img_resize = img_roi
start_y = int((img_crop.shape[1] - img_resize.shape[1]) / 2)
img_crop[:, start_y:start_y + img_resize.shape[1], :] = img_resize
if flip:
img_crop = np.fliplr(img_crop)
image = img_crop
return image
|
cvpr-2018-autonomous-driving-autopilot-solution
|
positive
|
def test_0b11111111_0b11111111() -> None:
screen = bytearray(2)
<DeepExtract>
(left_byte, right_byte) = ((0 * 16 + 0) // 8, (0 * 16 + 15) // 8)
(left_mask, right_mask) = (255 >> 0 % 8, 255 >> 15 % 8 + 1 ^ 255)
if left_byte == right_byte:
screen[left_byte] |= left_mask & right_mask
else:
screen[left_byte] |= left_mask
for i in range(left_byte + 1, right_byte):
screen[i] = 255
screen[right_byte] |= right_mask
</DeepExtract>
assert screen == bytearray([255, 255])
|
def test_0b11111111_0b11111111() -> None:
screen = bytearray(2)
(left_byte, right_byte) = ((0 * 16 + 0) // 8, (0 * 16 + 15) // 8)
(left_mask, right_mask) = (255 >> 0 % 8, 255 >> 15 % 8 + 1 ^ 255)
if left_byte == right_byte:
screen[left_byte] |= left_mask & right_mask
else:
screen[left_byte] |= left_mask
for i in range(left_byte + 1, right_byte):
screen[i] = 255
screen[right_byte] |= right_mask
assert screen == bytearray([255, 255])
|
CtCI-6th-Edition-Python
|
positive
|
def update_resources(self):
inventory = Inventory.objects.get(name=self.name)
<DeepExtract>
output = {}
services = glob.glob('{}/*'.format(self.get_base_dir()))
for service in services:
if os.path.exists(service):
service_name = service.split('/')[-1]
try:
readme_data = self.parse_readme_file(service)
except FileNotFoundError as exception:
logger.error(exception)
readme_data = {}
except ApplicationError as exception:
logger.error(exception)
readme_data = {}
output[service_name] = {'path': service, 'metadata': self.parse_metadata_file(service), 'readme': readme_data, 'schemas': self.parse_schema_files(service), 'support_files': self.parse_support_files(service)}
service_formulas = output
</DeepExtract>
for (formula_name, formula) in service_formulas.items():
(res, created) = Resource.objects.get_or_create(uid=formula_name, kind='service_formula', inventory=inventory)
if created:
res.metadata = formula
res.save()
elif res.metadata != formula:
res.metadata = formula
res.save()
logger.info('Processed {} service formulas'.format(len(service_formulas)))
<DeepExtract>
if len(self.class_cache) > 0:
classes = self.class_cache
data = self.walk_classes(ret_classes=True)
return_data = {}
for (name, datum) in data.items():
name = name[1:]
if prefix is None:
return_data[name] = datum
elif name.startswith(prefix):
return_data[name] = datum
if len(self.class_cache) == 0:
self.class_cache = OrderedDict(sorted(return_data.items(), key=lambda t: t[0]))
classes = self.class_cache
</DeepExtract>
for (class_name, class_meta) in classes.items():
cluster_classes = {}
system_classes = {}
service_classes = {}
if '.' not in class_name:
continue
top_name = class_name.split('.')[1]
if class_name.startswith('service.'):
if top_name not in service_classes:
service_classes[top_name] = {}
service_classes[top_name][class_name] = class_meta
(res, created) = Resource.objects.get_or_create(uid=class_name, name=class_name, kind='service_class', inventory=inventory)
if created:
res.metadata = class_meta
elif res.metadata != class_meta:
res.metadata = class_meta
res.save()
elif class_name.startswith('system.'):
if top_name not in system_classes:
system_classes[top_name] = {}
system_classes[top_name][class_name] = class_meta
(res, created) = Resource.objects.get_or_create(uid=class_name, name=class_name, kind='system_class', inventory=inventory)
if created:
res.metadata = class_meta
elif res.metadata != class_meta:
res.metadata = class_meta
res.save()
elif class_name.startswith('cluster.'):
if top_name not in cluster_classes:
cluster_classes[top_name] = {}
cluster_classes[top_name][class_name] = class_meta
(res, created) = Resource.objects.get_or_create(uid=class_name, name=class_name, kind='cluster_class', inventory=inventory)
if created:
res.metadata = class_meta
elif res.metadata != class_meta:
res.metadata = class_meta
res.save()
for (unit, unit_classes) in cluster_classes.items():
(res, created) = Resource.objects.get_or_create(uid=unit, name=unit, kind='cluster_unit', inventory=inventory)
if created:
res.metadata = unit_classes
elif res.metadata != unit_classes:
res.metadata = unit_classes
res.save()
for (unit, unit_classes) in system_classes.items():
(res, created) = Resource.objects.get_or_create(uid=unit, name=unit, kind='system_unit', inventory=inventory)
if created:
res.metadata = unit_classes
elif res.metadata != unit_classes:
res.metadata = unit_classes
res.save()
logger.info('Processed {} classes'.format(len(classes)))
|
def update_resources(self):
inventory = Inventory.objects.get(name=self.name)
output = {}
services = glob.glob('{}/*'.format(self.get_base_dir()))
for service in services:
if os.path.exists(service):
service_name = service.split('/')[-1]
try:
readme_data = self.parse_readme_file(service)
except FileNotFoundError as exception:
logger.error(exception)
readme_data = {}
except ApplicationError as exception:
logger.error(exception)
readme_data = {}
output[service_name] = {'path': service, 'metadata': self.parse_metadata_file(service), 'readme': readme_data, 'schemas': self.parse_schema_files(service), 'support_files': self.parse_support_files(service)}
service_formulas = output
for (formula_name, formula) in service_formulas.items():
(res, created) = Resource.objects.get_or_create(uid=formula_name, kind='service_formula', inventory=inventory)
if created:
res.metadata = formula
res.save()
elif res.metadata != formula:
res.metadata = formula
res.save()
logger.info('Processed {} service formulas'.format(len(service_formulas)))
if len(self.class_cache) > 0:
classes = self.class_cache
data = self.walk_classes(ret_classes=True)
return_data = {}
for (name, datum) in data.items():
name = name[1:]
if prefix is None:
return_data[name] = datum
elif name.startswith(prefix):
return_data[name] = datum
if len(self.class_cache) == 0:
self.class_cache = OrderedDict(sorted(return_data.items(), key=lambda t: t[0]))
classes = self.class_cache
for (class_name, class_meta) in classes.items():
cluster_classes = {}
system_classes = {}
service_classes = {}
if '.' not in class_name:
continue
top_name = class_name.split('.')[1]
if class_name.startswith('service.'):
if top_name not in service_classes:
service_classes[top_name] = {}
service_classes[top_name][class_name] = class_meta
(res, created) = Resource.objects.get_or_create(uid=class_name, name=class_name, kind='service_class', inventory=inventory)
if created:
res.metadata = class_meta
elif res.metadata != class_meta:
res.metadata = class_meta
res.save()
elif class_name.startswith('system.'):
if top_name not in system_classes:
system_classes[top_name] = {}
system_classes[top_name][class_name] = class_meta
(res, created) = Resource.objects.get_or_create(uid=class_name, name=class_name, kind='system_class', inventory=inventory)
if created:
res.metadata = class_meta
elif res.metadata != class_meta:
res.metadata = class_meta
res.save()
elif class_name.startswith('cluster.'):
if top_name not in cluster_classes:
cluster_classes[top_name] = {}
cluster_classes[top_name][class_name] = class_meta
(res, created) = Resource.objects.get_or_create(uid=class_name, name=class_name, kind='cluster_class', inventory=inventory)
if created:
res.metadata = class_meta
elif res.metadata != class_meta:
res.metadata = class_meta
res.save()
for (unit, unit_classes) in cluster_classes.items():
(res, created) = Resource.objects.get_or_create(uid=unit, name=unit, kind='cluster_unit', inventory=inventory)
if created:
res.metadata = unit_classes
elif res.metadata != unit_classes:
res.metadata = unit_classes
res.save()
for (unit, unit_classes) in system_classes.items():
(res, created) = Resource.objects.get_or_create(uid=unit, name=unit, kind='system_unit', inventory=inventory)
if created:
res.metadata = unit_classes
elif res.metadata != unit_classes:
res.metadata = unit_classes
res.save()
logger.info('Processed {} classes'.format(len(classes)))
|
architect-api
|
positive
|
def fit(self, X, Y, sampler='variational', tune=500, draws=500, vi_params={'n': 20000, 'method': 'advi', 'callbacks': [CheckParametersConvergence()]}, **kwargs):
"""
Fit a generalized nested logit model on the provided set of queries X and choices Y of those objects. The
provided queries and corresponding preferences are of a fixed size (numpy arrays). For learning this network
the categorical cross entropy loss function for each object :math:`x_i \\in Q` is defined as:
.. math::
C_{i} = -y(i)\\log(P_i) \\enspace,
where :math:`y` is ground-truth discrete choice vector of the objects in the given query set :math:`Q`.
The value :math:`y(i) = 1` if object :math:`x_i` is chosen else :math:`y(i) = 0`.
Parameters
----------
X : numpy array (n_instances, n_objects, n_features)
Feature vectors of the objects
Y : numpy array (n_instances, n_objects)
Choices for given objects in the query
sampler : {‘variational’, ‘metropolis’, ‘nuts’}, string
The sampler used to estimate the posterior mean and mass matrix from the trace
* **variational** : Run inference methods to estimate posterior mean and diagonal mass matrix
* **metropolis** : Use the MAP as starting point and Metropolis-Hastings sampler
* **nuts** : Use the No-U-Turn sampler
vi_params : dict
The parameters for the **variational** inference method
draws : int
The number of samples to draw. Defaults to 500. The number of tuned samples are discarded by default
tune : int
Number of iterations to tune, defaults to 500. Ignored when using 'SMC'. Samplers adjust
the step sizes, scalings or similar during tuning. Tuning samples will be drawn in addition
to the number specified in the `draws` argument, and will be discarded unless
`discard_tuned_samples` is set to False.
**kwargs :
Keyword arguments for the fit function of :meth:`pymc3.fit`or :meth:`pymc3.sample`
"""
self._pre_fit()
(_n_instances, self.n_objects_fit_, self.n_object_features_fit_) = X.shape
if self.n_nests is None:
self.n_nests = self.n_objects_fit_ + int(self.n_objects_fit_ / 2)
<DeepExtract>
self.trace_ = None
self.trace_vi_ = None
self.random_state_ = check_random_state(self.random_state)
self.loss_function_ = likelihood_dict.get(self.loss_function, None)
self.threshold_ = 4300000.0
if np.prod(X.shape) > self.threshold_:
upper_bound = int(self.threshold_ / np.prod(X.shape[1:]))
indices = self.random_state_.choice(X.shape[0], upper_bound, replace=False)
X = X[indices, :, :]
Y = Y[indices, :]
logger.info('Train Set instances {} objects {} features {}'.format(*X.shape))
with pm.Model() as self.model:
self.Xt_ = theano.shared(X)
self.Yt_ = theano.shared(Y)
shapes = {'weights': self.n_object_features_fit_, 'weights_ik': (self.n_object_features_fit_, self.n_nests)}
weights_dict = create_weight_dictionary(self.model_configuration, shapes)
alpha_ik = tt.dot(self.Xt_, weights_dict['weights_ik'])
alpha_ik = ttu.softmax(alpha_ik, axis=2)
utility = tt.dot(self.Xt_, weights_dict['weights'])
lambda_k = pm.Uniform('lambda_k', self.alpha, 1.0, shape=self.n_nests)
self.p_ = self.get_probabilities(utility, lambda_k, alpha_ik)
LogLikelihood('yl', loss_func=self.loss_function_, p=self.p_, observed=self.Yt_)
logger.info('Model construction completed')
</DeepExtract>
fit_pymc3_model(self, sampler, draws, tune, vi_params, **kwargs)
return self
|
def fit(self, X, Y, sampler='variational', tune=500, draws=500, vi_params={'n': 20000, 'method': 'advi', 'callbacks': [CheckParametersConvergence()]}, **kwargs):
"""
Fit a generalized nested logit model on the provided set of queries X and choices Y of those objects. The
provided queries and corresponding preferences are of a fixed size (numpy arrays). For learning this network
the categorical cross entropy loss function for each object :math:`x_i \\in Q` is defined as:
.. math::
C_{i} = -y(i)\\log(P_i) \\enspace,
where :math:`y` is the ground-truth discrete choice vector of the objects in the given query set :math:`Q`.
The value :math:`y(i) = 1` if object :math:`x_i` is chosen else :math:`y(i) = 0`.
Parameters
----------
X : numpy array (n_instances, n_objects, n_features)
Feature vectors of the objects
Y : numpy array (n_instances, n_objects)
Choices for given objects in the query
sampler : {‘variational’, ‘metropolis’, ‘nuts’}, string
The sampler used to estimate the posterior mean and mass matrix from the trace
* **variational** : Run inference methods to estimate posterior mean and diagonal mass matrix
* **metropolis** : Use the MAP as starting point and Metropolis-Hastings sampler
* **nuts** : Use the No-U-Turn sampler
vi_params : dict
The parameters for the **variational** inference method
draws : int
The number of samples to draw. Defaults to 500. The tuned samples are discarded by default
tune : int
Number of iterations to tune, defaults to 500. Ignored when using 'SMC'. Samplers adjust
the step sizes, scalings or similar during tuning. Tuning samples will be drawn in addition
to the number specified in the `draws` argument, and will be discarded unless
`discard_tuned_samples` is set to False.
**kwargs :
Keyword arguments for the fit function of :meth:`pymc3.fit` or :meth:`pymc3.sample`
"""
self._pre_fit()
(_n_instances, self.n_objects_fit_, self.n_object_features_fit_) = X.shape
if self.n_nests is None:
self.n_nests = self.n_objects_fit_ + int(self.n_objects_fit_ / 2)
self.trace_ = None
self.trace_vi_ = None
self.random_state_ = check_random_state(self.random_state)
self.loss_function_ = likelihood_dict.get(self.loss_function, None)
self.threshold_ = 4300000.0
if np.prod(X.shape) > self.threshold_:
upper_bound = int(self.threshold_ / np.prod(X.shape[1:]))
indices = self.random_state_.choice(X.shape[0], upper_bound, replace=False)
X = X[indices, :, :]
Y = Y[indices, :]
logger.info('Train Set instances {} objects {} features {}'.format(*X.shape))
with pm.Model() as self.model:
self.Xt_ = theano.shared(X)
self.Yt_ = theano.shared(Y)
shapes = {'weights': self.n_object_features_fit_, 'weights_ik': (self.n_object_features_fit_, self.n_nests)}
weights_dict = create_weight_dictionary(self.model_configuration, shapes)
alpha_ik = tt.dot(self.Xt_, weights_dict['weights_ik'])
alpha_ik = ttu.softmax(alpha_ik, axis=2)
utility = tt.dot(self.Xt_, weights_dict['weights'])
lambda_k = pm.Uniform('lambda_k', self.alpha, 1.0, shape=self.n_nests)
self.p_ = self.get_probabilities(utility, lambda_k, alpha_ik)
LogLikelihood('yl', loss_func=self.loss_function_, p=self.p_, observed=self.Yt_)
logger.info('Model construction completed')
fit_pymc3_model(self, sampler, draws, tune, vi_params, **kwargs)
return self
|
cs-ranking
|
positive
|
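The `fit` docstring above defines the per-object choice loss :math:`C_i = -y(i)\log(P_i)`. A minimal NumPy sketch of that loss, using hypothetical one-hot choice matrices rather than the estimator's internal tensors, could look like this:

import numpy as np

def categorical_choice_loss(P, Y, eps=1e-12):
    # C_i = -sum_j y_i(j) * log(P_i(j)); P and Y are hypothetical
    # (n_instances, n_objects) arrays, with Y one-hot over the chosen object.
    return -np.sum(Y * np.log(P + eps), axis=-1)

# toy query with three objects where object 1 is chosen
P = np.array([[0.2, 0.7, 0.1]])
Y = np.array([[0.0, 1.0, 0.0]])
print(categorical_choice_loss(P, Y))  # ~[0.357], i.e. -log(0.7)

For the toy query, object 1 carries probability 0.7, so the loss reduces to -log(0.7) ≈ 0.357.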
def line_render(all_points, all_widths, all_alphas, force_cpu=True, canvas_size=32, colors=None):
dev = all_points.device
if force_cpu:
all_points = all_points.to('cpu')
all_widths = all_widths.to('cpu')
all_alphas = all_alphas.to('cpu')
if colors is not None:
colors = colors.to('cpu')
all_points = 0.5 * (all_points + 1.0) * canvas_size
eps = 0.0001
all_points = all_points + eps * th.randn_like(all_points)
(bs, num_segments, _, _) = all_points.shape
n_out = 3 if colors is not None else 1
output = th.zeros(bs, n_out, canvas_size, canvas_size, device=all_points.device)
scenes = []
for k in range(bs):
shapes = []
shape_groups = []
for p in range(num_segments):
points = all_points[k, p].contiguous().cpu()
num_ctrl_pts = th.zeros(1, dtype=th.int32)
width = all_widths[k, p].cpu()
alpha = all_alphas[k, p].cpu()
if colors is not None:
color = colors[k, p]
else:
color = th.ones(3, device=alpha.device)
color = th.cat([color, alpha.view(1)])
path = pydiffvg.Path(num_control_points=num_ctrl_pts, points=points, stroke_width=width, is_closed=False)
shapes.append(path)
path_group = pydiffvg.ShapeGroup(shape_ids=th.tensor([len(shapes) - 1]), fill_color=None, stroke_color=color)
shape_groups.append(path_group)
scenes.append((canvas_size, canvas_size, shapes, shape_groups))
<DeepExtract>
if seed is None:
seed = random.randint(0, 1000000)
_render = pydiffvg.RenderFunction.apply
scene_args = pydiffvg.RenderFunction.serialize_scene(canvas_size, canvas_size, shapes, shape_groups)
img = _render(canvas_size, canvas_size, 2, 2, seed, None, *scene_args)
raster = img
</DeepExtract>
raster = raster.permute(2, 0, 1).view(4, canvas_size, canvas_size)
alpha = raster[3:4]
if colors is not None:
image = raster[:3]
alpha = alpha.repeat(3, 1, 1)
else:
image = raster[:1]
image = image * alpha
output[k] = image
output = output.to(dev)
return (output, scenes)
|
def line_render(all_points, all_widths, all_alphas, force_cpu=True, canvas_size=32, colors=None):
dev = all_points.device
if force_cpu:
all_points = all_points.to('cpu')
all_widths = all_widths.to('cpu')
all_alphas = all_alphas.to('cpu')
if colors is not None:
colors = colors.to('cpu')
all_points = 0.5 * (all_points + 1.0) * canvas_size
eps = 0.0001
all_points = all_points + eps * th.randn_like(all_points)
(bs, num_segments, _, _) = all_points.shape
n_out = 3 if colors is not None else 1
output = th.zeros(bs, n_out, canvas_size, canvas_size, device=all_points.device)
scenes = []
for k in range(bs):
shapes = []
shape_groups = []
for p in range(num_segments):
points = all_points[k, p].contiguous().cpu()
num_ctrl_pts = th.zeros(1, dtype=th.int32)
width = all_widths[k, p].cpu()
alpha = all_alphas[k, p].cpu()
if colors is not None:
color = colors[k, p]
else:
color = th.ones(3, device=alpha.device)
color = th.cat([color, alpha.view(1)])
path = pydiffvg.Path(num_control_points=num_ctrl_pts, points=points, stroke_width=width, is_closed=False)
shapes.append(path)
path_group = pydiffvg.ShapeGroup(shape_ids=th.tensor([len(shapes) - 1]), fill_color=None, stroke_color=color)
shape_groups.append(path_group)
scenes.append((canvas_size, canvas_size, shapes, shape_groups))
if seed is None:
seed = random.randint(0, 1000000)
_render = pydiffvg.RenderFunction.apply
scene_args = pydiffvg.RenderFunction.serialize_scene(canvas_size, canvas_size, shapes, shape_groups)
img = _render(canvas_size, canvas_size, 2, 2, seed, None, *scene_args)
raster = img
raster = raster.permute(2, 0, 1).view(4, canvas_size, canvas_size)
alpha = raster[3:4]
if colors is not None:
image = raster[:3]
alpha = alpha.repeat(3, 1, 1)
else:
image = raster[:1]
image = image * alpha
output[k] = image
output = output.to(dev)
return (output, scenes)
|
diffvg
|
positive
|
def _media_state_changed(self, event):
"""
Called when a libVLC playback state changes.
Broadcasts playback state & fires :attr:`media_state_changed` event.
"""
assert event
<DeepExtract>
track = self.queue.get_current_track()
if track is None:
data = dict(playing=False, artist=None, title=None, progress=None, length=None)
else:
data = dict(loading=self.is_loading, playing=self.is_playing, artist=track.artist, title=track.title, progress=self.get_play_progress_seconds(), length=self.get_length_seconds(), album_name=track.album_name, album_url=track.album_url)
with open('/tmp/clay.json', 'w') as statefile:
statefile.write(json.dumps(data, indent=4))
</DeepExtract>
self.media_state_changed.fire(self.is_loading, self.is_playing)
|
def _media_state_changed(self, event):
"""
Called when a libVLC playback state changes.
Broadcasts playback state & fires :attr:`media_state_changed` event.
"""
assert event
track = self.queue.get_current_track()
if track is None:
data = dict(playing=False, artist=None, title=None, progress=None, length=None)
else:
data = dict(loading=self.is_loading, playing=self.is_playing, artist=track.artist, title=track.title, progress=self.get_play_progress_seconds(), length=self.get_length_seconds(), album_name=track.album_name, album_url=track.album_url)
with open('/tmp/clay.json', 'w') as statefile:
statefile.write(json.dumps(data, indent=4))
self.media_state_changed.fire(self.is_loading, self.is_playing)
|
clay
|
positive
|
def get(self):
next_url = self.request.get('next')
if not re.match('^/[\\w/]*$', next_url):
next_url = '/'
<DeepExtract>
user = users.get_current_user()
if user:
user = models.User(google_user=user)
session_id = self.request.cookies.get('session', '')
if not session_id:
user = None
login = consumer.Login.get_by_key_name(session_id)
if not login:
user = None
user = models.User(openid_user=login.claimed_id)
</DeepExtract>
if user:
user.LogOut(self, next_url)
else:
self.redirect(next_url)
|
def get(self):
next_url = self.request.get('next')
if not re.match('^/[\\w/]*$', next_url):
next_url = '/'
user = users.get_current_user()
if user:
user = models.User(google_user=user)
session_id = self.request.cookies.get('session', '')
if not session_id:
user = None
login = consumer.Login.get_by_key_name(session_id)
if not login:
user = None
user = models.User(openid_user=login.claimed_id)
if user:
user.LogOut(self, next_url)
else:
self.redirect(next_url)
|
contributing
|
positive
|
@csrf_protect_m
@transaction.atomic
@filter_hook
def post(self, request, *args, **kwargs):
<DeepExtract>
self.form_obj = self.view_form(**self.get_form_datas())
</DeepExtract>
<DeepExtract>
helper = self.get_form_helper()
if helper:
self.form_obj.helper = helper
</DeepExtract>
if self.valid_forms():
<DeepExtract>
pass
</DeepExtract>
<DeepExtract>
request = self.request
msg = _('The %s was changed successfully.') % self.title
self.message_user(msg, 'success')
if '_redirect' in request.GET:
response = request.GET['_redirect']
else:
response = self.get_redirect_url()
</DeepExtract>
cls_str = str if six.PY3 else basestring
if isinstance(response, cls_str):
return HttpResponseRedirect(response)
else:
return response
return self.get_response()
|
@csrf_protect_m
@transaction.atomic
@filter_hook
def post(self, request, *args, **kwargs):
self.form_obj = self.view_form(**self.get_form_datas())
helper = self.get_form_helper()
if helper:
self.form_obj.helper = helper
if self.valid_forms():
pass
request = self.request
msg = _('The %s was changed successfully.') % self.title
self.message_user(msg, 'success')
if '_redirect' in request.GET:
response = request.GET['_redirect']
else:
response = self.get_redirect_url()
cls_str = str if six.PY3 else basestring
if isinstance(response, cls_str):
return HttpResponseRedirect(response)
else:
return response
return self.get_response()
|
Django_Blog
|
positive
|
def make_grad_pdm_x(self, dm=None, flatten=False):
"""return jacobian of projected density matrix w.r.t atomic coordinates"""
if dm is None:
dm = self.base.make_rdm1()
if dm.ndim > 2:
dm = dm.sum(0)
t_dm = torch.from_numpy(dm).double()
<DeepExtract>
natm = self.mol.natm
ralst = [ii for ii in range(self.mol.natm) if not self.mol.elements[ii].startswith('X')]
nratm = len(ralst)
shell_sec = [ov.shape[-1] for ov in self._t_ovlp_shells]
gdmx_shells = [torch.zeros([natm, 3, nratm, ss, ss], dtype=float) for ss in shell_sec]
for (gdmx, govx, ovlp) in zip(gdmx_shells, self._t_ipov_shells, self._t_ovlp_shells):
gproj = torch.einsum('xrap,rs,saq->xapq', govx, t_dm, ovlp)
for (ira, ia) in enumerate(ralst):
(bg, ed) = self.mol.aoslice_by_atom()[ia, 2:]
gdmx[ia] -= torch.einsum('xrap,rs,saq->xapq', govx[:, bg:ed], t_dm[bg:ed], ovlp)
gdmx[ia, :, ira] += gproj[:, ira]
gdmx += gdmx.clone().transpose(-1, -2)
t_gdmx_shells = gdmx_shells
</DeepExtract>
if not flatten:
return [s.detach().cpu().numpy() for s in t_gdmx_shells]
else:
return torch.cat([s.flatten(-2) for s in t_gdmx_shells], dim=-1).detach().cpu().numpy()
|
def make_grad_pdm_x(self, dm=None, flatten=False):
"""return jacobian of projected density matrix w.r.t atomic coordinates"""
if dm is None:
dm = self.base.make_rdm1()
if dm.ndim > 2:
dm = dm.sum(0)
t_dm = torch.from_numpy(dm).double()
natm = self.mol.natm
ralst = [ii for ii in range(self.mol.natm) if not self.mol.elements[ii].startswith('X')]
nratm = len(ralst)
shell_sec = [ov.shape[-1] for ov in self._t_ovlp_shells]
gdmx_shells = [torch.zeros([natm, 3, nratm, ss, ss], dtype=float) for ss in shell_sec]
for (gdmx, govx, ovlp) in zip(gdmx_shells, self._t_ipov_shells, self._t_ovlp_shells):
gproj = torch.einsum('xrap,rs,saq->xapq', govx, t_dm, ovlp)
for (ira, ia) in enumerate(ralst):
(bg, ed) = self.mol.aoslice_by_atom()[ia, 2:]
gdmx[ia] -= torch.einsum('xrap,rs,saq->xapq', govx[:, bg:ed], t_dm[bg:ed], ovlp)
gdmx[ia, :, ira] += gproj[:, ira]
gdmx += gdmx.clone().transpose(-1, -2)
t_gdmx_shells = gdmx_shells
if not flatten:
return [s.detach().cpu().numpy() for s in t_gdmx_shells]
else:
return torch.cat([s.flatten(-2) for s in t_gdmx_shells], dim=-1).detach().cpu().numpy()
|
deepks-kit
|
positive
|
def get_fastbin_targets(proc):
memory_map = open('/proc/{}/maps'.format(proc.pid), 'rb').readlines()
libc = ELF('./libc.so.6')
syms = libc.symbols
writable = []
got_libc_base = False
for x in memory_map:
if 'libc.so.6' in x:
l = x.split(' ')
mem_start = int(l[0].split('-')[0], 16)
mem_end = int(l[0].split('-')[1], 16)
if not got_libc_base:
LIBC = mem_start
got_libc_base = True
prot = l[1]
if 'rw' in prot:
writable.append((mem_start, mem_end))
addrs = []
for (s, e) in writable:
size = e - s
data = proc.leak(s, size)
for i in range(size - 8):
if data[i + 1:i + 8] == '\x00' * 7 and data[i] != '\x00':
addr = i + s
fastbin_size = ord(data[i])
<DeepExtract>
names = []
trimmed_size = fastbin_size & ~7
for x in syms:
if addr <= LIBC + syms[x] <= trimmed_size + addr:
names.append(x)
overwritable_syms = names
</DeepExtract>
addrs.append((addr - LIBC, fastbin_size, overwritable_syms))
return addrs
|
def get_fastbin_targets(proc):
memory_map = open('/proc/{}/maps'.format(proc.pid), 'rb').readlines()
libc = ELF('./libc.so.6')
syms = libc.symbols
writable = []
got_libc_base = False
for x in memory_map:
if 'libc.so.6' in x:
l = x.split(' ')
mem_start = int(l[0].split('-')[0], 16)
mem_end = int(l[0].split('-')[1], 16)
if not got_libc_base:
LIBC = mem_start
got_libc_base = True
prot = l[1]
if 'rw' in prot:
writable.append((mem_start, mem_end))
addrs = []
for (s, e) in writable:
size = e - s
data = proc.leak(s, size)
for i in range(size - 8):
if data[i + 1:i + 8] == '\x00' * 7 and data[i] != '\x00':
addr = i + s
fastbin_size = ord(data[i])
names = []
trimmed_size = fastbin_size & ~7
for x in syms:
if addr <= LIBC + syms[x] <= trimmed_size + addr:
names.append(x)
overwritable_syms = names
addrs.append((addr - LIBC, fastbin_size, overwritable_syms))
return addrs
|
CTF-writeups
|
positive
|
def _add_multilevel_rois(blobs, stage):
"""By default training RoIs are added for a single feature map level only.
When using FPN, the RoIs must be distributed over different FPN levels
according to the level assignment heuristic (see: modeling.FPN.
map_rois_to_fpn_levels).
"""
stage_name = '_{}'.format(stage)
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
def _distribute_rois_over_fpn_levels(rois_blob_name):
"""Distribute rois over the different FPN levels."""
target_lvls = fpn.map_rois_to_fpn_levels(blobs[rois_blob_name][:, 1:5], lvl_min, lvl_max)
fpn.add_multilevel_roi_blobs(blobs, rois_blob_name, blobs[rois_blob_name], target_lvls, lvl_min, lvl_max)
<DeepExtract>
target_lvls = fpn.map_rois_to_fpn_levels(blobs['rois' + stage_name][:, 1:5], lvl_min, lvl_max)
fpn.add_multilevel_roi_blobs(blobs, 'rois' + stage_name, blobs['rois' + stage_name], target_lvls, lvl_min, lvl_max)
</DeepExtract>
if cfg.MODEL.MASK_ON and cfg.MRCNN.AT_STAGE == stage:
<DeepExtract>
target_lvls = fpn.map_rois_to_fpn_levels(blobs['mask_rois'][:, 1:5], lvl_min, lvl_max)
fpn.add_multilevel_roi_blobs(blobs, 'mask_rois', blobs['mask_rois'], target_lvls, lvl_min, lvl_max)
</DeepExtract>
if cfg.MODEL.KEYPOINTS_ON and cfg.KRCNN.AT_STAGE == stage:
<DeepExtract>
target_lvls = fpn.map_rois_to_fpn_levels(blobs['keypoint_rois'][:, 1:5], lvl_min, lvl_max)
fpn.add_multilevel_roi_blobs(blobs, 'keypoint_rois', blobs['keypoint_rois'], target_lvls, lvl_min, lvl_max)
</DeepExtract>
|
def _add_multilevel_rois(blobs, stage):
"""By default training RoIs are added for a single feature map level only.
When using FPN, the RoIs must be distributed over different FPN levels
according to the level assignment heuristic (see: modeling.FPN.
map_rois_to_fpn_levels).
"""
stage_name = '_{}'.format(stage)
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
def _distribute_rois_over_fpn_levels(rois_blob_name):
"""Distribute rois over the different FPN levels."""
target_lvls = fpn.map_rois_to_fpn_levels(blobs[rois_blob_name][:, 1:5], lvl_min, lvl_max)
fpn.add_multilevel_roi_blobs(blobs, rois_blob_name, blobs[rois_blob_name], target_lvls, lvl_min, lvl_max)
target_lvls = fpn.map_rois_to_fpn_levels(blobs['rois' + stage_name][:, 1:5], lvl_min, lvl_max)
fpn.add_multilevel_roi_blobs(blobs, 'rois' + stage_name, blobs['rois' + stage_name], target_lvls, lvl_min, lvl_max)
if cfg.MODEL.MASK_ON and cfg.MRCNN.AT_STAGE == stage:
target_lvls = fpn.map_rois_to_fpn_levels(blobs['mask_rois'][:, 1:5], lvl_min, lvl_max)
fpn.add_multilevel_roi_blobs(blobs, 'mask_rois', blobs['mask_rois'], target_lvls, lvl_min, lvl_max)
if cfg.MODEL.KEYPOINTS_ON and cfg.KRCNN.AT_STAGE == stage:
target_lvls = fpn.map_rois_to_fpn_levels(blobs['keypoint_rois'][:, 1:5], lvl_min, lvl_max)
fpn.add_multilevel_roi_blobs(blobs, 'keypoint_rois', blobs['keypoint_rois'], target_lvls, lvl_min, lvl_max)
|
AC-FPN
|
positive
|
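The docstring above points to the FPN level-assignment heuristic in `modeling.FPN.map_rois_to_fpn_levels`. A rough NumPy sketch of the heuristic from the FPN paper, k = floor(k0 + log2(sqrt(w*h)/224)) clipped to [lvl_min, lvl_max], with illustrative names and defaults rather than the exact Detectron code, might look like:

import numpy as np

def map_boxes_to_fpn_levels(boxes, lvl_min, lvl_max, canonical_scale=224, canonical_level=4):
    # boxes: (N, 4) as (x1, y1, x2, y2); k = floor(k0 + log2(sqrt(w*h) / s0))
    widths = boxes[:, 2] - boxes[:, 0] + 1
    heights = boxes[:, 3] - boxes[:, 1] + 1
    scales = np.sqrt(widths * heights)
    target_lvls = np.floor(canonical_level + np.log2(scales / canonical_scale + 1e-6))
    return np.clip(target_lvls, lvl_min, lvl_max).astype(int)

# a 224x224 box stays at the canonical level, a 32x32 box is clipped to the finest level
boxes = np.array([[0, 0, 223, 223], [0, 0, 31, 31]], dtype=float)
print(map_boxes_to_fpn_levels(boxes, lvl_min=2, lvl_max=5))  # [4 2]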
def _episode(scraper_module, scraper, url, test=None):
<DeepExtract>
simple_info = {}
simple_info['show_title'] = 'Game of Thrones'
simple_info['episode_title'] = 'The Dragon and the Wolf'
simple_info['year'] = '2011'
simple_info['season_number'] = '7'
simple_info['episode_number'] = '7'
simple_info['show_aliases'] = ''
simple_info['country'] = 'US'
simple_info['no_seasons'] = '7'
all_info = {}
all_info['showInfo'] = {}
all_info['showInfo']['ids'] = {}
all_info['showInfo']['ids']['imdb'] = 'tt0944947'
(simple_info, all_info) = (simple_info, all_info)
</DeepExtract>
<DeepExtract>
if _scraper_sources_dict.get(scraper, None) is None:
_scraper_sources_dict[scraper] = scraper_module.sources(url=url)
scraper_sources = _scraper_sources_dict[scraper]
</DeepExtract>
def scrape():
return scraper_sources.episode(simple_info, all_info)
<DeepExtract>
start = time.time()
results = scrape()
end = time.time()
time_ms = int(round((end - start) * 1000))
(results, time_ms) = (results, time_ms)
</DeepExtract>
if test:
<DeepExtract>
results_count = len(results)
if os.getenv('A4KSCRAPERS_TEST_TOTAL') == '1':
for torrent in results:
total_results[torrent['release_title']] = 1
return
if scraper not in trackers or (scraper in ['btdb', 'torrentz2', 'torrentgalaxy'] and results_count == 0):
tools.log('%s is disabled' % scraper, 'notice')
return
expected_count = 1
if os.getenv('A4KSCRAPERS_TEST_ALL') == '1' and scraper not in ['showrss']:
expected_count = len(get_urls(scraper))
if scraper_sources._request is not None and scraper_sources._request.exc_msg:
tools.log('%s exception: %s' % (scraper, scraper_sources._request.exc_msg), 'notice')
expected_count = 0
test.assertEqual(results_count, expected_count, '%s failed to find torrent' % scraper)
if scraper == 'showrss' or scraper == 'extratorrent' or scraper == 'eztv' or (scraper == 'torrentz2'):
return
for torrent in results:
test.assertIsNotNone(torrent['size'], '%s missing size info' % scraper)
test.assertIsNotNone(torrent['seeds'], '%s missing seeds info' % scraper)
</DeepExtract>
return (results, time_ms)
|
def _episode(scraper_module, scraper, url, test=None):
simple_info = {}
simple_info['show_title'] = 'Game of Thrones'
simple_info['episode_title'] = 'The Dragon and the Wolf'
simple_info['year'] = '2011'
simple_info['season_number'] = '7'
simple_info['episode_number'] = '7'
simple_info['show_aliases'] = ''
simple_info['country'] = 'US'
simple_info['no_seasons'] = '7'
all_info = {}
all_info['showInfo'] = {}
all_info['showInfo']['ids'] = {}
all_info['showInfo']['ids']['imdb'] = 'tt0944947'
(simple_info, all_info) = (simple_info, all_info)
if _scraper_sources_dict.get(scraper, None) is None:
_scraper_sources_dict[scraper] = scraper_module.sources(url=url)
scraper_sources = _scraper_sources_dict[scraper]
def scrape():
return scraper_sources.episode(simple_info, all_info)
start = time.time()
results = scrape()
end = time.time()
time_ms = int(round((end - start) * 1000))
(results, time_ms) = (results, time_ms)
if test:
results_count = len(results)
if os.getenv('A4KSCRAPERS_TEST_TOTAL') == '1':
for torrent in results:
total_results[torrent['release_title']] = 1
return
if scraper not in trackers or (scraper in ['btdb', 'torrentz2', 'torrentgalaxy'] and results_count == 0):
tools.log('%s is disabled' % scraper, 'notice')
return
expected_count = 1
if os.getenv('A4KSCRAPERS_TEST_ALL') == '1' and scraper not in ['showrss']:
expected_count = len(get_urls(scraper))
if scraper_sources._request is not None and scraper_sources._request.exc_msg:
tools.log('%s exception: %s' % (scraper, scraper_sources._request.exc_msg), 'notice')
expected_count = 0
test.assertEqual(results_count, expected_count, '%s failed to find torrent' % scraper)
if scraper == 'showrss' or scraper == 'extratorrent' or scraper == 'eztv' or (scraper == 'torrentz2'):
return
for torrent in results:
test.assertIsNotNone(torrent['size'], '%s missing size info' % scraper)
test.assertIsNotNone(torrent['seeds'], '%s missing seeds info' % scraper)
return (results, time_ms)
|
a4kScrapers
|
positive
|
def get_features(input, step=0, alpha=-1):
(batch_size, n_frames) = input.shape[:2]
<DeepExtract>
if isinstance(input, list):
input = [frames2batch(t) for t in input]
if isinstance(input, tuple):
input = tuple([frames2batch(t) for t in input])
else:
t = input
input = t.reshape(t.shape[0] * t.shape[1], *t.shape[2:])
</DeepExtract>
for i in range(step, -1, -1):
index = self.n_layer - i - 1
if i == step:
out = self.from_rgb[index](input)
if i == 0:
<DeepExtract>
if isinstance(out, list):
frames_out = [batch2frames(t, batch_size, n_frames) for t in out]
elif isinstance(out, tuple):
frames_out = tuple([batch2frames(t, batch_size, n_frames) for t in out])
else:
t = out
frames_out = t.view(batch_size, n_frames, *t.shape[1:])
</DeepExtract>
out_std = torch.sqrt(frames_out.var(0, unbiased=False) + 1e-08)
mean_std = out_std.mean().expand_as(out[:, [0]])
out = torch.cat([out, mean_std], 1)
out = self.progression[index](out)
if i > 0:
if i == step and 0 <= alpha < 1:
skip_rgb = F.avg_pool2d(input, 2)
skip_rgb = self.from_rgb[index + 1](skip_rgb)
out = (1 - alpha) * skip_rgb + alpha * out
out = out.squeeze(-1).squeeze(-1)
return out
|
def get_features(input, step=0, alpha=-1):
(batch_size, n_frames) = input.shape[:2]
if isinstance(input, list):
input = [frames2batch(t) for t in input]
if isinstance(input, tuple):
input = tuple([frames2batch(t) for t in input])
else:
t = input
input = t.reshape(t.shape[0] * t.shape[1], *t.shape[2:])
for i in range(step, -1, -1):
index = self.n_layer - i - 1
if i == step:
out = self.from_rgb[index](input)
if i == 0:
if isinstance(out, list):
frames_out = [batch2frames(t, batch_size, n_frames) for t in out]
elif isinstance(out, tuple):
frames_out = tuple([batch2frames(t, batch_size, n_frames) for t in out])
else:
t = out
frames_out = t.view(batch_size, n_frames, *t.shape[1:])
out_std = torch.sqrt(frames_out.var(0, unbiased=False) + 1e-08)
mean_std = out_std.mean().expand_as(out[:, [0]])
out = torch.cat([out, mean_std], 1)
out = self.progression[index](out)
if i > 0:
if i == step and 0 <= alpha < 1:
skip_rgb = F.avg_pool2d(input, 2)
skip_rgb = self.from_rgb[index + 1](skip_rgb)
out = (1 - alpha) * skip_rgb + alpha * out
out = out.squeeze(-1).squeeze(-1)
return out
|
deep-landscape
|
positive
|
def CalculateByteStatistics(dPrevalence=None, data=None):
averageConsecutiveByteDifference = None
if dPrevalence == None:
dPrevalence = {iter: 0 for iter in range(256)}
sumDifferences = 0.0
previous = None
if len(data) > 1:
for byte in data:
<DeepExtract>
if sys.version_info[0] > 2:
byte = byte
else:
byte = ord(byte)
</DeepExtract>
dPrevalence[byte] += 1
if previous != None:
sumDifferences += abs(byte - previous)
previous = byte
averageConsecutiveByteDifference = sumDifferences / float(len(data) - 1)
sumValues = sum(dPrevalence.values())
countNullByte = dPrevalence[0]
countControlBytes = 0
countWhitespaceBytes = 0
countUniqueBytes = 0
for iter in range(1, 33):
if chr(iter) in string.whitespace:
countWhitespaceBytes += dPrevalence[iter]
else:
countControlBytes += dPrevalence[iter]
countControlBytes += dPrevalence[127]
countPrintableBytes = 0
for iter in range(33, 127):
countPrintableBytes += dPrevalence[iter]
countHighBytes = 0
for iter in range(128, 256):
countHighBytes += dPrevalence[iter]
countHexadecimalBytes = 0
countBASE64Bytes = 0
for iter in range(48, 58):
countHexadecimalBytes += dPrevalence[iter]
countBASE64Bytes += dPrevalence[iter]
for iter in range(65, 71):
countHexadecimalBytes += dPrevalence[iter]
for iter in range(97, 103):
countHexadecimalBytes += dPrevalence[iter]
for iter in range(65, 91):
countBASE64Bytes += dPrevalence[iter]
for iter in range(97, 123):
countBASE64Bytes += dPrevalence[iter]
countBASE64Bytes += dPrevalence[ord('+')] + dPrevalence[ord('/')] + dPrevalence[ord('=')]
entropy = 0.0
for iter in range(256):
if dPrevalence[iter] > 0:
prevalence = float(dPrevalence[iter]) / float(sumValues)
entropy += -prevalence * math.log(prevalence, 2)
countUniqueBytes += 1
return (sumValues, entropy, countUniqueBytes, countNullByte, countControlBytes, countWhitespaceBytes, countPrintableBytes, countHighBytes, countHexadecimalBytes, countBASE64Bytes, averageConsecutiveByteDifference)
|
def CalculateByteStatistics(dPrevalence=None, data=None):
averageConsecutiveByteDifference = None
if dPrevalence == None:
dPrevalence = {iter: 0 for iter in range(256)}
sumDifferences = 0.0
previous = None
if len(data) > 1:
for byte in data:
if sys.version_info[0] > 2:
byte = byte
else:
byte = ord(byte)
dPrevalence[byte] += 1
if previous != None:
sumDifferences += abs(byte - previous)
previous = byte
averageConsecutiveByteDifference = sumDifferences / float(len(data) - 1)
sumValues = sum(dPrevalence.values())
countNullByte = dPrevalence[0]
countControlBytes = 0
countWhitespaceBytes = 0
countUniqueBytes = 0
for iter in range(1, 33):
if chr(iter) in string.whitespace:
countWhitespaceBytes += dPrevalence[iter]
else:
countControlBytes += dPrevalence[iter]
countControlBytes += dPrevalence[127]
countPrintableBytes = 0
for iter in range(33, 127):
countPrintableBytes += dPrevalence[iter]
countHighBytes = 0
for iter in range(128, 256):
countHighBytes += dPrevalence[iter]
countHexadecimalBytes = 0
countBASE64Bytes = 0
for iter in range(48, 58):
countHexadecimalBytes += dPrevalence[iter]
countBASE64Bytes += dPrevalence[iter]
for iter in range(65, 71):
countHexadecimalBytes += dPrevalence[iter]
for iter in range(97, 103):
countHexadecimalBytes += dPrevalence[iter]
for iter in range(65, 91):
countBASE64Bytes += dPrevalence[iter]
for iter in range(97, 123):
countBASE64Bytes += dPrevalence[iter]
countBASE64Bytes += dPrevalence[ord('+')] + dPrevalence[ord('/')] + dPrevalence[ord('=')]
entropy = 0.0
for iter in range(256):
if dPrevalence[iter] > 0:
prevalence = float(dPrevalence[iter]) / float(sumValues)
entropy += -prevalence * math.log(prevalence, 2)
countUniqueBytes += 1
return (sumValues, entropy, countUniqueBytes, countNullByte, countControlBytes, countWhitespaceBytes, countPrintableBytes, countHighBytes, countHexadecimalBytes, countBASE64Bytes, averageConsecutiveByteDifference)
|
Beta
|
positive
|
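`CalculateByteStatistics` above derives Shannon entropy from per-byte prevalence counts. A stripped-down sketch of just that entropy calculation, assuming Python 3 `bytes` input, is:

import math

def byte_entropy(data):
    # Shannon entropy in bits per byte, computed from per-byte prevalence counts
    counts = {value: 0 for value in range(256)}
    for byte in data:
        counts[byte] += 1
    total = float(len(data))
    entropy = 0.0
    for count in counts.values():
        if count > 0:
            p = count / total
            entropy -= p * math.log(p, 2)
    return entropy

print(byte_entropy(b'\x00' * 16))                 # 0.0, a single repeated value
print(round(byte_entropy(bytes(range(256))), 1))  # 8.0, uniform byte distribution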
def __init__(self, vocab_file, do_lower_case=True, unk_token='[UNK]', split_token='\x01'):
<DeepExtract>
vocab = collections.OrderedDict()
fin = open(vocab_file)
for (num, line) in enumerate(fin):
items = convert_to_unicode(line.strip()).split('\t')
if len(items) > 2:
break
token = items[0]
index = items[1] if len(items) == 2 else num
token = token.strip()
vocab[token] = int(index)
self.vocab = vocab
</DeepExtract>
self.inv_vocab = {v: k for (k, v) in self.vocab.items()}
self.tokenizer = sp.SentencePieceProcessor()
self.tokenizer.Load(vocab_file + '.model')
self.do_lower_case = do_lower_case
self.unk_token = unk_token
self.split_token = split_token
|
def __init__(self, vocab_file, do_lower_case=True, unk_token='[UNK]', split_token='\x01'):
vocab = collections.OrderedDict()
fin = open(vocab_file)
for (num, line) in enumerate(fin):
items = convert_to_unicode(line.strip()).split('\t')
if len(items) > 2:
break
token = items[0]
index = items[1] if len(items) == 2 else num
token = token.strip()
vocab[token] = int(index)
self.vocab = vocab
self.inv_vocab = {v: k for (k, v) in self.vocab.items()}
self.tokenizer = sp.SentencePieceProcessor()
self.tokenizer.Load(vocab_file + '.model')
self.do_lower_case = do_lower_case
self.unk_token = unk_token
self.split_token = split_token
|
ERNIE
|
positive
|
def __init__(self, image_set, data_path, mc):
imdb.__init__(self, 'kitti_' + image_set, mc)
self._image_set = image_set
self._data_root_path = data_path
self._lidar_2d_path = os.path.join(self._data_root_path, 'lidar_2d')
self._gta_2d_path = os.path.join(self._data_root_path, 'gta')
<DeepExtract>
image_set_file = os.path.join(self._data_root_path, 'ImageSet', self._image_set + '.txt')
assert os.path.exists(image_set_file), 'File does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_idx = [x.strip() for x in f.readlines()]
self._image_idx = image_idx
</DeepExtract>
self._perm_idx = None
self._cur_idx = 0
self._shuffle_image_idx()
|
def __init__(self, image_set, data_path, mc):
imdb.__init__(self, 'kitti_' + image_set, mc)
self._image_set = image_set
self._data_root_path = data_path
self._lidar_2d_path = os.path.join(self._data_root_path, 'lidar_2d')
self._gta_2d_path = os.path.join(self._data_root_path, 'gta')
image_set_file = os.path.join(self._data_root_path, 'ImageSet', self._image_set + '.txt')
assert os.path.exists(image_set_file), 'File does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_idx = [x.strip() for x in f.readlines()]
self._image_idx = image_idx
self._perm_idx = None
self._cur_idx = 0
self._shuffle_image_idx()
|
DINK
|
positive
|
def _nn_features_per_object_for_chunk(reference_embeddings, query_embeddings, wrong_label_mask, k_nearest_neighbors):
"""Extracts features for each object using nearest neighbor attention.
Args:
reference_embeddings: Tensor of shape [n_chunk, embedding_dim],
the embedding vectors for the reference frame.
query_embeddings: Tensor of shape [m_chunk, embedding_dim], the embedding
vectors for the query frames.
wrong_label_mask:
k_nearest_neighbors: Integer, the number of nearest neighbors to use.
Returns:
nn_features: A float32 tensor of nearest neighbor features of shape
[m_chunk, n_objects, feature_dim].
"""
reference_embeddings_key = reference_embeddings
query_embeddings_key = query_embeddings
<DeepExtract>
embedding_dim = query_embeddings_key.size()[-1]
reference_embeddings_key = reference_embeddings_key.view(-1, embedding_dim)
first_dim = -1
query_embeddings_key = query_embeddings_key.view(first_dim, embedding_dim)
dists = _pairwise_distances(query_embeddings_key, reference_embeddings_key)
dists = dists
</DeepExtract>
dists = torch.unsqueeze(dists, 1) + torch.unsqueeze(wrong_label_mask.float(), 0) * WRONG_LABEL_PADDING_DISTANCE
if k_nearest_neighbors == 1:
(features, _) = torch.min(dists, 2, keepdim=True)
else:
(dists, _) = torch.topk(-dists, k=k_nearest_neighbors, dim=2)
dists = -dists
valid_mask = dists < WRONG_LABEL_PADDING_DISTANCE
masked_dists = dists * valid_mask.float()
pad_dist = torch.max(masked_dists, dim=2, keepdim=True)[0].repeat((1, 1, masked_dists.size()[-1]))
dists = torch.where(valid_mask, dists, pad_dist)
features = torch.mean(dists, dim=2, keepdim=True)
return features
|
def _nn_features_per_object_for_chunk(reference_embeddings, query_embeddings, wrong_label_mask, k_nearest_neighbors):
"""Extracts features for each object using nearest neighbor attention.
Args:
reference_embeddings: Tensor of shape [n_chunk, embedding_dim],
the embedding vectors for the reference frame.
query_embeddings: Tensor of shape [m_chunk, embedding_dim], the embedding
vectors for the query frames.
wrong_label_mask:
k_nearest_neighbors: Integer, the number of nearest neighbors to use.
Returns:
nn_features: A float32 tensor of nearest neighbor features of shape
[m_chunk, n_objects, feature_dim].
"""
reference_embeddings_key = reference_embeddings
query_embeddings_key = query_embeddings
embedding_dim = query_embeddings_key.size()[-1]
reference_embeddings_key = reference_embeddings_key.view(-1, embedding_dim)
first_dim = -1
query_embeddings_key = query_embeddings_key.view(first_dim, embedding_dim)
dists = _pairwise_distances(query_embeddings_key, reference_embeddings_key)
dists = dists
dists = torch.unsqueeze(dists, 1) + torch.unsqueeze(wrong_label_mask.float(), 0) * WRONG_LABEL_PADDING_DISTANCE
if k_nearest_neighbors == 1:
(features, _) = torch.min(dists, 2, keepdim=True)
else:
(dists, _) = torch.topk(-dists, k=k_nearest_neighbors, dim=2)
dists = -dists
valid_mask = dists < WRONG_LABEL_PADDING_DISTANCE
masked_dists = dists * valid_mask.float()
pad_dist = torch.max(masked_dists, dim=2, keepdim=True)[0].repeat((1, 1, masked_dists.size()[-1]))
dists = torch.where(valid_mask, dists, pad_dist)
features = torch.mean(dists, dim=2, keepdim=True)
return features
|
CVPR2020_MANet
|
positive
|
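The chunked nearest-neighbor feature code above relies on a `_pairwise_distances` helper that is not shown. A plausible squared-Euclidean stand-in plus the top-k/mean reduction, written as an independent sketch rather than the repository's actual helper, could be:

import torch

def pairwise_sq_distances(a, b):
    # squared Euclidean distances between rows of a (m, d) and b (n, d)
    return (a * a).sum(1, keepdim=True) - 2.0 * a @ b.t() + (b * b).sum(1)

def knn_mean_distance(query, reference, k=3):
    # mean of the k smallest distances per query row, as in the top-k/mean reduction above
    dists = pairwise_sq_distances(query, reference)
    nearest, _ = torch.topk(dists, k=min(k, reference.size(0)), dim=1, largest=False)
    return nearest.mean(dim=1, keepdim=True)

query = torch.randn(4, 8)
reference = torch.randn(10, 8)
print(knn_mean_distance(query, reference).shape)  # torch.Size([4, 1])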
def density(self, gene_list, min_distance=None, by_gene=False):
"""
Calculates the density of the non-thresholded network edges
amongst genes within gene_list. Includes parameters to perform
measurements for genes within a certain distance of each other.
This corrects for cis regulatory elements increasing noise
in coexpression network.
Parameters
----------
gene_list : iter of Loci
List of genes from which to calculate density.
min_distance : int (default: None)
Ignore edges between genes less than min_distance
in density calculation.
by_gene : bool (default: False)
Return a per-gene breakdown of density within the subnetwork.
Returns
-------
A network density OR density on a gene-wise basis
"""
<DeepExtract>
num_genes = self.num_genes()
if gene_list is None:
df = self._coex_DataFrame(sig_only=False)
else:
gene_list = set(sorted(gene_list))
ids = np.array([self._expr_index[x.id] for x in gene_list])
if filter_missing_gene_ids:
ids = np.array([x for x in ids if x is not None])
if len(ids) == 0:
df = pd.DataFrame(columns=['score', 'significant', 'distance'])
else:
ids = PCCUP.coex_index(ids, num_genes)
df = self._coex_DataFrame(ids=ids, sig_only=False)
del ids
if min_distance is not None:
df = df[df.distance >= min_distance]
if names_as_index or names_as_cols or trans_locus_only:
names = self._expr.index.values
ids = df.index.values
if len(ids) > 0:
ids = PCCUP.coex_expr_index(ids, num_genes)
df.insert(0, 'gene_a', names[ids[:, 0]])
df.insert(1, 'gene_b', names[ids[:, 1]])
del ids
del names
else:
df.insert(0, 'gene_a', [])
df.insert(0, 'gene_b', [])
if names_as_index and (not names_as_cols):
df = df.set_index(['gene_a', 'gene_b'])
if trans_locus_only:
try:
parents = {x.id: x.attr['parent_locus'] for x in gene_list}
except KeyError as e:
raise KeyError("Each locus must have 'parent_locus' attr set to calculate trans only")
df['trans'] = [parents[gene_a] != parents[gene_b] for (gene_a, gene_b) in zip(df.index.get_level_values(0), df.index.get_level_values(1))]
edges = df
</DeepExtract>
if by_gene == True:
x = pd.DataFrame.from_records(chain(*[((gene_a, score), (gene_b, score)) for (gene_a, gene_b, score, sig, dis) in edges.reset_index().values]), columns=['gene', 'score'])
return x.groupby('gene').agg(np.mean)
else:
if len(edges) == 0:
return np.nan
if len(edges) == 1:
return edges.score[0]
return np.nanmean(edges.score) / (1 / np.sqrt(len(edges)))
|
def density(self, gene_list, min_distance=None, by_gene=False):
"""
Calculates the density of the non-thresholded network edges
amongst genes within gene_list. Includes parameters to perform
measurements for genes within a certain distance of each other.
This corrects for cis regulatory elements increasing noise
in coexpression network.
Parameters
----------
gene_list : iter of Loci
List of genes from which to calculate density.
min_distance : int (default: None)
Ignore edges between genes less than min_distance
in density calculation.
by_gene : bool (default: False)
Return a per-gene breakdown of density within the subnetwork.
Returns
-------
A network density OR density on a gene-wise basis
"""
num_genes = self.num_genes()
if gene_list is None:
df = self._coex_DataFrame(sig_only=False)
else:
gene_list = set(sorted(gene_list))
ids = np.array([self._expr_index[x.id] for x in gene_list])
if filter_missing_gene_ids:
ids = np.array([x for x in ids if x is not None])
if len(ids) == 0:
df = pd.DataFrame(columns=['score', 'significant', 'distance'])
else:
ids = PCCUP.coex_index(ids, num_genes)
df = self._coex_DataFrame(ids=ids, sig_only=False)
del ids
if min_distance is not None:
df = df[df.distance >= min_distance]
if names_as_index or names_as_cols or trans_locus_only:
names = self._expr.index.values
ids = df.index.values
if len(ids) > 0:
ids = PCCUP.coex_expr_index(ids, num_genes)
df.insert(0, 'gene_a', names[ids[:, 0]])
df.insert(1, 'gene_b', names[ids[:, 1]])
del ids
del names
else:
df.insert(0, 'gene_a', [])
df.insert(0, 'gene_b', [])
if names_as_index and (not names_as_cols):
df = df.set_index(['gene_a', 'gene_b'])
if trans_locus_only:
try:
parents = {x.id: x.attr['parent_locus'] for x in gene_list}
except KeyError as e:
raise KeyError("Each locus must have 'parent_locus' attr set to calculate trans only")
df['trans'] = [parents[gene_a] != parents[gene_b] for (gene_a, gene_b) in zip(df.index.get_level_values(0), df.index.get_level_values(1))]
edges = df
if by_gene == True:
x = pd.DataFrame.from_records(chain(*[((gene_a, score), (gene_b, score)) for (gene_a, gene_b, score, sig, dis) in edges.reset_index().values]), columns=['gene', 'score'])
return x.groupby('gene').agg(np.mean)
else:
if len(edges) == 0:
return np.nan
if len(edges) == 1:
return edges.score[0]
return np.nanmean(edges.score) / (1 / np.sqrt(len(edges)))
|
Camoco
|
positive
|
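The final expression in `density` above, `np.nanmean(edges.score) / (1 / np.sqrt(len(edges)))`, scales the mean edge score by the square root of the number of edges. A self-contained sketch of that statistic over a plain array of scores:

import numpy as np

def subnetwork_density(scores):
    # mean edge score divided by the 1/sqrt(n_edges) standard-error-like term
    scores = np.asarray(scores, dtype=float)
    if scores.size == 0:
        return np.nan
    if scores.size == 1:
        return scores[0]
    return np.nanmean(scores) / (1.0 / np.sqrt(scores.size))

print(subnetwork_density([0.2, 0.4, 0.3]))  # 0.3 * sqrt(3) ≈ 0.52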
def __init__(self, dictionary, embed_dim=512, embed_dict=None, out_embed_dim=256, max_positions=1024, convolutions=((512, 3),) * 20, attention=True, dropout=0.1, share_embed=False, positional_embeddings=True, adaptive_softmax_cutoff=None, normalization_constant=0.5, left_pad=False):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([2]))
self.dropout = dropout
self.normalization_constant = normalization_constant
self.left_pad = left_pad
<DeepExtract>
extended = []
for spec in convolutions:
if len(spec) == 3:
extended.append(spec)
elif len(spec) == 2:
extended.append(spec + (1,))
else:
raise Exception('invalid number of parameters in convolution spec ' + str(spec) + '. expected 2 or 3')
convolutions = tuple(extended)
</DeepExtract>
in_channels = convolutions[0][0]
if isinstance(attention, bool):
attention = [attention] * len(convolutions)
if not isinstance(attention, list) or len(attention) != len(convolutions):
raise ValueError('Attention is expected to be a list of booleans of length equal to the number of layers.')
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
<DeepExtract>
m = nn.Embedding(num_embeddings, embed_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, 0, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
self.embed_tokens = m
</DeepExtract>
if embed_dict:
self.embed_tokens = utils.load_embedding(embed_dict, self.dictionary, self.embed_tokens)
self.embed_positions = PositionalEmbedding(max_positions, embed_dim, padding_idx, left_pad=self.left_pad) if positional_embeddings else None
<DeepExtract>
m = nn.Linear(embed_dim, in_channels)
nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / embed_dim))
nn.init.constant_(m.bias, 0)
self.fc1 = nn.utils.weight_norm(m)
</DeepExtract>
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.attention = nn.ModuleList()
self.residuals = []
layer_in_channels = [in_channels]
for (i, (out_channels, kernel_size, residual)) in enumerate(convolutions):
if residual == 0:
residual_dim = out_channels
else:
residual_dim = layer_in_channels[-residual]
self.projections.append(Linear(residual_dim, out_channels) if residual_dim != out_channels else None)
self.convolutions.append(LinearizedConv1d(in_channels, out_channels * 2, kernel_size, padding=kernel_size - 1, dropout=dropout))
self.attention.append(AttentionLayer(out_channels, embed_dim, self.normalization_constant) if attention[i] else None)
self.residuals.append(residual)
in_channels = out_channels
layer_in_channels.append(out_channels)
self.adaptive_softmax = None
self.fc2 = self.fc3 = None
if adaptive_softmax_cutoff is not None:
assert not share_embed
self.adaptive_softmax = AdaptiveSoftmax(num_embeddings, in_channels, adaptive_softmax_cutoff, dropout=dropout)
else:
<DeepExtract>
m = nn.Linear(in_channels, out_embed_dim)
nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / in_channels))
nn.init.constant_(m.bias, 0)
self.fc2 = nn.utils.weight_norm(m)
</DeepExtract>
if share_embed:
assert out_embed_dim == embed_dim, 'Shared embed weights implies same dimensions out_embed_dim={} vs embed_dim={}'.format(out_embed_dim, embed_dim)
self.fc3 = nn.Linear(out_embed_dim, num_embeddings)
self.fc3.weight = self.embed_tokens.weight
else:
<DeepExtract>
m = nn.Linear(out_embed_dim, num_embeddings)
nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / out_embed_dim))
nn.init.constant_(m.bias, 0)
self.fc3 = nn.utils.weight_norm(m)
</DeepExtract>
|
def __init__(self, dictionary, embed_dim=512, embed_dict=None, out_embed_dim=256, max_positions=1024, convolutions=((512, 3),) * 20, attention=True, dropout=0.1, share_embed=False, positional_embeddings=True, adaptive_softmax_cutoff=None, normalization_constant=0.5, left_pad=False):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([2]))
self.dropout = dropout
self.normalization_constant = normalization_constant
self.left_pad = left_pad
extended = []
for spec in convolutions:
if len(spec) == 3:
extended.append(spec)
elif len(spec) == 2:
extended.append(spec + (1,))
else:
raise Exception('invalid number of parameters in convolution spec ' + str(spec) + '. expected 2 or 3')
convolutions = tuple(extended)
in_channels = convolutions[0][0]
if isinstance(attention, bool):
attention = [attention] * len(convolutions)
if not isinstance(attention, list) or len(attention) != len(convolutions):
raise ValueError('Attention is expected to be a list of booleans of length equal to the number of layers.')
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
m = nn.Embedding(num_embeddings, embed_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, 0, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
self.embed_tokens = m
if embed_dict:
self.embed_tokens = utils.load_embedding(embed_dict, self.dictionary, self.embed_tokens)
self.embed_positions = PositionalEmbedding(max_positions, embed_dim, padding_idx, left_pad=self.left_pad) if positional_embeddings else None
m = nn.Linear(embed_dim, in_channels)
nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / embed_dim))
nn.init.constant_(m.bias, 0)
self.fc1 = nn.utils.weight_norm(m)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.attention = nn.ModuleList()
self.residuals = []
layer_in_channels = [in_channels]
for (i, (out_channels, kernel_size, residual)) in enumerate(convolutions):
if residual == 0:
residual_dim = out_channels
else:
residual_dim = layer_in_channels[-residual]
self.projections.append(Linear(residual_dim, out_channels) if residual_dim != out_channels else None)
self.convolutions.append(LinearizedConv1d(in_channels, out_channels * 2, kernel_size, padding=kernel_size - 1, dropout=dropout))
self.attention.append(AttentionLayer(out_channels, embed_dim, self.normalization_constant) if attention[i] else None)
self.residuals.append(residual)
in_channels = out_channels
layer_in_channels.append(out_channels)
self.adaptive_softmax = None
self.fc2 = self.fc3 = None
if adaptive_softmax_cutoff is not None:
assert not share_embed
self.adaptive_softmax = AdaptiveSoftmax(num_embeddings, in_channels, adaptive_softmax_cutoff, dropout=dropout)
else:
m = nn.Linear(in_channels, out_embed_dim)
nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / in_channels))
nn.init.constant_(m.bias, 0)
self.fc2 = nn.utils.weight_norm(m)
if share_embed:
assert out_embed_dim == embed_dim, 'Shared embed weights implies same dimensions out_embed_dim={} vs embed_dim={}'.format(out_embed_dim, embed_dim)
self.fc3 = nn.Linear(out_embed_dim, num_embeddings)
self.fc3.weight = self.embed_tokens.weight
else:
m = nn.Linear(out_embed_dim, num_embeddings)
nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / out_embed_dim))
nn.init.constant_(m.bias, 0)
self.fc3 = nn.utils.weight_norm(m)
|
crosentgec
|
positive
|
def test_returns_file_schemas(self):
<DeepExtract>
values = jp.jsonpath(self.reader.schemas, '$.NewSong.type')
actual_value = values[0]
</DeepExtract>
assert_that(actual_value).is_equal_to('object')
|
def test_returns_file_schemas(self):
values = jp.jsonpath(self.reader.schemas, '$.NewSong.type')
actual_value = values[0]
assert_that(actual_value).is_equal_to('object')
|
behave-restful
|
positive
|
def end(self, tag):
<DeepExtract>
if self._data:
if self._last is not None:
text = string.join(self._data, '')
if self._tail:
assert self._last.tail is None, 'internal error (tail)'
self._last.tail = text
else:
assert self._last.text is None, 'internal error (text)'
self._last.text = text
self._data = []
</DeepExtract>
self._last = self._elem.pop()
assert self._last.tag == tag, 'end tag mismatch (expected %s, got %s)' % (self._last.tag, tag)
self._tail = 1
return self._last
|
def end(self, tag):
if self._data:
if self._last is not None:
text = string.join(self._data, '')
if self._tail:
assert self._last.tail is None, 'internal error (tail)'
self._last.tail = text
else:
assert self._last.text is None, 'internal error (text)'
self._last.text = text
self._data = []
self._last = self._elem.pop()
assert self._last.tag == tag, 'end tag mismatch (expected %s, got %s)' % (self._last.tag, tag)
self._tail = 1
return self._last
|
contributing
|
positive
|
def detect_batch(detect_images, disable_message=False):
""" Pass the batch through detector for consistently sized images
or each image separately for inconsistently sized images """
<DeepExtract>
dims = set((frame.shape[:2] for frame in detect_images))
can_batch = len(dims) == 1
</DeepExtract>
if can_batch:
batch_detected = self.detector(detect_images, 0)
else:
if self.verbose and (not disable_message):
print('Batch has inconsistently sized images. Processing one image at a time')
batch_detected = dlib.mmod_rectangless([self.detector(detect_image, 0) for detect_image in detect_images])
return batch_detected
|
def detect_batch(detect_images, disable_message=False):
""" Pass the batch through detector for consistently sized images
or each image separately for inconsistently sized images """
dims = set((frame.shape[:2] for frame in detect_images))
can_batch = len(dims) == 1
if can_batch:
batch_detected = self.detector(detect_images, 0)
else:
if self.verbose and (not disable_message):
print('Batch has inconsistently sized images. Processing one image at a time')
batch_detected = dlib.mmod_rectangless([self.detector(detect_image, 0) for detect_image in detect_images])
return batch_detected
|
DeepFakeTutorial
|
positive
|
def get_file_server_glusterfs_volume_name(sc):
"""Get the glusterfs volume name
:param StorageClusterSettings sc: storage cluster settings
:rtype: str
:return: glusterfs volume name
"""
try:
volname = sc.file_server.server_options['glusterfs']['volume_name']
except KeyError:
<DeepExtract>
volname = _GLUSTER_DEFAULT_VOLNAME
</DeepExtract>
return volname
|
def get_file_server_glusterfs_volume_name(sc):
"""Get the glusterfs volume name
:param StorageClusterSettings sc: storage cluster settings
:rtype: str
:return: glusterfs volume name
"""
try:
volname = sc.file_server.server_options['glusterfs']['volume_name']
except KeyError:
volname = _GLUSTER_DEFAULT_VOLNAME
return volname
|
cortana-intelligence-inventory-optimization
|
positive
|
def _try_finding_latest_app(req_str):
req_str = req_str.strip('~')
req = rez.PackageRequest(req_str)
try:
app_vers = list(self.find(req.name, range_=req.range))
latest = app_vers[-1]
except IndexError:
<DeepExtract>
self.logged.emit(str("No package matched for request '%s', may havebeen excluded by package filter."), logging.ERROR)
</DeepExtract>
latest = model.BrokenPackage(req_str)
app_vers = [latest]
except _missing as e_:
<DeepExtract>
self.logged.emit(str(str(e_)), logging.ERROR)
</DeepExtract>
latest = model.BrokenPackage(req_str)
app_vers = [latest]
app_ranges[req.name] = app_vers
return latest
|
def _try_finding_latest_app(req_str):
req_str = req_str.strip('~')
req = rez.PackageRequest(req_str)
try:
app_vers = list(self.find(req.name, range_=req.range))
latest = app_vers[-1]
except IndexError:
self.logged.emit(str("No package matched for request '%s', may havebeen excluded by package filter."), logging.ERROR)
latest = model.BrokenPackage(req_str)
app_vers = [latest]
except _missing as e_:
self.logged.emit(str(str(e_)), logging.ERROR)
latest = model.BrokenPackage(req_str)
app_vers = [latest]
app_ranges[req.name] = app_vers
return latest
|
allzpark
|
positive
|
def __getitem__(self, index):
"""
:return: The IP address(es) in this `IPNetwork` object referenced by
index or slice. As slicing can produce large sequences of objects
an iterator is returned instead of the more usual `list`.
"""
item = None
if hasattr(index, 'indices'):
if self._module.version == 6:
raise TypeError('IPv6 slices are not supported!')
(start, stop, step) = index.indices(self.size)
if start + step < 0 or step > stop:
item = iter([IPAddress(self.first, self._module.version)])
else:
start_ip = IPAddress(self.first + start, self._module.version)
end_ip = IPAddress(self.first + stop - step, self._module.version)
<DeepExtract>
start_ip = IPAddress(start_ip)
end_ip = IPAddress(end_ip)
if start_ip.version != end_ip.version:
raise TypeError('start and stop IP versions do not match!')
version = start_ip.version
step = int(step)
if step == 0:
raise ValueError('step argument cannot be zero')
start_ip = int(start_ip)
stop = int(end_ip)
negative_step = False
if step < 0:
negative_step = True
index = start_ip - step
while True:
index += step
if negative_step:
if not index >= stop:
break
elif not index <= stop:
break
yield IPAddress(index, version)
</DeepExtract>
else:
try:
index = int(index)
if -self.size <= index < 0:
item = IPAddress(self.last + index + 1, self._module.version)
elif 0 <= index <= self.size - 1:
item = IPAddress(self.first + index, self._module.version)
else:
raise IndexError('index out of range for address range size!')
except ValueError:
raise TypeError('unsupported index type %r!' % index)
return item
|
def __getitem__(self, index):
"""
:return: The IP address(es) in this `IPNetwork` object referenced by
index or slice. As slicing can produce large sequences of objects
an iterator is returned instead of the more usual `list`.
"""
item = None
if hasattr(index, 'indices'):
if self._module.version == 6:
raise TypeError('IPv6 slices are not supported!')
(start, stop, step) = index.indices(self.size)
if start + step < 0 or step > stop:
item = iter([IPAddress(self.first, self._module.version)])
else:
start_ip = IPAddress(self.first + start, self._module.version)
end_ip = IPAddress(self.first + stop - step, self._module.version)
start_ip = IPAddress(start_ip)
end_ip = IPAddress(end_ip)
if start_ip.version != end_ip.version:
raise TypeError('start and stop IP versions do not match!')
version = start_ip.version
step = int(step)
if step == 0:
raise ValueError('step argument cannot be zero')
start_ip = int(start_ip)
stop = int(end_ip)
negative_step = False
if step < 0:
negative_step = True
index = start_ip - step
while True:
index += step
if negative_step:
if not index >= stop:
break
elif not index <= stop:
break
yield IPAddress(index, version)
else:
try:
index = int(index)
if -self.size <= index < 0:
item = IPAddress(self.last + index + 1, self._module.version)
elif 0 <= index <= self.size - 1:
item = IPAddress(self.first + index, self._module.version)
else:
raise IndexError('index out range for address range size!')
except ValueError:
raise TypeError('unsupported index type %r!' % index)
return item
|
AdvancedCloudFormation
|
positive
|
def fetch_status(self, *args, **kwargs):
<DeepExtract>
if not self.has['fetchStatus']:
raise NotImplementedError('{}: method not implemented: {}'.format(self.id, 'fetchStatus'))
</DeepExtract>
raise NotImplementedError('BacktestExchange does not support method fetch_status')
|
def fetch_status(self, *args, **kwargs):
if not self.has['fetchStatus']:
raise NotImplementedError('{}: method not implemented: {}'.format(self.id, 'fetchStatus'))
raise NotImplementedError('BacktestExchange does not support method fetch_status')
|
btrccts
|
positive
|
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, download_delay=15):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
<DeepExtract>
import urllib2, shutil
egg_name = 'setuptools-%s-py%s.egg' % (version, sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto):
try:
from distutils import log
if download_delay:
log.warn('\n---------------------------------------------------------------------------\nThis script requires setuptools version %s to run (even to display\nhelp). I will attempt to download it for you (from\n%s), but\nyou may need to enable firewall access for this script first.\nI will start the download in %d seconds.\n\n(Note: if this machine does not have network access, please obtain the file\n\n %s\n\nand place it in this directory before rerunning this script.)\n---------------------------------------------------------------------------', version, download_base, download_delay, url)
from time import sleep
sleep(download_delay)
log.warn('Downloading %s', url)
src = urllib2.urlopen(url)
data = _validate_md5(egg_name, src.read())
dst = open(saveto, 'wb')
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
egg = os.path.realpath(saveto)
</DeepExtract>
sys.path.insert(0, egg)
import setuptools
setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require('setuptools>=' + version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
if was_imported:
sys.stderr.write("The required version of setuptools (>=%s) is not available, and\ncan't be installed while this script is running. Please install\n a more recent version first, using 'easy_install -U setuptools'.\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
except pkg_resources.DistributionNotFound:
pass
del pkg_resources, sys.modules['pkg_resources']
return do_download()
|
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, download_delay=15):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
import urllib2, shutil
egg_name = 'setuptools-%s-py%s.egg' % (version, sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto):
try:
from distutils import log
if download_delay:
log.warn('\n---------------------------------------------------------------------------\nThis script requires setuptools version %s to run (even to display\nhelp). I will attempt to download it for you (from\n%s), but\nyou may need to enable firewall access for this script first.\nI will start the download in %d seconds.\n\n(Note: if this machine does not have network access, please obtain the file\n\n %s\n\nand place it in this directory before rerunning this script.)\n---------------------------------------------------------------------------', version, download_base, download_delay, url)
from time import sleep
sleep(download_delay)
log.warn('Downloading %s', url)
src = urllib2.urlopen(url)
data = _validate_md5(egg_name, src.read())
dst = open(saveto, 'wb')
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
egg = os.path.realpath(saveto)
sys.path.insert(0, egg)
import setuptools
setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require('setuptools>=' + version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
if was_imported:
sys.stderr.write("The required version of setuptools (>=%s) is not available, and\ncan't be installed while this script is running. Please install\n a more recent version first, using 'easy_install -U setuptools'.\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
except pkg_resources.DistributionNotFound:
pass
del pkg_resources, sys.modules['pkg_resources']
return do_download()
|
barbieri-playground
|
positive
|
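The ez_setup-style bootstrap above downloads an egg with urllib2 and prepends it to sys.path; urllib2 exists only on Python 2. A rough Python 3 sketch of the same download-then-bootstrap idea is shown below; fetch_egg is an illustrative name and the MD5 validation from the record is omitted.

import os
import sys
from urllib.request import urlopen  # Python 3 counterpart of urllib2

def fetch_egg(url, to_dir=os.curdir):
    """Download url into to_dir if it is not already there and put it on sys.path."""
    saveto = os.path.join(to_dir, os.path.basename(url))
    if not os.path.exists(saveto):
        with urlopen(url) as src, open(saveto, 'wb') as dst:
            dst.write(src.read())
    egg = os.path.realpath(saveto)
    sys.path.insert(0, egg)
    return egg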
def get_rank_names(self):
"""
Return MDS daemon names of those daemons holding a rank,
sorted by rank. This includes e.g. up:replay/reconnect
as well as active, but does not include standby or
standby-replay.
"""
<DeepExtract>
status = self.status().get_fsmap(self.id)['mdsmap']
</DeepExtract>
result = []
for mds_status in sorted(status['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
result.append(mds_status['name'])
return result
|
def get_rank_names(self):
"""
Return MDS daemon names of those daemons holding a rank,
sorted by rank. This includes e.g. up:replay/reconnect
as well as active, but does not include standby or
standby-replay.
"""
status = self.status().get_fsmap(self.id)['mdsmap']
result = []
for mds_status in sorted(status['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
result.append(mds_status['name'])
return result
|
ceph-qa-suite
|
positive
|
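get_rank_names above sorts with a Python 2 comparator (cmp passed positionally to sorted). Under Python 3 the same ordering and filtering is usually expressed with a key function; the sketch below uses made-up MDS entries purely for illustration.

mds_info = [
    {'name': 'b', 'rank': 1, 'state': 'up:active'},
    {'name': 'a', 'rank': 0, 'state': 'up:active'},
    {'name': 'c', 'rank': -1, 'state': 'up:standby'},
]
ranked = [
    m['name']
    for m in sorted(mds_info, key=lambda m: m['rank'])  # key= replaces cmp
    if m['rank'] != -1 and m['state'] != 'up:standby-replay'
]
print(ranked)  # ['a', 'b']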
def dense_reppoints_target(proposals_list, proposals_pts_list, valid_flag_list, gt_bboxes_list, gt_masks_list, img_metas, cfg, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, sampling=True, unmap_outputs=True, num_pts=49):
"""Compute refinement and classification targets for points.
Args:
        proposals_list(list(list)): Multi level bounding box of each image
proposals_pts_list (list(list)): Multi level points of each image.
valid_flag_list (list(list)): Multi level valid flags of each image.
gt_bboxes_list (list(Tensor)): Ground truth bboxes of each image.
img_metas (list(dict)): Meta info of each image.
cfg (dict): Train sample configs.
        num_pts (int): Number of point sets.
Returns:
tuple
"""
num_imgs = len(img_metas)
assert len(proposals_list) == len(valid_flag_list) == len(proposals_pts_list) == num_imgs
num_level_proposals = [points.size(0) for points in proposals_list[0]]
num_level_proposals_list = [num_level_proposals] * num_imgs
for i in range(num_imgs):
assert len(proposals_list[i]) == len(valid_flag_list[i])
proposals_list[i] = torch.cat(proposals_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
proposals_pts_list[i] = torch.cat(proposals_pts_list[i])
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_labels, all_label_weights, all_bbox_gt, all_mask_gt_index, all_mask_gt, all_mask_gt_label, all_proposals, all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(dense_reppoints_target_sinle, proposals_list, proposals_pts_list, num_level_proposals_list, valid_flag_list, gt_bboxes_list, gt_masks_list, gt_bboxes_ignore_list, gt_labels_list, num_pts=num_pts, cfg=cfg, label_channels=label_channels, sampling=sampling, unmap_outputs=unmap_outputs)
if any([labels is None for labels in all_labels]):
return None
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
<DeepExtract>
all_labels = torch.stack(all_labels, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
if not True:
level_targets.append(all_labels[:, start:end].squeeze(0))
else:
level_targets.append(all_labels[:, start:end])
start = end
labels_list = level_targets
</DeepExtract>
<DeepExtract>
all_label_weights = torch.stack(all_label_weights, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
if not True:
level_targets.append(all_label_weights[:, start:end].squeeze(0))
else:
level_targets.append(all_label_weights[:, start:end])
start = end
label_weights_list = level_targets
</DeepExtract>
<DeepExtract>
all_bbox_gt = torch.stack(all_bbox_gt, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
if not True:
level_targets.append(all_bbox_gt[:, start:end].squeeze(0))
else:
level_targets.append(all_bbox_gt[:, start:end])
start = end
bbox_gt_list = level_targets
</DeepExtract>
<DeepExtract>
all_proposals = torch.stack(all_proposals, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
if not True:
level_targets.append(all_proposals[:, start:end].squeeze(0))
else:
level_targets.append(all_proposals[:, start:end])
start = end
proposals_list = level_targets
</DeepExtract>
<DeepExtract>
all_proposal_weights = torch.stack(all_proposal_weights, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
if not True:
level_targets.append(all_proposal_weights[:, start:end].squeeze(0))
else:
level_targets.append(all_proposal_weights[:, start:end])
start = end
proposal_weights_list = level_targets
</DeepExtract>
<DeepExtract>
all_mask_gt_index = torch.stack(all_mask_gt_index, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
if not True:
level_targets.append(all_mask_gt_index[:, start:end].squeeze(0))
else:
level_targets.append(all_mask_gt_index[:, start:end])
start = end
mask_gt_index_list = level_targets
</DeepExtract>
<DeepExtract>
target_gt_list = []
for lvl in range(len(mask_gt_index_list)):
mask_gt_lvl_list = []
for i in range(mask_gt_index_list[lvl].shape[0]):
index = mask_gt_index_list[lvl][i]
index = index[index > 0]
mask_gt_lvl = all_mask_gt[i][index - 1]
mask_gt_lvl_list.append(mask_gt_lvl)
target_gt_list.append(mask_gt_lvl_list)
mask_gt_list = target_gt_list
</DeepExtract>
<DeepExtract>
target_gt_list = []
for lvl in range(len(mask_gt_index_list)):
mask_gt_lvl_list = []
for i in range(mask_gt_index_list[lvl].shape[0]):
index = mask_gt_index_list[lvl][i]
index = index[index > 0]
mask_gt_lvl = all_mask_gt_label[i][index - 1]
mask_gt_lvl_list.append(mask_gt_lvl)
target_gt_list.append(mask_gt_lvl_list)
mask_gt_label_list = target_gt_list
</DeepExtract>
return (labels_list, label_weights_list, bbox_gt_list, mask_gt_list, mask_gt_label_list, proposals_list, proposal_weights_list, num_total_pos, num_total_neg)
|
def dense_reppoints_target(proposals_list, proposals_pts_list, valid_flag_list, gt_bboxes_list, gt_masks_list, img_metas, cfg, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, sampling=True, unmap_outputs=True, num_pts=49):
"""Compute refinement and classification targets for points.
Args:
        proposals_list(list(list)): Multi level bounding box of each image
proposals_pts_list (list(list)): Multi level points of each image.
valid_flag_list (list(list)): Multi level valid flags of each image.
gt_bboxes_list (list(Tensor)): Ground truth bboxes of each image.
img_metas (list(dict)): Meta info of each image.
cfg (dict): Train sample configs.
        num_pts (int): Number of point sets.
Returns:
tuple
"""
num_imgs = len(img_metas)
assert len(proposals_list) == len(valid_flag_list) == len(proposals_pts_list) == num_imgs
num_level_proposals = [points.size(0) for points in proposals_list[0]]
num_level_proposals_list = [num_level_proposals] * num_imgs
for i in range(num_imgs):
assert len(proposals_list[i]) == len(valid_flag_list[i])
proposals_list[i] = torch.cat(proposals_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
proposals_pts_list[i] = torch.cat(proposals_pts_list[i])
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_labels, all_label_weights, all_bbox_gt, all_mask_gt_index, all_mask_gt, all_mask_gt_label, all_proposals, all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(dense_reppoints_target_sinle, proposals_list, proposals_pts_list, num_level_proposals_list, valid_flag_list, gt_bboxes_list, gt_masks_list, gt_bboxes_ignore_list, gt_labels_list, num_pts=num_pts, cfg=cfg, label_channels=label_channels, sampling=sampling, unmap_outputs=unmap_outputs)
if any([labels is None for labels in all_labels]):
return None
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
all_labels = torch.stack(all_labels, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
if not True:
level_targets.append(all_labels[:, start:end].squeeze(0))
else:
level_targets.append(all_labels[:, start:end])
start = end
labels_list = level_targets
all_label_weights = torch.stack(all_label_weights, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
if not True:
level_targets.append(all_label_weights[:, start:end].squeeze(0))
else:
level_targets.append(all_label_weights[:, start:end])
start = end
label_weights_list = level_targets
all_bbox_gt = torch.stack(all_bbox_gt, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
if not True:
level_targets.append(all_bbox_gt[:, start:end].squeeze(0))
else:
level_targets.append(all_bbox_gt[:, start:end])
start = end
bbox_gt_list = level_targets
all_proposals = torch.stack(all_proposals, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
if not True:
level_targets.append(all_proposals[:, start:end].squeeze(0))
else:
level_targets.append(all_proposals[:, start:end])
start = end
proposals_list = level_targets
all_proposal_weights = torch.stack(all_proposal_weights, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
if not True:
level_targets.append(all_proposal_weights[:, start:end].squeeze(0))
else:
level_targets.append(all_proposal_weights[:, start:end])
start = end
proposal_weights_list = level_targets
all_mask_gt_index = torch.stack(all_mask_gt_index, 0)
level_targets = []
start = 0
for n in num_level_proposals:
end = start + n
if not True:
level_targets.append(all_mask_gt_index[:, start:end].squeeze(0))
else:
level_targets.append(all_mask_gt_index[:, start:end])
start = end
mask_gt_index_list = level_targets
target_gt_list = []
for lvl in range(len(mask_gt_index_list)):
mask_gt_lvl_list = []
for i in range(mask_gt_index_list[lvl].shape[0]):
index = mask_gt_index_list[lvl][i]
index = index[index > 0]
mask_gt_lvl = all_mask_gt[i][index - 1]
mask_gt_lvl_list.append(mask_gt_lvl)
target_gt_list.append(mask_gt_lvl_list)
mask_gt_list = target_gt_list
target_gt_list = []
for lvl in range(len(mask_gt_index_list)):
mask_gt_lvl_list = []
for i in range(mask_gt_index_list[lvl].shape[0]):
index = mask_gt_index_list[lvl][i]
index = index[index > 0]
mask_gt_lvl = all_mask_gt_label[i][index - 1]
mask_gt_lvl_list.append(mask_gt_lvl)
target_gt_list.append(mask_gt_lvl_list)
mask_gt_label_list = target_gt_list
return (labels_list, label_weights_list, bbox_gt_list, mask_gt_list, mask_gt_label_list, proposals_list, proposal_weights_list, num_total_pos, num_total_neg)
|
Dense-RepPoints
|
positive
|
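The Dense-RepPoints pair repeats one inlined helper six times: stack the per-image targets, then slice them back into per-level tensors. A condensed sketch of that stack-then-split step follows; images_to_levels is an illustrative name and the shapes are toy-sized.

import torch

def images_to_levels(per_image_targets, num_level_proposals):
    """Stack per-image tensors, then split along dim 1 into per-level chunks."""
    stacked = torch.stack(per_image_targets, 0)  # (num_imgs, total_proposals, ...)
    level_targets, start = [], 0
    for n in num_level_proposals:
        level_targets.append(stacked[:, start:start + n])
        start += n
    return level_targets

levels = images_to_levels([torch.zeros(6), torch.ones(6)], [4, 2])
print([t.shape for t in levels])  # [torch.Size([2, 4]), torch.Size([2, 2])]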
def helper(root, result):
if not root:
return
<DeepExtract>
if not root.left:
return
result.append(root.left.val)
helper(root.left.left, result)
helper(root.left.right, result)
</DeepExtract>
<DeepExtract>
if not root.right:
return
result.append(root.right.val)
helper(root.right.left, result)
helper(root.right.right, result)
</DeepExtract>
result.append(root.val)
|
def helper(root, result):
if not root:
return
if not root.left:
return
result.append(root.left.val)
helper(root.left.left, result)
helper(root.left.right, result)
if not root.right:
return
result.append(root.right.val)
helper(root.right.left, result)
helper(root.right.right, result)
result.append(root.val)
|
Algorithm-Implementations
|
positive
|
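The helper pair above unrolls one level of a recursive tree traversal; note that the inlined return statements now exit the whole function when a child is missing, which is not equivalent to the original recursive calls. For comparison, a conventional postorder sketch is given below; TreeNode is an assumed minimal node class, not taken from the repository.

class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

def postorder(root, result):
    """Conventional recursive postorder: children first, then the node."""
    if not root:
        return
    postorder(root.left, result)
    postorder(root.right, result)
    result.append(root.val)

out = []
postorder(TreeNode(1, TreeNode(2), TreeNode(3)), out)
print(out)  # [2, 3, 1]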
def all_chunk_coords(self) -> Generator[ChunkCoordinates, None, None]:
"""An iterable of chunk coordinates in world space."""
<DeepExtract>
with self._lock:
if self._sector_manager is not None:
return
self._sector_manager = SectorManager(0, 8192)
self._sector_manager.reserve(Sector(0, 8192))
if os.path.isfile(self._path):
with open(self._path, 'rb+') as handler:
_sanitise_file(handler)
handler.seek(0)
location_table = numpy.fromfile(handler, dtype='>u4', count=1024).reshape(32, 32)
for ((z, x), sector_data) in numpy.ndenumerate(location_table):
if sector_data:
sector_offset = (sector_data >> 8) * 4096
sector_size = (sector_data & 255) * 4096
sector = Sector(sector_offset, sector_offset + sector_size)
self._sector_manager.reserve(sector)
self._chunk_locations[x, z] = sector
</DeepExtract>
for (cx, cz) in list(self._chunk_locations):
yield (cx + self.rx * 32, cz + self.rz * 32)
|
def all_chunk_coords(self) -> Generator[ChunkCoordinates, None, None]:
"""An iterable of chunk coordinates in world space."""
with self._lock:
if self._sector_manager is not None:
return
self._sector_manager = SectorManager(0, 8192)
self._sector_manager.reserve(Sector(0, 8192))
if os.path.isfile(self._path):
with open(self._path, 'rb+') as handler:
_sanitise_file(handler)
handler.seek(0)
location_table = numpy.fromfile(handler, dtype='>u4', count=1024).reshape(32, 32)
for ((z, x), sector_data) in numpy.ndenumerate(location_table):
if sector_data:
sector_offset = (sector_data >> 8) * 4096
sector_size = (sector_data & 255) * 4096
sector = Sector(sector_offset, sector_offset + sector_size)
self._sector_manager.reserve(sector)
self._chunk_locations[x, z] = sector
for (cx, cz) in list(self._chunk_locations):
yield (cx + self.rx * 32, cz + self.rz * 32)
|
Amulet-Core
|
positive
|
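all_chunk_coords above lazily parses a region file's 1024-entry location table, where each big-endian 32-bit entry packs a sector offset (upper 24 bits) and a sector count (lowest byte), both in 4 KiB units. A small sketch of just that decoding step follows; decode_location_entry is an illustrative name.

def decode_location_entry(entry):
    """Decode one 32-bit location-table entry into (byte offset, byte length)."""
    sector_offset = (entry >> 8) * 4096  # upper 24 bits: offset in 4 KiB sectors
    sector_size = (entry & 0xFF) * 4096  # lowest byte: length in 4 KiB sectors
    return sector_offset, sector_size

print(decode_location_entry(0x00000201))  # (8192, 4096)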
def __init__(self, div=20.0, refinement=True, batch_norm=False, md=4, init_deconv_w_bilinear=False, local_corr_type='local_corr', local_gocor_arguments=None, same_local_corr_at_all_levels=True):
super().__init__()
nbr_features = [196, 128, 96, 64, 32, 16, 3]
self.leakyRELU = nn.LeakyReLU(0.1)
self.div = div
self.refinement = refinement
nd = (2 * md + 1) ** 2
dd = np.cumsum([128, 128, 96, 64, 32])
od = nd
<DeepExtract>
if batch_norm:
self.conv6_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv6_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv6_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv6_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv6_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
self.predict_flow6 = nn.Conv2d(od + dd[4], 2, kernel_size=3, stride=1, padding=1, bias=True)
</DeepExtract>
if init_deconv_w_bilinear:
self.deconv6 = BilinearConvTranspose2d(2, 2, kernel_size=4, stride=2, padding=1)
else:
<DeepExtract>
deconv_ = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.deconv6 = deconv_
</DeepExtract>
<DeepExtract>
deconv_ = nn.ConvTranspose2d(od + dd[4], 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.upfeat6 = deconv_
</DeepExtract>
od = nd + nbr_features[1] + 4
<DeepExtract>
if batch_norm:
self.conv5_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv5_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv5_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv5_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv5_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
self.predict_flow5 = nn.Conv2d(od + dd[4], 2, kernel_size=3, stride=1, padding=1, bias=True)
</DeepExtract>
if init_deconv_w_bilinear:
self.deconv5 = BilinearConvTranspose2d(2, 2, kernel_size=4, stride=2, padding=1)
else:
<DeepExtract>
deconv_ = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.deconv5 = deconv_
</DeepExtract>
<DeepExtract>
deconv_ = nn.ConvTranspose2d(od + dd[4], 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.upfeat5 = deconv_
</DeepExtract>
od = nd + nbr_features[2] + 4
<DeepExtract>
if batch_norm:
self.conv4_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv4_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv4_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv4_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv4_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
self.predict_flow4 = nn.Conv2d(od + dd[4], 2, kernel_size=3, stride=1, padding=1, bias=True)
</DeepExtract>
if init_deconv_w_bilinear:
self.deconv4 = BilinearConvTranspose2d(2, 2, kernel_size=4, stride=2, padding=1)
else:
<DeepExtract>
deconv_ = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.deconv4 = deconv_
</DeepExtract>
<DeepExtract>
deconv_ = nn.ConvTranspose2d(od + dd[4], 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.upfeat4 = deconv_
</DeepExtract>
od = nd + nbr_features[3] + 4
<DeepExtract>
if batch_norm:
self.conv3_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv3_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv3_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv3_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv3_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
self.predict_flow3 = nn.Conv2d(od + dd[4], 2, kernel_size=3, stride=1, padding=1, bias=True)
</DeepExtract>
if init_deconv_w_bilinear:
self.deconv3 = BilinearConvTranspose2d(2, 2, kernel_size=4, stride=2, padding=1)
else:
<DeepExtract>
deconv_ = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.deconv3 = deconv_
</DeepExtract>
<DeepExtract>
deconv_ = nn.ConvTranspose2d(od + dd[4], 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.upfeat3 = deconv_
</DeepExtract>
od = nd + nbr_features[4] + 4
<DeepExtract>
if batch_norm:
self.conv2_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv2_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv2_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv2_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv2_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
self.predict_flow2 = nn.Conv2d(od + dd[4], 2, kernel_size=3, stride=1, padding=1, bias=True)
</DeepExtract>
<DeepExtract>
deconv_ = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.deconv2 = deconv_
</DeepExtract>
<DeepExtract>
if batch_norm:
self.dc_conv1 = nn.Sequential(nn.Conv2d(od + dd[4], 128, kernel_size=3, stride=1, padding=1, dilation=1, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.dc_conv1 = nn.Sequential(nn.Conv2d(od + dd[4], 128, kernel_size=3, stride=1, padding=1, dilation=1, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.dc_conv2 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=2, dilation=2, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.dc_conv2 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=2, dilation=2, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.dc_conv3 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=4, dilation=4, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.dc_conv3 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=4, dilation=4, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.dc_conv4 = nn.Sequential(nn.Conv2d(128, 96, kernel_size=3, stride=1, padding=8, dilation=8, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.dc_conv4 = nn.Sequential(nn.Conv2d(128, 96, kernel_size=3, stride=1, padding=8, dilation=8, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.dc_conv5 = nn.Sequential(nn.Conv2d(96, 64, kernel_size=3, stride=1, padding=16, dilation=16, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.dc_conv5 = nn.Sequential(nn.Conv2d(96, 64, kernel_size=3, stride=1, padding=16, dilation=16, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.dc_conv6 = nn.Sequential(nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1, dilation=1, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.dc_conv6 = nn.Sequential(nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1, dilation=1, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
self.dc_conv7 = nn.Conv2d(32, 2, kernel_size=3, stride=1, padding=1, bias=True)
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv1a = nn.Sequential(nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(16), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv1a = nn.Sequential(nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv1aa = nn.Sequential(nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(16), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv1aa = nn.Sequential(nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv1b = nn.Sequential(nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(16), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv1b = nn.Sequential(nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv2a = nn.Sequential(nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2a = nn.Sequential(nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv2aa = nn.Sequential(nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2aa = nn.Sequential(nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv2b = nn.Sequential(nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2b = nn.Sequential(nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv3a = nn.Sequential(nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3a = nn.Sequential(nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv3aa = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3aa = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv3b = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3b = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv4a = nn.Sequential(nn.Conv2d(64, 96, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4a = nn.Sequential(nn.Conv2d(64, 96, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv4aa = nn.Sequential(nn.Conv2d(96, 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4aa = nn.Sequential(nn.Conv2d(96, 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv4b = nn.Sequential(nn.Conv2d(96, 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4b = nn.Sequential(nn.Conv2d(96, 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv5a = nn.Sequential(nn.Conv2d(96, 128, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5a = nn.Sequential(nn.Conv2d(96, 128, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv5aa = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5aa = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv5b = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5b = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv6aa = nn.Sequential(nn.Conv2d(128, 196, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(196), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6aa = nn.Sequential(nn.Conv2d(128, 196, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv6a = nn.Sequential(nn.Conv2d(196, 196, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(196), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6a = nn.Sequential(nn.Conv2d(196, 196, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
<DeepExtract>
if batch_norm:
self.conv6b = nn.Sequential(nn.Conv2d(196, 196, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(196), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6b = nn.Sequential(nn.Conv2d(196, 196, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
</DeepExtract>
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data, mode='fan_in')
if m.bias is not None:
m.bias.data.zero_()
self.local_corr_type = local_corr_type
if self.local_corr_type == 'LocalGOCor':
self.same_local_corr_at_all_levels = same_local_corr_at_all_levels
if self.same_local_corr_at_all_levels:
initializer = local_gocor.LocalCorrSimpleInitializer()
optimizer = define_optimizer_local_corr(local_gocor_arguments)
self.local_corr = local_gocor.LocalGOCor(filter_initializer=initializer, filter_optimizer=optimizer)
else:
initializer_6 = local_gocor.LocalCorrSimpleInitializer()
optimizer_6 = define_optimizer_local_corr(local_gocor_arguments)
self.local_corr_6 = local_gocor.LocalGOCor(filter_initializer=initializer_6, filter_optimizer=optimizer_6)
initializer_5 = local_gocor.LocalCorrSimpleInitializer()
optimizer_5 = define_optimizer_local_corr(local_gocor_arguments)
self.local_corr_5 = local_gocor.LocalGOCor(filter_initializer=initializer_5, filter_optimizer=optimizer_5)
initializer_4 = local_gocor.LocalCorrSimpleInitializer()
optimizer_4 = define_optimizer_local_corr(local_gocor_arguments)
self.local_corr_4 = local_gocor.LocalGOCor(filter_initializer=initializer_4, filter_optimizer=optimizer_4)
initializer_3 = local_gocor.LocalCorrSimpleInitializer()
optimizer_3 = define_optimizer_local_corr(local_gocor_arguments)
self.local_corr_3 = local_gocor.LocalGOCor(filter_initializer=initializer_3, filter_optimizer=optimizer_3)
initializer_2 = local_gocor.LocalCorrSimpleInitializer()
optimizer_2 = define_optimizer_local_corr(local_gocor_arguments)
self.local_corr_2 = local_gocor.LocalGOCor(filter_initializer=initializer_2, filter_optimizer=optimizer_2)
|
def __init__(self, div=20.0, refinement=True, batch_norm=False, md=4, init_deconv_w_bilinear=False, local_corr_type='local_corr', local_gocor_arguments=None, same_local_corr_at_all_levels=True):
super().__init__()
nbr_features = [196, 128, 96, 64, 32, 16, 3]
self.leakyRELU = nn.LeakyReLU(0.1)
self.div = div
self.refinement = refinement
nd = (2 * md + 1) ** 2
dd = np.cumsum([128, 128, 96, 64, 32])
od = nd
if batch_norm:
self.conv6_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv6_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv6_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv6_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv6_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
self.predict_flow6 = nn.Conv2d(od + dd[4], 2, kernel_size=3, stride=1, padding=1, bias=True)
if init_deconv_w_bilinear:
self.deconv6 = BilinearConvTranspose2d(2, 2, kernel_size=4, stride=2, padding=1)
else:
deconv_ = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.deconv6 = deconv_
deconv_ = nn.ConvTranspose2d(od + dd[4], 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.upfeat6 = deconv_
od = nd + nbr_features[1] + 4
if batch_norm:
self.conv5_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv5_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv5_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv5_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv5_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
self.predict_flow5 = nn.Conv2d(od + dd[4], 2, kernel_size=3, stride=1, padding=1, bias=True)
if init_deconv_w_bilinear:
self.deconv5 = BilinearConvTranspose2d(2, 2, kernel_size=4, stride=2, padding=1)
else:
deconv_ = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.deconv5 = deconv_
deconv_ = nn.ConvTranspose2d(od + dd[4], 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.upfeat5 = deconv_
od = nd + nbr_features[2] + 4
if batch_norm:
self.conv4_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv4_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv4_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv4_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv4_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
self.predict_flow4 = nn.Conv2d(od + dd[4], 2, kernel_size=3, stride=1, padding=1, bias=True)
if init_deconv_w_bilinear:
self.deconv4 = BilinearConvTranspose2d(2, 2, kernel_size=4, stride=2, padding=1)
else:
deconv_ = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.deconv4 = deconv_
deconv_ = nn.ConvTranspose2d(od + dd[4], 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.upfeat4 = deconv_
od = nd + nbr_features[3] + 4
if batch_norm:
self.conv3_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv3_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv3_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv3_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv3_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
self.predict_flow3 = nn.Conv2d(od + dd[4], 2, kernel_size=3, stride=1, padding=1, bias=True)
if init_deconv_w_bilinear:
self.deconv3 = BilinearConvTranspose2d(2, 2, kernel_size=4, stride=2, padding=1)
else:
deconv_ = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.deconv3 = deconv_
deconv_ = nn.ConvTranspose2d(od + dd[4], 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.upfeat3 = deconv_
od = nd + nbr_features[4] + 4
if batch_norm:
self.conv2_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2_0 = nn.Sequential(nn.Conv2d(od, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv2_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2_1 = nn.Sequential(nn.Conv2d(od + dd[0], 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv2_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2_2 = nn.Sequential(nn.Conv2d(od + dd[1], 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv2_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2_3 = nn.Sequential(nn.Conv2d(od + dd[2], 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv2_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2_4 = nn.Sequential(nn.Conv2d(od + dd[3], 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
self.predict_flow2 = nn.Conv2d(od + dd[4], 2, kernel_size=3, stride=1, padding=1, bias=True)
deconv_ = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
nn.init.kaiming_normal_(deconv_.weight.data, mode='fan_in')
if deconv_.bias is not None:
deconv_.bias.data.zero_()
self.deconv2 = deconv_
if batch_norm:
self.dc_conv1 = nn.Sequential(nn.Conv2d(od + dd[4], 128, kernel_size=3, stride=1, padding=1, dilation=1, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.dc_conv1 = nn.Sequential(nn.Conv2d(od + dd[4], 128, kernel_size=3, stride=1, padding=1, dilation=1, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.dc_conv2 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=2, dilation=2, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.dc_conv2 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=2, dilation=2, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.dc_conv3 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=4, dilation=4, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.dc_conv3 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=4, dilation=4, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.dc_conv4 = nn.Sequential(nn.Conv2d(128, 96, kernel_size=3, stride=1, padding=8, dilation=8, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.dc_conv4 = nn.Sequential(nn.Conv2d(128, 96, kernel_size=3, stride=1, padding=8, dilation=8, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.dc_conv5 = nn.Sequential(nn.Conv2d(96, 64, kernel_size=3, stride=1, padding=16, dilation=16, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.dc_conv5 = nn.Sequential(nn.Conv2d(96, 64, kernel_size=3, stride=1, padding=16, dilation=16, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.dc_conv6 = nn.Sequential(nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1, dilation=1, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.dc_conv6 = nn.Sequential(nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1, dilation=1, bias=True), nn.LeakyReLU(0.1))
self.dc_conv7 = nn.Conv2d(32, 2, kernel_size=3, stride=1, padding=1, bias=True)
if batch_norm:
self.conv1a = nn.Sequential(nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(16), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv1a = nn.Sequential(nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv1aa = nn.Sequential(nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(16), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv1aa = nn.Sequential(nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv1b = nn.Sequential(nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(16), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv1b = nn.Sequential(nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv2a = nn.Sequential(nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2a = nn.Sequential(nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv2aa = nn.Sequential(nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2aa = nn.Sequential(nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv2b = nn.Sequential(nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(32), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv2b = nn.Sequential(nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv3a = nn.Sequential(nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3a = nn.Sequential(nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv3aa = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3aa = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv3b = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv3b = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv4a = nn.Sequential(nn.Conv2d(64, 96, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4a = nn.Sequential(nn.Conv2d(64, 96, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv4aa = nn.Sequential(nn.Conv2d(96, 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4aa = nn.Sequential(nn.Conv2d(96, 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv4b = nn.Sequential(nn.Conv2d(96, 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(96), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv4b = nn.Sequential(nn.Conv2d(96, 96, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv5a = nn.Sequential(nn.Conv2d(96, 128, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5a = nn.Sequential(nn.Conv2d(96, 128, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv5aa = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5aa = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv5b = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv5b = nn.Sequential(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv6aa = nn.Sequential(nn.Conv2d(128, 196, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(196), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6aa = nn.Sequential(nn.Conv2d(128, 196, kernel_size=3, stride=2, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv6a = nn.Sequential(nn.Conv2d(196, 196, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(196), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6a = nn.Sequential(nn.Conv2d(196, 196, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
if batch_norm:
self.conv6b = nn.Sequential(nn.Conv2d(196, 196, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.BatchNorm2d(196), nn.LeakyReLU(0.1, inplace=True))
else:
self.conv6b = nn.Sequential(nn.Conv2d(196, 196, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True), nn.LeakyReLU(0.1))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data, mode='fan_in')
if m.bias is not None:
m.bias.data.zero_()
self.local_corr_type = local_corr_type
if self.local_corr_type == 'LocalGOCor':
self.same_local_corr_at_all_levels = same_local_corr_at_all_levels
if self.same_local_corr_at_all_levels:
initializer = local_gocor.LocalCorrSimpleInitializer()
optimizer = define_optimizer_local_corr(local_gocor_arguments)
self.local_corr = local_gocor.LocalGOCor(filter_initializer=initializer, filter_optimizer=optimizer)
else:
initializer_6 = local_gocor.LocalCorrSimpleInitializer()
optimizer_6 = define_optimizer_local_corr(local_gocor_arguments)
self.local_corr_6 = local_gocor.LocalGOCor(filter_initializer=initializer_6, filter_optimizer=optimizer_6)
initializer_5 = local_gocor.LocalCorrSimpleInitializer()
optimizer_5 = define_optimizer_local_corr(local_gocor_arguments)
self.local_corr_5 = local_gocor.LocalGOCor(filter_initializer=initializer_5, filter_optimizer=optimizer_5)
initializer_4 = local_gocor.LocalCorrSimpleInitializer()
optimizer_4 = define_optimizer_local_corr(local_gocor_arguments)
self.local_corr_4 = local_gocor.LocalGOCor(filter_initializer=initializer_4, filter_optimizer=optimizer_4)
initializer_3 = local_gocor.LocalCorrSimpleInitializer()
optimizer_3 = define_optimizer_local_corr(local_gocor_arguments)
self.local_corr_3 = local_gocor.LocalGOCor(filter_initializer=initializer_3, filter_optimizer=optimizer_3)
initializer_2 = local_gocor.LocalCorrSimpleInitializer()
optimizer_2 = define_optimizer_local_corr(local_gocor_arguments)
self.local_corr_2 = local_gocor.LocalGOCor(filter_initializer=initializer_2, filter_optimizer=optimizer_2)
|
DenseMatching
|
positive
|
def _compute_u_sorted(a, sorting):
u_blocks = [[] for _ in range(a._n_blocks[1])]
hbsize = a._reg_shape[1]
if dislib.__gpu_available__:
compute_u_block_func = _compute_u_block_sorted_gpu
else:
compute_u_block_func = _compute_u_block_sorted
for (i, vblock) in enumerate(a._iterator('columns')):
u_block = [object() for _ in range(a._n_blocks[1])]
compute_u_block_func(vblock._blocks, i, hbsize, sorting, u_block)
for j in range(len(u_block)):
u_blocks[j].append(u_block[j])
vbsize = a._reg_shape[0]
final_blocks = Array._get_out_blocks(a._n_blocks)
for (i, u_block) in enumerate(u_blocks):
new_block = [object() for _ in range(a._n_blocks[0])]
<DeepExtract>
u_block = list(filter(lambda a: np.any(a), u_block))
col = np.vstack(u_block).T
local_sorting = []
for i in range(col.shape[1]):
dest_i = np.where(sorting == i * hbsize + i)[0][0] % hbsize
local_sorting.append(dest_i)
col = col[:, local_sorting]
for i in range(len(new_block)):
new_block[i] = col[i * vbsize:(i + 1) * vbsize]
</DeepExtract>
for j in range(len(new_block)):
final_blocks[j][i] = new_block[j]
for elem in u_block:
compss_delete_object(elem)
return Array(final_blocks, a._top_left_shape, a._reg_shape, a.shape, a._sparse)
|
def _compute_u_sorted(a, sorting):
u_blocks = [[] for _ in range(a._n_blocks[1])]
hbsize = a._reg_shape[1]
if dislib.__gpu_available__:
compute_u_block_func = _compute_u_block_sorted_gpu
else:
compute_u_block_func = _compute_u_block_sorted
for (i, vblock) in enumerate(a._iterator('columns')):
u_block = [object() for _ in range(a._n_blocks[1])]
compute_u_block_func(vblock._blocks, i, hbsize, sorting, u_block)
for j in range(len(u_block)):
u_blocks[j].append(u_block[j])
vbsize = a._reg_shape[0]
final_blocks = Array._get_out_blocks(a._n_blocks)
for (i, u_block) in enumerate(u_blocks):
new_block = [object() for _ in range(a._n_blocks[0])]
u_block = list(filter(lambda a: np.any(a), u_block))
col = np.vstack(u_block).T
local_sorting = []
for i in range(col.shape[1]):
dest_i = np.where(sorting == i * hbsize + i)[0][0] % hbsize
local_sorting.append(dest_i)
col = col[:, local_sorting]
for i in range(len(new_block)):
new_block[i] = col[i * vbsize:(i + 1) * vbsize]
for j in range(len(new_block)):
final_blocks[j][i] = new_block[j]
for elem in u_block:
compss_delete_object(elem)
return Array(final_blocks, a._top_left_shape, a._reg_shape, a.shape, a._sparse)
|
dislib
|
positive
|
def im_conv_body_only(model, im, target_scale, target_max_size):
boxes = None
<DeepExtract>
blobs = {}
(blobs['data'], im_scale, blobs['im_info']) = blob_utils.get_image_blob(im, target_scale, target_max_size)
if boxes is not None:
blobs['rois'] = _get_rois_blob(boxes, im_scale)
(inputs, im_scale) = (blobs, im_scale)
</DeepExtract>
conv_body_only = np.ones(1, dtype=np.int)
if cfg.PYTORCH_VERSION_LESS_THAN_040:
inputs['data'] = [Variable(torch.from_numpy(inputs['data']), volatile=True)]
inputs['im_info'] = [Variable(torch.from_numpy(inputs['im_info']), volatile=True)]
inputs['conv_body_only'] = [Variable(torch.from_numpy(conv_body_only), volatile=True)]
else:
inputs['data'] = [Variable(torch.from_numpy(inputs['data']))]
inputs['im_info'] = [Variable(torch.from_numpy(inputs['im_info']))]
inputs['conv_body_only'] = [Variable(torch.from_numpy(conv_body_only))]
blob_conv = model(**inputs)
return blob_conv
|
def im_conv_body_only(model, im, target_scale, target_max_size):
boxes = None
blobs = {}
(blobs['data'], im_scale, blobs['im_info']) = blob_utils.get_image_blob(im, target_scale, target_max_size)
if boxes is not None:
blobs['rois'] = _get_rois_blob(boxes, im_scale)
(inputs, im_scale) = (blobs, im_scale)
conv_body_only = np.ones(1, dtype=np.int)
if cfg.PYTORCH_VERSION_LESS_THAN_040:
inputs['data'] = [Variable(torch.from_numpy(inputs['data']), volatile=True)]
inputs['im_info'] = [Variable(torch.from_numpy(inputs['im_info']), volatile=True)]
inputs['conv_body_only'] = [Variable(torch.from_numpy(conv_body_only), volatile=True)]
else:
inputs['data'] = [Variable(torch.from_numpy(inputs['data']))]
inputs['im_info'] = [Variable(torch.from_numpy(inputs['im_info']))]
inputs['conv_body_only'] = [Variable(torch.from_numpy(conv_body_only))]
blob_conv = model(**inputs)
return blob_conv
|
Amodal-Instance-Segmentation-through-KINS-Dataset
|
positive
|
def update_fixture_obj(self, old_obj, model_class, readonly_fields, do_preserve_sideeffect_fields, object_sideeffect_fields, common_sideeffect_fields):
"""
Given a fixture object, update it via stripe
:param model_class:
:param old_obj:
:param readonly_fields:
:return:
"""
old_obj = json.loads(self.unfake_json_ids(json.dumps(old_obj)))
id_ = old_obj['id']
self.stdout.write(f'{model_class.__name__} {id_}', ending='')
if issubclass(model_class, djstripe.models.Account):
<DeepExtract>
obj = djstripe.models.Account().api_retrieve()
(created, obj) = (True, obj)
</DeepExtract>
elif issubclass(model_class, djstripe.models.BankAccount):
<DeepExtract>
customer_id = old_obj['customer']
try:
obj = stripe.Customer.retrieve_source(customer_id, old_obj['id'])
created = False
self.stdout.write(' found')
except InvalidRequestError:
self.stdout.write(' creating')
create_obj = deepcopy(old_obj)
for k in readonly_fields:
create_obj.pop(k, None)
create_obj['account_number'] = old_obj['metadata']['djstripe_test_fixture_account_number']
create_obj['object'] = 'bank_account'
obj = stripe.Customer.create_source(customer_id, source=create_obj)
created = True
(created, obj) = (created, obj)
</DeepExtract>
elif issubclass(model_class, djstripe.models.Card):
<DeepExtract>
customer_id = old_obj['customer']
try:
obj = stripe.Customer.retrieve_source(customer_id, old_obj['id'])
created = False
self.stdout.write(' found')
except InvalidRequestError:
self.stdout.write(' creating')
create_obj = deepcopy(old_obj)
for k in readonly_fields:
create_obj.pop(k, None)
obj = stripe.Customer.create_source(**{'source': 'tok_visa'})
for (k, v) in create_obj.items():
setattr(obj, k, v)
obj.save()
created = True
(created, obj) = (created, obj)
</DeepExtract>
elif issubclass(model_class, djstripe.models.Source):
<DeepExtract>
customer_id = old_obj['customer']
try:
obj = stripe.Customer.retrieve_source(customer_id, old_obj['id'])
created = False
self.stdout.write(' found')
except InvalidRequestError:
self.stdout.write(' creating')
create_obj = deepcopy(old_obj)
for k in readonly_fields:
create_obj.pop(k, None)
source_obj = djstripe.models.Source._api_create(**{'token': 'tok_visa', 'type': 'card'})
obj = stripe.Customer.create_source(**{'source': source_obj.id})
for (k, v) in create_obj.items():
setattr(obj, k, v)
obj.save()
created = True
(created, obj) = (created, obj)
</DeepExtract>
elif issubclass(model_class, djstripe.models.Invoice):
<DeepExtract>
subscription = djstripe.models.Subscription(id=old_obj['subscription']).api_retrieve()
id_ = subscription['latest_invoice']
try:
obj = djstripe.models.Invoice(id=id_).api_retrieve()
created = False
self.stdout.write(f' found {id_}')
except InvalidRequestError:
assert False, 'Expected to find invoice via subscription'
for k in ['metadata']:
if isinstance(obj.get(k), dict):
obj[k].update(old_obj.get(k, {}))
else:
obj[k] = old_obj[k]
obj.save()
(created, obj) = (created, obj)
</DeepExtract>
elif issubclass(model_class, djstripe.models.Charge):
<DeepExtract>
invoice = djstripe.models.Invoice(id=old_obj['invoice']).api_retrieve()
id_ = invoice['charge']
try:
obj = djstripe.models.Charge(id=id_).api_retrieve()
created = False
self.stdout.write(f' found {id_}')
except InvalidRequestError:
assert False, 'Expected to find charge via invoice'
for k in ['metadata']:
if isinstance(obj.get(k), dict):
obj[k].update(old_obj.get(k, {}))
else:
obj[k] = old_obj[k]
obj.save()
(created, obj) = (created, obj)
</DeepExtract>
elif issubclass(model_class, djstripe.models.PaymentIntent):
<DeepExtract>
invoice = djstripe.models.Invoice(id=old_obj['invoice']).api_retrieve()
id_ = invoice['payment_intent']
try:
obj = djstripe.models.PaymentIntent(id=id_).api_retrieve()
created = False
self.stdout.write(f' found {id_}')
except InvalidRequestError:
assert False, 'Expected to find payment_intent via invoice'
for k in ['metadata']:
if isinstance(obj.get(k), dict):
obj[k].update(old_obj.get(k, {}))
else:
obj[k] = old_obj[k]
obj.save()
(created, obj) = (created, obj)
</DeepExtract>
elif issubclass(model_class, djstripe.models.PaymentMethod):
<DeepExtract>
id_ = old_obj['id']
customer_id = old_obj['customer']
type_ = old_obj['type']
try:
obj = djstripe.models.PaymentMethod(id=id_).api_retrieve()
created = False
self.stdout.write(' found')
except InvalidRequestError:
self.stdout.write(' creating')
obj = djstripe.models.PaymentMethod()._api_create(type=type_, card={'token': 'tok_visa'})
stripe.PaymentMethod.attach(obj['id'], customer=customer_id, api_key=djstripe_settings.djstripe_settings.STRIPE_SECRET_KEY)
for k in ['metadata']:
if isinstance(obj.get(k), dict):
obj[k].update(old_obj.get(k, {}))
else:
obj[k] = old_obj[k]
obj.save()
created = True
(created, obj) = (created, obj)
</DeepExtract>
elif issubclass(model_class, djstripe.models.BalanceTransaction):
<DeepExtract>
source = old_obj['source']
if source.startswith('ch_'):
charge = djstripe.models.Charge(id=source).api_retrieve()
id_ = get_id_from_stripe_data(charge['balance_transaction'])
try:
obj = djstripe.models.BalanceTransaction(id=id_).api_retrieve()
created = False
self.stdout.write(f' found {id_}')
except InvalidRequestError:
assert False, 'Expected to find balance transaction via source'
(created, obj) = (created, obj)
</DeepExtract>
else:
try:
obj = model_class(id=id_).api_retrieve()
created = False
self.stdout.write(' found')
except InvalidRequestError:
self.stdout.write(' creating')
create_obj = deepcopy(old_obj)
for k in readonly_fields:
create_obj.pop(k, None)
if issubclass(model_class, djstripe.models.Subscription):
<DeepExtract>
items = create_obj.get('items', {}).get('data', [])
if len(items):
create_obj.pop('plan', None)
create_obj.pop('quantity', None)
subscription_item_create_fields = {'plan', 'billing_thresholds', 'metadata', 'quantity', 'tax_rates'}
create_items = []
for item in items:
create_item = {k: v for (k, v) in item.items() if k in subscription_item_create_fields}
create_item['plan'] = get_id_from_stripe_data(create_item['plan'])
if create_item.get('tax_rates', []):
create_item['tax_rates'] = [get_id_from_stripe_data(t) for t in create_item['tax_rates']]
create_items.append(create_item)
create_obj['items'] = create_items
else:
create_obj.pop('items', None)
create_obj['plan'] = get_id_from_stripe_data(create_obj['plan'])
if create_obj.get('default_tax_rates', []):
create_obj['default_tax_rates'] = [get_id_from_stripe_data(t) for t in create_obj['default_tax_rates']]
create_obj.pop('tax_percent', None)
create_obj = create_obj
</DeepExtract>
obj = model_class._api_create(**create_obj)
created = True
<DeepExtract>
fake_id = self.get_fake_id(obj)
actual_id = obj['id']
if fake_id:
if fake_id in self.fake_id_map:
assert self.fake_id_map[fake_id] == actual_id, f'Duplicate fake_id {fake_id} - reset your test Stripe data at https://dashboard.stripe.com/account/data'
self.fake_id_map[fake_id] = actual_id
return fake_id
else:
return actual_id
</DeepExtract>
if do_preserve_sideeffect_fields:
<DeepExtract>
object_name = obj.get('object')
sideeffect_fields = object_sideeffect_fields.get(object_name, set()).union(set(common_sideeffect_fields))
old_obj = old_obj or {}
for (f, old_val) in old_obj.items():
try:
new_val = obj[f]
except KeyError:
continue
if isinstance(new_val, stripe.api_resources.ListObject):
for (n, (old_val_item, new_val_item)) in enumerate(zip(old_val.get('data', []), new_val.data)):
new_val.data[n] = self.preserve_old_sideeffect_values(old_obj=old_val_item, new_obj=new_val_item, object_sideeffect_fields=object_sideeffect_fields, common_sideeffect_fields=common_sideeffect_fields)
elif isinstance(new_val, stripe.stripe_object.StripeObject):
obj[f] = self.preserve_old_sideeffect_values(old_obj=old_val, new_obj=new_val, object_sideeffect_fields=object_sideeffect_fields, common_sideeffect_fields=common_sideeffect_fields)
elif f in sideeffect_fields and type(old_val) == type(new_val) and (old_val != new_val):
obj[f] = old_val
obj = obj
</DeepExtract>
return (created, obj)
|
def update_fixture_obj(self, old_obj, model_class, readonly_fields, do_preserve_sideeffect_fields, object_sideeffect_fields, common_sideeffect_fields):
"""
Given a fixture object, update it via stripe
:param model_class:
:param old_obj:
:param readonly_fields:
:return:
"""
old_obj = json.loads(self.unfake_json_ids(json.dumps(old_obj)))
id_ = old_obj['id']
self.stdout.write(f'{model_class.__name__} {id_}', ending='')
if issubclass(model_class, djstripe.models.Account):
obj = djstripe.models.Account().api_retrieve()
(created, obj) = (True, obj)
elif issubclass(model_class, djstripe.models.BankAccount):
customer_id = old_obj['customer']
try:
obj = stripe.Customer.retrieve_source(customer_id, old_obj['id'])
created = False
self.stdout.write(' found')
except InvalidRequestError:
self.stdout.write(' creating')
create_obj = deepcopy(old_obj)
for k in readonly_fields:
create_obj.pop(k, None)
create_obj['account_number'] = old_obj['metadata']['djstripe_test_fixture_account_number']
create_obj['object'] = 'bank_account'
obj = stripe.Customer.create_source(customer_id, source=create_obj)
created = True
(created, obj) = (created, obj)
elif issubclass(model_class, djstripe.models.Card):
customer_id = old_obj['customer']
try:
obj = stripe.Customer.retrieve_source(customer_id, old_obj['id'])
created = False
self.stdout.write(' found')
except InvalidRequestError:
self.stdout.write(' creating')
create_obj = deepcopy(old_obj)
for k in readonly_fields:
create_obj.pop(k, None)
obj = stripe.Customer.create_source(**{'source': 'tok_visa'})
for (k, v) in create_obj.items():
setattr(obj, k, v)
obj.save()
created = True
(created, obj) = (created, obj)
elif issubclass(model_class, djstripe.models.Source):
customer_id = old_obj['customer']
try:
obj = stripe.Customer.retrieve_source(customer_id, old_obj['id'])
created = False
self.stdout.write(' found')
except InvalidRequestError:
self.stdout.write(' creating')
create_obj = deepcopy(old_obj)
for k in readonly_fields:
create_obj.pop(k, None)
source_obj = djstripe.models.Source._api_create(**{'token': 'tok_visa', 'type': 'card'})
obj = stripe.Customer.create_source(**{'source': source_obj.id})
for (k, v) in create_obj.items():
setattr(obj, k, v)
obj.save()
created = True
(created, obj) = (created, obj)
elif issubclass(model_class, djstripe.models.Invoice):
subscription = djstripe.models.Subscription(id=old_obj['subscription']).api_retrieve()
id_ = subscription['latest_invoice']
try:
obj = djstripe.models.Invoice(id=id_).api_retrieve()
created = False
self.stdout.write(f' found {id_}')
except InvalidRequestError:
assert False, 'Expected to find invoice via subscription'
for k in ['metadata']:
if isinstance(obj.get(k), dict):
obj[k].update(old_obj.get(k, {}))
else:
obj[k] = old_obj[k]
obj.save()
(created, obj) = (created, obj)
elif issubclass(model_class, djstripe.models.Charge):
invoice = djstripe.models.Invoice(id=old_obj['invoice']).api_retrieve()
id_ = invoice['charge']
try:
obj = djstripe.models.Charge(id=id_).api_retrieve()
created = False
self.stdout.write(f' found {id_}')
except InvalidRequestError:
assert False, 'Expected to find charge via invoice'
for k in ['metadata']:
if isinstance(obj.get(k), dict):
obj[k].update(old_obj.get(k, {}))
else:
obj[k] = old_obj[k]
obj.save()
(created, obj) = (created, obj)
elif issubclass(model_class, djstripe.models.PaymentIntent):
invoice = djstripe.models.Invoice(id=old_obj['invoice']).api_retrieve()
id_ = invoice['payment_intent']
try:
obj = djstripe.models.PaymentIntent(id=id_).api_retrieve()
created = False
self.stdout.write(f' found {id_}')
except InvalidRequestError:
assert False, 'Expected to find payment_intent via invoice'
for k in ['metadata']:
if isinstance(obj.get(k), dict):
obj[k].update(old_obj.get(k, {}))
else:
obj[k] = old_obj[k]
obj.save()
(created, obj) = (created, obj)
elif issubclass(model_class, djstripe.models.PaymentMethod):
id_ = old_obj['id']
customer_id = old_obj['customer']
type_ = old_obj['type']
try:
obj = djstripe.models.PaymentMethod(id=id_).api_retrieve()
created = False
self.stdout.write(' found')
except InvalidRequestError:
self.stdout.write(' creating')
obj = djstripe.models.PaymentMethod()._api_create(type=type_, card={'token': 'tok_visa'})
stripe.PaymentMethod.attach(obj['id'], customer=customer_id, api_key=djstripe_settings.djstripe_settings.STRIPE_SECRET_KEY)
for k in ['metadata']:
if isinstance(obj.get(k), dict):
obj[k].update(old_obj.get(k, {}))
else:
obj[k] = old_obj[k]
obj.save()
created = True
(created, obj) = (created, obj)
elif issubclass(model_class, djstripe.models.BalanceTransaction):
source = old_obj['source']
if source.startswith('ch_'):
charge = djstripe.models.Charge(id=source).api_retrieve()
id_ = get_id_from_stripe_data(charge['balance_transaction'])
try:
obj = djstripe.models.BalanceTransaction(id=id_).api_retrieve()
created = False
self.stdout.write(f' found {id_}')
except InvalidRequestError:
assert False, 'Expected to find balance transaction via source'
(created, obj) = (created, obj)
else:
try:
obj = model_class(id=id_).api_retrieve()
created = False
self.stdout.write(' found')
except InvalidRequestError:
self.stdout.write(' creating')
create_obj = deepcopy(old_obj)
for k in readonly_fields:
create_obj.pop(k, None)
if issubclass(model_class, djstripe.models.Subscription):
items = create_obj.get('items', {}).get('data', [])
if len(items):
create_obj.pop('plan', None)
create_obj.pop('quantity', None)
subscription_item_create_fields = {'plan', 'billing_thresholds', 'metadata', 'quantity', 'tax_rates'}
create_items = []
for item in items:
create_item = {k: v for (k, v) in item.items() if k in subscription_item_create_fields}
create_item['plan'] = get_id_from_stripe_data(create_item['plan'])
if create_item.get('tax_rates', []):
create_item['tax_rates'] = [get_id_from_stripe_data(t) for t in create_item['tax_rates']]
create_items.append(create_item)
create_obj['items'] = create_items
else:
create_obj.pop('items', None)
create_obj['plan'] = get_id_from_stripe_data(create_obj['plan'])
if create_obj.get('default_tax_rates', []):
create_obj['default_tax_rates'] = [get_id_from_stripe_data(t) for t in create_obj['default_tax_rates']]
create_obj.pop('tax_percent', None)
create_obj = create_obj
obj = model_class._api_create(**create_obj)
created = True
fake_id = self.get_fake_id(obj)
actual_id = obj['id']
if fake_id:
if fake_id in self.fake_id_map:
assert self.fake_id_map[fake_id] == actual_id, f'Duplicate fake_id {fake_id} - reset your test Stripe data at https://dashboard.stripe.com/account/data'
self.fake_id_map[fake_id] = actual_id
return fake_id
else:
return actual_id
if do_preserve_sideeffect_fields:
object_name = obj.get('object')
sideeffect_fields = object_sideeffect_fields.get(object_name, set()).union(set(common_sideeffect_fields))
old_obj = old_obj or {}
for (f, old_val) in old_obj.items():
try:
new_val = obj[f]
except KeyError:
continue
if isinstance(new_val, stripe.api_resources.ListObject):
for (n, (old_val_item, new_val_item)) in enumerate(zip(old_val.get('data', []), new_val.data)):
new_val.data[n] = self.preserve_old_sideeffect_values(old_obj=old_val_item, new_obj=new_val_item, object_sideeffect_fields=object_sideeffect_fields, common_sideeffect_fields=common_sideeffect_fields)
elif isinstance(new_val, stripe.stripe_object.StripeObject):
obj[f] = self.preserve_old_sideeffect_values(old_obj=old_val, new_obj=new_val, object_sideeffect_fields=object_sideeffect_fields, common_sideeffect_fields=common_sideeffect_fields)
elif f in sideeffect_fields and type(old_val) == type(new_val) and (old_val != new_val):
obj[f] = old_val
obj = obj
return (created, obj)
|
dj-stripe
|
positive
|
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
(height, width) = (65, 65)
global_pool = False
<DeepExtract>
if None in [batch, None, None, 3]:
inputs = tf.placeholder(tf.float32, (batch, None, None, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(None), [None, 1]) + np.reshape(np.arange(None), [1, None]), [1, None, None, 1]), [batch, 1, 1, 3]))
</DeepExtract>
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
<DeepExtract>
bottleneck = resnet_v2.bottleneck
blocks = [resnet_utils.Block('block1', bottleneck, [(4, 1, 1)] * 2 + [(4, 1, 2)]), resnet_utils.Block('block2', bottleneck, [(8, 2, 1)] * 2 + [(8, 2, 2)]), resnet_utils.Block('block3', bottleneck, [(16, 4, 1)] * 2 + [(16, 4, 2)]), resnet_utils.Block('block4', bottleneck, [(32, 8, 1)] * 2)]
(output, _) = resnet_v2.resnet_v2(inputs, blocks, None, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, reuse=reuse, scope=scope)
</DeepExtract>
self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
<DeepExtract>
if None in [batch, height, width, 3]:
images = tf.placeholder(tf.float32, (batch, height, width, 3))
else:
images = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width]), [1, height, width, 1]), [batch, 1, 1, 3]))
</DeepExtract>
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
|
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
(height, width) = (65, 65)
global_pool = False
if None in [batch, None, None, 3]:
inputs = tf.placeholder(tf.float32, (batch, None, None, 3))
else:
inputs = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(None), [None, 1]) + np.reshape(np.arange(None), [1, None]), [1, None, None, 1]), [batch, 1, 1, 3]))
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
bottleneck = resnet_v2.bottleneck
blocks = [resnet_utils.Block('block1', bottleneck, [(4, 1, 1)] * 2 + [(4, 1, 2)]), resnet_utils.Block('block2', bottleneck, [(8, 2, 1)] * 2 + [(8, 2, 2)]), resnet_utils.Block('block3', bottleneck, [(16, 4, 1)] * 2 + [(16, 4, 2)]), resnet_utils.Block('block4', bottleneck, [(32, 8, 1)] * 2)]
(output, _) = resnet_v2.resnet_v2(inputs, blocks, None, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, reuse=reuse, scope=scope)
self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
if None in [batch, height, width, 3]:
images = tf.placeholder(tf.float32, (batch, height, width, 3))
else:
images = tf.to_float(np.tile(np.reshape(np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width]), [1, height, width, 1]), [batch, 1, 1, 3]))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
|
ARNet
|
positive
|
def test_commit_transaction(kafka_cluster):
output_topic = kafka_cluster.create_topic('output_topic')
producer = kafka_cluster.producer({'transactional.id': 'example_transactional_id', 'error_cb': prefixed_error_cb('test_commit_transaction')})
producer.init_transactions()
<DeepExtract>
print('=== Producing {} transactional messages to topic {}. ==='.format(100, output_topic))
producer.begin_transaction()
for value in ['test-data{}'.format(i) for i in range(0, 100)]:
producer.produce(output_topic, value, on_delivery=prefixed_delivery_cb(called_by()))
producer.poll(0.0)
producer.flush()
</DeepExtract>
producer.commit_transaction()
assert consume_committed(kafka_cluster.client_conf(), output_topic) == 100
|
def test_commit_transaction(kafka_cluster):
output_topic = kafka_cluster.create_topic('output_topic')
producer = kafka_cluster.producer({'transactional.id': 'example_transactional_id', 'error_cb': prefixed_error_cb('test_commit_transaction')})
producer.init_transactions()
print('=== Producing {} transactional messages to topic {}. ==='.format(100, output_topic))
producer.begin_transaction()
for value in ['test-data{}'.format(i) for i in range(0, 100)]:
producer.produce(output_topic, value, on_delivery=prefixed_delivery_cb(called_by()))
producer.poll(0.0)
producer.flush()
producer.commit_transaction()
assert consume_committed(kafka_cluster.client_conf(), output_topic) == 100
|
confluent-kafka-python
|
positive
|
def serialize_card_comment(comment):
<DeepExtract>
current_request = CrequestMiddleware.get_request()
self.current_user = current_request.user
self.current_member = None
if hasattr(self.current_user, 'member'):
self.current_member = self.current_user.member
if self.serialized_members_by_id is None:
self.serialized_members_by_id = {member.id: self.serialize_member(member) for member in self.board.members.all()}
if self.serialized_members_by_card is None:
self.serialized_members_by_card = CardMemberRelationship.get_members_by_card(self.board, member_cache=self.serialized_members_by_id)
</DeepExtract>
try:
review = comment.review
except CardReview.DoesNotExist:
review = None
try:
valued_card = comment.valued_card
except Card.DoesNotExist:
valued_card = None
serialized_author = self.serialized_members_by_id[comment.author_id]
comment_json = {'id': comment.id, 'uuid': comment.uuid, 'content': comment.content, 'creation_datetime': comment.creation_datetime, 'last_edition_datetime': comment.last_edition_datetime, 'author': serialized_author, 'blocking_card': {'id': comment.blocking_card.id, 'uuid': comment.blocking_card.uuid, 'name': comment.blocking_card.name, 'description': comment.blocking_card.description, 'url': reverse('boards:view_card', args=(self.board.id, comment.blocking_card.id)), 'short_url': comment.blocking_card.short_url, 'position': comment.blocking_card.position} if comment.blocking_card else None, 'valued_card': {'id': valued_card.id, 'uuid': valued_card.uuid, 'name': valued_card.name, 'description': valued_card.description, 'url': reverse('boards:view_card', args=(self.board.id, valued_card.id)), 'short_url': valued_card.short_url, 'position': valued_card.position} if valued_card else None, 'review': self.serialize_card_review(review) if review else None, 'requirement': self.serialize_requirement(comment.requirement) if comment.requirement else None}
return comment_json
|
def serialize_card_comment(comment):
current_request = CrequestMiddleware.get_request()
self.current_user = current_request.user
self.current_member = None
if hasattr(self.current_user, 'member'):
self.current_member = self.current_user.member
if self.serialized_members_by_id is None:
self.serialized_members_by_id = {member.id: self.serialize_member(member) for member in self.board.members.all()}
if self.serialized_members_by_card is None:
self.serialized_members_by_card = CardMemberRelationship.get_members_by_card(self.board, member_cache=self.serialized_members_by_id)
try:
review = comment.review
except CardReview.DoesNotExist:
review = None
try:
valued_card = comment.valued_card
except Card.DoesNotExist:
valued_card = None
serialized_author = self.serialized_members_by_id[comment.author_id]
comment_json = {'id': comment.id, 'uuid': comment.uuid, 'content': comment.content, 'creation_datetime': comment.creation_datetime, 'last_edition_datetime': comment.last_edition_datetime, 'author': serialized_author, 'blocking_card': {'id': comment.blocking_card.id, 'uuid': comment.blocking_card.uuid, 'name': comment.blocking_card.name, 'description': comment.blocking_card.description, 'url': reverse('boards:view_card', args=(self.board.id, comment.blocking_card.id)), 'short_url': comment.blocking_card.short_url, 'position': comment.blocking_card.position} if comment.blocking_card else None, 'valued_card': {'id': valued_card.id, 'uuid': valued_card.uuid, 'name': valued_card.name, 'description': valued_card.description, 'url': reverse('boards:view_card', args=(self.board.id, valued_card.id)), 'short_url': valued_card.short_url, 'position': valued_card.position} if valued_card else None, 'review': self.serialize_card_review(review) if review else None, 'requirement': self.serialize_requirement(comment.requirement) if comment.requirement else None}
return comment_json
|
djanban
|
positive
|
def test_title_1(self):
expected = self.portal.title_1
<DeepExtract>
viewlet = LogoViewlet(self.portal, self.request, None, None)
viewlet.update()
viewlet = viewlet
</DeepExtract>
self.assertEqual(viewlet.title_1(), expected)
|
def test_title_1(self):
expected = self.portal.title_1
viewlet = LogoViewlet(self.portal, self.request, None, None)
viewlet.update()
viewlet = viewlet
self.assertEqual(viewlet.title_1(), expected)
|
brasil.gov.portal
|
positive
|
def _images(self, tag, img_tensor, global_step=None, walltime=None, dataformats='NCHW'):
"""
Function to log multiple values
Parameters
----------
tag : str
the tag to store the image at
img_tensor : array-like
an array-like object containing the actual image; Must be
convertible to numpy
global_step : int
the global step
walltime :
the overall time
dataformats : str
string specifying the image format
"""
<DeepExtract>
(converted_args, converted_kwargs) = (args, kwargs)
</DeepExtract>
self._writer.add_images(*converted_args, **converted_kwargs)
|
def _images(self, tag, img_tensor, global_step=None, walltime=None, dataformats='NCHW'):
"""
Function to log multiple values
Parameters
----------
tag : str
the tag to store the image at
img_tensor : array-like
an array-like object containing the actual image; Must be
convertible to numpy
global_step : int
the global step
walltime :
the overall time
dataformats : str
string specifying the image format
"""
(converted_args, converted_kwargs) = (args, kwargs)
self._writer.add_images(*converted_args, **converted_kwargs)
|
delira
|
positive
|
def he_normal(shape, name=None):
""" Reference: He et al., http://arxiv.org/abs/1502.01852
"""
<DeepExtract>
fan_in = shape[0] if len(shape) == 2 else np.prod(shape[1:])
fan_out = shape[1] if len(shape) == 2 else shape[0]
(fan_in, fan_out) = (fan_in, fan_out)
</DeepExtract>
s = np.sqrt(2.0 / fan_in)
return normal(shape, s, name=name)
|
def he_normal(shape, name=None):
""" Reference: He et al., http://arxiv.org/abs/1502.01852
"""
fan_in = shape[0] if len(shape) == 2 else np.prod(shape[1:])
fan_out = shape[1] if len(shape) == 2 else shape[0]
(fan_in, fan_out) = (fan_in, fan_out)
s = np.sqrt(2.0 / fan_in)
return normal(shape, s, name=name)
|
Deep-Bayesian-Active-Learning
|
positive
|
def write_code(line, comment=''):
<DeepExtract>
parts = line.strip().split(None, 2)
if parts and parts[0] in ('include', 'rebase'):
depr('The include and rebase keywords are functions now.')
if len(parts) == 1:
(line, comment) = ('_printlist([base])', comment)
elif len(parts) == 2:
(line, comment) = ('_=%s(%r)' % tuple(parts), comment)
else:
(line, comment) = ('_=%s(%r, %s)' % tuple(parts), comment)
if self.lineno <= 2 and (not line.strip()) and ('coding' in comment):
m = re.match('#.*coding[:=]\\s*([-\\w.]+)', comment)
if m:
depr('PEP263 encoding strings in templates are deprecated.')
enc = m.group(1)
self.source = self.source.encode(self.encoding).decode(enc)
self.encoding = enc
(line, comment) = (line, comment.replace('coding', 'coding*'))
(line, comment) = (line, comment)
</DeepExtract>
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
|
def write_code(line, comment=''):
parts = line.strip().split(None, 2)
if parts and parts[0] in ('include', 'rebase'):
depr('The include and rebase keywords are functions now.')
if len(parts) == 1:
(line, comment) = ('_printlist([base])', comment)
elif len(parts) == 2:
(line, comment) = ('_=%s(%r)' % tuple(parts), comment)
else:
(line, comment) = ('_=%s(%r, %s)' % tuple(parts), comment)
if self.lineno <= 2 and (not line.strip()) and ('coding' in comment):
m = re.match('#.*coding[:=]\\s*([-\\w.]+)', comment)
if m:
depr('PEP263 encoding strings in templates are deprecated.')
enc = m.group(1)
self.source = self.source.encode(self.encoding).decode(enc)
self.encoding = enc
(line, comment) = (line, comment.replace('coding', 'coding*'))
(line, comment) = (line, comment)
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
|
aws-servicebroker
|
positive
|
def server_udp_post_decrypt(self, buf):
mac_key = self.server_info.key
md5data = hmac.new(mac_key, buf[-8:-5], self.hashfunc).digest()
uid = struct.unpack('<I', buf[-5:-1])[0] ^ struct.unpack('<I', md5data[:4])[0]
uid = struct.pack('<I', uid)
if uid in self.server_info.users:
user_key = self.server_info.users[uid]
else:
uid = None
if not self.server_info.users:
user_key = self.server_info.key
else:
user_key = self.server_info.recv_iv
if hmac.new(user_key, buf[:-1], self.hashfunc).digest()[:1] != buf[-1:]:
return (b'', None)
<DeepExtract>
self.random_client.init_from_bin(md5data)
rand_len = self.random_client.next() % 127
</DeepExtract>
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(user_key)) + to_bytes(base64.b64encode(md5data)), 'rc4')
out_buf = encryptor.decrypt(buf[:-8 - rand_len])
return (out_buf, uid)
|
def server_udp_post_decrypt(self, buf):
mac_key = self.server_info.key
md5data = hmac.new(mac_key, buf[-8:-5], self.hashfunc).digest()
uid = struct.unpack('<I', buf[-5:-1])[0] ^ struct.unpack('<I', md5data[:4])[0]
uid = struct.pack('<I', uid)
if uid in self.server_info.users:
user_key = self.server_info.users[uid]
else:
uid = None
if not self.server_info.users:
user_key = self.server_info.key
else:
user_key = self.server_info.recv_iv
if hmac.new(user_key, buf[:-1], self.hashfunc).digest()[:1] != buf[-1:]:
return (b'', None)
self.random_client.init_from_bin(md5data)
rand_len = self.random_client.next() % 127
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(user_key)) + to_bytes(base64.b64encode(md5data)), 'rc4')
out_buf = encryptor.decrypt(buf[:-8 - rand_len])
return (out_buf, uid)
|
Dockerfiles
|
positive
|
def decode_possible_dx9_shader(fp, off):
fp.seek(off)
ret = io.BytesIO()
def remember(size):
buf = fp.read(size)
ret.write(buf)
return buf
(minor, major, _shader_type) = struct.unpack('<2BH', remember(4))
shader_type = {65535: 'ps', 65534: 'vs', 18008: 'fx'}.get(_shader_type, None)
if not shader_type:
print('Invalid DX9 shader type 0x%x_%i_%i at offset 0x%x' % (_shader_type, major, minor, off))
return (None, None)
if not valid_dx9_target(shader_type, major, minor):
print('Invalid DX9 shader version %s_%i_%i at offset 0x%x' % (shader_type, major, minor, off))
return (None, None)
print('Possible DX9 %s_%i_%i shader at offset 0x%x' % (shader_type, major, minor, off))
while True:
(opcode, data) = struct.unpack('<2H', remember(4))
if opcode == 65534:
ins_len = (data & 32767) * 4
assert not data & 32768
if dx11shaderanalyse.verbosity >= 1:
print(' 0x%04x: %4s' % (fp.tell(), remember(4).decode('ascii')))
<DeepExtract>
buf = fp.read(ins_len - 4)
ret.write(buf)
return buf
</DeepExtract>
else:
<DeepExtract>
buf = fp.read(ins_len)
ret.write(buf)
return buf
</DeepExtract>
elif opcode == 65535:
assert data in (0, 65535)
return (shader_type, ret)
else:
ins_len = (data & 15) * 4
<DeepExtract>
buf = fp.read(ins_len)
ret.write(buf)
return buf
</DeepExtract>
|
def decode_possible_dx9_shader(fp, off):
fp.seek(off)
ret = io.BytesIO()
def remember(size):
buf = fp.read(size)
ret.write(buf)
return buf
(minor, major, _shader_type) = struct.unpack('<2BH', remember(4))
shader_type = {65535: 'ps', 65534: 'vs', 18008: 'fx'}.get(_shader_type, None)
if not shader_type:
print('Invalid DX9 shader type 0x%x_%i_%i at offset 0x%x' % (_shader_type, major, minor, off))
return (None, None)
if not valid_dx9_target(shader_type, major, minor):
print('Invalid DX9 shader version %s_%i_%i at offset 0x%x' % (shader_type, major, minor, off))
return (None, None)
print('Possible DX9 %s_%i_%i shader at offset 0x%x' % (shader_type, major, minor, off))
while True:
(opcode, data) = struct.unpack('<2H', remember(4))
if opcode == 65534:
ins_len = (data & 32767) * 4
assert not data & 32768
if dx11shaderanalyse.verbosity >= 1:
print(' 0x%04x: %4s' % (fp.tell(), remember(4).decode('ascii')))
buf = fp.read(ins_len - 4)
ret.write(buf)
return buf
else:
buf = fp.read(ins_len)
ret.write(buf)
return buf
elif opcode == 65535:
assert data in (0, 65535)
return (shader_type, ret)
else:
ins_len = (data & 15) * 4
buf = fp.read(ins_len)
ret.write(buf)
return buf
|
3d-fixes
|
positive
|
@with_in_memory_connection
def test_add_remove_binding_for_scatter_type(self, conn):
(a, b) = (A(conn), A(conn))
(routing_key, mock_entity_type) = ('foooo', ACTOR_TYPE.SCATTER)
<DeepExtract>
exchange = a.type_to_exchange[mock_entity_type]()
exchange.bind_to = Mock()
exchange.exchange_unbind = Mock()
exchange.declare = Mock()
a.type_to_exchange[mock_entity_type] = Mock(return_value=exchange)
dest_ex = exchange
</DeepExtract>
source_ex = b.outbox
a._add_binding(source_ex.as_dict(), routing_key=routing_key, inbox_type=mock_entity_type)
dest_ex.bind_to.assert_called_with(exchange=source_ex, routing_key=routing_key)
a._remove_binding(source_ex.as_dict(), routing_key, mock_entity_type)
dest_ex.exchange_unbind.assert_called_with(exchange=source_ex, routing_key=routing_key)
|
@with_in_memory_connection
def test_add_remove_binding_for_scatter_type(self, conn):
(a, b) = (A(conn), A(conn))
(routing_key, mock_entity_type) = ('foooo', ACTOR_TYPE.SCATTER)
exchange = a.type_to_exchange[mock_entity_type]()
exchange.bind_to = Mock()
exchange.exchange_unbind = Mock()
exchange.declare = Mock()
a.type_to_exchange[mock_entity_type] = Mock(return_value=exchange)
dest_ex = exchange
source_ex = b.outbox
a._add_binding(source_ex.as_dict(), routing_key=routing_key, inbox_type=mock_entity_type)
dest_ex.bind_to.assert_called_with(exchange=source_ex, routing_key=routing_key)
a._remove_binding(source_ex.as_dict(), routing_key, mock_entity_type)
dest_ex.exchange_unbind.assert_called_with(exchange=source_ex, routing_key=routing_key)
|
cell
|
positive
|
def extract_mean(self):
(self.heap[0][0], self.heap[0][1], self.heap[self.heap_size - 1][0], self.heap[self.heap_size - 1][1]) = (self.heap[self.heap_size - 1][0], self.heap[self.heap_size - 1][1], self.heap[0][0], self.heap[0][1])
t_c = self.heap[self.heap_size - 1][0]
t_v = self.heap[self.heap_size - 1][1]
self.heap.pop()
self.heap_size -= 1
<DeepExtract>
lt = 0 * 2 + 1
rt = 0 * 2 + 2
if lt < self.heap_size and self.heap[lt][1] < self.heap[0][1]:
min_index = lt
else:
min_index = 0
if rt < self.heap_size and self.heap[rt][1] < self.heap[min_index][1]:
min_index = rt
if min_index != 0:
(self.heap[0][0], self.heap[min_index][0], self.heap[0][1], self.heap[min_index][1]) = (self.heap[min_index][0], self.heap[0][0], self.heap[min_index][1], self.heap[0][1])
self.heapify(min_index)
</DeepExtract>
return (t_c, t_v)
|
def extract_mean(self):
(self.heap[0][0], self.heap[0][1], self.heap[self.heap_size - 1][0], self.heap[self.heap_size - 1][1]) = (self.heap[self.heap_size - 1][0], self.heap[self.heap_size - 1][1], self.heap[0][0], self.heap[0][1])
t_c = self.heap[self.heap_size - 1][0]
t_v = self.heap[self.heap_size - 1][1]
self.heap.pop()
self.heap_size -= 1
lt = 0 * 2 + 1
rt = 0 * 2 + 2
if lt < self.heap_size and self.heap[lt][1] < self.heap[0][1]:
min_index = lt
else:
min_index = 0
if rt < self.heap_size and self.heap[rt][1] < self.heap[min_index][1]:
min_index = rt
if min_index != 0:
(self.heap[0][0], self.heap[min_index][0], self.heap[0][1], self.heap[min_index][1]) = (self.heap[min_index][0], self.heap[0][0], self.heap[min_index][1], self.heap[0][1])
self.heapify(min_index)
return (t_c, t_v)
|
Competitive-Programming
|
positive
|
def fix_unreal_halo_vpm(tree, args):
if not isinstance(tree, PS3):
raise Exception('Unreal ViewProjectionMatrix halo fix is only applicable to pixel shaders')
<DeepExtract>
try:
match = find_header(tree, unreal_ViewProjectionMatrix_pattern)
except KeyError:
debug_verbose(0, 'Shader does not use %s' % 'ViewProjectionMatrix')
(matrix, results) = (None, None)
matrix = [Register(match.group('constant'))]
for i in range(1, 4):
matrix.append(Register('c%i' % (matrix[0].num + i)))
debug_verbose(0, '%s identified as %s-%s' % ('ViewProjectionMatrix', matrix[0], matrix[3]))
results = []
for i in range(4):
result = scan_shader(tree, matrix[i], write=False, opcode=('mul', 'mad', 'add'))
l = len(result)
if l != 1:
debug('Unsupported: %s[%i] referenced %i times' % ('ViewProjectionMatrix', i, l))
(matrix, results) = (None, None)
results.extend(result)
results = sorted(results, key=lambda x: x.line)
if results[0].instruction.opcode != 'mul' or results[1].instruction.opcode != 'mad' or results[2].instruction.opcode != 'mad' or (results[3].instruction.opcode not in ('mad', 'add')):
debug('Invalid matrix multiply flow: %s' % ' -> '.join([x.instruction.opcode for x in results]))
(matrix, results) = (None, None)
for i in range(3):
if scan_shader(tree, results[i].instruction.args[0].reg, components=results[i].instruction.args[0].swizzle, write=True, start=results[i].line + 1, end=results[i + 1].line):
debug('Intermediate matrix multiply result clobbered')
(matrix, results) = (None, None)
if results[i].instruction.args[0].reg not in [x.reg for x in results[i + 1].instruction.args[1:]]:
debug('Intermediate matrix multiply register not used in following instruction')
(matrix, results) = (None, None)
(matrix, results) = (matrix, results)
</DeepExtract>
if matrix is None:
return
<DeepExtract>
if verbosity >= -1:
return debug(*args, **kwargs)
</DeepExtract>
(line, linepos, instr) = results[3]
<DeepExtract>
resolved_swizzle = asm_hlsl_swizzle(instr.args[0].swizzle, instr.args[1].swizzle)
components = []
for component in 'xyzw':
location = resolved_swizzle.find(component)
components.append(location != -1 and instr.args[0].swizzle[location] or None)
components = components
</DeepExtract>
if components[0] is None or components[3] is None:
<DeepExtract>
if verbosity >= 0:
return debug(*args, **kwargs)
</DeepExtract>
return None
clip_reg_x = Register('%s.%s' % (instr.args[0].reg, components[0]))
clip_reg_w = Register('%s.%s' % (instr.args[0].reg, components[3]))
t = tree._find_free_reg('r', PS3, desired=31)
<DeepExtract>
if hasattr(tree, 'stereo_const'):
(stereo_const, offset) = (tree.stereo_const, 0)
if isinstance(tree, VertexShader) and args.use_nv_stereo_reg_vs:
tree.stereo_sampler = None
tree.nv_stereo_reg = Register(args.use_nv_stereo_reg_vs)
elif isinstance(tree, VertexShader) and args.stereo_sampler_vs:
tree.stereo_sampler = args.stereo_sampler_vs
if 's' in tree.reg_types and tree.stereo_sampler in tree.reg_types['s']:
raise StereoSamplerAlreadyInUse(tree.stereo_sampler)
elif isinstance(tree, PixelShader) and args.stereo_sampler_ps:
tree.stereo_sampler = args.stereo_sampler_ps
if 's' in tree.reg_types and tree.stereo_sampler in tree.reg_types['s']:
raise StereoSamplerAlreadyInUse(tree.stereo_sampler)
elif 's' in tree.reg_types and tree.def_stereo_sampler in tree.reg_types['s']:
tree.stereo_sampler = tree._find_free_reg('s', None)
debug('WARNING: SHADER ALREADY USES %s! USING %s FOR STEREO SAMPLER INSTEAD!' % (tree.def_stereo_sampler, tree.stereo_sampler))
if isinstance(tree, VertexShader):
acronym = 'VS'
quirk = 257
elif isinstance(tree, PixelShader):
acronym = 'PS'
quirk = 0
else:
raise AssertionError()
if not hasattr(tree, 'ini'):
tree.ini = []
tree.ini.append(('Def%sSampler' % acronym, str(quirk + tree.stereo_sampler.num), 'Shader already uses %s, so use %s instead:' % (tree.def_stereo_sampler, tree.stereo_sampler)))
else:
tree.stereo_sampler = tree.def_stereo_sampler
if args.adjust_multiply and args.adjust_multiply != -1:
0.5 = args.adjust_multiply
tree.stereo_const = tree._find_free_reg('c', None, desired=preferred_stereo_const)
offset = 0
offset += tree.insert_decl()
offset += tree.insert_decl('def', [tree.stereo_const, x, y, z, 0.5])
if tree.stereo_sampler is not None:
offset += tree.insert_decl('dcl_2d', [tree.stereo_sampler])
offset += tree.insert_decl()
(stereo_const, offset) = (tree.stereo_const, offset)
</DeepExtract>
<DeepExtract>
for p in range(line + offset, len(tree)):
if isinstance(tree[p], NewLine):
pos = p + 1
pos = len(tree) + 1
</DeepExtract>
pos += insert_vanity_comment(args, tree, pos, 'Unreal Engine ViewProjectionMatrix halo fix inserted with')
pos += tree.insert_instr(pos, NewInstruction('texldl', [t, stereo_const.z, tree.stereo_sampler]))
separation = t.x
convergence = t.y
pos += tree.insert_instr(pos, NewInstruction('add', [t.w, clip_reg_w, -convergence]))
pos += tree.insert_instr(pos, NewInstruction('mad', [clip_reg_x, t.w, separation, clip_reg_x]))
pos += tree.insert_instr(pos)
tree.autofixed = True
|
def fix_unreal_halo_vpm(tree, args):
if not isinstance(tree, PS3):
raise Exception('Unreal ViewProjectionMatrix halo fix is only applicable to pixel shaders')
try:
match = find_header(tree, unreal_ViewProjectionMatrix_pattern)
except KeyError:
debug_verbose(0, 'Shader does not use %s' % 'ViewProjectionMatrix')
(matrix, results) = (None, None)
matrix = [Register(match.group('constant'))]
for i in range(1, 4):
matrix.append(Register('c%i' % (matrix[0].num + i)))
debug_verbose(0, '%s identified as %s-%s' % ('ViewProjectionMatrix', matrix[0], matrix[3]))
results = []
for i in range(4):
result = scan_shader(tree, matrix[i], write=False, opcode=('mul', 'mad', 'add'))
l = len(result)
if l != 1:
debug('Unsupported: %s[%i] referenced %i times' % ('ViewProjectionMatrix', i, l))
(matrix, results) = (None, None)
results.extend(result)
results = sorted(results, key=lambda x: x.line)
if results[0].instruction.opcode != 'mul' or results[1].instruction.opcode != 'mad' or results[2].instruction.opcode != 'mad' or (results[3].instruction.opcode not in ('mad', 'add')):
debug('Invalid matrix multiply flow: %s' % ' -> '.join([x.instruction.opcode for x in results]))
(matrix, results) = (None, None)
for i in range(3):
if scan_shader(tree, results[i].instruction.args[0].reg, components=results[i].instruction.args[0].swizzle, write=True, start=results[i].line + 1, end=results[i + 1].line):
debug('Intermediate matrix multiply result clobbered')
(matrix, results) = (None, None)
if results[i].instruction.args[0].reg not in [x.reg for x in results[i + 1].instruction.args[1:]]:
debug('Intermediate matrix multiply register not used in following instruction')
(matrix, results) = (None, None)
(matrix, results) = (matrix, results)
if matrix is None:
return
if verbosity >= -1:
return debug(*args, **kwargs)
(line, linepos, instr) = results[3]
resolved_swizzle = asm_hlsl_swizzle(instr.args[0].swizzle, instr.args[1].swizzle)
components = []
for component in 'xyzw':
location = resolved_swizzle.find(component)
components.append(location != -1 and instr.args[0].swizzle[location] or None)
components = components
if components[0] is None or components[3] is None:
if verbosity >= 0:
return debug(*args, **kwargs)
return None
clip_reg_x = Register('%s.%s' % (instr.args[0].reg, components[0]))
clip_reg_w = Register('%s.%s' % (instr.args[0].reg, components[3]))
t = tree._find_free_reg('r', PS3, desired=31)
if hasattr(tree, 'stereo_const'):
(stereo_const, offset) = (tree.stereo_const, 0)
if isinstance(tree, VertexShader) and args.use_nv_stereo_reg_vs:
tree.stereo_sampler = None
tree.nv_stereo_reg = Register(args.use_nv_stereo_reg_vs)
elif isinstance(tree, VertexShader) and args.stereo_sampler_vs:
tree.stereo_sampler = args.stereo_sampler_vs
if 's' in tree.reg_types and tree.stereo_sampler in tree.reg_types['s']:
raise StereoSamplerAlreadyInUse(tree.stereo_sampler)
elif isinstance(tree, PixelShader) and args.stereo_sampler_ps:
tree.stereo_sampler = args.stereo_sampler_ps
if 's' in tree.reg_types and tree.stereo_sampler in tree.reg_types['s']:
raise StereoSamplerAlreadyInUse(tree.stereo_sampler)
elif 's' in tree.reg_types and tree.def_stereo_sampler in tree.reg_types['s']:
tree.stereo_sampler = tree._find_free_reg('s', None)
debug('WARNING: SHADER ALREADY USES %s! USING %s FOR STEREO SAMPLER INSTEAD!' % (tree.def_stereo_sampler, tree.stereo_sampler))
if isinstance(tree, VertexShader):
acronym = 'VS'
quirk = 257
elif isinstance(tree, PixelShader):
acronym = 'PS'
quirk = 0
else:
raise AssertionError()
if not hasattr(tree, 'ini'):
tree.ini = []
tree.ini.append(('Def%sSampler' % acronym, str(quirk + tree.stereo_sampler.num), 'Shader already uses %s, so use %s instead:' % (tree.def_stereo_sampler, tree.stereo_sampler)))
else:
tree.stereo_sampler = tree.def_stereo_sampler
if args.adjust_multiply and args.adjust_multiply != -1:
0.5 = args.adjust_multiply
tree.stereo_const = tree._find_free_reg('c', None, desired=preferred_stereo_const)
offset = 0
offset += tree.insert_decl()
offset += tree.insert_decl('def', [tree.stereo_const, x, y, z, 0.5])
if tree.stereo_sampler is not None:
offset += tree.insert_decl('dcl_2d', [tree.stereo_sampler])
offset += tree.insert_decl()
(stereo_const, offset) = (tree.stereo_const, offset)
for p in range(line + offset, len(tree)):
if isinstance(tree[p], NewLine):
pos = p + 1
pos = len(tree) + 1
pos += insert_vanity_comment(args, tree, pos, 'Unreal Engine ViewProjectionMatrix halo fix inserted with')
pos += tree.insert_instr(pos, NewInstruction('texldl', [t, stereo_const.z, tree.stereo_sampler]))
separation = t.x
convergence = t.y
pos += tree.insert_instr(pos, NewInstruction('add', [t.w, clip_reg_w, -convergence]))
pos += tree.insert_instr(pos, NewInstruction('mad', [clip_reg_x, t.w, separation, clip_reg_x]))
pos += tree.insert_instr(pos)
tree.autofixed = True
|
3d-fixes
|
positive
|
def materialize_join_graphs(materializable_join_graphs):
to_return = []
for (mjg, filters) in materializable_join_graphs:
attrs_to_project = dpu.obtain_attributes_to_project(filters)
materialized_virtual_schema = dpu.materialize_join_graph_sample(mjg, self, sample_size=1000)
if materialized_virtual_schema is False:
continue
view_metadata = dict()
view_metadata['#join_graphs'] = len(materializable_join_graphs)
<DeepExtract>
nodes = dict()
edges = []
for jp in mjg:
for hop in jp:
label = hop.db_name + ':' + hop.source_name
node_descr = {'id': hash(label), 'label': label}
node_id = hash(label)
if node_id not in nodes:
nodes[node_id] = node_descr
(l, r) = jp
l_label = l.db_name + ':' + l.source_name
r_label = r.db_name + ':' + r.source_name
edge_descr = {'from': hash(l_label), 'to': hash(r_label)}
edges.append(edge_descr)
view_metadata['join_graph'] = {'nodes': list(nodes.values()), 'edges': list(edges)}
</DeepExtract>
to_return.append((materialized_virtual_schema, attrs_to_project, view_metadata))
return to_return
|
def materialize_join_graphs(materializable_join_graphs):
to_return = []
for (mjg, filters) in materializable_join_graphs:
attrs_to_project = dpu.obtain_attributes_to_project(filters)
materialized_virtual_schema = dpu.materialize_join_graph_sample(mjg, self, sample_size=1000)
if materialized_virtual_schema is False:
continue
view_metadata = dict()
view_metadata['#join_graphs'] = len(materializable_join_graphs)
nodes = dict()
edges = []
for jp in mjg:
for hop in jp:
label = hop.db_name + ':' + hop.source_name
node_descr = {'id': hash(label), 'label': label}
node_id = hash(label)
if node_id not in nodes:
nodes[node_id] = node_descr
(l, r) = jp
l_label = l.db_name + ':' + l.source_name
r_label = r.db_name + ':' + r.source_name
edge_descr = {'from': hash(l_label), 'to': hash(r_label)}
edges.append(edge_descr)
view_metadata['join_graph'] = {'nodes': list(nodes.values()), 'edges': list(edges)}
to_return.append((materialized_virtual_schema, attrs_to_project, view_metadata))
return to_return
|
aurum-datadiscovery
|
positive
|
@_if_not_installed('SortSam.jar')
def install_picard(env):
version = env.tool_version
mirror_info = '?use_mirror=voxel'
url = 'http://downloads.sourceforge.net/project/picard/picard-tools/%s/picard-tools-%s.zip' % (version, version)
pkg_name = 'picard'
install_dir = env.system_install
install_cmd = env.safe_sudo if env.use_sudo else env.safe_run
if not env.safe_exists(install_dir):
install_cmd('mkdir -p %s' % install_dir)
with _make_tmp_dir() as work_dir:
with cd(work_dir):
env.safe_run('wget %s%s -O %s' % (url, mirror_info, os.path.split(url)[-1]))
env.safe_run('unzip %s' % os.path.split(url)[-1])
install_cmd('mv picard-tools-%s/*.jar %s' % (version, install_dir))
<DeepExtract>
env.safe_sudo('touch %s/env.sh' % install_dir)
env.safe_sudo('chmod +x %s/env.sh' % install_dir)
_set_default_config(env, install_dir)
</DeepExtract>
jar_dir = os.path.join(env.galaxy_jars_dir, 'picard')
if not env.safe_exists(jar_dir):
install_cmd('mkdir -p %s' % jar_dir)
tool_dir = os.path.join(env.galaxy_tools_dir, pkg_name, 'default')
install_cmd('ln --force --symbolic %s/*.jar %s/.' % (tool_dir, jar_dir))
install_cmd('chown --recursive %s:%s %s' % (env.galaxy_user, env.galaxy_user, jar_dir))
|
@_if_not_installed('SortSam.jar')
def install_picard(env):
version = env.tool_version
mirror_info = '?use_mirror=voxel'
url = 'http://downloads.sourceforge.net/project/picard/picard-tools/%s/picard-tools-%s.zip' % (version, version)
pkg_name = 'picard'
install_dir = env.system_install
install_cmd = env.safe_sudo if env.use_sudo else env.safe_run
if not env.safe_exists(install_dir):
install_cmd('mkdir -p %s' % install_dir)
with _make_tmp_dir() as work_dir:
with cd(work_dir):
env.safe_run('wget %s%s -O %s' % (url, mirror_info, os.path.split(url)[-1]))
env.safe_run('unzip %s' % os.path.split(url)[-1])
install_cmd('mv picard-tools-%s/*.jar %s' % (version, install_dir))
env.safe_sudo('touch %s/env.sh' % install_dir)
env.safe_sudo('chmod +x %s/env.sh' % install_dir)
_set_default_config(env, install_dir)
jar_dir = os.path.join(env.galaxy_jars_dir, 'picard')
if not env.safe_exists(jar_dir):
install_cmd('mkdir -p %s' % jar_dir)
tool_dir = os.path.join(env.galaxy_tools_dir, pkg_name, 'default')
install_cmd('ln --force --symbolic %s/*.jar %s/.' % (tool_dir, jar_dir))
install_cmd('chown --recursive %s:%s %s' % (env.galaxy_user, env.galaxy_user, jar_dir))
|
cloudbiolinux
|
positive
|
def __isub__(self, others):
hasTag = self._hasTag
for other in others:
if hasTag(other) is True:
<DeepExtract>
if self.parentNode:
self.parentNode.removeChild(self)
return True
return False
</DeepExtract>
return self
|
def __isub__(self, others):
hasTag = self._hasTag
for other in others:
if hasTag(other) is True:
if self.parentNode:
self.parentNode.removeChild(self)
return True
return False
return self
|
AdvancedHTMLParser
|
positive
|
@pytest.mark.parametrize('calculation', ('relax', 'vc-relax'))
@pytest.mark.parametrize('settings_key', ('fixed_coords', 'FIXED_COORDS'))
def test_fixed_coords(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, calculation, settings_key):
"""Test the parsing of a successful calculation that has specified a ``fixed_coords`` setting.
The output files of this test were generated for a calculation of a FCC Si supercell where
"""
name = f"fixed_coords_{calculation.replace('-', '')}"
entry_point_calc_job = 'quantumespresso.pw'
entry_point_parser = 'quantumespresso.pw'
<DeepExtract>
def _generate_inputs(calculation_type='scf', parameters=None, settings=None, metadata=None):
structure = generate_structure()
parameters = {'CONTROL': {'calculation': calculation}, **(parameters or {})}
kpoints = orm.KpointsData()
kpoints.set_cell_from_structure(structure)
kpoints.set_kpoints_mesh_from_density(0.15)
inputs = AttributeDict({'structure': generate_structure(), 'kpoints': kpoints, 'parameters': orm.Dict(parameters), 'settings': orm.Dict({settings_key: [[True, True, True], [True, True, False]]}), 'metadata': metadata or {}})
inputs = _generate_inputs
</DeepExtract>
node = generate_calc_job_node(entry_point_calc_job, fixture_localhost, name, inputs)
parser = generate_parser(entry_point_parser)
(_, calcfunction) = parser.parse_from_node(node, store_provenance=False)
assert calcfunction.is_finished, calcfunction.exception
assert calcfunction.is_finished_ok, calcfunction.exit_message
|
@pytest.mark.parametrize('calculation', ('relax', 'vc-relax'))
@pytest.mark.parametrize('settings_key', ('fixed_coords', 'FIXED_COORDS'))
def test_fixed_coords(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, calculation, settings_key):
"""Test the parsing of a successful calculation that has specified a ``fixed_coords`` setting.
The output files of this test were generated for a calculation of a FCC Si supercell where
"""
name = f"fixed_coords_{calculation.replace('-', '')}"
entry_point_calc_job = 'quantumespresso.pw'
entry_point_parser = 'quantumespresso.pw'
def _generate_inputs(calculation_type='scf', parameters=None, settings=None, metadata=None):
structure = generate_structure()
parameters = {'CONTROL': {'calculation': calculation}, **(parameters or {})}
kpoints = orm.KpointsData()
kpoints.set_cell_from_structure(structure)
kpoints.set_kpoints_mesh_from_density(0.15)
inputs = AttributeDict({'structure': generate_structure(), 'kpoints': kpoints, 'parameters': orm.Dict(parameters), 'settings': orm.Dict({settings_key: [[True, True, True], [True, True, False]]}), 'metadata': metadata or {}})
inputs = _generate_inputs
node = generate_calc_job_node(entry_point_calc_job, fixture_localhost, name, inputs)
parser = generate_parser(entry_point_parser)
(_, calcfunction) = parser.parse_from_node(node, store_provenance=False)
assert calcfunction.is_finished, calcfunction.exception
assert calcfunction.is_finished_ok, calcfunction.exit_message
|
aiida-quantumespresso
|
positive
|
def test_warning_if_remote_return_not_None(self):
<DeepExtract>
async def create():
server = await aiozmq.rpc.serve_pipeline(MyHandler(self.queue, self.loop), bind='tcp://127.0.0.1:*', loop=self.loop if use_loop else None, log_exceptions=log_exceptions, exclude_log_exceptions=exclude_log_exceptions)
connect = next(iter(server.transport.bindings()))
client = await aiozmq.rpc.connect_pipeline(connect=connect if use_loop else None)
(client, server) = (client, server)
(self.client, self.server) = self.loop.run_until_complete(create())
(client, server) = (self.client, self.server)
</DeepExtract>
async def communicate():
with log_hook('aiozmq.rpc', self.err_queue):
await client.notify.suspicious(1)
ret = await self.queue.get()
self.assertEqual(1, ret)
ret = await self.err_queue.get()
self.assertEqual(logging.WARNING, ret.levelno)
self.assertEqual('Pipeline handler %r returned not None', ret.msg)
self.assertEqual(('suspicious',), ret.args)
self.assertIsNone(ret.exc_info)
async def dummy():
pass
self.loop.run_until_complete(communicate())
self.loop.run_until_complete(dummy())
|
def test_warning_if_remote_return_not_None(self):
async def create():
server = await aiozmq.rpc.serve_pipeline(MyHandler(self.queue, self.loop), bind='tcp://127.0.0.1:*', loop=self.loop if use_loop else None, log_exceptions=log_exceptions, exclude_log_exceptions=exclude_log_exceptions)
connect = next(iter(server.transport.bindings()))
client = await aiozmq.rpc.connect_pipeline(connect=connect if use_loop else None)
(client, server) = (client, server)
(self.client, self.server) = self.loop.run_until_complete(create())
(client, server) = (self.client, self.server)
async def communicate():
with log_hook('aiozmq.rpc', self.err_queue):
await client.notify.suspicious(1)
ret = await self.queue.get()
self.assertEqual(1, ret)
ret = await self.err_queue.get()
self.assertEqual(logging.WARNING, ret.levelno)
self.assertEqual('Pipeline handler %r returned not None', ret.msg)
self.assertEqual(('suspicious',), ret.args)
self.assertIsNone(ret.exc_info)
async def dummy():
pass
self.loop.run_until_complete(communicate())
self.loop.run_until_complete(dummy())
|
aiozmq
|
positive
|
def _get_hyperparams(curvatures, damping, problem):
<DeepExtract>
factory = GridSearchFactory()
grid_search = factory.make_grid_search(curvatures[0], damping, problem)
</DeepExtract>
hyperparams = list(grid_search.get_hyperparams().keys())
return hyperparams
|
def _get_hyperparams(curvatures, damping, problem):
factory = GridSearchFactory()
grid_search = factory.make_grid_search(curvatures[0], damping, problem)
hyperparams = list(grid_search.get_hyperparams().keys())
return hyperparams
|
backpack
|
positive
|
def sentence_to_token_ids(sentence, vocabulary, tokenizer=None, normalize_digits=False):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Args:
sentence: the sentence in bytes format to convert to token-ids.
vocabulary: a dictionary mapping tokens to integers.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
Returns:
a list of integers, the token-ids for the sentence.
"""
if tokenizer:
words = tokenizer(sentence)
else:
<DeepExtract>
words = []
for space_separated_fragment in sentence.strip().split():
l = _WORD_SPLIT.split(space_separated_fragment)
words.extend(l)
words = [w for w in words if w]
</DeepExtract>
if not normalize_digits:
return [vocabulary.get(w, UNK_ID) for w in words]
return [vocabulary.get(_DIGIT_RE.sub(b'0', w), UNK_ID) for w in words]
|
def sentence_to_token_ids(sentence, vocabulary, tokenizer=None, normalize_digits=False):
"""Convert a string to list of integers representing token-ids.
For example, a sentence "I have a dog" may become tokenized into
["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2,
"a": 4, "dog": 7"} this function will return [1, 2, 4, 7].
Args:
sentence: the sentence in bytes format to convert to token-ids.
vocabulary: a dictionary mapping tokens to integers.
tokenizer: a function to use to tokenize each sentence;
if None, basic_tokenizer will be used.
normalize_digits: Boolean; if true, all digits are replaced by 0s.
Returns:
a list of integers, the token-ids for the sentence.
"""
if tokenizer:
words = tokenizer(sentence)
else:
words = []
for space_separated_fragment in sentence.strip().split():
l = _WORD_SPLIT.split(space_separated_fragment)
words.extend(l)
words = [w for w in words if w]
if not normalize_digits:
return [vocabulary.get(w, UNK_ID) for w in words]
return [vocabulary.get(_DIGIT_RE.sub(b'0', w), UNK_ID) for w in words]
|
DeepAffinity
|
positive
|